Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-31 08:08:18 -05:00)

Compare commits: poc/option...e2e-debugg (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 35720e3f71 | |
| | 6effcf5d53 | |
| | bd778dad3a | |
| | ceadf6e5c9 | |
| | 22769ed486 | |
| | b960e54e00 | |
@@ -27,7 +27,6 @@ go_library(
        "receive_blob.go",
        "receive_block.go",
        "receive_data_column.go",
        "receive_proof.go",
        "service.go",
        "setup_forkchoice.go",
        "tracked_proposer.go",
@@ -50,7 +49,6 @@ go_library(
        "//beacon-chain/core/electra:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
@@ -76,7 +74,6 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -5,7 +5,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/async/event"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
    statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/db"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -227,14 +226,6 @@ func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
    }
}

// WithProofStorage sets the proof storage backend for the blockchain service.
func WithProofStorage(p *filesystem.ProofStorage) Option {
    return func(s *Service) error {
        s.proofStorage = p
        return nil
    }
}

// WithSyncChecker sets the sync checker for the blockchain service.
func WithSyncChecker(checker Checker) Option {
    return func(s *Service) error {
@@ -275,10 +266,3 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
        return nil
    }
}

// WithOperationNotifier sets the operation notifier for the blockchain service.
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
    return func(s *Service) error {
        s.cfg.OperationNotifier = operationNotifier
        return nil
    }
}

@@ -15,7 +15,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v7/config/features"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
@@ -114,7 +113,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
            s.updateCachesPostBlockProcessing(cfg)
        }()
    }

    return nil
}

@@ -663,17 +661,10 @@ func (s *Service) isDataAvailable(
        return errors.New("invalid nil beacon block")
    }

    root, blockVersion := roBlock.Root(), roBlock.Version()
    root := roBlock.Root()
    blockVersion := block.Version()
    if blockVersion >= version.Fulu {
        if err := s.areExecutionProofsAvailable(ctx, roBlock); err != nil {
            return fmt.Errorf("are execution proofs available: %w", err)
        }

        if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
            return fmt.Errorf("are data columns available: %w", err)
        }

        return nil
        return s.areDataColumnsAvailable(ctx, root, block)
    }

    if blockVersion >= version.Deneb {
@@ -683,77 +674,6 @@ func (s *Service) isDataAvailable(
    return nil
}

// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, roBlock consensusblocks.ROBlock) error {
    // Return early if zkVM features are disabled (no need to check for execution proofs),
    // or if proof generation is enabled (we will generate proofs ourselves).
    if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
        return nil
    }

    root, slot := roBlock.Root(), roBlock.Block().Slot()

    requiredProofCount := params.BeaconConfig().MinProofsRequired
    log := log.WithFields(logrus.Fields{
        "root":               fmt.Sprintf("%#x", root),
        "slot":               slot,
        "requiredProofCount": requiredProofCount,
    })

    // Subscribe to execution proofs newly stored in the database.
    subscription, identChan := s.proofStorage.Subscribe()
    defer subscription.Unsubscribe()

    // Return early if we already have enough proofs.
    if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
        log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
        return nil
    }

    // Log for DA checks that cross over into the next slot; helpful for debugging.
    nextSlot, err := slots.StartTime(s.genesisTime, roBlock.Block().Slot()+1)
    if err != nil {
        return fmt.Errorf("start time: %w", err)
    }

    // Avoid logging if the DA check is called after the next slot start.
    if nextSlot.After(time.Now()) {
        timer := time.AfterFunc(time.Until(nextSlot), func() {
            actualCount := uint64(s.proofStorage.Summary(root).Count())
            if actualCount >= requiredProofCount {
                return
            }

            log.WithField("proofsRetrieved", actualCount).Warning("Execution proofs still missing at slot end")
        })

        defer timer.Stop()
    }

    // Some proofs are missing; wait for them.
    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case proofIdent := <-identChan:
            // Skip if the proof is for a different block.
            if proofIdent.BlockRoot != root {
                continue
            }

            // Return if we have enough proofs.
            if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
                log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
                return nil
            }
        }
    }
}

// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
@@ -890,7 +810,14 @@ func (s *Service) areDataColumnsAvailable(
        }

        case <-ctx.Done():
            return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, helpers.SortedPrettySliceFromMap(missing))
            var missingIndices any = "all"
            missingIndicesCount := len(missing)

            if missingIndicesCount < fieldparams.NumberOfColumns {
                missingIndices = helpers.SortedPrettySliceFromMap(missing)
            }

            return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
        }
    }
}

@@ -60,12 +60,6 @@ type DataColumnReceiver interface {
    ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
}

// ProofReceiver interface defines the methods of chain service for receiving new
// execution proofs.
type ProofReceiver interface {
    ReceiveProof(proof *ethpb.ExecutionProof) error
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
    ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)

@@ -1,15 +0,0 @@
package blockchain

import (
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)

// ReceiveProof saves an execution proof to storage.
func (s *Service) ReceiveProof(proof *ethpb.ExecutionProof) error {
    if err := s.proofStorage.Save([]*ethpb.ExecutionProof{proof}); err != nil {
        return errors.Wrap(err, "save proof")
    }

    return nil
}
@@ -12,7 +12,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/async/event"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
    statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -65,7 +64,6 @@ type Service struct {
    blockBeingSynced               *currentlySyncingBlock
    blobStorage                    *filesystem.BlobStorage
    dataColumnStorage              *filesystem.DataColumnStorage
    proofStorage                   *filesystem.ProofStorage
    slasherEnabled                 bool
    lcStore                        *lightClient.Store
    startWaitingDataColumnSidecars chan bool // for testing purposes only
@@ -88,7 +86,6 @@ type config struct {
    P2P               p2p.Accessor
    MaxRoutines       int
    StateNotifier     statefeed.Notifier
    OperationNotifier operation.Notifier
    ForkChoiceStore   f.ForkChoicer
    AttService        *attestations.Service
    StateGen          *stategen.State
@@ -212,8 +209,7 @@ func (s *Service) Start() {
    if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
        log.Fatal(err)
    }

    go s.spawnProcessAttestationsRoutine()
    s.spawnProcessAttestationsRoutine()
    go s.runLateBlockTasks()
}

@@ -75,7 +75,6 @@ type ChainService struct {
    SyncingRoot  [32]byte
    Blobs        []blocks.VerifiedROBlob
    DataColumns  []blocks.VerifiedRODataColumn
    Proofs       []*ethpb.ExecutionProof
    TargetRoot   [32]byte
    MockHeadSlot *primitives.Slot
}
@@ -758,12 +757,6 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
    return nil
}

// ReceiveProof implements the same method in the chain service.
func (c *ChainService) ReceiveProof(proof *ethpb.ExecutionProof) error {
    c.Proofs = append(c.Proofs, proof)
    return nil
}

// DependentRootForEpoch mocks the same method in the chain service.
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
    return c.TargetRoot, nil

@@ -46,9 +46,6 @@ const (

    // DataColumnReceived is sent after a data column has been seen after gossip validation rules.
    DataColumnReceived = 12

    // ExecutionProofReceived is sent after an execution proof object has been received from gossip or RPC.
    ExecutionProofReceived = 13
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,11 +77,6 @@ type BLSToExecutionChangeReceivedData struct {
    Change *ethpb.SignedBLSToExecutionChange
}

// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
    ExecutionProof *ethpb.ExecutionProof
}

// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
    Blob *blocks.VerifiedROBlob

@@ -7,6 +7,7 @@ go_library(
        "cache.go",
        "data_column.go",
        "data_column_cache.go",
        "doc.go",
        "iteration.go",
        "layout.go",
        "layout_by_epoch.go",
@@ -14,8 +15,6 @@ go_library(
        "log.go",
        "metrics.go",
        "mock.go",
        "proof.go",
        "proof_cache.go",
        "pruner.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem",
@@ -31,7 +30,6 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//io/file:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/logging:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
@@ -54,7 +52,6 @@ go_test(
        "iteration_test.go",
        "layout_test.go",
        "migration_test.go",
        "proof_test.go",
        "pruner_test.go",
    ],
    embed = [":go_default_library"],

beacon-chain/db/filesystem/doc.go (new file, 104 lines)
@@ -0,0 +1,104 @@
package filesystem

// nolint:dupword
/*
Data column sidecars storage documentation
==========================================

File organisation
-----------------
- The first byte represents the version of the file structure (up to 0xff = 255).
  We set it to 0x01.
  Note: This is not strictly needed, but it will help a lot if, in the future,
  we want to modify the file structure.
- The next 4 bytes represent the size of an SSZ encoded data column sidecar.
  (See the `Computation of the maximum size of a DataColumnSidecar` section for a description
  of how this value is computed.)
- The next 128 bytes represent the index in the file of a given column.
  The first bit of each byte in the index is set to 0 if there is no data column,
  and set to 1 if there is a data column.
  The remaining 7 bits (from 0 to 127) represent the position of the data column in the file.
  This sentinel bit is needed to distinguish between a column stored at position 0 and no column at all.
  Example: If the column with index 5 is stored at position 3 in the file, then indices[5] = 0x80 + 0x03 = 0x83.
- The rest of the file is the SSZ encoded data column sidecars, stored back to back.


|-------------------------------------------|------------------------------------------------------------------------------------|
| Byte offset                                | Description                                                                        |
|-------------------------------------------|------------------------------------------------------------------------------------|
| 0                                          | version (1 byte) | sszEncodedDataColumnSidecarSize (4 bytes) | indices (128 bytes) |
| 133 + 0*sszEncodedDataColumnSidecarSize    | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)                |
| 133 + 1*sszEncodedDataColumnSidecarSize    | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)                |
| 133 + 2*sszEncodedDataColumnSidecarSize    | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)                |
| ...                                        | ...                                                                                |
| 133 + 127*sszEncodedDataColumnSidecarSize  | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)                |
|-------------------------------------------|------------------------------------------------------------------------------------|

Each file is named after the block root the data columns are committed to.
Example: `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`

Database organisation
---------------------
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which is the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- All files for that epoch are stored in the epoch directory.

Example:
data-columns
├── 0
│   ├── 3638
│   │   ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│   │   ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│   │   ├── ...
│   │   ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│   │   └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│   └── 3639
│       ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│       ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│       ├── ...
│       ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│       └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
    ├── 4096
    │   ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
    │   ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
    │   ├── ...
    │   ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
    │   └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
    └── 4097
        ├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
        ├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
        ├── ...
        ├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
        └── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs

Computation of the maximum size of a DataColumnSidecar
------------------------------------------------------
https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar

class DataColumnSidecar(Container):
    index: ColumnIndex  # Index of column in extended matrix
    column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    signed_block_header: SignedBeaconBlockHeader
    kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]

- index: 8 bytes (ColumnIndex is a uint64)
- column: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 64 (FIELD_ELEMENTS_PER_CELL) * 32 bytes (BYTES_PER_FIELD_ELEMENT) = 8,388,608 bytes
- kzg_commitments: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGCommitment) = 196,608 bytes
- kzg_proofs: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGProof) = 196,608 bytes
- signed_block_header: 8 bytes (Slot) + 8 bytes (ValidatorIndex) + 3 * 32 bytes (Root) + 96 bytes (BLSSignature) = 208 bytes
- kzg_commitments_inclusion_proof: 4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) * 32 bytes = 128 bytes

TOTAL: 8,782,160 bytes = 70,257,280 bits
log(70,257,280) / log(2) ~= 26.07

==> 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.

The maximum size of an SSZ encoded data column can be 2**32 bits = 536,870,912 bytes,
which leaves room of 536,870,912 bytes - 8,782,160 bytes ~= 503 megabytes to store the extra data needed by SSZ encoding (which is more than enough).
*/
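To make the index-byte encoding described in the comment above concrete, here is a minimal, self-contained Go sketch. The helper names are illustrative only and are not part of the package's actual API.

```go
package main

import "fmt"

const sentinelBit = 0x80 // high bit: column present

// encodeIndexEntry stores "column present at filePosition" in one index byte.
func encodeIndexEntry(filePosition byte) byte {
	return sentinelBit | (filePosition & 0x7f)
}

// decodeIndexEntry returns the file position encoded in an index byte, and
// whether the column is present at all (the sentinel bit).
func decodeIndexEntry(entry byte) (position byte, present bool) {
	return entry & 0x7f, entry&sentinelBit != 0
}

func main() {
	// Column with index 5 stored at position 3 in the file: indices[5] = 0x83.
	entry := encodeIndexEntry(3)
	fmt.Printf("%#x\n", entry) // 0x83
	pos, ok := decodeIndexEntry(entry)
	fmt.Println(pos, ok) // 3 true
}
```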
@@ -1,197 +0,0 @@
# Filesystem storage documentation

This document describes the file formats and database organization for storing data column sidecars and execution proofs.

---

# Data column sidecars

## File organisation

- The first byte represents the version of the file structure (up to `0xff = 255`).
  We set it to `0x01`.

  _(Note: This is not strictly needed, but it will help a lot if, in the future, we want to modify the file structure.)_

- The next 4 bytes represent the size of an SSZ encoded data column sidecar.
  (See the [Computation of the maximum size of a DataColumnSidecar](#computation-of-the-maximum-size-of-a-datacolumnsidecar) section for a description
  of how this value is computed.)

- The next 128 bytes represent the index in the file of a given column.
  The first bit of each byte in the index is set to 0 if there is no data column,
  and set to 1 if there is a data column.
  The remaining 7 bits (from 0 to 127) represent the position of the data column in the file.
  This sentinel bit is needed to distinguish between a column stored at position 0 and no column at all.

  **Example:** If the column with index 5 is stored at position 3 in the file, then `indices[5] = 0x80 + 0x03 = 0x83`.

- The rest of the file is the SSZ encoded data column sidecars, stored back to back.

### File layout

| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| sszEncodedDataColumnSidecarSize (4 bytes) \| indices (128 bytes)` |
| `133 + 0×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 1×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 2×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| ... | ... |
| `133 + 127×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |

Each file is named after the block root the data columns are committed to.

**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`

## Database organisation

SSZ encoded data column sidecars are stored following the `by-epoch` layout.

- The first layer is a directory corresponding to the `period`, which is the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- All files for that epoch are stored in the epoch directory, as shown in the example tree and the path sketch below.

### Example directory structure

```
data-columns
├── 0
│   ├── 3638
│   │   ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│   │   ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│   │   ├── ...
│   │   ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│   │   └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│   └── 3639
│       ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│       ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│       ├── ...
│       ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│       └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
    ├── 4096
    │   ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
    │   ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
    │   ├── ...
    │   ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
    │   └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
    └── 4097
        ├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
        ├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
        ├── ...
        ├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
        └── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
```
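As an illustration of the by-epoch layout, a hypothetical path computation could look like the following sketch; `sidecarFilePath` is not the package's actual helper.

```go
package main

import "fmt"

// sidecarFilePath builds the by-epoch path <period>/<epoch>/<blockRoot>.sszs,
// where period = epoch / 4096.
func sidecarFilePath(blockRoot [32]byte, epoch uint64) string {
	period := epoch / 4096
	return fmt.Sprintf("%d/%d/%#x.sszs", period, epoch, blockRoot)
}

func main() {
	root := [32]byte{0x25, 0x9c}
	fmt.Println(sidecarFilePath(root, 3638)) // 0/3638/0x259c00....sszs
}
```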
## Computation of the maximum size of a `DataColumnSidecar`

Reference: [Ethereum Consensus Specs - Fulu DAS Core](https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar)

```python
class DataColumnSidecar(Container):
    index: ColumnIndex  # Index of column in extended matrix
    column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    signed_block_header: SignedBeaconBlockHeader
    kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
```

### Size breakdown

| Field | Calculation | Size |
|-------|-------------|------|
| `index` | `ColumnIndex (uint64)` | `8 bytes` |
| `column` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 64 (FIELD_ELEMENTS_PER_CELL) × 32 bytes (BYTES_PER_FIELD_ELEMENT)` | `8,388,608 bytes` |
| `kzg_commitments` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGCommitment)` | `196,608 bytes` |
| `kzg_proofs` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGProof)` | `196,608 bytes` |
| `signed_block_header` | `8 bytes (Slot) + 8 bytes (ValidatorIndex) + 3 × 32 bytes (Root) + 96 bytes (BLSSignature)` | `208 bytes` |
| `kzg_commitments_inclusion_proof` | `4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) × 32 bytes` | `128 bytes` |

**TOTAL:** `8,782,160 bytes = 70,257,280 bits`

```
log(70,257,280) / log(2) ≈ 26.07
```

**Conclusion:** 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.

The maximum size of an SSZ encoded data column can be `2³² bits = 536,870,912 bytes`,
which leaves room of `536,870,912 bytes - 8,782,160 bytes ≈ 503 megabytes` to store the extra data needed by SSZ encoding (which is more than enough).
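As a standalone sanity check of the totals above (a sketch using the field sizes from the size breakdown table, not code from this package):

```go
package main

import "fmt"

func main() {
	const (
		index          = 8                 // ColumnIndex (uint64)
		column         = 4096 * 64 * 32    // 8,388,608 bytes
		kzgCommitments = 4096 * 48         // 196,608 bytes
		kzgProofs      = 4096 * 48         // 196,608 bytes
		signedHeader   = 8 + 8 + 3*32 + 96 // 208 bytes
		inclusionProof = 4 * 32            // 128 bytes
	)
	total := index + column + kzgCommitments + kzgProofs + signedHeader + inclusionProof
	fmt.Println(total)                 // 8782160
	fmt.Println(uint64(total) < 1<<32) // true: a 4-byte size field suffices
}
```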

---

# Execution proofs

## File organisation

Unlike data column sidecars (which have a fixed size per block), execution proofs have variable sizes.
To handle this, we use an offset table that stores the position and size of each proof.

- The first byte represents the version of the file structure (up to `0xff = 255`).
  We set it to `0x01`.

- The next 64 bytes represent the offset table with 8 slots (one per proof type).
  Each slot contains:
  - 4 bytes for the offset (relative to the end of the header)
  - 4 bytes for the size of the SSZ-encoded proof

  If the size is 0, the proof is not present.

- The rest of the file contains the SSZ encoded proofs, stored contiguously.

### File layout

| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| offsetTable (64 bytes)` |
| `65 + offsetTable[0].offset` | `sszEncodedProof (offsetTable[0].size bytes)` |
| `65 + offsetTable[1].offset` | `sszEncodedProof (offsetTable[1].size bytes)` |
| ... | ... |
| `65 + offsetTable[7].offset` | `sszEncodedProof (offsetTable[7].size bytes)` |

**Header size:** 1 (version) + 64 (offset table) = **65 bytes**

### Offset table entry format

Each slot in the offset table (8 bytes per slot):

- `offset` (4 bytes, big-endian): Offset from the end of the header where the proof data begins
- `size` (4 bytes, big-endian): Size of the SSZ-encoded proof in bytes

**Note:** Offsets are relative to the end of the header (byte 65), not the start of the file.
This maximizes the usable range of the 4-byte offset field.

### Reading a proof with `proofID=N` (O(1) access)

1. Read the header (65 bytes)
2. Check slot N: if `size == 0`, the proof is not present
3. Seek to `65 + offset`, read `size` bytes, and SSZ unmarshal
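The three steps translate directly into code. The following is a simplified, standalone sketch of the read path; the package's actual implementation is the `Get` method shown later in this diff.

```go
package proofread

import (
	"encoding/binary"
	"errors"
	"io"
)

const headerSize = 65 // 1 version byte + 8 slots * 8 bytes

// readProofAt returns the raw SSZ bytes for slot proofID, or nil if absent.
func readProofAt(f io.ReaderAt, proofID int) ([]byte, error) {
	var header [headerSize]byte
	if _, err := f.ReadAt(header[:], 0); err != nil {
		return nil, err
	}
	if header[0] != 0x01 {
		return nil, errors.New("unexpected file version")
	}
	slot := header[1+proofID*8 : 1+proofID*8+8]
	offset := binary.BigEndian.Uint32(slot[:4])
	size := binary.BigEndian.Uint32(slot[4:])
	if size == 0 {
		return nil, nil // proof not present
	}
	buf := make([]byte, size)
	// Offsets are relative to the end of the 65-byte header.
	if _, err := f.ReadAt(buf, headerSize+int64(offset)); err != nil {
		return nil, err
	}
	return buf, nil
}
```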

Each file is named after the block root.

**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`

## Database organisation

SSZ encoded execution proofs are stored following the same `by-epoch` layout as data column sidecars.

- The first layer is a directory corresponding to the `period`, which is the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- All files for that epoch are stored in the epoch directory.

### Example directory structure

```
proofs
├── 0
│   ├── 100
│   │   ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│   │   ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│   │   └── ...
│   └── 101
│       ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│       └── ...
└── 1
    └── 4096
        ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
        └── ...
```
@@ -70,36 +70,4 @@ var (
        Name: "data_column_prune_latency",
        Help: "Latency of data column prune operations in milliseconds",
    })

    // Proofs
    proofSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
        Name:    "proof_storage_save_latency",
        Help:    "Latency of proof storage save operations in milliseconds",
        Buckets: []float64{3, 5, 7, 9, 11, 13, 20, 50},
    })
    proofFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
        Name:    "proof_storage_get_latency",
        Help:    "Latency of proof storage get operations in milliseconds",
        Buckets: []float64{3, 5, 7, 9, 11, 13},
    })
    proofPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
        Name: "proof_pruned",
        Help: "Number of proof files pruned.",
    })
    proofWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
        Name: "proof_written",
        Help: "Number of proof files written",
    })
    proofDiskCount = promauto.NewGauge(prometheus.GaugeOpts{
        Name: "proof_disk_count",
        Help: "Approximate number of proof files in storage",
    })
    proofFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
        Name: "proof_file_sync_latency",
        Help: "Latency of sync operations when saving proofs in milliseconds",
    })
    proofPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
        Name: "proof_prune_latency",
        Help: "Latency of proof prune operations in milliseconds",
    })
)

@@ -144,45 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
    fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
    return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}

// Proofs
// ------

// NewEphemeralProofStorage should only be used for tests.
// The instance of ProofStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralProofStorage(t testing.TB, opts ...ProofStorageOption) *ProofStorage {
    return NewWarmedEphemeralProofStorageUsingFs(t, afero.NewMemMapFs(), opts...)
}

// NewEphemeralProofStorageAndFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the ProofStorage API.
func NewEphemeralProofStorageAndFs(t testing.TB, opts ...ProofStorageOption) (afero.Fs, *ProofStorage) {
    fs := afero.NewMemMapFs()
    ps := NewWarmedEphemeralProofStorageUsingFs(t, fs, opts...)
    return fs, ps
}

// NewEphemeralProofStorageUsingFs creates a ProofStorage backed by the provided filesystem.
func NewEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
    defaultOpts := []ProofStorageOption{
        WithProofRetentionEpochs(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest),
        WithProofFs(fs),
    }
    // User opts come last so they can override defaults.
    allOpts := append(defaultOpts, opts...)

    ps, err := NewProofStorage(context.Background(), allOpts...)
    if err != nil {
        t.Fatal(err)
    }

    return ps
}

// NewWarmedEphemeralProofStorageUsingFs creates a ProofStorage with a warmed cache.
func NewWarmedEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
    ps := NewEphemeralProofStorageUsingFs(t, fs, opts...)
    ps.WarmCache()
    return ps
}
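A hypothetical round-trip test built on these helpers could look like the sketch below. It assumes only the ExecutionProof fields used elsewhere in this diff (ProofId, Slot, BlockRoot) and would live in the package's test files.

```go
// Hypothetical usage sketch of the ephemeral proof storage helpers.
func TestProofRoundTrip(t *testing.T) {
	ps := NewEphemeralProofStorage(t)

	var root [32]byte
	root[0] = 0x01
	proof := &ethpb.ExecutionProof{
		ProofId:   0,
		Slot:      1,
		BlockRoot: root[:],
	}

	if err := ps.Save([]*ethpb.ExecutionProof{proof}); err != nil {
		t.Fatal(err)
	}

	// nil proofIDs means "return all stored proofs".
	got, err := ps.Get(root, nil)
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 1 {
		t.Fatalf("expected 1 proof, got %d", len(got))
	}
}
```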
@@ -1,964 +0,0 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async"
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/io/file"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
const (
|
||||
proofVersion = 0x01
|
||||
proofVersionSize = 1 // bytes
|
||||
maxProofTypes = 8 // ExecutionProofId max value (EXECUTION_PROOF_TYPE_COUNT)
|
||||
proofOffsetSize = 4 // bytes for offset (uint32)
|
||||
proofSizeSize = 4 // bytes for size (uint32)
|
||||
proofSlotSize = proofOffsetSize + proofSizeSize // 8 bytes per slot
|
||||
proofOffsetTableSize = maxProofTypes * proofSlotSize // 64 bytes
|
||||
proofHeaderSize = proofVersionSize + proofOffsetTableSize // 65 bytes
|
||||
proofsFileExtension = "sszs"
|
||||
proofPrunePeriod = 1 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
errProofIDTooLarge = errors.New("proof ID too large")
|
||||
errWrongProofBytesWritten = errors.New("wrong number of bytes written")
|
||||
errWrongProofVersion = errors.New("wrong proof version")
|
||||
errWrongProofBytesRead = errors.New("wrong number of bytes read")
|
||||
errNoProofBasePath = errors.New("ProofStorage base path not specified in init")
|
||||
errProofAlreadyExists = errors.New("proof already exists")
|
||||
)
|
||||
|
||||
type (
|
||||
// ProofIdent is a unique identifier for a proof.
|
||||
ProofIdent struct {
|
||||
BlockRoot [fieldparams.RootLength]byte
|
||||
Epoch primitives.Epoch
|
||||
ProofID uint64
|
||||
}
|
||||
|
||||
// ProofsIdent is a collection of unique identifiers for proofs.
|
||||
ProofsIdent struct {
|
||||
BlockRoot [fieldparams.RootLength]byte
|
||||
Epoch primitives.Epoch
|
||||
ProofIDs []uint64
|
||||
}
|
||||
|
||||
// ProofStorage is the concrete implementation of the filesystem backend for saving and retrieving ExecutionProofs.
|
||||
ProofStorage struct {
|
||||
base string
|
||||
retentionEpochs primitives.Epoch
|
||||
fs afero.Fs
|
||||
cache *proofCache
|
||||
proofFeed *event.Feed
|
||||
pruneMu sync.RWMutex
|
||||
|
||||
mu sync.Mutex // protects muChans
|
||||
muChans map[[fieldparams.RootLength]byte]*proofMuChan
|
||||
}
|
||||
|
||||
// ProofStorageOption is a functional option for configuring a ProofStorage.
|
||||
ProofStorageOption func(*ProofStorage) error
|
||||
|
||||
proofMuChan struct {
|
||||
mu *sync.RWMutex
|
||||
toStore chan []*ethpb.ExecutionProof
|
||||
}
|
||||
|
||||
// proofSlotEntry represents the offset and size for a proof in the file.
|
||||
proofSlotEntry struct {
|
||||
offset uint32
|
||||
size uint32
|
||||
}
|
||||
|
||||
// proofOffsetTable is the offset table with 8 slots indexed by proofID.
|
||||
proofOffsetTable [maxProofTypes]proofSlotEntry
|
||||
|
||||
// proofFileMetadata contains metadata extracted from a proof file path.
|
||||
proofFileMetadata struct {
|
||||
period uint64
|
||||
epoch primitives.Epoch
|
||||
blockRoot [fieldparams.RootLength]byte
|
||||
}
|
||||
)
|
||||
|
||||
// WithProofBasePath is a required option that sets the base path of proof storage.
|
||||
func WithProofBasePath(base string) ProofStorageOption {
|
||||
return func(ps *ProofStorage) error {
|
||||
ps.base = base
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithProofRetentionEpochs is an option that changes the number of epochs proofs will be persisted.
|
||||
func WithProofRetentionEpochs(e primitives.Epoch) ProofStorageOption {
|
||||
return func(ps *ProofStorage) error {
|
||||
ps.retentionEpochs = e
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithProofFs allows the afero.Fs implementation to be customized.
|
||||
// Used by tests to substitute an in-memory filesystem.
|
||||
func WithProofFs(fs afero.Fs) ProofStorageOption {
|
||||
return func(ps *ProofStorage) error {
|
||||
ps.fs = fs
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewProofStorage creates a new instance of the ProofStorage object.
|
||||
func NewProofStorage(ctx context.Context, opts ...ProofStorageOption) (*ProofStorage, error) {
|
||||
storage := &ProofStorage{
|
||||
proofFeed: new(event.Feed),
|
||||
muChans: make(map[[fieldparams.RootLength]byte]*proofMuChan),
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
if err := o(storage); err != nil {
|
||||
return nil, fmt.Errorf("failed to create proof storage: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Allow tests to set up a different fs using WithProofFs.
|
||||
if storage.fs == nil {
|
||||
if storage.base == "" {
|
||||
return nil, errNoProofBasePath
|
||||
}
|
||||
|
||||
storage.base = path.Clean(storage.base)
|
||||
if err := file.MkdirAll(storage.base); err != nil {
|
||||
return nil, fmt.Errorf("failed to create proof storage at %s: %w", storage.base, err)
|
||||
}
|
||||
|
||||
storage.fs = afero.NewBasePathFs(afero.NewOsFs(), storage.base)
|
||||
}
|
||||
|
||||
storage.cache = newProofCache()
|
||||
|
||||
async.RunEvery(ctx, proofPrunePeriod, func() {
|
||||
storage.pruneMu.Lock()
|
||||
defer storage.pruneMu.Unlock()
|
||||
|
||||
storage.prune()
|
||||
})
|
||||
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
// WarmCache warms the cache of the proof filesystem.
|
||||
func (ps *ProofStorage) WarmCache() {
|
||||
start := time.Now()
|
||||
log.Info("Proof filesystem cache warm-up started")
|
||||
|
||||
ps.pruneMu.Lock()
|
||||
defer ps.pruneMu.Unlock()
|
||||
|
||||
// List all period directories
|
||||
periodFileInfos, err := afero.ReadDir(ps.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading top directory during proof warm cache")
|
||||
return
|
||||
}
|
||||
|
||||
// Iterate through periods
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
if !periodFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
periodPath := periodFileInfo.Name()
|
||||
|
||||
// List all epoch directories in this period
|
||||
epochFileInfos, err := afero.ReadDir(ps.fs, periodPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during proof warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
// Iterate through epochs
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
if !epochFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
epochPath := path.Join(periodPath, epochFileInfo.Name())
|
||||
|
||||
// List all .sszs files in this epoch
|
||||
files, err := ps.listProofEpochFiles(epochPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during proof warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
// Process all files in this epoch in parallel
|
||||
ps.processProofEpochFiles(files)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem
|
||||
ps.prune()
|
||||
|
||||
totalElapsed := time.Since(start)
|
||||
log.WithField("elapsed", totalElapsed).Info("Proof filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// listProofEpochFiles lists all .sszs files in an epoch directory.
|
||||
func (ps *ProofStorage) listProofEpochFiles(epochPath string) ([]string, error) {
|
||||
fileInfos, err := afero.ReadDir(ps.fs, epochPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read epoch directory: %w", err)
|
||||
}
|
||||
|
||||
files := make([]string, 0, len(fileInfos))
|
||||
for _, fileInfo := range fileInfos {
|
||||
if fileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := fileInfo.Name()
|
||||
if strings.HasSuffix(fileName, "."+proofsFileExtension) {
|
||||
files = append(files, path.Join(epochPath, fileName))
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// processProofEpochFiles processes all proof files in an epoch in parallel.
|
||||
func (ps *ProofStorage) processProofEpochFiles(files []string) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, filePath := range files {
|
||||
wg.Go(func() {
|
||||
if err := ps.processProofFile(filePath); err != nil {
|
||||
log.WithError(err).WithField("file", filePath).Error("Error processing proof file during warm cache")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// processProofFile processes a single .sszs proof file for cache warming.
|
||||
func (ps *ProofStorage) processProofFile(filePath string) error {
|
||||
// Extract metadata from the file path
|
||||
fileMetadata, err := extractProofFileMetadata(filePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("extract proof file metadata: %w", err)
|
||||
}
|
||||
|
||||
// Open the file
|
||||
f, err := ps.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if closeErr := f.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during proof warm cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the offset table
|
||||
offsetTable, _, err := ps.readHeader(f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read header: %w", err)
|
||||
}
|
||||
|
||||
// Add all present proofs to the cache
|
||||
for proofID, entry := range offsetTable {
|
||||
if entry.size == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
proofIdent := ProofIdent{
|
||||
BlockRoot: fileMetadata.blockRoot,
|
||||
Epoch: fileMetadata.epoch,
|
||||
ProofID: uint64(proofID),
|
||||
}
|
||||
|
||||
ps.cache.set(proofIdent)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Summary returns the ProofStorageSummary for a given root.
|
||||
func (ps *ProofStorage) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
|
||||
return ps.cache.Summary(root)
|
||||
}
|
||||
|
||||
// Save saves execution proofs into the database.
|
||||
func (ps *ProofStorage) Save(proofs []*ethpb.ExecutionProof) error {
|
||||
startTime := time.Now()
|
||||
|
||||
if len(proofs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
proofsByRoot := make(map[[fieldparams.RootLength]byte][]*ethpb.ExecutionProof)
|
||||
|
||||
// Group proofs by root.
|
||||
for _, proof := range proofs {
|
||||
// Check if the proof ID is valid.
|
||||
proofID := uint64(proof.ProofId)
|
||||
if proofID >= maxProofTypes {
|
||||
return errProofIDTooLarge
|
||||
}
|
||||
|
||||
// Extract block root from proof.
|
||||
var blockRoot [fieldparams.RootLength]byte
|
||||
copy(blockRoot[:], proof.BlockRoot)
|
||||
|
||||
// Group proofs by root.
|
||||
proofsByRoot[blockRoot] = append(proofsByRoot[blockRoot], proof)
|
||||
}
|
||||
|
||||
for blockRoot, proofsForRoot := range proofsByRoot {
|
||||
// Compute epoch from slot.
|
||||
epoch := slots.ToEpoch(proofsForRoot[0].Slot)
|
||||
|
||||
// Save proofs in the filesystem.
|
||||
if err := ps.saveFilesystem(blockRoot, epoch, proofsForRoot); err != nil {
|
||||
return fmt.Errorf("save filesystem: %w", err)
|
||||
}
|
||||
|
||||
// Get all proof IDs.
|
||||
proofIDs := make([]uint64, 0, len(proofsForRoot))
|
||||
for _, proof := range proofsForRoot {
|
||||
proofIDs = append(proofIDs, uint64(proof.ProofId))
|
||||
}
|
||||
|
||||
// Compute the proofs ident.
|
||||
proofsIdent := ProofsIdent{BlockRoot: blockRoot, Epoch: epoch, ProofIDs: proofIDs}
|
||||
|
||||
// Set proofs in the cache.
|
||||
ps.cache.setMultiple(proofsIdent)
|
||||
|
||||
// Notify the proof feed.
|
||||
ps.proofFeed.Send(proofsIdent)
|
||||
}
|
||||
|
||||
proofSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveFilesystem saves proofs into the database.
|
||||
// This function expects all proofs to belong to the same block.
|
||||
func (ps *ProofStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, proofs []*ethpb.ExecutionProof) error {
|
||||
// Compute the file path.
|
||||
filePath := proofFilePath(root, epoch)
|
||||
|
||||
ps.pruneMu.RLock()
|
||||
defer ps.pruneMu.RUnlock()
|
||||
|
||||
fileMu, toStore := ps.fileMutexChan(root)
|
||||
toStore <- proofs
|
||||
|
||||
fileMu.Lock()
|
||||
defer fileMu.Unlock()
|
||||
|
||||
// Check if the file exists.
|
||||
exists, err := afero.Exists(ps.fs, filePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("afero exists: %w", err)
|
||||
}
|
||||
|
||||
if exists {
|
||||
if err := ps.saveProofExistingFile(filePath, toStore); err != nil {
|
||||
return fmt.Errorf("save proof existing file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := ps.saveProofNewFile(filePath, toStore); err != nil {
|
||||
return fmt.Errorf("save proof new file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the proof feed.
|
||||
// It returns the subscription and a 1-size buffer channel to receive proof idents.
|
||||
func (ps *ProofStorage) Subscribe() (event.Subscription, <-chan ProofsIdent) {
|
||||
identsChan := make(chan ProofsIdent, 1)
|
||||
subscription := ps.proofFeed.Subscribe(identsChan)
|
||||
return subscription, identsChan
|
||||
}
|
||||
|
||||
// Get retrieves execution proofs from the database.
|
||||
// If one of the requested proofs is not found, it is just skipped.
|
||||
// If proofIDs is nil, then all stored proofs are returned.
|
||||
func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint64) ([]*ethpb.ExecutionProof, error) {
|
||||
ps.pruneMu.RLock()
|
||||
defer ps.pruneMu.RUnlock()
|
||||
|
||||
fileMu, _ := ps.fileMutexChan(root)
|
||||
fileMu.RLock()
|
||||
defer fileMu.RUnlock()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
// Build all proofIDs if none are provided.
|
||||
if proofIDs == nil {
|
||||
proofIDs = make([]uint64, maxProofTypes)
|
||||
for i := range proofIDs {
|
||||
proofIDs[i] = uint64(i)
|
||||
}
|
||||
}
|
||||
|
||||
summary, ok := ps.cache.get(root)
|
||||
if !ok {
|
||||
// Nothing found in db. Exit early.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Check if any requested proofID exists.
|
||||
if !slices.ContainsFunc(proofIDs, summary.HasProof) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the file path.
|
||||
filePath := proofFilePath(root, summary.epoch)
|
||||
|
||||
// Open the proof file.
|
||||
file, err := ps.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("proof file open: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := file.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing proof file")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the header.
|
||||
offsetTable, _, err := ps.readHeader(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read header: %w", err)
|
||||
}
|
||||
|
||||
// Retrieve proofs from the file.
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, len(proofIDs))
|
||||
for _, proofID := range proofIDs {
|
||||
if proofID >= maxProofTypes {
|
||||
continue
|
||||
}
|
||||
|
||||
entry := offsetTable[proofID]
|
||||
// Skip if the proof is not saved.
|
||||
if entry.size == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Seek to the proof offset (offset is relative to end of header).
|
||||
_, err = file.Seek(proofHeaderSize+int64(entry.offset), io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("seek: %w", err)
|
||||
}
|
||||
|
||||
// Read the SSZ encoded proof.
|
||||
sszProof := make([]byte, entry.size)
|
||||
n, err := io.ReadFull(file, sszProof)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read proof: %w", err)
|
||||
}
|
||||
if n != int(entry.size) {
|
||||
return nil, errWrongProofBytesRead
|
||||
}
|
||||
|
||||
// Unmarshal the proof.
|
||||
proof := new(ethpb.ExecutionProof)
|
||||
if err := proof.UnmarshalSSZ(sszProof); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal proof: %w", err)
|
||||
}
|
||||
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
proofFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
return proofs, nil
|
||||
}
|
||||
|
||||
// Remove deletes all proofs for a given root.
|
||||
func (ps *ProofStorage) Remove(blockRoot [fieldparams.RootLength]byte) error {
|
||||
ps.pruneMu.RLock()
|
||||
defer ps.pruneMu.RUnlock()
|
||||
|
||||
fileMu, _ := ps.fileMutexChan(blockRoot)
|
||||
fileMu.Lock()
|
||||
defer fileMu.Unlock()
|
||||
|
||||
summary, ok := ps.cache.get(blockRoot)
|
||||
if !ok {
|
||||
// Nothing found in db. Exit early.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the proofs from the cache.
|
||||
ps.cache.evict(blockRoot)
|
||||
|
||||
// Remove the proof file.
|
||||
filePath := proofFilePath(blockRoot, summary.epoch)
|
||||
if err := ps.fs.Remove(filePath); err != nil {
|
||||
return fmt.Errorf("remove: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clear deletes all files on the filesystem.
|
||||
func (ps *ProofStorage) Clear() error {
|
||||
ps.pruneMu.Lock()
|
||||
defer ps.pruneMu.Unlock()
|
||||
|
||||
dirs, err := listDir(ps.fs, ".")
|
||||
if err != nil {
|
||||
return fmt.Errorf("list dir: %w", err)
|
||||
}
|
||||
|
||||
ps.cache.clear()
|
||||
|
||||
for _, dir := range dirs {
|
||||
if err := ps.fs.RemoveAll(dir); err != nil {
|
||||
return fmt.Errorf("remove all: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveProofNewFile saves proofs to a new file.
|
||||
func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []*ethpb.ExecutionProof) (err error) {
|
||||
// Initialize the offset table.
|
||||
var offsetTable proofOffsetTable
|
||||
|
||||
var sszEncodedProofs []byte
|
||||
currentOffset := uint32(0)
|
||||
|
||||
for {
|
||||
proofs := pullProofChan(inputProofs)
|
||||
if len(proofs) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
for _, proof := range proofs {
|
||||
proofID := uint64(proof.ProofId)
|
||||
if proofID >= maxProofTypes {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip if already in offset table (duplicate).
|
||||
if offsetTable[proofID].size != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// SSZ encode the proof.
|
||||
sszProof, err := proof.MarshalSSZ()
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal proof SSZ: %w", err)
|
||||
}
|
||||
|
||||
proofSize := uint32(len(sszProof))
|
||||
|
||||
// Update offset table.
|
||||
offsetTable[proofID] = proofSlotEntry{
|
||||
offset: currentOffset,
|
||||
size: proofSize,
|
||||
}
|
||||
|
||||
// Append SSZ encoded proof.
|
||||
sszEncodedProofs = append(sszEncodedProofs, sszProof...)
|
||||
currentOffset += proofSize
|
||||
}
|
||||
}
|
||||
|
||||
if len(sszEncodedProofs) == 0 {
|
||||
// Nothing to save.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create directory structure.
|
||||
dir := filepath.Dir(filePath)
|
||||
if err := ps.fs.MkdirAll(dir, directoryPermissions()); err != nil {
|
||||
return fmt.Errorf("mkdir all: %w", err)
|
||||
}
|
||||
|
||||
file, err := ps.fs.Create(filePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create proof file: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := file.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Build the file content.
|
||||
countToWrite := proofHeaderSize + len(sszEncodedProofs)
|
||||
bytes := make([]byte, 0, countToWrite)
|
||||
|
||||
// Write version byte.
|
||||
bytes = append(bytes, byte(proofVersion))
|
||||
|
||||
// Write offset table.
|
||||
bytes = append(bytes, encodeOffsetTable(offsetTable)...)
|
||||
|
||||
// Write SSZ encoded proofs.
|
||||
bytes = append(bytes, sszEncodedProofs...)
|
||||
|
||||
countWritten, err := file.Write(bytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write: %w", err)
|
||||
}
|
||||
if countWritten != countToWrite {
|
||||
return errWrongProofBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return fmt.Errorf("sync: %w", err)
|
||||
}
|
||||
proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveProofExistingFile saves proofs to an existing file.
|
||||
func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan []*ethpb.ExecutionProof) (err error) {
|
||||
// Open the file for read/write.
|
||||
file, err := ps.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
|
||||
if err != nil {
|
||||
return fmt.Errorf("open proof file: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := file.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read current header.
|
||||
offsetTable, fileSize, err := ps.readHeader(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read header: %w", err)
|
||||
}
|
||||
|
||||
var sszEncodedProofs []byte
|
||||
currentOffset := uint32(fileSize - proofHeaderSize)
|
||||
modified := false
|
||||
|
||||
for {
|
||||
proofs := pullProofChan(inputProofs)
|
||||
if len(proofs) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
for _, proof := range proofs {
|
||||
proofID := uint64(proof.ProofId)
|
||||
if proofID >= maxProofTypes {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip if proof already exists.
|
||||
if offsetTable[proofID].size != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// SSZ encode the proof.
|
||||
sszProof, err := proof.MarshalSSZ()
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal proof SSZ: %w", err)
|
||||
}
|
||||
|
||||
proofSize := uint32(len(sszProof))
|
||||
|
||||
// Update offset table.
|
||||
offsetTable[proofID] = proofSlotEntry{
|
||||
offset: currentOffset,
|
||||
size: proofSize,
|
||||
}
|
||||
|
||||
// Append SSZ encoded proof.
|
||||
sszEncodedProofs = append(sszEncodedProofs, sszProof...)
|
||||
currentOffset += proofSize
|
||||
modified = true
|
||||
}
|
||||
}
|
||||
|
||||
if !modified {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write updated offset table back to file (at position 1, after version byte).
|
||||
encodedTable := encodeOffsetTable(offsetTable)
|
||||
count, err := file.WriteAt(encodedTable, int64(proofVersionSize))
|
||||
if err != nil {
|
||||
return fmt.Errorf("write offset table: %w", err)
|
||||
}
|
||||
if count != proofOffsetTableSize {
|
||||
return errWrongProofBytesWritten
|
||||
}
|
||||
|
||||
// Append the SSZ encoded proofs to the end of the file.
|
||||
count, err = file.WriteAt(sszEncodedProofs, fileSize)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write SSZ encoded proofs: %w", err)
|
||||
}
|
||||
if count != len(sszEncodedProofs) {
|
||||
return errWrongProofBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return fmt.Errorf("sync: %w", err)
|
||||
}
|
||||
proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readHeader reads the file header and returns the offset table and file size.
func (ps *ProofStorage) readHeader(file afero.File) (proofOffsetTable, int64, error) {
	var header [proofHeaderSize]byte
	countRead, err := file.ReadAt(header[:], 0)
	if err != nil {
		return proofOffsetTable{}, 0, fmt.Errorf("read at: %w", err)
	}
	if countRead != proofHeaderSize {
		return proofOffsetTable{}, 0, errWrongProofBytesRead
	}

	// Check version.
	fileVersion := int(header[0])
	if fileVersion != proofVersion {
		return proofOffsetTable{}, 0, errWrongProofVersion
	}

	// Decode offset table and compute file size.
	var offsetTable proofOffsetTable
	fileSize := int64(proofHeaderSize)
	for i := range offsetTable {
		pos := proofVersionSize + i*proofSlotSize
		offsetTable[i].offset = binary.BigEndian.Uint32(header[pos : pos+proofOffsetSize])
		offsetTable[i].size = binary.BigEndian.Uint32(header[pos+proofOffsetSize : pos+proofSlotSize])
		fileSize += int64(offsetTable[i].size)
	}

	return offsetTable, fileSize, nil
}
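
// Illustrative sketch (not part of the diff): how a caller could use the
// header decoded above to read one proof's bytes. It assumes the layout
// implied by readHeader and the save paths — a version byte, then the fixed
// offset table, then concatenated SSZ blobs, with slot offsets relative to
// the end of the header. readProofBytes is a hypothetical helper name.
func readProofBytes(file afero.File, table proofOffsetTable, proofID uint64) ([]byte, error) {
	entry := table[proofID]
	if entry.size == 0 {
		// A zero size marks an empty slot: this proof is not stored.
		return nil, nil
	}
	buf := make([]byte, entry.size)
	// Absolute file position = header size + offset within the data section.
	if _, err := file.ReadAt(buf, int64(proofHeaderSize)+int64(entry.offset)); err != nil {
		return nil, fmt.Errorf("read proof: %w", err)
	}
	return buf, nil
}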

// prune cleans the cache, the filesystem and the per-root mutexes.
func (ps *ProofStorage) prune() {
	startTime := time.Now()
	defer func() {
		proofPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	}()

	highestStoredEpoch := ps.cache.HighestEpoch()

	// Check if we need to prune.
	if highestStoredEpoch < ps.retentionEpochs {
		return
	}

	highestEpochToPrune := highestStoredEpoch - ps.retentionEpochs
	highestPeriodToPrune := proofPeriod(highestEpochToPrune)

	// Prune the cache.
	prunedCount := ps.cache.pruneUpTo(highestEpochToPrune)

	if prunedCount == 0 {
		return
	}

	proofPrunedCounter.Add(float64(prunedCount))

	// Prune the filesystem.
	periodFileInfos, err := afero.ReadDir(ps.fs, ".")
	if err != nil {
		log.WithError(err).Error("Error encountered while reading top directory during proof prune")
		return
	}

	for _, periodFileInfo := range periodFileInfos {
		periodStr := periodFileInfo.Name()
		period, err := strconv.ParseUint(periodStr, 10, 64)
		if err != nil {
			log.WithError(err).Errorf("Error encountered while parsing period %s", periodStr)
			continue
		}

		if period < highestPeriodToPrune {
			// Remove everything lower than the highest period to prune.
			if err := ps.fs.RemoveAll(periodStr); err != nil {
				log.WithError(err).Error("Error encountered while removing period directory during proof prune")
			}
			continue
		}

		if period > highestPeriodToPrune {
			// Do not remove anything higher than the highest period to prune.
			continue
		}

		// Here, period == highestPeriodToPrune: only this boundary period is walked epoch by epoch.
		epochFileInfos, err := afero.ReadDir(ps.fs, periodStr)
		if err != nil {
			log.WithError(err).Error("Error encountered while reading epoch directory during proof prune")
			continue
		}

		for _, epochFileInfo := range epochFileInfos {
			epochStr := epochFileInfo.Name()
			epochDir := path.Join(periodStr, epochStr)

			epoch, err := strconv.ParseUint(epochStr, 10, 64)
			if err != nil {
				log.WithError(err).Errorf("Error encountered while parsing epoch %s", epochStr)
				continue
			}

			if primitives.Epoch(epoch) > highestEpochToPrune {
				continue
			}

			if err := ps.fs.RemoveAll(epochDir); err != nil {
				log.WithError(err).Error("Error encountered while removing epoch directory during proof prune")
				continue
			}
		}
	}

	ps.mu.Lock()
	defer ps.mu.Unlock()
	clear(ps.muChans)
}
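
// A minimal sketch (hypothetical helper, not in the diff) isolating the cutoff
// arithmetic prune performs above, under the same retention semantics: nothing
// is pruned until at least retentionEpochs epochs are stored, after which
// every epoch at or below highestStoredEpoch - retentionEpochs is removable.
func pruneCutoff(highestStoredEpoch, retentionEpochs primitives.Epoch) (primitives.Epoch, uint64, bool) {
	if highestStoredEpoch < retentionEpochs {
		return 0, 0, false // nothing old enough to prune yet
	}
	highestEpochToPrune := highestStoredEpoch - retentionEpochs
	return highestEpochToPrune, proofPeriod(highestEpochToPrune), true
}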

// fileMutexChan returns the file mutex and channel for a given block root.
func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []*ethpb.ExecutionProof) {
	ps.mu.Lock()
	defer ps.mu.Unlock()

	mc, ok := ps.muChans[root]
	if !ok {
		mc = &proofMuChan{
			mu:      new(sync.RWMutex),
			toStore: make(chan []*ethpb.ExecutionProof, 1),
		}
		ps.muChans[root] = mc
	}

	return mc.mu, mc.toStore
}
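
// Usage sketch, inferred from this file rather than stated in it: Save-side
// callers presumably enqueue a batch on the per-root channel (buffered with
// capacity 1), then take the per-root lock and drain everything queued so far
// with pullProofChan before touching the file. Something like:
//
//	mu, toStore := ps.fileMutexChan(root)
//	toStore <- proofs
//	mu.Lock()
//	defer mu.Unlock()
//	queued := pullProofChan(toStore) // drains without blocking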

// pullProofChan pulls proofs from the input channel until it is empty.
func pullProofChan(inputProofs chan []*ethpb.ExecutionProof) []*ethpb.ExecutionProof {
	proofs := make([]*ethpb.ExecutionProof, 0, maxProofTypes)

	for {
		select {
		case batch := <-inputProofs:
			proofs = append(proofs, batch...)
		default:
			return proofs
		}
	}
}

// proofFilePath builds the file path in the database for a given root and epoch.
func proofFilePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string {
	return path.Join(
		fmt.Sprintf("%d", proofPeriod(epoch)),
		fmt.Sprintf("%d", epoch),
		fmt.Sprintf("%#x.%s", root, proofsFileExtension),
	)
}

// extractProofFileMetadata extracts the metadata from a proof file path.
func extractProofFileMetadata(path string) (*proofFileMetadata, error) {
	// Use filepath.Separator to handle both Windows (\) and Unix (/) path separators.
	parts := strings.Split(path, string(filepath.Separator))
	if len(parts) != 3 {
		return nil, fmt.Errorf("unexpected proof file %s", path)
	}

	period, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("failed to parse period from %s: %w", path, err)
	}

	epoch, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("failed to parse epoch from %s: %w", path, err)
	}

	partsRoot := strings.Split(parts[2], ".")
	if len(partsRoot) != 2 {
		return nil, fmt.Errorf("failed to parse root from %s", path)
	}

	blockRootString := partsRoot[0]
	if len(blockRootString) != 2+2*fieldparams.RootLength || blockRootString[:2] != "0x" {
		return nil, fmt.Errorf("unexpected proof file name %s", path)
	}

	if partsRoot[1] != proofsFileExtension {
		return nil, fmt.Errorf("unexpected extension %s", path)
	}

	blockRootSlice, err := hex.DecodeString(blockRootString[2:])
	if err != nil {
		return nil, fmt.Errorf("decode string from %s: %w", path, err)
	}

	var blockRoot [fieldparams.RootLength]byte
	copy(blockRoot[:], blockRootSlice)

	return &proofFileMetadata{period: period, epoch: primitives.Epoch(epoch), blockRoot: blockRoot}, nil
}

// proofPeriod computes the period of a given epoch.
func proofPeriod(epoch primitives.Epoch) uint64 {
	return uint64(epoch / params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
}
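
// Worked example (illustrative values; the divisor comes from the active
// network config): if MinEpochsForDataColumnSidecarsRequest is 4096, then
// proofPeriod(100) == 0 and proofPeriod(5000) == 1, which is consistent with
// the "0/100/..." path asserted in TestProofFilePath below.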

// encodeOffsetTable encodes the offset table to bytes.
func encodeOffsetTable(table proofOffsetTable) []byte {
	result := make([]byte, proofOffsetTableSize)
	for i, entry := range table {
		offset := i * proofSlotSize
		binary.BigEndian.PutUint32(result[offset:offset+proofOffsetSize], entry.offset)
		binary.BigEndian.PutUint32(result[offset+proofOffsetSize:offset+proofSlotSize], entry.size)
	}
	return result
}
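
// Sketch of the inverse operation (a hypothetical helper; readHeader above
// inlines the same decoding while also accumulating the file size, and
// TestEncodeOffsetTable below performs it manually):
func decodeOffsetTable(encoded []byte) proofOffsetTable {
	var table proofOffsetTable
	for i := range table {
		pos := i * proofSlotSize
		table[i].offset = binary.BigEndian.Uint32(encoded[pos : pos+proofOffsetSize])
		table[i].size = binary.BigEndian.Uint32(encoded[pos+proofOffsetSize : pos+proofSlotSize])
	}
	return table
}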

@@ -1,206 +0,0 @@
package filesystem

import (
	"slices"
	"sync"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// ProofStorageSummary represents cached information about the proofs on disk for each root the cache knows about.
type ProofStorageSummary struct {
	epoch    primitives.Epoch
	proofIDs map[uint64]bool
}

// HasProof returns true if the proof with the given proofID is available in the filesystem.
func (s ProofStorageSummary) HasProof(proofID uint64) bool {
	if s.proofIDs == nil {
		return false
	}
	_, ok := s.proofIDs[proofID]
	return ok
}

// Count returns the number of available proofs.
func (s ProofStorageSummary) Count() int {
	return len(s.proofIDs)
}

// All returns all stored proofIDs sorted in ascending order.
func (s ProofStorageSummary) All() []uint64 {
	if s.proofIDs == nil {
		return nil
	}
	ids := make([]uint64, 0, len(s.proofIDs))
	for id := range s.proofIDs {
		ids = append(ids, id)
	}
	slices.Sort(ids)
	return ids
}
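
// Read-path usage sketch (hypothetical caller; the same flow is exercised by
// TestProofSaveAndGet further down):
//
//	summary := proofStorage.Summary(blockRoot)
//	if summary.HasProof(2) {
//		ids := summary.All() // sorted ascending, e.g. [0 2 7]
//		_ = ids
//	}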

type proofCache struct {
	mu                 sync.RWMutex
	proofCount         float64
	lowestCachedEpoch  primitives.Epoch
	highestCachedEpoch primitives.Epoch
	cache              map[[fieldparams.RootLength]byte]ProofStorageSummary
}

func newProofCache() *proofCache {
	return &proofCache{
		cache:             make(map[[fieldparams.RootLength]byte]ProofStorageSummary),
		lowestCachedEpoch: params.BeaconConfig().FarFutureEpoch,
	}
}

// Summary returns the ProofStorageSummary for `root`.
// The ProofStorageSummary can be used to check for the presence of proofs based on proofID.
func (pc *proofCache) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
	pc.mu.RLock()
	defer pc.mu.RUnlock()

	return pc.cache[root]
}

// HighestEpoch returns the highest cached epoch.
func (pc *proofCache) HighestEpoch() primitives.Epoch {
	pc.mu.RLock()
	defer pc.mu.RUnlock()

	return pc.highestCachedEpoch
}

// set adds a proof to the cache.
func (pc *proofCache) set(ident ProofIdent) {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	summary := pc.cache[ident.BlockRoot]
	if summary.proofIDs == nil {
		summary.proofIDs = make(map[uint64]bool)
	}
	summary.epoch = ident.Epoch

	if _, exists := summary.proofIDs[ident.ProofID]; exists {
		pc.cache[ident.BlockRoot] = summary
		return
	}

	summary.proofIDs[ident.ProofID] = true
	pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
	pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)

	pc.cache[ident.BlockRoot] = summary

	pc.proofCount++
	proofDiskCount.Set(pc.proofCount)
	proofWrittenCounter.Inc()
}

// setMultiple adds multiple proofs to the cache.
func (pc *proofCache) setMultiple(ident ProofsIdent) {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	summary := pc.cache[ident.BlockRoot]
	if summary.proofIDs == nil {
		summary.proofIDs = make(map[uint64]bool)
	}
	summary.epoch = ident.Epoch

	addedCount := 0
	for _, proofID := range ident.ProofIDs {
		if _, exists := summary.proofIDs[proofID]; exists {
			continue
		}
		summary.proofIDs[proofID] = true
		addedCount++
	}

	if addedCount == 0 {
		pc.cache[ident.BlockRoot] = summary
		return
	}

	pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
	pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)

	pc.cache[ident.BlockRoot] = summary

	pc.proofCount += float64(addedCount)
	proofDiskCount.Set(pc.proofCount)
	proofWrittenCounter.Add(float64(addedCount))
}

// get returns the ProofStorageSummary for the given block root.
// If the root is not in the cache, the second return value will be false.
func (pc *proofCache) get(blockRoot [fieldparams.RootLength]byte) (ProofStorageSummary, bool) {
	pc.mu.RLock()
	defer pc.mu.RUnlock()

	summary, ok := pc.cache[blockRoot]
	return summary, ok
}

// evict removes the ProofStorageSummary for the given block root from the cache.
func (pc *proofCache) evict(blockRoot [fieldparams.RootLength]byte) int {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	summary, ok := pc.cache[blockRoot]
	if !ok {
		return 0
	}

	deleted := len(summary.proofIDs)
	delete(pc.cache, blockRoot)

	if deleted > 0 {
		pc.proofCount -= float64(deleted)
		proofDiskCount.Set(pc.proofCount)
	}

	return deleted
}

// pruneUpTo removes all entries from the cache up to and including the given target epoch.
func (pc *proofCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	prunedCount := uint64(0)
	newLowestCachedEpoch := params.BeaconConfig().FarFutureEpoch
	newHighestCachedEpoch := primitives.Epoch(0)

	for blockRoot, summary := range pc.cache {
		epoch := summary.epoch

		if epoch > targetEpoch {
			newLowestCachedEpoch = min(newLowestCachedEpoch, epoch)
			newHighestCachedEpoch = max(newHighestCachedEpoch, epoch)
			continue
		}

		prunedCount += uint64(len(summary.proofIDs))
		delete(pc.cache, blockRoot)
	}

	if prunedCount > 0 {
		pc.lowestCachedEpoch = newLowestCachedEpoch
		pc.highestCachedEpoch = newHighestCachedEpoch
		pc.proofCount -= float64(prunedCount)
		proofDiskCount.Set(pc.proofCount)
	}

	return prunedCount
}

// clear removes all entries from the cache.
func (pc *proofCache) clear() uint64 {
	return pc.pruneUpTo(params.BeaconConfig().FarFutureEpoch)
}
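
// ProofIdent and ProofsIdent are not defined in this diff; judging from their
// use in set, setMultiple and TestProofSubscribe, they presumably carry at
// least the following fields (a sketch, not the actual definitions):
//
//	type ProofIdent struct {
//		BlockRoot [fieldparams.RootLength]byte
//		Epoch     primitives.Epoch
//		ProofID   uint64
//	}
//
//	type ProofsIdent struct {
//		BlockRoot [fieldparams.RootLength]byte
//		Epoch     primitives.Epoch
//		ProofIDs  []uint64
//	}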

@@ -1,407 +0,0 @@
package filesystem

import (
	"encoding/binary"
	"os"
	"testing"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/spf13/afero"
)

func createTestProof(t *testing.T, slot primitives.Slot, proofID uint64, blockRoot [32]byte) *ethpb.ExecutionProof {
	t.Helper()

	return &ethpb.ExecutionProof{
		ProofId:   primitives.ExecutionProofId(proofID),
		Slot:      slot,
		BlockHash: make([]byte, 32),
		BlockRoot: blockRoot[:],
		ProofData: []byte("test proof data for proofID " + string(rune('0'+proofID))),
	}
}

// assertProofsEqual compares two proofs by comparing their SSZ-encoded bytes.
func assertProofsEqual(t *testing.T, expected, actual *ethpb.ExecutionProof) {
	t.Helper()

	expectedSSZ, err := expected.MarshalSSZ()
	require.NoError(t, err)
	actualSSZ, err := actual.MarshalSSZ()
	require.NoError(t, err)
	require.DeepEqual(t, expectedSSZ, actualSSZ)
}

func TestNewProofStorage(t *testing.T) {
	ctx := t.Context()

	t.Run("No base path", func(t *testing.T) {
		_, err := NewProofStorage(ctx)
		require.ErrorIs(t, err, errNoProofBasePath)
	})

	t.Run("Nominal", func(t *testing.T) {
		dir := t.TempDir()

		storage, err := NewProofStorage(ctx, WithProofBasePath(dir))
		require.NoError(t, err)
		require.Equal(t, dir, storage.base)
	})
}

func TestProofSaveAndGet(t *testing.T) {
	t.Run("proof ID too large", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		proof := &ethpb.ExecutionProof{
			ProofId:   primitives.ExecutionProofId(maxProofTypes), // too large
			Slot:      1,
			BlockHash: make([]byte, 32),
			BlockRoot: make([]byte, 32),
			ProofData: []byte("test"),
		}

		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.ErrorIs(t, err, errProofIDTooLarge)
	})

	t.Run("save empty slice", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		err := proofStorage.Save([]*ethpb.ExecutionProof{})
		require.NoError(t, err)
	})

	t.Run("save and get single proof", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 2, blockRoot)

		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)

		// Check summary
		summary := proofStorage.Summary(blockRoot)
		require.Equal(t, true, summary.HasProof(2))
		require.Equal(t, false, summary.HasProof(0))
		require.Equal(t, false, summary.HasProof(1))
		require.Equal(t, 1, summary.Count())

		// Get the proof
		proofs, err := proofStorage.Get(blockRoot, []uint64{2})
		require.NoError(t, err)
		require.Equal(t, 1, len(proofs))
		assertProofsEqual(t, proof, proofs[0])
	})

	t.Run("save and get multiple proofs", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}

		// Save first proof
		proof1 := createTestProof(t, 32, 0, blockRoot)
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
		require.NoError(t, err)

		// Save second proof (should append to existing file)
		proof2 := createTestProof(t, 32, 3, blockRoot)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
		require.NoError(t, err)

		// Save third proof
		proof3 := createTestProof(t, 32, 7, blockRoot)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof3})
		require.NoError(t, err)

		// Check summary
		summary := proofStorage.Summary(blockRoot)
		require.Equal(t, true, summary.HasProof(0))
		require.Equal(t, false, summary.HasProof(1))
		require.Equal(t, false, summary.HasProof(2))
		require.Equal(t, true, summary.HasProof(3))
		require.Equal(t, false, summary.HasProof(4))
		require.Equal(t, false, summary.HasProof(5))
		require.Equal(t, false, summary.HasProof(6))
		require.Equal(t, true, summary.HasProof(7))
		require.Equal(t, 3, summary.Count())

		// Get all proofs
		proofs, err := proofStorage.Get(blockRoot, nil)
		require.NoError(t, err)
		require.Equal(t, 3, len(proofs))

		// Get specific proofs
		proofs, err = proofStorage.Get(blockRoot, []uint64{0, 3})
		require.NoError(t, err)
		require.Equal(t, 2, len(proofs))
		assertProofsEqual(t, proof1, proofs[0])
		assertProofsEqual(t, proof2, proofs[1])
	})

	t.Run("duplicate proof is ignored", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 2, blockRoot)

		// Save first time
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)

		// Save same proof again (should be silently ignored)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)

		// Check count
		summary := proofStorage.Summary(blockRoot)
		require.Equal(t, 1, summary.Count())

		// Get the proof
		proofs, err := proofStorage.Get(blockRoot, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(proofs))
	})

	t.Run("get non-existent root", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		proofs, err := proofStorage.Get([fieldparams.RootLength]byte{1}, []uint64{0, 1, 2})
		require.NoError(t, err)
		require.Equal(t, 0, len(proofs))
	})

	t.Run("get non-existent proofIDs", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 2, blockRoot)

		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)

		// Try to get proofIDs that don't exist
		proofs, err := proofStorage.Get(blockRoot, []uint64{0, 1, 3, 4})
		require.NoError(t, err)
		require.Equal(t, 0, len(proofs))
	})
}

func TestProofRemove(t *testing.T) {
	t.Run("remove non-existent", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		err := proofStorage.Remove([fieldparams.RootLength]byte{1})
		require.NoError(t, err)
	})

	t.Run("remove existing", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}

		proof1 := createTestProof(t, 32, 0, blockRoot1)
		proof2 := createTestProof(t, 64, 1, blockRoot2)

		err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
		require.NoError(t, err)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
		require.NoError(t, err)

		// Remove first proof
		err = proofStorage.Remove(blockRoot1)
		require.NoError(t, err)

		// Check first proof is gone
		summary := proofStorage.Summary(blockRoot1)
		require.Equal(t, 0, summary.Count())

		proofs, err := proofStorage.Get(blockRoot1, nil)
		require.NoError(t, err)
		require.Equal(t, 0, len(proofs))

		// Check second proof still exists
		summary = proofStorage.Summary(blockRoot2)
		require.Equal(t, 1, summary.Count())

		proofs, err = proofStorage.Get(blockRoot2, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(proofs))
	})
}

func TestProofClear(t *testing.T) {
	_, proofStorage := NewEphemeralProofStorageAndFs(t)

	blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}

	proof1 := createTestProof(t, 32, 0, blockRoot1)
	proof2 := createTestProof(t, 64, 1, blockRoot2)

	err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
	require.NoError(t, err)
	err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
	require.NoError(t, err)

	// Clear all
	err = proofStorage.Clear()
	require.NoError(t, err)

	// Check both are gone
	summary := proofStorage.Summary(blockRoot1)
	require.Equal(t, 0, summary.Count())

	summary = proofStorage.Summary(blockRoot2)
	require.Equal(t, 0, summary.Count())
}

func TestProofWarmCache(t *testing.T) {
	fs, proofStorage := NewEphemeralProofStorageAndFs(t)

	blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}

	// Save proofs
	proof1a := createTestProof(t, 32, 0, blockRoot1)
	proof1b := createTestProof(t, 32, 3, blockRoot1)
	proof2 := createTestProof(t, 64, 5, blockRoot2)

	err := proofStorage.Save([]*ethpb.ExecutionProof{proof1a})
	require.NoError(t, err)
	err = proofStorage.Save([]*ethpb.ExecutionProof{proof1b})
	require.NoError(t, err)
	err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
	require.NoError(t, err)

	// Verify files exist
	files, err := afero.ReadDir(fs, "0/1")
	require.NoError(t, err)
	require.Equal(t, 1, len(files))

	files, err = afero.ReadDir(fs, "0/2")
	require.NoError(t, err)
	require.Equal(t, 1, len(files))

	// Create a new storage with the same filesystem
	proofStorage2 := NewEphemeralProofStorageUsingFs(t, fs)

	// Before warm cache, cache should be empty
	summary := proofStorage2.Summary(blockRoot1)
	require.Equal(t, 0, summary.Count())

	// Warm cache
	proofStorage2.WarmCache()

	// After warm cache, cache should be populated
	summary = proofStorage2.Summary(blockRoot1)
	require.Equal(t, 2, summary.Count())
	require.Equal(t, true, summary.HasProof(0))
	require.Equal(t, true, summary.HasProof(3))

	summary = proofStorage2.Summary(blockRoot2)
	require.Equal(t, 1, summary.Count())
	require.Equal(t, true, summary.HasProof(5))
}

func TestProofSubscribe(t *testing.T) {
	_, proofStorage := NewEphemeralProofStorageAndFs(t)

	sub, ch := proofStorage.Subscribe()
	defer sub.Unsubscribe()

	blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	proof := createTestProof(t, 32, 2, blockRoot)

	err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
	require.NoError(t, err)

	// Should receive notification
	ident := <-ch
	require.Equal(t, blockRoot, ident.BlockRoot)
	require.DeepEqual(t, []uint64{2}, ident.ProofIDs)
	require.Equal(t, primitives.Epoch(1), ident.Epoch)
}

func TestProofReadHeader(t *testing.T) {
	t.Run("wrong version", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)

		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 0, blockRoot)

		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)

		// Get the file path
		filePath := proofFilePath(blockRoot, 1)

		// Alter the version
		file, err := proofStorage.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
		require.NoError(t, err)

		_, err = file.Write([]byte{42}) // wrong version
		require.NoError(t, err)

		// Try to read header
		_, _, err = proofStorage.readHeader(file)
		require.ErrorIs(t, err, errWrongProofVersion)

		err = file.Close()
		require.NoError(t, err)
	})
}

func TestEncodeOffsetTable(t *testing.T) {
	var table proofOffsetTable
	table[0] = proofSlotEntry{offset: 0, size: 100}
	table[3] = proofSlotEntry{offset: 100, size: 200}
	table[7] = proofSlotEntry{offset: 300, size: 300}

	encoded := encodeOffsetTable(table)
	require.Equal(t, proofOffsetTableSize, len(encoded))

	// Decode manually and verify
	var decoded proofOffsetTable
	for i := range decoded {
		pos := i * proofSlotSize
		decoded[i].offset = binary.BigEndian.Uint32(encoded[pos : pos+proofOffsetSize])
		decoded[i].size = binary.BigEndian.Uint32(encoded[pos+proofOffsetSize : pos+proofSlotSize])
	}
	require.Equal(t, table, decoded)
}

func TestProofFilePath(t *testing.T) {
	blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	epoch := primitives.Epoch(100)

	path := proofFilePath(blockRoot, epoch)
	require.Equal(t, "0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.sszs", path)
}

func TestExtractProofFileMetadata(t *testing.T) {
	t.Run("valid path", func(t *testing.T) {
		path := "0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.sszs"
		metadata, err := extractProofFileMetadata(path)
		require.NoError(t, err)

		expectedRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		require.Equal(t, uint64(0), metadata.period)
		require.Equal(t, primitives.Epoch(100), metadata.epoch)
		require.Equal(t, expectedRoot, metadata.blockRoot)
	})

	t.Run("invalid path - wrong number of parts", func(t *testing.T) {
		_, err := extractProofFileMetadata("invalid/path.sszs")
		require.ErrorContains(t, "unexpected proof file", err)
	})

	t.Run("invalid path - wrong extension", func(t *testing.T) {
		_, err := extractProofFileMetadata("0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.txt")
		require.ErrorContains(t, "unexpected extension", err)
	})
}

@@ -67,7 +67,6 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
	return subscribed
}

func TestUpdateCustodyInfo(t *testing.T) {
	ctx := t.Context()

@@ -123,8 +123,6 @@ type BeaconNode struct {
	BlobStorageOptions       []filesystem.BlobStorageOption
	DataColumnStorage        *filesystem.DataColumnStorage
	DataColumnStorageOptions []filesystem.DataColumnStorageOption
	ProofStorage             *filesystem.ProofStorage
	ProofStorageOptions      []filesystem.ProofStorageOption
	verifyInitWaiter         *verification.InitializerWaiter
	lhsp                     *verification.LazyHeadStateProvider
	syncChecker              *initialsync.SyncChecker
@@ -229,15 +227,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
		return nil, errors.Wrap(err, "could not clear data column storage")
	}

	if beacon.ProofStorage == nil {
		proofStorage, err := filesystem.NewProofStorage(cliCtx.Context, beacon.ProofStorageOptions...)
		if err != nil {
			return nil, errors.Wrap(err, "new proof storage")
		}

		beacon.ProofStorage = proofStorage
	}

	bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
	if err != nil {
		return nil, errors.Wrap(err, "could not start modules")
@@ -758,13 +747,11 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
		blockchain.WithSyncComplete(syncComplete),
		blockchain.WithBlobStorage(b.BlobStorage),
		blockchain.WithDataColumnStorage(b.DataColumnStorage),
		blockchain.WithProofStorage(b.ProofStorage),
		blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
		blockchain.WithPayloadIDCache(b.payloadIDCache),
		blockchain.WithSyncChecker(b.syncChecker),
		blockchain.WithSlasherEnabled(b.slasherEnabled),
		blockchain.WithLightClientStore(b.lcStore),
		blockchain.WithOperationNotifier(b),
	)

	blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -849,7 +836,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
		regularsync.WithStateNotifier(b),
		regularsync.WithBlobStorage(b.BlobStorage),
		regularsync.WithDataColumnStorage(b.DataColumnStorage),
		regularsync.WithExecutionProofStorage(b.ProofStorage),
		regularsync.WithVerifierWaiter(b.verifyInitWaiter),
		regularsync.WithAvailableBlocker(bFillStore),
		regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
@@ -976,7 +962,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
		BlockReceiver:       chainService,
		BlobReceiver:        chainService,
		DataColumnReceiver:  chainService,
		ProofReceiver:       chainService,
		AttestationReceiver: chainService,
		GenesisTimeFetcher:  chainService,
		GenesisFetcher:      chainService,

@@ -35,13 +35,6 @@ func WithBuilderFlagOptions(opts []builder.Option) Option {
	}
}

func WithConfigOptions(opt ...params.Option) Option {
	return func(bn *BeaconNode) error {
		bn.ConfigOptions = append(bn.ConfigOptions, opt...)
		return nil
	}
}

// WithBlobStorage sets the BlobStorage backend for the BeaconNode
func WithBlobStorage(bs *filesystem.BlobStorage) Option {
	return func(bn *BeaconNode) error {
@@ -59,6 +52,13 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
	}
}

func WithConfigOptions(opt ...params.Option) Option {
	return func(bn *BeaconNode) error {
		bn.ConfigOptions = append(bn.ConfigOptions, opt...)
		return nil
	}
}

// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
	return func(bn *BeaconNode) error {
@@ -75,20 +75,3 @@ func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Opt
		return nil
	}
}

// WithProofStorage sets the ProofStorage backend for the BeaconNode
func WithProofStorage(bs *filesystem.ProofStorage) Option {
	return func(bn *BeaconNode) error {
		bn.ProofStorage = bs
		return nil
	}
}

// WithProofStorageOption appends 1 or more filesystem.ProofStorageOption on the beacon node,
// to be used when initializing proof storage.
func WithProofStorageOption(opt ...filesystem.ProofStorageOption) Option {
	return func(bn *BeaconNode) error {
		bn.ProofStorageOptions = append(bn.ProofStorageOptions, opt...)
		return nil
	}
}
@@ -166,7 +166,6 @@ go_test(
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state/stategen/mock:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",

@@ -589,11 +589,6 @@ func (s *Service) createLocalNode(
		localNode.Set(quicEntry)
	}

	if features.Get().EnableZkvm {
		zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
		localNode.Set(zkvmKeyEntry)
	}

	localNode.SetFallbackIP(ipAddr)
	localNode.SetFallbackUDP(udpPort)

@@ -25,7 +25,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
	testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v7/config/features"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
	leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -244,19 +243,12 @@ func TestCreateLocalNode(t *testing.T) {
		name          string
		cfg           *Config
		expectedError bool
		zkvmEnabled   bool
	}{
		{
			name:          "valid config",
			cfg:           &Config{},
			expectedError: false,
		},
		{
			name:          "valid config with zkVM enabled",
			cfg:           &Config{},
			expectedError: false,
			zkvmEnabled:   true,
		},
		{
			name:          "invalid host address",
			cfg:           &Config{HostAddress: "invalid"},
@@ -281,15 +273,6 @@ func TestCreateLocalNode(t *testing.T) {

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			if tt.zkvmEnabled {
				resetCfg := features.InitWithReset(&features.Flags{
					EnableZkvm: true,
				})
				t.Cleanup(func() {
					resetCfg()
				})
			}

			// Define ports. Use unique ports since this test validates ENR content.
			const (
				udpPort = 3100
@@ -365,14 +348,6 @@ func TestCreateLocalNode(t *testing.T) {
			custodyGroupCount := new(uint64)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
			require.Equal(t, custodyRequirement, *custodyGroupCount)

			// Check zkVM enabled key if applicable.
			if tt.zkvmEnabled {
				zkvmEnabled := new(bool)
				require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
				require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
			}

		})
	}
}

@@ -52,9 +52,6 @@ const (
	// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
	// our light client finality update topic.
	lightClientFinalityUpdateWeight = 0.05
	// executionProofWeight specifies the scoring weight that we apply to
	// our execution proof topic.
	executionProofWeight = 0.05

	// maxInMeshScore describes the max score a peer can attain from being in the mesh.
	maxInMeshScore = 10
@@ -148,8 +145,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
		return defaultLightClientOptimisticUpdateTopicParams(), nil
	case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
		return defaultLightClientFinalityUpdateTopicParams(), nil
	case strings.Contains(topic, GossipExecutionProofMessage):
		return defaultExecutionProofTopicParams(), nil
	default:
		return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
	}
@@ -515,28 +510,6 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
	}
}

func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
	return &pubsub.TopicScoreParams{
		TopicWeight:                     executionProofWeight,
		TimeInMeshWeight:                maxInMeshScore / inMeshCap(),
		TimeInMeshQuantum:               inMeshTime(),
		TimeInMeshCap:                   inMeshCap(),
		FirstMessageDeliveriesWeight:    2,
		FirstMessageDeliveriesDecay:     scoreDecay(oneHundredEpochs),
		FirstMessageDeliveriesCap:       5,
		MeshMessageDeliveriesWeight:     0,
		MeshMessageDeliveriesDecay:      0,
		MeshMessageDeliveriesCap:        0,
		MeshMessageDeliveriesThreshold:  0,
		MeshMessageDeliveriesWindow:     0,
		MeshMessageDeliveriesActivation: 0,
		MeshFailurePenaltyWeight:        0,
		MeshFailurePenaltyDecay:         0,
		InvalidMessageDeliveriesWeight:  -2000,
		InvalidMessageDeliveriesDecay:   scoreDecay(invalidDecayPeriod),
	}
}

func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
	return &pubsub.TopicScoreParams{
		TopicWeight: lightClientOptimisticUpdateWeight,

@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
	LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
	LightClientFinalityUpdateTopicFormat:   func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
	DataColumnSubnetTopicFormat:            func() proto.Message { return &ethpb.DataColumnSidecar{} },
	ExecutionProofSubnetTopicFormat:        func() proto.Message { return &ethpb.ExecutionProof{} },
}

// GossipTopicMappings is a function to return the assigned data type
@@ -602,33 +602,6 @@ func (p *Status) All() []peer.ID {
	return pids
}

// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
	p.store.RLock()
	defer p.store.RUnlock()

	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
		if peerData.ConnState != Connected {
			continue
		}
		if peerData.Enr == nil {
			continue
		}

		var enabled bool
		entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
		if err := peerData.Enr.Load(entry); err != nil {
			continue
		}

		if enabled {
			peers = append(peers, pid)
		}
	}
	return peers
}
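
// Usage sketch for the removed helper above (hypothetical call site; proof
// requests would presumably be routed only to zkVM-capable peers):
//
//	candidates := p.ZkvmEnabledPeers()
//	if len(candidates) == 0 {
//		return errors.New("no zkVM-enabled peers") // hypothetical handling
//	}
//	pid := candidates[0]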

// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
	p.store.Lock()

@@ -1341,75 +1341,3 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
	p.SetConnectionState(id, state)
	return id
}

func TestZkvmEnabledPeers(t *testing.T) {
	p := peers.NewStatus(t.Context(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 1,
			},
		},
	})

	// Create peer 1: Connected, zkVM enabled
	pid1 := addPeer(t, p, peers.Connected)
	record1 := new(enr.Record)
	zkvmEnabled := true
	record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
	p.Add(record1, pid1, nil, network.DirOutbound)
	p.SetConnectionState(pid1, peers.Connected)

	// Create peer 2: Connected, zkVM disabled
	pid2 := addPeer(t, p, peers.Connected)
	record2 := new(enr.Record)
	zkvmDisabled := false
	record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
	p.Add(record2, pid2, nil, network.DirOutbound)
	p.SetConnectionState(pid2, peers.Connected)

	// Create peer 3: Connected, zkVM enabled
	pid3 := addPeer(t, p, peers.Connected)
	record3 := new(enr.Record)
	record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
	p.Add(record3, pid3, nil, network.DirOutbound)
	p.SetConnectionState(pid3, peers.Connected)

	// Create peer 4: Disconnected, zkVM enabled (should not be included)
	pid4 := addPeer(t, p, peers.Disconnected)
	record4 := new(enr.Record)
	record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
	p.Add(record4, pid4, nil, network.DirOutbound)
	p.SetConnectionState(pid4, peers.Disconnected)

	// Create peer 5: Connected, no ENR (should not be included)
	pid5 := addPeer(t, p, peers.Connected)
	p.Add(nil, pid5, nil, network.DirOutbound)
	p.SetConnectionState(pid5, peers.Connected)

	// Create peer 6: Connected, no zkVM key in ENR (should not be included)
	pid6 := addPeer(t, p, peers.Connected)
	record6 := new(enr.Record)
	record6.Set(enr.WithEntry("other_key", "other_value"))
	p.Add(record6, pid6, nil, network.DirOutbound)
	p.SetConnectionState(pid6, peers.Connected)

	// Get zkVM enabled peers
	zkvmPeers := p.ZkvmEnabledPeers()

	// Should return only pid1 and pid3 (connected peers with zkVM enabled)
	assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")

	// Verify the returned peers are correct
	zkvmPeerMap := make(map[peer.ID]bool)
	for _, pid := range zkvmPeers {
		zkvmPeerMap[pid] = true
	}

	assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
	assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
	assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
	assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
	assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
	assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
}

@@ -67,9 +67,6 @@ const (

	// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
	DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"

	// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
	ExecutionProofsByRootName = "/execution_proofs_by_root"
)

const (
@@ -109,9 +106,6 @@ const (
	// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
	// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
	RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
	// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
	// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
	RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1

	// V2 RPC Topics
	// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -176,9 +170,6 @@ var (

	// DataColumnSidecarsByRoot v1 Message
	RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},

	// ExecutionProofsByRoot v1 Message
	RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}

// Maps all registered protocol prefixes.
@@ -202,7 +193,6 @@ var (
	LightClientOptimisticUpdateName: true,
	DataColumnSidecarsByRootName:    true,
	DataColumnSidecarsByRangeName:   true,
	ExecutionProofsByRootName:       true,
}

// Maps all the RPC messages which are to be updated in altair.

@@ -36,7 +36,6 @@ var (
	attSubnetEnrKey         = params.BeaconNetworkConfig().AttSubnetKey
	syncCommsSubnetEnrKey   = params.BeaconNetworkConfig().SyncCommsSubnetKey
	custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
	zkvmEnabledKeyEnrKey    = params.BeaconNetworkConfig().ZkvmEnabledKey
)

// The value used with the subnet, in order

@@ -46,8 +46,6 @@ const (
	GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
	// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
	GossipDataColumnSidecarMessage = "data_column_sidecar"
	// GossipExecutionProofMessage is the name for the execution proof message type.
	GossipExecutionProofMessage = "execution_proof"

	// Topic Formats
	//
@@ -77,8 +75,6 @@ const (
	LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
	// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
	DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
	// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
	ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (the PoC only has one global topic)
)

// topic is a struct representing a single gossipsub topic.
@@ -162,7 +158,6 @@ func (s *Service) allTopics() []topic {
		newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
		newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
		newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
		newTopic(fulu, future, empty, GossipExecutionProofMessage),
	}
	last := params.GetNetworkScheduleEntry(genesis)
	schedule := []params.NetworkScheduleEntry{last}
@@ -575,7 +575,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".PublishBlockV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlockV2,
|
||||
@@ -586,7 +586,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".PublishBlindedBlockV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlindedBlockV2,
|
||||
|
||||
@@ -26,8 +26,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
|
||||
@@ -178,11 +178,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.BuilderPaymentThresholdNumerator = 104
|
||||
config.BuilderPaymentThresholdDenominator = 105
|
||||
|
||||
// EIP-8025
|
||||
config.MaxProofDataBytes = 200
|
||||
config.MinEpochsForExecutionProofRequests = 201
|
||||
config.MinProofsRequired = 202
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
config.DomainBeaconProposer = dbp
|
||||
@@ -615,12 +610,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "102", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "103", v)
|
||||
case "MAX_PROOF_DATA_BYTES":
|
||||
assert.Equal(t, "200", v)
|
||||
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
|
||||
assert.Equal(t, "201", v)
|
||||
case "MIN_PROOFS_REQUIRED":
|
||||
assert.Equal(t, "202", v)
|
||||
case "BUILDER_PAYMENT_THRESHOLD_NUMERATOR":
|
||||
assert.Equal(t, "104", v)
|
||||
case "BUILDER_PAYMENT_THRESHOLD_DENOMINATOR":
|
||||
|
||||
@@ -48,7 +48,6 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//reflection:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
|
||||
|
||||
@@ -35,19 +35,18 @@ import (
|
||||
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
|
||||
// version information, and services the node implements and runs.
|
||||
type Server struct {
|
||||
LogsStreamer logs.Streamer
|
||||
StreamLogsBufferSize int
|
||||
SyncChecker sync.Checker
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
POWChainInfoFetcher execution.ChainInfoFetcher
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
LogsStreamer logs.Streamer
|
||||
StreamLogsBufferSize int
|
||||
SyncChecker sync.Checker
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
POWChainInfoFetcher execution.ChainInfoFetcher
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
@@ -62,28 +61,21 @@ func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (
|
||||
ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
|
||||
defer cancel() // Important to avoid a context leak
|
||||
|
||||
// Check optimistic status - validators should not participate when optimistic
|
||||
isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
|
||||
}
|
||||
|
||||
if ns.SyncChecker.Synced() && !isOptimistic {
|
||||
if ns.SyncChecker.Synced() {
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
|
||||
// Set header for REST API clients (via gRPC-gateway)
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
|
||||
if request.SyncingStatus != 0 {
|
||||
// override the 200 success with the provided request status
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
|
||||
}
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
return &empty.Empty{}, status.Error(codes.Unavailable, "node is syncing")
|
||||
}
|
||||
if isOptimistic {
|
||||
// Set header for REST API clients (via gRPC-gateway)
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
|
||||
}
|
||||
return &empty.Empty{}, status.Error(codes.Unavailable, "node is optimistic")
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package node

import (
	"errors"
	"maps"
	"testing"
	"time"

@@ -22,7 +21,6 @@ import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/reflection"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/timestamppb"
@@ -189,71 +187,32 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
	assert.Equal(t, errStr, res.CurrentConnectionError)
}

// mockServerTransportStream implements grpc.ServerTransportStream for testing
type mockServerTransportStream struct {
	headers map[string][]string
}

func (m *mockServerTransportStream) Method() string { return "" }
func (m *mockServerTransportStream) SetHeader(md metadata.MD) error {
	maps.Copy(m.headers, md)
	return nil
}
func (m *mockServerTransportStream) SendHeader(metadata.MD) error { return nil }
func (m *mockServerTransportStream) SetTrailer(metadata.MD) error { return nil }

func TestNodeServer_GetHealth(t *testing.T) {
	tests := []struct {
		name         string
		input        *mockSync.Sync
		isOptimistic bool
		customStatus uint64
		wantedErr    string
	}{
		{
			name:         "happy path - synced and not optimistic",
			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
			isOptimistic: false,
			name:  "happy path",
			input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
		},
		{
			name:         "returns error when not synced and not syncing",
			input:        &mockSync.Sync{IsSyncing: false, IsSynced: false},
			isOptimistic: false,
			wantedErr:    "service unavailable",
		},
		{
			name:         "returns error when syncing",
			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
			isOptimistic: false,
			wantedErr:    "node is syncing",
		},
		{
			name:         "returns error when synced but optimistic",
			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
			isOptimistic: true,
			wantedErr:    "node is optimistic",
		},
		{
			name:         "returns error when syncing and optimistic",
			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
			isOptimistic: true,
			wantedErr:    "node is syncing",
			name:      "syncing",
			input:     &mockSync.Sync{IsSyncing: false},
			wantedErr: "service unavailable",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			server := grpc.NewServer()
			ns := &Server{
				SyncChecker:           tt.input,
				OptimisticModeFetcher: &mock.ChainService{Optimistic: tt.isOptimistic},
				SyncChecker: tt.input,
			}
			ethpb.RegisterNodeServer(server, ns)
			reflection.Register(server)

			// Create context with mock transport stream so grpc.SetHeader works
			stream := &mockServerTransportStream{headers: make(map[string][]string)}
			ctx := grpc.NewContextWithServerTransportStream(t.Context(), stream)

			_, err := ns.GetHealth(ctx, &ethpb.HealthRequest{})
			_, err := ns.GetHealth(t.Context(), &ethpb.HealthRequest{SyncingStatus: tt.customStatus})
			if tt.wantedErr == "" {
				require.NoError(t, err)
				return

@@ -42,7 +42,6 @@ go_library(
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/builder:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//beacon-chain/cache/depositsnapshot:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/electra:go_default_library",

@@ -19,7 +19,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -322,91 +321,38 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
		return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
	}

	var wg errgroup.Group
	blockBroadcastDone := make(chan bool)
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	wg.Go(func() error {
		if err := vs.broadcastReceiveBlock(ctx, blockBroadcastDone, block, root); err != nil {
			return fmt.Errorf("broadcast receive block: %w", err)
	wg.Add(1)
	go func() {
		if err := vs.broadcastReceiveBlock(ctx, &wg, block, root); err != nil {
			errChan <- errors.Wrap(err, "broadcast/receive block failed")
			return
		}
		errChan <- nil
	}()

		return nil
	})
	wg.Wait()

	wg.Go(func() error {
		if err := vs.broadcastAndReceiveSidecars(ctx, blockBroadcastDone, block, root, blobSidecars, dataColumnSidecars); err != nil {
			return fmt.Errorf("broadcast and receive sidecars: %w", err)
		}

		return nil
	})

	if err := wg.Wait(); err != nil {
		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block/sidecars: %v", err)
	if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
	}
	if err := <-errChan; err != nil {
		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
	}

	// Generate and broadcast execution proofs.
	go vs.generateAndBroadcastExecutionProofs(ctx, rob)

	return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}

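// Aside: a self-contained sketch (illustrative, not part of the diff) of the
// two concurrency shapes swapped in ProposeBeaconBlock above: an errgroup that
// joins both stages, versus a WaitGroup that gates the sidecar stage on the
// block broadcast while the block stage's error is collected through a
// buffered channel. Both helpers below are simplified stand-ins.
package sketch

import (
	"sync"

	"golang.org/x/sync/errgroup"
)

// errgroup form: both stages run concurrently; Wait returns the first error.
func withErrgroup(broadcastBlock, broadcastSidecars func() error) error {
	var g errgroup.Group
	g.Go(broadcastBlock)
	g.Go(broadcastSidecars)
	return g.Wait()
}

// WaitGroup form (simplified): sidecars start only after wg.Wait() observes
// the block stage; its error is drained from the channel at the end.
func withWaitGroup(broadcastBlock, broadcastSidecars func() error) error {
	var wg sync.WaitGroup
	errChan := make(chan error, 1) // buffered: the goroutine never blocks
	wg.Add(1)
	go func() {
		defer wg.Done()
		errChan <- broadcastBlock()
	}()
	wg.Wait()
	if err := broadcastSidecars(); err != nil {
		return err
	}
	return <-errChan
}
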
// TODO: This is a duplicate from the same function in the sync package.
func (vs *Server) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
	const delay = 2 * time.Second
	proofTypes := flags.Get().ProofGenerationTypes

	if len(proofTypes) == 0 {
		return
	}

	var wg errgroup.Group
	for _, proofType := range proofTypes {
		wg.Go(func() error {
			execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)

			if err != nil {
				return fmt.Errorf("generate exec proof: %w", err)
			}

			if err := vs.P2P.Broadcast(ctx, execProof); err != nil {
				return fmt.Errorf("broadcast exec proof: %w", err)
			}

			// Save the proof to storage.
			if vs.ProofReceiver != nil {
				if err := vs.ProofReceiver.ReceiveProof(execProof); err != nil {
					return fmt.Errorf("receive proof: %w", err)
				}
			}

			return nil
		})
	}

	if err := wg.Wait(); err != nil {
		log.WithError(err).Error("Failed to generate and broadcast execution proofs")
	}

	log.WithFields(logrus.Fields{
		"root":  fmt.Sprintf("%#x", roBlock.Root()),
		"slot":  roBlock.Block().Slot(),
		"count": len(proofTypes),
	}).Debug("Generated and broadcasted execution proofs")
}

// broadcastAndReceiveSidecars broadcasts and receives sidecars.
func (vs *Server) broadcastAndReceiveSidecars(
	ctx context.Context,
	blockBroadcastDone <-chan bool,
	block interfaces.SignedBeaconBlock,
	root [fieldparams.RootLength]byte,
	blobSidecars []*ethpb.BlobSidecar,
	dataColumnSidecars []blocks.RODataColumn,
) error {
	// Wait for block broadcast to complete before broadcasting sidecars.
	<-blockBroadcastDone

	if block.Version() >= version.Fulu {
		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
			return errors.Wrap(err, "broadcast and receive data columns")
@@ -488,14 +434,11 @@ func (vs *Server) handleUnblindedBlock(
	}

// broadcastReceiveBlock broadcasts a block and handles its reception.
// It closes the blockBroadcastDone channel once broadcasting is complete (but before receiving the block).
func (vs *Server) broadcastReceiveBlock(ctx context.Context, blockBroadcastDone chan<- bool, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
	if err := vs.broadcastBlock(ctx, block, root); err != nil {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
	if err := vs.broadcastBlock(ctx, wg, block, root); err != nil {
		return errors.Wrap(err, "broadcast block")
	}

	close(blockBroadcastDone)

	vs.BlockNotifier.BlockFeed().Send(&feed.Event{
		Type: blockfeed.ReceivedBlock,
		Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
@@ -508,7 +451,9 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, blockBroadcastDone
	return nil
}

func (vs *Server) broadcastBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
	defer wg.Done()

	protoBlock, err := block.Proto()
	if err != nil {
		return errors.Wrap(err, "protobuf conversion failed")
@@ -764,57 +709,3 @@ func blobsAndProofs(req *ethpb.GenericSignedBeaconBlock) ([][]byte, [][]byte, er
		return nil, nil, errors.Errorf("unknown request type provided: %T", req)
	}
}

// generateExecProof returns a dummy execution proof after the specified delay.
// TODO: This is a duplicate from the same function in the sync package.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
	// Simulate proof generation work
	time.Sleep(delay)

	// Create a dummy proof with some deterministic data
	block := roBlock.Block()
	if block == nil {
		return nil, errors.New("nil block")
	}

	body := block.Body()
	if body == nil {
		return nil, errors.New("nil block body")
	}

	executionData, err := body.Execution()
	if err != nil {
		return nil, fmt.Errorf("execution: %w", err)
	}

	if executionData == nil {
		return nil, errors.New("nil execution data")
	}

	hash, err := executionData.HashTreeRoot()
	if err != nil {
		return nil, fmt.Errorf("hash tree root: %w", err)
	}

	proofData := []byte{
		0xFF, // Magic byte for dummy proof
		byte(proofID),
		// Include some payload hash bytes
		hash[0],
		hash[1],
		hash[2],
		hash[3],
	}

	blockRoot := roBlock.Root()

	proof := &ethpb.ExecutionProof{
		ProofId:   proofID,
		Slot:      block.Slot(),
		BlockHash: hash[:],
		BlockRoot: blockRoot[:],
		ProofData: proofData,
	}

	return proof, nil
}

@@ -70,7 +70,6 @@ type Server struct {
	BlockReceiver          blockchain.BlockReceiver
	BlobReceiver           blockchain.BlobReceiver
	DataColumnReceiver     blockchain.DataColumnReceiver
	ProofReceiver          blockchain.ProofReceiver
	MockEth1Votes          bool
	Eth1BlockFetcher       execution.POWBlockFetcher
	PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher

@@ -90,7 +90,6 @@ type Config struct {
	BlockReceiver             blockchain.BlockReceiver
	BlobReceiver              blockchain.BlobReceiver
	DataColumnReceiver        blockchain.DataColumnReceiver
	ProofReceiver             blockchain.ProofReceiver
	ExecutionChainService     execution.Chain
	ChainStartFetcher         execution.ChainStartFetcher
	ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -241,7 +240,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
		BlockReceiver:          s.cfg.BlockReceiver,
		BlobReceiver:           s.cfg.BlobReceiver,
		DataColumnReceiver:     s.cfg.DataColumnReceiver,
		ProofReceiver:          s.cfg.ProofReceiver,
		MockEth1Votes:          s.cfg.MockEth1Votes,
		Eth1BlockFetcher:       s.cfg.ExecutionChainService,
		PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
@@ -261,19 +259,18 @@ func NewService(ctx context.Context, cfg *Config) *Service {
	}
	s.validatorServer = validatorServer
	nodeServer := &nodev1alpha1.Server{
		LogsStreamer:          logs.NewStreamServer(),
		StreamLogsBufferSize:  1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
		BeaconDB:              s.cfg.BeaconDB,
		Server:                s.grpcServer,
		SyncChecker:           s.cfg.SyncService,
		GenesisTimeFetcher:    s.cfg.GenesisTimeFetcher,
		PeersFetcher:          s.cfg.PeersFetcher,
		PeerManager:           s.cfg.PeerManager,
		GenesisFetcher:        s.cfg.GenesisFetcher,
		POWChainInfoFetcher:   s.cfg.ExecutionChainInfoFetcher,
		BeaconMonitoringHost:  s.cfg.BeaconMonitoringHost,
		BeaconMonitoringPort:  s.cfg.BeaconMonitoringPort,
		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
		LogsStreamer:         logs.NewStreamServer(),
		StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
		BeaconDB:             s.cfg.BeaconDB,
		Server:               s.grpcServer,
		SyncChecker:          s.cfg.SyncService,
		GenesisTimeFetcher:   s.cfg.GenesisTimeFetcher,
		PeersFetcher:         s.cfg.PeersFetcher,
		PeerManager:          s.cfg.PeerManager,
		GenesisFetcher:       s.cfg.GenesisFetcher,
		POWChainInfoFetcher:  s.cfg.ExecutionChainInfoFetcher,
		BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
		BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
	}
	beaconChainServer := &beaconv1alpha1.Server{
		Ctx: s.ctx,

@@ -14,7 +14,6 @@ go_library(
        "decode_pubsub.go",
        "doc.go",
        "error.go",
        "exec_proofs.go",
        "fork_watcher.go",
        "fuzz_exports.go", # keep
        "log.go",
@@ -32,7 +31,6 @@ go_library(
        "rpc_chunked_response.go",
        "rpc_data_column_sidecars_by_range.go",
        "rpc_data_column_sidecars_by_root.go",
        "rpc_execution_proofs_by_root_topic.go",
        "rpc_goodbye.go",
        "rpc_light_client.go",
        "rpc_metadata.go",
@@ -48,7 +46,6 @@ go_library(
        "subscriber_blob_sidecar.go",
        "subscriber_bls_to_execution_change.go",
        "subscriber_data_column_sidecar.go",
        "subscriber_execution_proofs.go",
        "subscriber_handlers.go",
        "subscriber_sync_committee_message.go",
        "subscriber_sync_contribution_proof.go",
@@ -60,7 +57,6 @@ go_library(
        "validate_blob.go",
        "validate_bls_to_execution_change.go",
        "validate_data_column.go",
        "validate_execution_proof.go",
        "validate_light_client.go",
        "validate_proposer_slashing.go",
        "validate_sync_committee_message.go",

@@ -1,65 +0,0 @@
package sync

import (
	"fmt"
	"time"

	"errors"

	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// generateExecProof returns a dummy execution proof after the specified delay.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
	// Simulate proof generation work
	time.Sleep(delay)

	// Create a dummy proof with some deterministic data
	block := roBlock.Block()
	if block == nil {
		return nil, errors.New("nil block")
	}

	body := block.Body()
	if body == nil {
		return nil, errors.New("nil block body")
	}

	executionData, err := body.Execution()
	if err != nil {
		return nil, fmt.Errorf("execution: %w", err)
	}

	if executionData == nil {
		return nil, errors.New("nil execution data")
	}

	hash, err := executionData.HashTreeRoot()
	if err != nil {
		return nil, fmt.Errorf("hash tree root: %w", err)
	}

	proofData := []byte{
		0xFF, // Magic byte for dummy proof
		byte(proofID),
		// Include some payload hash bytes
		hash[0],
		hash[1],
		hash[2],
		hash[3],
	}

	blockRoot := roBlock.Root()

	proof := &ethpb.ExecutionProof{
		ProofId:   proofID,
		Slot:      block.Slot(),
		BlockHash: hash[:],
		BlockRoot: blockRoot[:],
		ProofData: proofData,
	}

	return proof, nil
}
@@ -167,25 +167,17 @@ func WithStateNotifier(n statefeed.Notifier) Option {
}

// WithBlobStorage gives the sync package direct access to BlobStorage.
func WithBlobStorage(storage *filesystem.BlobStorage) Option {
func WithBlobStorage(b *filesystem.BlobStorage) Option {
	return func(s *Service) error {
		s.cfg.blobStorage = storage
		s.cfg.blobStorage = b
		return nil
	}
}

// WithDataColumnStorage gives the sync package direct access to DataColumnStorage.
func WithDataColumnStorage(storage *filesystem.DataColumnStorage) Option {
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
	return func(s *Service) error {
		s.cfg.dataColumnStorage = storage
		return nil
	}
}

// WithExecutionProofStorage gives the sync package direct access to ProofStorage.
func WithExecutionProofStorage(storage *filesystem.ProofStorage) Option {
	return func(s *Service) error {
		s.cfg.proofStorage = storage
		s.cfg.dataColumnStorage = b
		return nil
	}
}

@@ -259,10 +259,6 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
		return errors.Wrap(err, "request and save missing data column sidecars")
	}

	if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
		return errors.Wrap(err, "request and save missing execution proofs")
	}

	return nil
}

@@ -100,10 +100,6 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
	// DataColumnSidecarsByRangeV1
	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars

	executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
	// ExecutionProofsByRootV1
	topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs

	// General topic for all rpc requests.
	topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)

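// Aside: a generic leaky-bucket sketch (illustrative only; the leakybucket
// collector used above has its own implementation, and the parameter meanings
// here — drain rate, burst capacity, drain period — are inferred from the
// call sites, not from that package's documentation).
package sketch

import "time"

type bucket struct {
	level    int64 // current fill level
	capacity int64 // burst limit: requests above this are rejected
	rate     int64 // units drained per elapsed period
	period   time.Duration
	last     time.Time
}

// allow drains the bucket for elapsed whole periods, then admits the request
// if its cost still fits under the burst capacity.
func (b *bucket) allow(now time.Time, cost int64) bool {
	periods := int64(now.Sub(b.last) / b.period)
	if periods > 0 {
		b.level -= periods * b.rate
		if b.level < 0 {
			b.level = 0
		}
		b.last = b.last.Add(time.Duration(periods) * b.period)
	}
	if b.level+cost > b.capacity {
		return false
	}
	b.level += cost
	return true
}
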
@@ -17,7 +17,7 @@ import (

func TestNewRateLimiter(t *testing.T) {
	rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
	assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
	assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
}

func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

@@ -51,7 +51,6 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
		p2p.RPCBlobSidecarsByRangeTopicV1:        s.blobSidecarsByRangeRPCHandler,       // Modified in Fulu
		p2p.RPCDataColumnSidecarsByRootTopicV1:   s.dataColumnSidecarByRootRPCHandler,   // Added in Fulu
		p2p.RPCDataColumnSidecarsByRangeTopicV1:  s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
		p2p.RPCExecutionProofsByRootTopicV1:      s.executionProofsByRootRPCHandler,     // Added in Fulu
	}, nil
}

@@ -11,14 +11,11 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v7/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -90,84 +87,9 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
		return errors.Wrap(err, "request and save missing data columns")
	}

	if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
		return errors.Wrap(err, "request and save missing execution proofs")
	}

	return err
}

func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
	if len(blks) == 0 {
		return nil
	}

	// TODO: Parallelize requests for multiple blocks.
	for _, blk := range blks {
		if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
			return err
		}
	}

	return nil
}

func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
	if !features.Get().EnableZkvm {
		return nil
	}

	// Check proof retention period.
	blockEpoch := slots.ToEpoch(block.Block().Slot())
	currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
	if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
		return nil
	}
	// Check how many proofs are needed with Execution Proof Pool.
	// TODO: All should return the same type ExecutionProofId.
	root := block.Root()
	proofStorage := s.cfg.proofStorage
	storedIds := proofStorage.Summary(root).All()

	count := uint64(len(storedIds))
	if count >= params.BeaconConfig().MinProofsRequired {
		return nil
	}

	alreadyHave := make([]primitives.ExecutionProofId, 0, len(storedIds))
	for _, id := range storedIds {
		alreadyHave = append(alreadyHave, primitives.ExecutionProofId(id))
	}

	// Construct request
	req := &ethpb.ExecutionProofsByRootRequest{
		BlockRoot:   root[:],
		CountNeeded: params.BeaconConfig().MinProofsRequired - count,
		AlreadyHave: alreadyHave,
	}

	// Call SendExecutionProofByRootRequest
	zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
	if len(zkvmEnabledPeers) == 0 {
		return fmt.Errorf("no zkVM enabled peers available to request execution proofs")
	}

	// TODO: For simplicity, just pick the first peer for now.
	// In the future, we can implement better peer selection logic.
	pid := zkvmEnabledPeers[0]
	proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
	if err != nil {
		return fmt.Errorf("send execution proofs by root request: %w", err)
	}

	// Save the proofs into storage.
	if err := proofStorage.Save(proofs); err != nil {
		return fmt.Errorf("proof storage save: %w", err)
	}

	return nil
}

// requestAndSaveMissingDataColumnSidecars checks if the data columns are missing for the given block.
// If so, requests them and saves them to the storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {

@@ -182,21 +182,3 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor

	return nil
}

func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
	// Success response code.
	if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
		return errors.Wrap(err, "stream write")
	}
	ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
	if err := writeContextToStream(ctxBytes[:], stream); err != nil {
		return errors.Wrap(err, "write context to stream")
	}

	// Execution proof.
	if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
		return errors.Wrap(err, "encode with max length")
	}

	return nil
}

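// Aside: a self-contained sketch (illustrative, not part of the diff) of the
// per-chunk framing produced by WriteExecutionProofChunk above: a status byte,
// the 4-byte fork-digest context, then the length-prefixed encoded payload.
// The real network encoder also snappy-compresses the payload; that step is
// omitted here for clarity.
package sketch

import (
	"bytes"
	"encoding/binary"
)

const responseCodeSuccess = byte(0x00)

// frameChunk lays out one response chunk into a byte slice.
func frameChunk(forkDigest [4]byte, payload []byte) []byte {
	var buf bytes.Buffer
	buf.WriteByte(responseCodeSuccess) // 1. status code
	buf.Write(forkDigest[:])           // 2. context bytes (fork digest)
	var ln [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(ln[:], uint64(len(payload)))
	buf.Write(ln[:n])  // 3. varint length prefix
	buf.Write(payload) // 4. encoded proof bytes
	return buf.Bytes()
}
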
@@ -1,228 +0,0 @@
package sync

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	libp2pcore "github.com/libp2p/go-libp2p/core"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/sirupsen/logrus"
)

// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns fetched execution proofs, if any.
func SendExecutionProofsByRootRequest(
	ctx context.Context,
	clock blockchain.TemporalOracle,
	p2pProvider p2p.P2P,
	pid peer.ID,
	req *ethpb.ExecutionProofsByRootRequest,
) ([]*ethpb.ExecutionProof, error) {
	// Validate request
	if req.CountNeeded == 0 {
		return nil, errors.New("count_needed must be greater than 0")
	}

	topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
	if err != nil {
		return nil, err
	}

	log.WithFields(logrus.Fields{
		"topic":      topic,
		"block_root": bytesutil.ToBytes32(req.BlockRoot),
		"count":      req.CountNeeded,
		"already":    len(req.AlreadyHave),
	}).Debug("Sending execution proofs by root request")

	stream, err := p2pProvider.Send(ctx, req, topic, pid)
	if err != nil {
		return nil, err
	}
	defer closeStream(stream, log)

	// Read execution proofs from stream
	proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
	alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
	for _, id := range req.AlreadyHave {
		alreadyHaveSet[id] = struct{}{}
	}

	for i := uint64(0); i < req.CountNeeded; i++ {
		isFirstChunk := i == 0
		proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, err
		}

		// Validate proof
		if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
			return nil, err
		}

		proofs = append(proofs, proof)
	}

	return proofs, nil
}

// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
func ReadChunkedExecutionProof(
	stream libp2pcore.Stream,
	encoding p2p.EncodingProvider,
	isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
	// Read status code for each chunk (like data columns, not like blocks)
	code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
	if err != nil {
		return nil, err
	}
	if code != 0 {
		return nil, errors.New(errMsg)
	}

	// Read context bytes (fork digest)
	_, err = readContextFromStream(stream)
	if err != nil {
		return nil, fmt.Errorf("read context from stream: %w", err)
	}

	// Decode the proof
	proof := &ethpb.ExecutionProof{}
	if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
		return nil, err
	}

	return proof, nil
}

// validateExecutionProof validates a received execution proof against the request.
func validateExecutionProof(
	proof *ethpb.ExecutionProof,
	req *ethpb.ExecutionProofsByRootRequest,
	alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
	// Check block root matches
	proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
	reqRoot := bytesutil.ToBytes32(req.BlockRoot)
	if proofRoot != reqRoot {
		return fmt.Errorf("proof block root %#x does not match requested root %#x",
			proofRoot, reqRoot)
	}

	// Check we didn't already have this proof
	if _, ok := alreadyHaveSet[proof.ProofId]; ok {
		return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
	}

	// Check proof ID is valid (within max range)
	if !proof.ProofId.IsValid() {
		return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
	}

	return nil
}

// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
	ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
	defer span.End()

	_, cancel := context.WithTimeout(ctx, ttfbTimeout)
	defer cancel()

	req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
	if !ok {
		return errors.New("message is not type ExecutionProofsByRootRequest")
	}

	remotePeer := stream.Conn().RemotePeer()
	SetRPCStreamDeadlines(stream)

	// Validate request
	if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
		return err
	}

	// Penalize peers that send invalid requests.
	if err := validateExecutionProofsByRootRequest(req); err != nil {
		s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
		s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
		return fmt.Errorf("validate execution proofs by root request: %w", err)
	}

	blockRoot := bytesutil.ToBytes32(req.BlockRoot)

	log := log.WithFields(logrus.Fields{
		"blockroot":   fmt.Sprintf("%#x", blockRoot),
		"neededCount": req.CountNeeded,
		"alreadyHave": req.AlreadyHave,
		"peer":        remotePeer.String(),
	})

	s.rateLimiter.add(stream, 1)
	defer closeStream(stream, log)

	// Get proofs from execution proof pool
	summary := s.cfg.proofStorage.Summary(blockRoot)

	// Filter out not requested proofs
	alreadyHave := make(map[primitives.ExecutionProofId]bool)
	for _, id := range req.AlreadyHave {
		alreadyHave[id] = true
	}

	// Determine which proofs to fetch (not already had by requester)
	proofIDsToFetch := make([]uint64, 0, len(summary.All()))
	for _, proofId := range summary.All() {
		if !alreadyHave[primitives.ExecutionProofId(proofId)] {
			proofIDsToFetch = append(proofIDsToFetch, proofId)
		}
	}

	// Load all proofs at once
	proofs, err := s.cfg.proofStorage.Get(blockRoot, proofIDsToFetch)
	if err != nil {
		return fmt.Errorf("proof storage get: %w", err)
	}

	// Send proofs
	sentCount := uint64(0)
	for _, proof := range proofs {
		if sentCount >= req.CountNeeded {
			break
		}

		// Write proof to stream
		SetStreamWriteDeadline(stream, defaultWriteDuration)
		if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
			log.WithError(err).Debug("Could not send execution proof")
			s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
			return err
		}

		sentCount++
	}

	log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")

	return nil
}

func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
	if req.CountNeeded == 0 {
		return errors.New("count_needed must be greater than 0")
	}
	return nil
}
@@ -70,7 +70,6 @@ const (
	seenProposerSlashingSize = 100
	badBlockSize             = 1000
	syncMetricsInterval      = 10 * time.Second
	seenExecutionProofSize   = 100
)

var (
@@ -110,7 +109,6 @@ type config struct {
	stateNotifier      statefeed.Notifier
	blobStorage        *filesystem.BlobStorage
	dataColumnStorage  *filesystem.DataColumnStorage
	proofStorage       *filesystem.ProofStorage
	batchVerifierLimit int
}

@@ -119,7 +117,6 @@ type blockchainService interface {
	blockchain.BlockReceiver
	blockchain.BlobReceiver
	blockchain.DataColumnReceiver
	blockchain.ProofReceiver
	blockchain.HeadFetcher
	blockchain.FinalizationFetcher
	blockchain.ForkFetcher
@@ -152,7 +149,6 @@ type Service struct {
	seenBlobLock                    sync.RWMutex
	seenBlobCache                   *lru.Cache
	seenDataColumnCache             *slotAwareCache
	seenProofCache                  *slotAwareCache
	seenAggregatedAttestationLock   sync.RWMutex
	seenAggregatedAttestationCache  *lru.Cache
	seenUnAggregatedAttestationLock sync.RWMutex
@@ -177,7 +173,6 @@ type Service struct {
	verifierWaiter                 *verification.InitializerWaiter
	newBlobVerifier                verification.NewBlobVerifier
	newColumnsVerifier             verification.NewDataColumnsVerifier
	newProofsVerifier              verification.NewExecutionProofsVerifier
	columnSidecarsExecSingleFlight singleflight.Group
	reconstructionSingleFlight     singleflight.Group
	availableBlocker               coverage.AvailableBlocker
@@ -239,6 +234,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
	r.subHandler = newSubTopicHandler()
	r.rateLimiter = newRateLimiter(r.cfg.p2p)
	r.initCaches()

	return r
}

@@ -254,12 +250,6 @@ func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verifi
	}
}

func newExecutionProofsVerifierFromInitializer(ini *verification.Initializer) verification.NewExecutionProofsVerifier {
	return func(proofs []blocks.ROExecutionProof, reqs []verification.Requirement) verification.ExecutionProofsVerifier {
		return ini.NewExecutionProofsVerifier(proofs, reqs)
	}
}

// Start the regular sync service.
func (s *Service) Start() {
	v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
@@ -269,7 +259,6 @@ func (s *Service) Start() {
	}
	s.newBlobVerifier = newBlobVerifierFromInitializer(v)
	s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
	s.newProofsVerifier = newExecutionProofsVerifierFromInitializer(v)

	go s.verifierRoutine()
	go s.startDiscoveryAndSubscriptions()
@@ -359,7 +348,6 @@ func (s *Service) initCaches() {
	s.seenBlockCache = lruwrpr.New(seenBlockSize)
	s.seenBlobCache = lruwrpr.New(seenBlockSize * params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra)
	s.seenDataColumnCache = newSlotAwareCache(seenDataColumnSize)
	s.seenProofCache = newSlotAwareCache(seenExecutionProofSize)
	s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
	s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
	s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)

@@ -329,17 +329,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
			getSubnetsRequiringPeers: s.allDataColumnSubnets,
		})
	})

	if features.Get().EnableZkvm {
		s.spawn(func() {
			s.subscribe(
				p2p.ExecutionProofSubnetTopicFormat,
				s.validateExecutionProof,
				s.executionProofSubscriber,
				nse,
			)
		})
	}
	}
	return true
}

@@ -11,7 +11,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v7/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
@@ -23,7 +22,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
	"google.golang.org/protobuf/proto"
)

@@ -79,11 +77,6 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
		return err
	}

	// We use the service context to ensure this context is not cancelled
	// when the current function returns.
	// TODO: Do not broadcast proofs for blocks we have already seen.
	go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)

	if err := s.processPendingAttsForBlock(ctx, root); err != nil {
		return errors.Wrap(err, "process pending atts for block")
	}
@@ -91,47 +84,6 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
	return nil
}

func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
	const delay = 2 * time.Second
	proofTypes := flags.Get().ProofGenerationTypes

	// Exit early if proof generation is disabled.
	if len(proofTypes) == 0 {
		return
	}

	var wg errgroup.Group
	for _, proofType := range proofTypes {
		wg.Go(func() error {
			execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)

			if err != nil {
				return fmt.Errorf("generate exec proof: %w", err)
			}

			if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
				return fmt.Errorf("broadcast exec proof: %w", err)
			}

			if err := s.cfg.chain.ReceiveProof(execProof); err != nil {
				return errors.Wrap(err, "receive proof")
			}

			return nil
		})
	}

	if err := wg.Wait(); err != nil {
		log.WithError(err).Error("Failed to generate and broadcast execution proofs")
	}

	log.WithFields(logrus.Fields{
		"root":  fmt.Sprintf("%#x", roBlock.Root()),
		"slot":  roBlock.Block().Slot(),
		"count": len(proofTypes),
	}).Debug("Generated and broadcasted execution proofs")
}

// processSidecarsFromExecutionFromBlock retrieves (if available) sidecar data from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) error {

@@ -1,36 +0,0 @@
package sync

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
	opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/pkg/errors"
	"google.golang.org/protobuf/proto"
)

func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
	verifiedProof, ok := msg.(blocks.VerifiedROExecutionProof)
	if !ok {
		return errors.Errorf("incorrect type of message received, wanted %T but got %T", blocks.VerifiedROExecutionProof{}, msg)
	}

	// Insert the execution proof into the pool
	s.setSeenProof(verifiedProof.Slot(), verifiedProof.BlockRoot(), verifiedProof.ProofId())

	// Save the proof to storage.
	if err := s.cfg.chain.ReceiveProof(verifiedProof.ExecutionProof); err != nil {
		return errors.Wrap(err, "receive proof")
	}

	// Notify subscribers about the new execution proof
	s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
		Type: opfeed.ExecutionProofReceived,
		Data: &opfeed.ExecutionProofReceivedData{
			ExecutionProof: verifiedProof.ExecutionProof,
		},
	})

	return nil
}
@@ -1,110 +0,0 @@
package sync

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/sirupsen/logrus"
)

func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
	// Always accept our own messages.
	if pid == s.cfg.p2p.PeerID() {
		return pubsub.ValidationAccept, nil
	}

	// Ignore messages during initial sync.
	if s.cfg.initialSync.Syncing() {
		return pubsub.ValidationIgnore, nil
	}

	// Reject messages with a nil topic.
	if msg.Topic == nil {
		return pubsub.ValidationReject, p2p.ErrInvalidTopic
	}

	// Decode the message, reject if it fails.
	m, err := s.decodePubsubMessage(msg)
	if err != nil {
		log.WithError(err).Error("Failed to decode message")
		return pubsub.ValidationReject, err
	}

	// Reject messages that are not of the expected type.
	executionProof, ok := m.(*ethpb.ExecutionProof)
	if !ok {
		log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
		return pubsub.ValidationReject, errWrongMessage
	}

	// Convert to ROExecutionProof.
	roProof, err := blocks.NewROExecutionProof(executionProof)
	if err != nil {
		return pubsub.ValidationReject, err
	}

	// Check if the proof has already been seen.
	if s.hasSeenProof(roProof.BlockRoot(), roProof.ProofId()) {
		return pubsub.ValidationIgnore, nil
	}

	// Create the verifier with gossip requirements.
	verifier := s.newProofsVerifier([]blocks.ROExecutionProof{roProof}, verification.GossipExecutionProofRequirements)

	// Run verifications.
	if err := verifier.NotFromFutureSlot(); err != nil {
		return pubsub.ValidationReject, err
	}
	if err := verifier.ProofSizeLimits(); err != nil {
		return pubsub.ValidationReject, err
	}
	if err := verifier.ProofVerified(); err != nil {
		return pubsub.ValidationReject, err
	}

	// Get verified proofs.
	verifiedProofs, err := verifier.VerifiedROExecutionProofs()
	if err != nil {
		return pubsub.ValidationIgnore, err
	}

	log.WithFields(logrus.Fields{
		"root": fmt.Sprintf("%#x", roProof.BlockRoot()),
		"slot": roProof.Slot(),
		"id":   roProof.ProofId(),
	}).Debug("Accepted execution proof")

	// Set validator data to the verified proof.
	msg.ValidatorData = verifiedProofs[0]
	return pubsub.ValidationAccept, nil
}

// hasSeenProof returns true if the proof with the same block root and proof ID has been seen before.
func (s *Service) hasSeenProof(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
	key := computeProofCacheKey(blockRoot, proofId)
	_, seen := s.seenProofCache.Get(key)
	return seen
}

// setSeenProof marks the proof with the given block root and proof ID as seen.
func (s *Service) setSeenProof(slot primitives.Slot, blockRoot [32]byte, proofId primitives.ExecutionProofId) {
	key := computeProofCacheKey(blockRoot, proofId)
	s.seenProofCache.Add(slot, key, true)
}

func computeProofCacheKey(blockRoot [32]byte, proofId primitives.ExecutionProofId) string {
	key := make([]byte, 0, 33)

	key = append(key, blockRoot[:]...)
	key = append(key, bytesutil.Bytes1(uint64(proofId))...)

	return string(key)
}
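// Aside: a self-contained sketch (illustrative, not part of the diff) of the
// 33-byte seen-proof cache key built by computeProofCacheKey above: 32 bytes
// of block root followed by a single proof-id byte, so each (root, id) pair
// caches independently.
package main

import "fmt"

func proofCacheKey(blockRoot [32]byte, proofId uint8) string {
	key := make([]byte, 0, 33)
	key = append(key, blockRoot[:]...) // 32 root bytes
	key = append(key, proofId)         // 1 proof-id byte
	return string(key)
}

func main() {
	var root [32]byte
	root[0] = 0xAB
	fmt.Println(len(proofCacheKey(root, 1)))                       // 33
	fmt.Println(proofCacheKey(root, 1) == proofCacheKey(root, 2)) // false
}
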
@@ -8,7 +8,6 @@ go_library(
        "cache.go",
        "data_column.go",
        "error.go",
        "execution_proof.go",
        "fake.go",
        "filesystem.go",
        "initializer.go",

@@ -30,10 +30,6 @@ const (
	// Data columns specific.
	RequireValidFields
	RequireCorrectSubnet

	// Execution proof specific.
	RequireProofSizeLimits
	RequireProofVerified
)

var allBlobSidecarRequirements = []Requirement{

@@ -1027,10 +1027,10 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
			sc: signatureCache,
			sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
			hsp: &mockHeadStateProvider{
				headRoot:          parentRoot[:],    // Same as parent
				headSlot:          32,               // Epoch 1
				headState:         fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
				headStateReadOnly: nil,              // Should not use ReadOnly path
				headRoot:  parentRoot[:],            // Same as parent
				headSlot:  32,                       // Epoch 1
				headState: fuluState.Copy(),         // HeadState (not ReadOnly) for ProcessSlots
				headStateReadOnly: nil,              // Should not use ReadOnly path
			},
			fc: &mockForkchoicer{
				// Return same root for both to simulate same chain
@@ -1045,8 +1045,8 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
	// Wrap to detect HeadState call
	originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
	wrappedHsp := &mockHeadStateProvider{
		headRoot: originalHsp.headRoot,
		headSlot: originalHsp.headSlot,
		headRoot:  originalHsp.headRoot,
		headSlot:  originalHsp.headSlot,
		headState: originalHsp.headState,
	}
	initializer.shared.hsp = &headStateCallTracker{

@@ -81,17 +81,6 @@ var (
	errDataColumnVerificationImplementationFault = errors.New("could not verify blob data or create a valid VerifiedROBlob")
)

var (
	// ErrProofInvalid is joined with all other execution proof verification errors.
	ErrProofInvalid = AsVerificationFailure(errors.New("invalid execution proof"))

	// ErrProofSizeTooLarge means RequireProofSizeLimits failed.
	ErrProofSizeTooLarge = errors.Join(ErrProofInvalid, errors.New("proof data exceeds maximum size"))

	// errProofsInvalid is a general error for proof verification failures.
	errProofsInvalid = errors.New("execution proofs failed verification")
)

// VerificationMultiError is a custom error that can be used to access individual verification failures.
type VerificationMultiError struct {
	r *results

@@ -1,124 +0,0 @@
package verification

import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/pkg/errors"
)

// GossipExecutionProofRequirements defines the set of requirements that ExecutionProofs received on gossip
// must satisfy in order to upgrade an ROExecutionProof to a VerifiedROExecutionProof.
var GossipExecutionProofRequirements = []Requirement{
	RequireNotFromFutureSlot,
	RequireProofSizeLimits,
	RequireProofVerified,
}

// ROExecutionProofsVerifier verifies execution proofs.
type ROExecutionProofsVerifier struct {
	*sharedResources
	results *results
	proofs  []blocks.ROExecutionProof
}

var _ ExecutionProofsVerifier = &ROExecutionProofsVerifier{}

// VerifiedROExecutionProofs "upgrades" wrapped ROExecutionProofs to VerifiedROExecutionProofs.
// If any of the verifications ran against the proofs failed, or some required verifications
// were not run, an error will be returned.
func (v *ROExecutionProofsVerifier) VerifiedROExecutionProofs() ([]blocks.VerifiedROExecutionProof, error) {
	if !v.results.allSatisfied() {
		return nil, v.results.errors(errProofsInvalid)
	}

	verifiedProofs := make([]blocks.VerifiedROExecutionProof, 0, len(v.proofs))
	for _, proof := range v.proofs {
		verifiedProof := blocks.NewVerifiedROExecutionProof(proof)
		verifiedProofs = append(verifiedProofs, verifiedProof)
	}

	return verifiedProofs, nil
}

// SatisfyRequirement allows the caller to assert that a requirement has been satisfied.
func (v *ROExecutionProofsVerifier) SatisfyRequirement(req Requirement) {
	v.recordResult(req, nil)
}

func (v *ROExecutionProofsVerifier) recordResult(req Requirement, err *error) {
	if err == nil || *err == nil {
		v.results.record(req, nil)
		return
	}
	v.results.record(req, *err)
}

// NotFromFutureSlot verifies that the execution proof is not from a future slot.
func (v *ROExecutionProofsVerifier) NotFromFutureSlot() (err error) {
	if ok, err := v.results.cached(RequireNotFromFutureSlot); ok {
		return err
	}

	defer v.recordResult(RequireNotFromFutureSlot, &err)

	currentSlot := v.clock.CurrentSlot()
	now := v.clock.Now()
	maximumGossipClockDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()

	for _, proof := range v.proofs {
		proofSlot := proof.Slot()

		if currentSlot == proofSlot {
			continue
		}

		earliestStart, err := v.clock.SlotStart(proofSlot)
		if err != nil {
			return proofErrBuilder(errors.Wrap(err, "failed to determine slot start time from clock"))
		}
		earliestStart = earliestStart.Add(-maximumGossipClockDisparity)

		if now.Before(earliestStart) {
			return proofErrBuilder(errFromFutureSlot)
		}
	}

	return nil
}

// ProofSizeLimits verifies that the execution proof data does not exceed the maximum allowed size.
func (v *ROExecutionProofsVerifier) ProofSizeLimits() (err error) {
	if ok, err := v.results.cached(RequireProofSizeLimits); ok {
		return err
	}

	defer v.recordResult(RequireProofSizeLimits, &err)

	maxProofDataBytes := params.BeaconConfig().MaxProofDataBytes

	for _, proof := range v.proofs {
		if uint64(len(proof.ProofData)) > maxProofDataBytes {
			return proofErrBuilder(ErrProofSizeTooLarge)
		}
	}

	return nil
}

// ProofVerified performs zkVM proof verification.
// Currently a no-op, will be implemented when actual proof verification is added.
func (v *ROExecutionProofsVerifier) ProofVerified() (err error) {
	if ok, err := v.results.cached(RequireProofVerified); ok {
		return err
	}

	defer v.recordResult(RequireProofVerified, &err)

	// For now, all proofs are considered valid.
	// TODO: Implement actual zkVM proof verification.
	return nil
}

func proofErrBuilder(baseErr error) error {
	return errors.Wrap(baseErr, errProofsInvalid.Error())
}
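// Aside: a self-contained sketch (illustrative, not part of the diff) of the
// named-return + deferred-record idiom used by the verifier methods above:
// because the deferred call receives a pointer to the named return value, it
// observes the final err on every exit path, so each requirement is recorded
// exactly once per invocation.
package main

import "fmt"

type tracker struct{ results map[string]error }

func (t *tracker) record(req string, err *error) {
	if err == nil || *err == nil {
		t.results[req] = nil
		return
	}
	t.results[req] = *err
}

func (t *tracker) check(fail bool) (err error) {
	defer t.record("RequireExample", &err) // sees the final err value
	if fail {
		return fmt.Errorf("requirement failed")
	}
	return nil
}

func main() {
	t := &tracker{results: map[string]error{}}
	_ = t.check(true)
	fmt.Println(t.results["RequireExample"]) // requirement failed
}
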
@@ -86,16 +86,6 @@ func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColu
	}
}

// NewExecutionProofsVerifier creates an ExecutionProofsVerifier for a slice of execution proofs,
// with the given set of requirements.
func (ini *Initializer) NewExecutionProofsVerifier(proofs []blocks.ROExecutionProof, reqs []Requirement) *ROExecutionProofsVerifier {
	return &ROExecutionProofsVerifier{
		sharedResources: ini.shared,
		proofs:          proofs,
		results:         newResults(reqs...),
	}
}

// InitializerWaiter provides an Initializer once all dependent resources are ready
// via the WaitForInitializer method.
type InitializerWaiter struct {

@@ -54,17 +54,3 @@ type DataColumnsVerifier interface {
// NewDataColumnsVerifier is a function signature that can be used to mock a setup where a
// column verifier can be easily initialized.
type NewDataColumnsVerifier func(dataColumns []blocks.RODataColumn, reqs []Requirement) DataColumnsVerifier

// ExecutionProofsVerifier defines the methods implemented by ROExecutionProofsVerifier.
type ExecutionProofsVerifier interface {
	VerifiedROExecutionProofs() ([]blocks.VerifiedROExecutionProof, error)
	SatisfyRequirement(Requirement)

	NotFromFutureSlot() error
	ProofSizeLimits() error
	ProofVerified() error
}

// NewExecutionProofsVerifier is a function signature that can be used to mock a setup where an
// execution proofs verifier can be easily initialized.
type NewExecutionProofsVerifier func(proofs []blocks.ROExecutionProof, reqs []Requirement) ExecutionProofsVerifier

@@ -29,14 +29,6 @@ func (r Requirement) String() string {
		return "RequireSidecarKzgProofVerified"
	case RequireSidecarProposerExpected:
		return "RequireSidecarProposerExpected"
	case RequireValidFields:
		return "RequireValidFields"
	case RequireCorrectSubnet:
		return "RequireCorrectSubnet"
	case RequireProofSizeLimits:
		return "RequireProofSizeLimits"
	case RequireProofVerified:
		return "RequireProofVerified"
	default:
		return unknownRequirementName
	}

@@ -1,6 +0,0 @@
### Added

- Added a new proofCollector type to ssz-query

### Ignored
- Added tests covering the production of Merkle proofs from a Phase0 beacon state, benchmarked against a real Hoodi beacon state (Fulu version)
@@ -1,3 +0,0 @@
### Changed

- The gRPC health endpoint now returns an error when the node is syncing or optimistic, indicating that it is unavailable.
@@ -1,8 +1,6 @@
package flags

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/urfave/cli/v2"
)
@@ -19,18 +17,9 @@ var (
		Value:   uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
		Aliases: []string{"extend-blob-retention-epoch"},
	}
	ExecutionProofRetentionEpochFlag = &cli.Uint64Flag{
		Name: "execution-proof-retention-epochs",
		Usage: fmt.Sprintf(
			"Override the default execution proof retention period (measured in epochs). The node will exit with an error at startup if the value is less than the default of %d epochs.",
			params.BeaconConfig().MinEpochsForExecutionProofRequests,
		),
		Value: uint64(params.BeaconConfig().MinEpochsForExecutionProofRequests),
	}
)

var Flags = []cli.Flag{
	BackfillOldestSlot,
	BlobRetentionEpochFlag,
	ExecutionProofRetentionEpochFlag,
}

@@ -20,7 +20,6 @@ go_library(
        "//cmd:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",

@@ -368,12 +368,4 @@ var (
		Usage:  "Disables the engine_getBlobsV2 usage.",
		Hidden: true,
	}
	// ZKVM Generation Proof Type
	ZkvmGenerationProofTypeFlag = &cli.IntSliceFlag{
		Name: "zkvm-generation-proof-types",
		Usage: `
Comma-separated list of proof type IDs to generate
(e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth).
Optional - nodes can verify proofs without generating them.`,
	}
)


@@ -5,7 +5,6 @@ import (

"github.com/OffchainLabs/prysm/v7/cmd"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
@@ -29,7 +28,6 @@ type GlobalFlags struct {
DataColumnBatchLimit int
DataColumnBatchLimitBurstFactor int
StateDiffExponents []int
ProofGenerationTypes []primitives.ExecutionProofId
}

var globalConfig *GlobalFlags
@@ -92,19 +90,6 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
}
}

// zkVM Proof Generation Types
proofTypes := make([]primitives.ExecutionProofId, 0, len(ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name)))
for _, t := range ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name) {
proofTypes = append(proofTypes, primitives.ExecutionProofId(t))
}
cfg.ProofGenerationTypes = proofTypes

if features.Get().EnableZkvm {
if err := validateZkvmProofGenerationTypes(cfg.ProofGenerationTypes); err != nil {
return fmt.Errorf("validate Zkvm proof generation types: %w", err)
}
}

cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
@@ -156,13 +141,3 @@ func validateStateDiffExponents(exponents []int) error {
}
return nil
}

// validateZkvmProofGenerationTypes validates the provided proof IDs.
func validateZkvmProofGenerationTypes(types []primitives.ExecutionProofId) error {
for _, t := range types {
if t >= primitives.EXECUTION_PROOF_TYPE_COUNT {
return fmt.Errorf("invalid zkvm proof generation type: %d; valid types are between 0 and %d", t, primitives.EXECUTION_PROOF_TYPE_COUNT-1)
}
}
return nil
}
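
// Illustrative sketch (not part of the original change): passing
// --zkvm-generation-proof-types=0,1 makes ctx.IntSlice return []int{0, 1},
// which the loop above converts to
// cfg.ProofGenerationTypes = []primitives.ExecutionProofId{0, 1}.
// Both IDs pass validateZkvmProofGenerationTypes because they are below
// primitives.EXECUTION_PROOF_TYPE_COUNT (8); a value such as 9 would fail.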

@@ -162,7 +162,6 @@ var appFlags = []cli.Flag{
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.DisableEphemeralLogFile,
flags.ZkvmGenerationProofTypeFlag,
}

func init() {

@@ -32,10 +32,6 @@ var (
Name: "data-column-path",
Usage: "Location for data column storage. Default location will be a 'data-columns' directory next to the beacon db.",
}
ExecutionProofStoragePathFlag = &cli.PathFlag{
Name: "execution-proof-path",
Usage: "Location for execution proof storage. Default location will be an 'execution-proofs' directory next to the beacon db.",
}
)

// Flags is the list of CLI flags for configuring blob storage.
@@ -43,7 +39,6 @@ var Flags = []cli.Flag{
BlobStoragePathFlag,
BlobStorageLayout,
DataColumnStoragePathFlag,
ExecutionProofStoragePathFlag,
}

func layoutOptions() string {
@@ -76,13 +71,11 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
if err != nil {
return nil, errors.Wrap(err, "detecting blob storage layout")
}

if layout == filesystem.LayoutNameFlat {
log.Warningf(
"Existing '%s' blob storage layout detected. Consider setting the flag --%s=%s for faster startup and more reliable pruning. Setting this flag will automatically migrate your existing blob storage to the newer layout on the next restart.",
log.Warnf("Existing '%s' blob storage layout detected. Consider setting the flag --%s=%s for faster startup and more reliable pruning. Setting this flag will automatically migrate your existing blob storage to the newer layout on the next restart.",
filesystem.LayoutNameFlat, BlobStorageLayout.Name, filesystem.LayoutNameByEpoch)
}

blobStorageOptions := node.WithBlobStorageOptions(
filesystem.WithBlobRetentionEpochs(blobRetentionEpoch),
filesystem.WithBasePath(blobPath),
@@ -99,17 +92,7 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
filesystem.WithDataColumnBasePath(dataColumnStoragePath(c)),
)

executionProofRetentionEpoch, err := executionProofRetentionEpoch(c)
if err != nil {
return nil, errors.Wrap(err, "execution proof retention epoch")
}

proofStorageOption := node.WithProofStorageOption(
filesystem.WithProofRetentionEpochs(executionProofRetentionEpoch),
filesystem.WithProofBasePath(executionProofStoragePath(c)),
)

opts := []node.Option{blobStorageOptions, dataColumnStorageOption, proofStorageOption}
opts := []node.Option{blobStorageOptions, dataColumnStorageOption}
return opts, nil
}

@@ -181,17 +164,6 @@ func dataColumnStoragePath(c *cli.Context) string {
return dataColumnsPath
}

// TODO: Create a generic function for these storage path getters.
func executionProofStoragePath(c *cli.Context) string {
executionProofPath := c.Path(ExecutionProofStoragePathFlag.Name)
if executionProofPath == "" {
// append an "execution-proofs" subdir to the end of the data dir path
executionProofPath = filepath.Join(c.String(cmd.DataDirFlag.Name), "execution-proofs")
}

return executionProofPath
}

var errInvalidBlobRetentionEpochs = errors.New("value is smaller than spec minimum")

// blobRetentionEpoch returns the spec default MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
@@ -232,26 +204,6 @@ func dataColumnRetentionEpoch(cliCtx *cli.Context) (primitives.Epoch, error) {
return customValue, nil
}

// executionProofRetentionEpoch returns the spec default MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS
// or a user-specified flag overriding this value. If a user-specified override is
// smaller than the spec default, an error will be returned.
// TODO: Create a generic function for these retention epoch getters.
func executionProofRetentionEpoch(cliCtx *cli.Context) (primitives.Epoch, error) {
defaultValue := params.BeaconConfig().MinEpochsForExecutionProofRequests
if !cliCtx.IsSet(das.ExecutionProofRetentionEpochFlag.Name) {
return defaultValue, nil
}

customValue := primitives.Epoch(cliCtx.Uint64(das.ExecutionProofRetentionEpochFlag.Name))

// Validate the epoch value against the spec default.
if customValue < defaultValue {
return defaultValue, errors.Wrapf(errInvalidBlobRetentionEpochs, "%s=%d, spec=%d", das.ExecutionProofRetentionEpochFlag.Name, customValue, defaultValue)
}

return customValue, nil
}

func init() {
BlobStorageLayout.Action = validateLayoutFlag
}

@@ -234,12 +234,6 @@ var appHelpFlagGroups = []flagGroup{
flags.SetGCPercent,
},
},
{
Name: "zkvm",
Flags: []cli.Flag{
flags.ZkvmGenerationProofTypeFlag,
},
},
}

func init() {

@@ -49,7 +49,6 @@ type Flags struct {
DisableDutiesV2 bool // DisableDutiesV2 sets the validator client to use the get duties endpoint
EnableWeb bool // EnableWeb enables the webui on the validator client
EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
EnableZkvm bool // EnableZkvm enables zkVM-related features.

// Logging-related toggles.
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
@@ -305,11 +304,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
}
}

if ctx.IsSet(EnableZkvmFlag.Name) {
logEnabled(EnableZkvmFlag)
cfg.EnableZkvm = true
}

cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)
return nil

@@ -220,17 +220,6 @@ var (
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoids expensive state replay from lagging attesters).",
}
// Activate ZKVM execution proof mode
EnableZkvmFlag = &cli.BoolFlag{
Name: "activate-zkvm",
Usage: `
Activates ZKVM execution proof mode. Enables the node to subscribe to the
execution_proof gossip topic, receive and verify execution proofs from peers,
and advertise zkVM support in its ENR for peer discovery.
Use --zkvm-generation-proof-types to specify which proof types this node
should generate (optional - nodes can verify without generating).
`,
}
)

// devModeFlags holds the list of flags that are set when development mode is on.
@@ -295,7 +284,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
forceHeadFlag,
blacklistRoots,
enableHashtree,
EnableZkvmFlag,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)

func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

@@ -323,11 +323,6 @@ type BeaconChainConfig struct {
// Blobs Values
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" spec:"true"`

// EIP-8025: Optional Execution Proofs
MaxProofDataBytes uint64 `yaml:"MAX_PROOF_DATA_BYTES" spec:"true"` // MaxProofDataBytes is the maximum number of bytes for execution proof data.
MinProofsRequired uint64 `yaml:"MIN_PROOFS_REQUIRED" spec:"true"` // MinProofsRequired is the minimum number of execution proofs required for a block to be considered valid.
MinEpochsForExecutionProofRequests primitives.Epoch `yaml:"MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS" spec:"true"` // MinEpochsForExecutionProofRequests is the minimum number of epochs the node will keep the execution proofs for.

// DeprecatedMaxBlobsPerBlock defines the max blobs that could exist in a block.
// Deprecated: This field is no longer supported. Avoid using it.
DeprecatedMaxBlobsPerBlock int `yaml:"MAX_BLOBS_PER_BLOCK" spec:"true"`
@@ -751,20 +746,6 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
}

// WithinExecutionProofPeriod checks if the given epoch is within the execution proof retention period.
// This is used to determine whether execution proofs should be requested or generated for blocks at the given epoch.
// Returns true if the epoch is at or after the retention boundary (the later of the Fulu fork epoch and the proof retention epoch).
func WithinExecutionProofPeriod(epoch, current primitives.Epoch) bool {
proofRetentionEpoch := primitives.Epoch(0)
if current >= primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests) {
proofRetentionEpoch = current - primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests)
}

boundaryEpoch := primitives.MaxEpoch(BeaconConfig().FuluForkEpoch, proofRetentionEpoch)

return epoch >= boundaryEpoch
}
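
// Illustrative sketch (not part of the original change): with the mainnet
// value MinEpochsForExecutionProofRequests = 2 (see the config below) and a
// hypothetical FuluForkEpoch of 100, at current = 150 the retention boundary
// is max(100, 150-2) = 148, so WithinExecutionProofPeriod(147, 150) == false
// and WithinExecutionProofPeriod(148, 150) == true.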

// EpochsDuration returns the time duration of the given number of epochs.
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
return SlotsDuration(SlotsForEpochs(count, b), b)

@@ -40,7 +40,6 @@ var mainnetNetworkConfig = &NetworkConfig{
AttSubnetKey: "attnets",
SyncCommsSubnetKey: "syncnets",
CustodyGroupCountKey: "cgc",
ZkvmEnabledKey: "zkvm",
MinimumPeersInSubnetSearch: 20,
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
BootstrapNodes: []string{
@@ -372,11 +371,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxBlobsPerBlock: 21,
},
},

// EIP-8025: Optional Execution Proofs
MaxProofDataBytes: 1_048_576, // 1 MiB
MinProofsRequired: 2,
MinEpochsForExecutionProofRequests: 2,
}

// MainnetTestConfig provides a version of the mainnet config that has a different name

@@ -11,7 +11,6 @@ type NetworkConfig struct {
AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield.
SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield.
CustodyGroupCountKey string // CustodyGroupCountKey is the ENR key of the custody group count.
ZkvmEnabledKey string // ZkvmEnabledKey is the ENR key indicating whether zkVM mode is enabled.
MinimumPeersInSubnetSearch uint64 // MinimumPeersInSubnetSearch is the required number of peers that we need to be able to look up in a subnet search.

// Chain Network Config

@@ -13,7 +13,6 @@ go_library(
"roblob.go",
"roblock.go",
"rodatacolumn.go",
"roexecutionproof.go",
"setters.go",
"signed_execution_bid.go",
"types.go",

@@ -1,99 +0,0 @@
package blocks

import (
"errors"

fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

var (
errNilExecutionProof = errors.New("execution proof is nil")
errEmptyBlockRoot = errors.New("block root is empty")
errInvalidBlockRootSize = errors.New("block root has invalid size")
errInvalidBlockHashSize = errors.New("block hash has invalid size")
)

// ROExecutionProof represents a read-only execution proof with its block root.
type ROExecutionProof struct {
*ethpb.ExecutionProof
blockRoot [fieldparams.RootLength]byte
}

func roExecutionProofNilCheck(ep *ethpb.ExecutionProof) error {
if ep == nil {
return errNilExecutionProof
}

if len(ep.BlockRoot) == 0 {
return errEmptyBlockRoot
}

if len(ep.BlockRoot) != fieldparams.RootLength {
return errInvalidBlockRootSize
}

if len(ep.BlockHash) != fieldparams.RootLength {
return errInvalidBlockHashSize
}

return nil
}

// NewROExecutionProof creates a new ROExecutionProof from the given ExecutionProof.
// The block root is extracted from the ExecutionProof's BlockRoot field.
func NewROExecutionProof(ep *ethpb.ExecutionProof) (ROExecutionProof, error) {
if err := roExecutionProofNilCheck(ep); err != nil {
return ROExecutionProof{}, err
}

return ROExecutionProof{
ExecutionProof: ep,
blockRoot: bytesutil.ToBytes32(ep.BlockRoot),
}, nil
}

// NewROExecutionProofWithRoot creates a new ROExecutionProof with a given root.
func NewROExecutionProofWithRoot(ep *ethpb.ExecutionProof, root [fieldparams.RootLength]byte) (ROExecutionProof, error) {
if err := roExecutionProofNilCheck(ep); err != nil {
return ROExecutionProof{}, err
}

return ROExecutionProof{
ExecutionProof: ep,
blockRoot: root,
}, nil
}

// BlockRoot returns the block root of the execution proof.
func (p *ROExecutionProof) BlockRoot() [fieldparams.RootLength]byte {
return p.blockRoot
}

// Slot returns the slot of the execution proof.
func (p *ROExecutionProof) Slot() primitives.Slot {
return p.ExecutionProof.Slot
}

// ProofId returns the proof ID of the execution proof.
func (p *ROExecutionProof) ProofId() primitives.ExecutionProofId {
return p.ExecutionProof.ProofId
}

// BlockHash returns the block hash of the execution proof.
func (p *ROExecutionProof) BlockHash() [32]byte {
return bytesutil.ToBytes32(p.ExecutionProof.BlockHash)
}

// VerifiedROExecutionProof represents an ROExecutionProof that has undergone full verification.
type VerifiedROExecutionProof struct {
ROExecutionProof
}

// NewVerifiedROExecutionProof "upgrades" an ROExecutionProof to a VerifiedROExecutionProof.
// This function should only be used by the verification package.
func NewVerifiedROExecutionProof(ro ROExecutionProof) VerifiedROExecutionProof {
return VerifiedROExecutionProof{ROExecutionProof: ro}
}
@@ -11,7 +11,6 @@ go_library(
"domain.go",
"epoch.go",
"execution_address.go",
"execution_proof_id.go",
"kzg.go",
"payload_id.go",
"slot.go",
@@ -37,7 +36,6 @@ go_test(
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",
"execution_proof_id_test.go",
"slot_test.go",
"sszbytes_test.go",
"sszuint64_test.go",

@@ -1,64 +0,0 @@
package primitives

import (
"fmt"

fssz "github.com/prysmaticlabs/fastssz"
)

var _ fssz.HashRoot = (ExecutionProofId)(0)
var _ fssz.Marshaler = (*ExecutionProofId)(nil)
var _ fssz.Unmarshaler = (*ExecutionProofId)(nil)

// Number of execution proof types.
// Each type represents a different zkVM+EL combination.
//
// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future
const EXECUTION_PROOF_TYPE_COUNT = 8

// ExecutionProofId identifies which zkVM/proof system a proof belongs to.
type ExecutionProofId uint8

// IsValid returns true if the id is a known execution proof type.
func (id *ExecutionProofId) IsValid() bool {
return uint8(*id) < EXECUTION_PROOF_TYPE_COUNT
}

// HashTreeRoot --
func (id ExecutionProofId) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(id)
}

// HashTreeRootWith --
func (id ExecutionProofId) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint8(uint8(id))
return nil
}

// UnmarshalSSZ --
func (id *ExecutionProofId) UnmarshalSSZ(buf []byte) error {
if len(buf) != id.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d, received %d", id.SizeSSZ(), len(buf))
}
*id = ExecutionProofId(fssz.UnmarshallUint8(buf))
return nil
}

// MarshalSSZTo --
func (id *ExecutionProofId) MarshalSSZTo(buf []byte) ([]byte, error) {
marshalled, err := id.MarshalSSZ()
if err != nil {
return nil, err
}
return append(buf, marshalled...), nil
}

// MarshalSSZ --
func (id *ExecutionProofId) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint8([]byte{}, uint8(*id))
return marshalled, nil
}

// SizeSSZ --
func (id *ExecutionProofId) SizeSSZ() int {
return 1
}
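
// Illustrative round trip (not part of the original change), using only the
// SSZ helpers defined above:
//
// id := ExecutionProofId(3)
// buf, _ := id.MarshalSSZ() // buf == []byte{0x03}, one byte per SizeSSZ
// var out ExecutionProofId
// _ = out.UnmarshalSSZ(buf) // out == 3; a buffer of any other length errors
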
@@ -1,73 +0,0 @@
package primitives_test

import (
"testing"

"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

func TestExecutionProofId_IsValid(t *testing.T) {
tests := []struct {
name string
id primitives.ExecutionProofId
valid bool
}{
{
name: "valid proof id 0",
id: 0,
valid: true,
},
{
name: "valid proof id 1",
id: 1,
valid: true,
},
{
name: "valid proof id 7 (max valid)",
id: 7,
valid: true,
},
{
name: "invalid proof id 8 (at limit)",
id: 8,
valid: false,
},
{
name: "invalid proof id 255",
id: 255,
valid: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.id.IsValid(); got != tt.valid {
t.Errorf("ExecutionProofId.IsValid() = %v, want %v", got, tt.valid)
}
})
}
}

func TestExecutionProofId_Casting(t *testing.T) {
id := primitives.ExecutionProofId(5)

t.Run("uint8", func(t *testing.T) {
if uint8(id) != 5 {
t.Errorf("Casting to uint8 failed: got %v, want 5", uint8(id))
}
})

t.Run("from uint8", func(t *testing.T) {
var x uint8 = 7
if primitives.ExecutionProofId(x) != 7 {
t.Errorf("Casting from uint8 failed: got %v, want 7", primitives.ExecutionProofId(x))
}
})

t.Run("int", func(t *testing.T) {
var x = 3
if primitives.ExecutionProofId(x) != 3 {
t.Errorf("Casting from int failed: got %v, want 3", primitives.ExecutionProofId(x))
}
})
}
@@ -163,18 +163,3 @@ func Uint256ToSSZBytes(num string) ([]byte, error) {
}
return PadTo(ReverseByteOrder(uint256.Bytes()), 32), nil
}

// PutLittleEndian writes an unsigned integer value in little-endian format.
// Supports sizes 1, 2, 4, or 8 bytes for uint8/16/32/64 respectively.
func PutLittleEndian(dst []byte, val uint64, size int) {
switch size {
case 1:
dst[0] = byte(val)
case 2:
binary.LittleEndian.PutUint16(dst, uint16(val))
case 4:
binary.LittleEndian.PutUint32(dst, uint32(val))
case 8:
binary.LittleEndian.PutUint64(dst, val)
}
}
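
// Illustrative usage (not part of the original change):
//
// buf := make([]byte, 2)
// PutLittleEndian(buf, 0x0102, 2) // buf == []byte{0x02, 0x01}
//
// Note that sizes other than 1, 2, 4, or 8 leave dst untouched, so callers
// are expected to pass a supported size.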

@@ -9,9 +9,7 @@ go_library(
"container.go",
"generalized_index.go",
"list.go",
"merkle_proof.go",
"path.go",
"proof_collector.go",
"query.go",
"ssz_info.go",
"ssz_object.go",
@@ -22,12 +20,7 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/encoding/ssz/query",
visibility = ["//visibility:public"],
deps = [
"//container/trie:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)
@@ -36,24 +29,15 @@ go_test(
name = "go_default_test",
srcs = [
"generalized_index_test.go",
"merkle_proof_test.go",
"path_test.go",
"proof_collector_test.go",
"query_test.go",
"tag_parser_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz:go_default_library",
":go_default_library",
"//encoding/ssz/query/testutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/ssz_query/testing:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

@@ -1,34 +0,0 @@
package query

import (
"fmt"
"reflect"

fastssz "github.com/prysmaticlabs/fastssz"
)

// Prove is the entrypoint to generate an SSZ Merkle proof for the given generalized index.
// Parameters:
// - gindex: the generalized index of the node to prove inclusion for.
// Returns:
// - fastssz.Proof: the Merkle proof containing the leaf, index, and sibling hashes.
// - error: any error encountered during proof generation.
func (info *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error) {
if info == nil {
return nil, fmt.Errorf("nil SszInfo")
}

collector := newProofCollector()
collector.addTarget(gindex)

// info.source is guaranteed to be valid and dereferenced by AnalyzeObject
v := reflect.ValueOf(info.source).Elem()

// Start the merkleization and proof collection process.
// In SSZ generalized indices, the root is always at index 1.
if _, err := collector.merkleize(info, v, 1); err != nil {
return nil, err
}

return collector.toProof()
}
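
// Illustrative usage (not part of the original change; this mirrors the
// proveAndVerify helper in the deleted test file below):
//
// info, _ := query.AnalyzeObject(obj) // obj implements query.SSZObject
// path, _ := query.ParsePath(".slot")
// gi, _ := query.GetGeneralizedIndexFromPath(info, path)
// proof, _ := info.Prove(gi)
// ok, _ := fastssz.VerifyProof(root[:], proof) // root from obj.HashTreeRoot()
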
@@ -1,163 +0,0 @@
package query_test

import (
"testing"

"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/ssz/query"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
ssz "github.com/prysmaticlabs/fastssz"
)

func TestProve_FixedTestContainer(t *testing.T) {
obj := createFixedTestContainer()

tests := []string{
".field_uint32",
".nested.value2",
".vector_field[3]",
".bitvector64_field",
".trailing_field",
}

for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, obj, tc)
})
}
}

func TestProve_VariableTestContainer(t *testing.T) {
obj := createVariableTestContainer()

tests := []string{
".leading_field",
".field_list_uint64[2]",
"len(field_list_uint64)",
".nested.nested_list_field[1]",
".variable_container_list[0].inner_1.field_list_uint64[1]",
}

for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, obj, tc)
})
}
}

func TestProve_BeaconBlock(t *testing.T) {
randaoReveal := make([]byte, 96)
for i := range randaoReveal {
randaoReveal[i] = 0x42
}
root32 := make([]byte, 32)
for i := range root32 {
root32[i] = 0x24
}
sig := make([]byte, 96)
for i := range sig {
sig[i] = 0x99
}

att := &eth.Attestation{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: root32,
Source: &eth.Checkpoint{
Epoch: 1,
Root: root32,
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: root32,
},
},
Signature: sig,
}

b := util.NewBeaconBlock()
b.Block.Slot = 123
b.Block.Body.RandaoReveal = randaoReveal
b.Block.Body.Attestations = []*eth.Attestation{att}

sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

protoBlock, err := sb.Block().Proto()
require.NoError(t, err)

obj, ok := protoBlock.(query.SSZObject)
require.Equal(t, true, ok, "block proto does not implement query.SSZObject")

tests := []string{
".slot",
".body.randao_reveal",
".body.attestations[0].data.slot",
"len(body.attestations)",
}

for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, obj, tc)
})
}
}

func TestProve_BeaconState(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 16)
require.NoError(t, st.SetSlot(primitives.Slot(42)))

sszObj, ok := st.ToProtoUnsafe().(query.SSZObject)
require.Equal(t, true, ok, "state proto does not implement query.SSZObject")

tests := []string{
".slot",
".latest_block_header",
".validators[0].effective_balance",
"len(validators)",
}

for _, tc := range tests {
t.Run(tc, func(t *testing.T) {
proveAndVerify(t, sszObj, tc)
})
}
}

// proveAndVerify is a helper that analyzes an object, generates a Merkle proof for the given path,
// and verifies the proof against the object's root.
func proveAndVerify(t *testing.T, obj query.SSZObject, pathStr string) {
t.Helper()

info, err := query.AnalyzeObject(obj)
require.NoError(t, err)

path, err := query.ParsePath(pathStr)
require.NoError(t, err)

gi, err := query.GetGeneralizedIndexFromPath(info, path)
require.NoError(t, err)

proof, err := info.Prove(gi)
require.NoError(t, err)
require.Equal(t, int(gi), proof.Index)

root, err := obj.HashTreeRoot()
require.NoError(t, err)

ok, err := ssz.VerifyProof(root[:], proof)
require.NoError(t, err)
require.Equal(t, true, ok, "merkle proof verification failed")

require.Equal(t, 32, len(proof.Leaf))
for i, h := range proof.Hashes {
require.Equal(t, 32, len(h), "proof hash %d is not 32 bytes", i)
}
}
@@ -1,672 +0,0 @@
package query

import (
"encoding/binary"
"errors"
"fmt"
"math/bits"
"reflect"
"runtime"
"slices"
"sync"

"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
"github.com/OffchainLabs/prysm/v7/math"
fastssz "github.com/prysmaticlabs/fastssz"
)

// proofCollector collects sibling hashes and leaves needed for Merkle proofs.
//
// Multiproof-ready design:
// - requiredSiblings/requiredLeaves store which gindices we want to collect (registered before merkleization).
// - siblings/leaves store the actual collected hashes.
//
// Concurrency:
// - required* maps are read-only during merkleization.
// - siblings/leaves writes are protected by mutex.
type proofCollector struct {
sync.Mutex

// Required gindices (registered before merkleization)
requiredSiblings map[uint64]struct{}
requiredLeaves map[uint64]struct{}

// Collected hashes
siblings map[uint64][32]byte
leaves map[uint64][32]byte
}

func newProofCollector() *proofCollector {
return &proofCollector{
requiredSiblings: make(map[uint64]struct{}),
requiredLeaves: make(map[uint64]struct{}),
siblings: make(map[uint64][32]byte),
leaves: make(map[uint64][32]byte),
}
}

func (pc *proofCollector) reset() {
pc.Lock()
defer pc.Unlock()

pc.requiredSiblings = make(map[uint64]struct{})
pc.requiredLeaves = make(map[uint64]struct{})
pc.siblings = make(map[uint64][32]byte)
pc.leaves = make(map[uint64][32]byte)
}

// addTarget registers the target leaf and its required sibling nodes for proof construction.
// Registration should happen before merkleization begins.
func (pc *proofCollector) addTarget(gindex uint64) {
pc.Lock()
defer pc.Unlock()

pc.requiredLeaves[gindex] = struct{}{}

// Walk from the target leaf up to (but not including) the root (gindex=1).
// At each step, register the sibling node required to prove inclusion.
nodeGindex := gindex
for nodeGindex > 1 {
siblingGindex := nodeGindex ^ 1 // flip the last bit: left<->right sibling
pc.requiredSiblings[siblingGindex] = struct{}{}

// Move to parent
nodeGindex /= 2
}
}
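
// Illustrative worked example (not part of the original change):
// addTarget(5) registers leaf 5 and siblings {4, 3}: 5^1 = 4, then parent
// 5/2 = 2, 2^1 = 3, then parent 2/2 = 1 (the root, where the walk stops).
// TestProofCollector_AddTarget in the deleted test file below exercises
// exactly this case.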

// toProof converts the collected siblings and leaves into a fastssz.Proof structure.
// Current behavior expects a single target leaf (single proof).
func (pc *proofCollector) toProof() (*fastssz.Proof, error) {
pc.Lock()
defer pc.Unlock()

proof := &fastssz.Proof{}
if len(pc.leaves) == 0 {
return nil, errors.New("no leaves collected: add target leaves before merkleization")
}

leafGindices := make([]uint64, 0, len(pc.leaves))
for g := range pc.leaves {
leafGindices = append(leafGindices, g)
}
slices.Sort(leafGindices)

// The single proof target resides in leafGindices[0].
targetGindex := leafGindices[0]
proofIndex, err := math.Int(targetGindex)
if err != nil {
return nil, fmt.Errorf("gindex %d overflows int: %w", targetGindex, err)
}
proof.Index = proofIndex

// store the leaf
leaf := pc.leaves[targetGindex]
leafBuf := make([]byte, 32)
copy(leafBuf, leaf[:])
proof.Leaf = leafBuf

// Walk from target up to root, collecting siblings.
steps := bits.Len64(targetGindex) - 1
proof.Hashes = make([][]byte, 0, steps)

for targetGindex > 1 {
sib := targetGindex ^ 1
h, ok := pc.siblings[sib]
if !ok {
return nil, fmt.Errorf("missing sibling hash for gindex %d", sib)
}
proof.Hashes = append(proof.Hashes, h[:])
targetGindex /= 2
}

return proof, nil
}

// collectLeaf checks if the given gindex is a required leaf for the proof,
// and if so, stores the provided leaf hash in the collector.
func (pc *proofCollector) collectLeaf(gindex uint64, leaf [32]byte) {
if _, ok := pc.requiredLeaves[gindex]; !ok {
return
}
pc.Lock()
pc.leaves[gindex] = leaf
pc.Unlock()
}

// collectSibling stores the hash for a sibling node identified by gindex.
// It only stores the hash if gindex was pre-registered via addTarget (present in requiredSiblings).
// Writes to the collected siblings map are protected by the collector mutex.
func (pc *proofCollector) collectSibling(gindex uint64, hash [32]byte) {
if _, ok := pc.requiredSiblings[gindex]; !ok {
return
}
pc.Lock()
pc.siblings[gindex] = hash
pc.Unlock()
}

// Merkleizers and proof collection methods

// merkleize recursively traverses an SSZ info and computes the Merkle root of the subtree.
//
// Proof collection:
// - During traversal it calls collectLeaf/collectSibling with the SSZ generalized indices (gindices)
// of visited nodes.
// - The collector only stores hashes for gindices that were pre-registered via addTarget
// (requiredLeaves/requiredSiblings). This makes the traversal multiproof-ready: you can register
// multiple targets before calling merkleize.
//
// SSZ types handled: basic types, containers, lists, vectors, bitlists, and bitvectors.
//
// Parameters:
// - info: SSZ type metadata for the current value.
// - v: reflect.Value of the current value.
// - currentGindex: generalized index of the current subtree root.
//
// Returns:
// - [32]byte: Merkle root of the current subtree.
// - error: any error encountered during traversal/merkleization.
func (pc *proofCollector) merkleize(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
if info.sszType.isBasic() {
return pc.merkleizeBasicType(info.sszType, v, currentGindex)
}
switch info.sszType {
case Container:
return pc.merkleizeContainer(info, v, currentGindex)
case List:
return pc.merkleizeList(info, v, currentGindex)
case Vector:
return pc.merkleizeVector(info, v, currentGindex)
case Bitlist:
return pc.merkleizeBitlist(info, v, currentGindex)
case Bitvector:
return pc.merkleizeBitvector(info, v, currentGindex)
default:
return [32]byte{}, fmt.Errorf("unsupported SSZ type: %v", info.sszType)
}
}

// merkleizeBasicType serializes a basic SSZ value into a 32-byte leaf chunk (little-endian, zero-padded).
//
// Proof collection:
// - It calls collectLeaf(currentGindex, leaf) and stores the leaf if currentGindex was pre-registered via addTarget.
//
// Parameters:
// - t: the SSZType (basic).
// - v: the reflect.Value of the basic value.
// - currentGindex: the generalized index (gindex) of this leaf.
//
// Returns:
// - [32]byte: the 32-byte SSZ leaf chunk.
// - error: if the SSZType is not a supported basic type.
func (pc *proofCollector) merkleizeBasicType(t SSZType, v reflect.Value, currentGindex uint64) ([32]byte, error) {
var leaf [32]byte

// Serialize the value into a 32-byte chunk (little-endian, zero-padded)
switch t {
case Uint8:
leaf[0] = uint8(v.Uint())
case Uint16:
binary.LittleEndian.PutUint16(leaf[:2], uint16(v.Uint()))
case Uint32:
binary.LittleEndian.PutUint32(leaf[:4], uint32(v.Uint()))
case Uint64:
binary.LittleEndian.PutUint64(leaf[:8], v.Uint())
case Boolean:
if v.Bool() {
leaf[0] = 1
}
default:
return [32]byte{}, fmt.Errorf("unexpected basic type: %v", t)
}

pc.collectLeaf(currentGindex, leaf)

return leaf, nil
}

// merkleizeContainer computes the Merkle root of an SSZ container by:
// 1. Merkleizing each field into a 32-byte subtree root
// 2. Merkleizing the field roots into the container root (padding to the next power-of-2)
//
// Generalized indices (gindices): depth = ssz.Depth(uint64(N)) and field i has gindex = (currentGindex << depth) + uint64(i).
// Proof collection: merkleize() computes each field root, merkleizeVectorAndCollect collects required siblings, and collectLeaf stores the container root if registered.
//
// Parameters:
// - info: SSZ type metadata for the container.
// - v: reflect.Value of the container value.
// - currentGindex: generalized index (gindex) of the container root.
//
// Returns:
// - [32]byte: Merkle root of the container.
// - error: any error encountered while merkleizing fields.
func (pc *proofCollector) merkleizeContainer(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
// If the container root itself is the target, compute directly and return early.
// This avoids full subtree merkleization when we only need the root.
if _, ok := pc.requiredLeaves[currentGindex]; ok {
root, err := info.HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
pc.collectLeaf(currentGindex, root)
return root, nil
}

ci, err := info.ContainerInfo()
if err != nil {
return [32]byte{}, err
}

v = dereferencePointer(v)

// Calculate depth: how many levels from container root to field leaves
numFields := len(ci.order)
depth := ssz.Depth(uint64(numFields))

// Step 1: Compute HTR for each subtree (field)
fieldRoots := make([][32]byte, numFields)

for i, name := range ci.order {
fieldInfo := ci.fields[name]
fieldVal := v.FieldByName(fieldInfo.goFieldName)

// Field i's gindex: shift currentGindex left by depth, then add the field index
fieldGindex := currentGindex<<depth + uint64(i)

htr, err := pc.merkleize(fieldInfo.sszInfo, fieldVal, fieldGindex)
if err != nil {
return [32]byte{}, fmt.Errorf("field %s: %w", name, err)
}
fieldRoots[i] = htr
}

// Step 2: Merkleize the field hashes into the container root,
// collecting sibling hashes if target is within this subtree
root := pc.merkleizeVectorAndCollect(fieldRoots, currentGindex, uint64(depth))

return root, nil
}

// merkleizeVectorBody computes the Merkle root of the "data" subtree for vector-like SSZ types
// (vectors and the data-part of lists/bitlists).
//
// Generalized indices (gindices): depth = ssz.Depth(limit); leafBase = subtreeRootGindex << depth; element/chunk i gindex = leafBase + uint64(i).
// Proof collection: merkleize() is called for composite elements; merkleizeVectorAndCollect collects required siblings at this layer.
// Padding: merkleizeVectorAndCollect uses trie.ZeroHashes as needed.
//
// Parameters:
// - elemInfo: SSZ type metadata for the element.
// - v: reflect.Value of the vector/list data.
// - length: number of actual elements present.
// - limit: virtual leaf capacity used for padding/Depth (fixed length for vectors, limit for lists).
// - subtreeRootGindex: gindex of the data subtree root.
//
// Returns:
// - [32]byte: Merkle root of the data subtree.
// - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVectorBody(elemInfo *SszInfo, v reflect.Value, length int, limit uint64, subtreeRootGindex uint64) ([32]byte, error) {
depth := uint64(ssz.Depth(limit))

var chunks [][32]byte
if elemInfo.sszType.isBasic() {
// Serialize basic elements and pack into 32-byte chunks using ssz.PackByChunk.
elemSize, err := math.Int(itemLength(elemInfo))
if err != nil {
return [32]byte{}, fmt.Errorf("element size %d overflows int: %w", itemLength(elemInfo), err)
}
serialized := make([][]byte, length)
// Single contiguous allocation for all element data
allData := make([]byte, length*elemSize)
for i := range length {
buf := allData[i*elemSize : (i+1)*elemSize]
elem := v.Index(i)
if elemInfo.sszType == Boolean && elem.Bool() {
buf[0] = 1
} else {
bytesutil.PutLittleEndian(buf, elem.Uint(), elemSize)
}
serialized[i] = buf
}
chunks, err = ssz.PackByChunk(serialized)
if err != nil {
return [32]byte{}, err
}
} else {
// Composite elements: compute each element root (no padding here; merkleizeVectorAndCollect pads).
chunks = make([][32]byte, length)

// Fall back to per-element merkleization with proper gindices for proof collection.
// Parallel execution
workerCount := min(runtime.GOMAXPROCS(0), length)

jobs := make(chan int, workerCount*16)
errCh := make(chan error, 1) // only need the first error
stopCh := make(chan struct{})
var stopOnce sync.Once
var wg sync.WaitGroup

worker := func() {
defer wg.Done()
for idx := range jobs {
select {
case <-stopCh:
return
default:
}

elemGindex := subtreeRootGindex<<depth + uint64(idx)
htr, err := pc.merkleize(elemInfo, v.Index(idx), elemGindex)
if err != nil {
stopOnce.Do(func() { close(stopCh) })
select {
case errCh <- fmt.Errorf("index %d: %w", idx, err):
default:
}
return
}
chunks[idx] = htr
}
}

wg.Add(workerCount)
for range workerCount {
go worker()
}

// Enqueue jobs; stop early if any worker reports an error.
enqueue:
for i := range length {
select {
case <-stopCh:
break enqueue
case jobs <- i:
}
}
close(jobs)

wg.Wait()

select {
case err := <-errCh:
return [32]byte{}, err
default:
}
}

root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, depth)
return root, nil
}

// merkleizeVector computes the Merkle root of an SSZ vector (fixed-length).
//
// Generalized indices (gindices): currentGindex is the gindex of the vector root; element/chunk gindices are derived
// inside merkleizeVectorBody using leafBase = currentGindex << ssz.Depth(leaves).
//
// Proof collection: merkleizeVectorBody performs element/chunk merkleization and collects required siblings at the
// vector layer; collectLeaf stores the vector root if currentGindex was registered via addTarget.
//
// Parameters:
// - info: SSZ type metadata for the vector.
// - v: reflect.Value of the vector value.
// - currentGindex: generalized index (gindex) of the vector root.
//
// Returns:
// - [32]byte: Merkle root of the vector.
// - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
vi, err := info.VectorInfo()
if err != nil {
return [32]byte{}, err
}

length, err := math.Int(vi.Length())
if err != nil {
return [32]byte{}, fmt.Errorf("vector length %d overflows int: %w", vi.Length(), err)
}
elemInfo := vi.element

// Determine the virtual leaf capacity for the vector.
leaves, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}

root, err := pc.merkleizeVectorBody(elemInfo, v, length, leaves, currentGindex)
if err != nil {
return [32]byte{}, err
}

// If the vector root itself is the target
pc.collectLeaf(currentGindex, root)

return root, nil
}

// merkleizeList computes the Merkle root of an SSZ list by merkleizing its data subtree and mixing in the length.
//
// Generalized indices (gindices): dataRoot is the left child of the list root (dataRootGindex = currentGindex*2); the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeVectorBody computes the data root (collecting required siblings in the data subtree), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the list root if registered.
//
// Parameters:
// - info: SSZ type metadata for the list.
// - v: reflect.Value of the list value.
// - currentGindex: generalized index (gindex) of the list root.
//
// Returns:
// - [32]byte: Merkle root of the list.
// - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeList(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
li, err := info.ListInfo()
if err != nil {
return [32]byte{}, err
}

length := v.Len()
elemInfo := li.element

chunks := make([][32]byte, 2)
// Compute the length hash (little-endian uint256)
binary.LittleEndian.PutUint64(chunks[1][:8], uint64(length))

// Data subtree root is the left child of the list root.
dataRootGindex := currentGindex * 2

// Compute virtual leaf capacity for the data subtree.
leaves, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}

chunks[0], err = pc.merkleizeVectorBody(elemInfo, v, length, leaves, dataRootGindex)
if err != nil {
return [32]byte{}, err
}

// Handle the length mixin level (and proof bookkeeping at this level).
// Compute the final list root: hash(dataRoot || lengthHash)
root := pc.mixinLengthAndCollect(currentGindex, chunks)

// If the list root itself is the target
pc.collectLeaf(currentGindex, root)

return root, nil
}

// merkleizeBitvectorBody computes the Merkle root of a bitvector-like byte sequence by packing it into 32-byte chunks
// and merkleizing those chunks as a fixed-capacity vector (padding with trie.ZeroHashes as needed).
//
// Generalized indices (gindices): depth = ssz.Depth(chunkLimit); leafBase = subtreeRootGindex << depth; chunk i uses gindex = leafBase + uint64(i).
// Proof collection: merkleizeVectorAndCollect collects required sibling hashes at the chunk-merkleization layer.
//
// Parameters:
// - data: raw byte sequence representing the bitvector payload.
// - chunkLimit: fixed/limit number of 32-byte chunks (used for padding/Depth).
// - subtreeRootGindex: gindex of the bitvector data subtree root.
//
// Returns:
// - [32]byte: Merkle root of the bitvector data subtree.
// - error: any error encountered while packing data into chunks.
func (pc *proofCollector) merkleizeBitvectorBody(data []byte, chunkLimit uint64, subtreeRootGindex uint64) ([32]byte, error) {
depth := ssz.Depth(chunkLimit)
chunks, err := ssz.PackByChunk([][]byte{data})
if err != nil {
return [32]byte{}, err
}
root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, uint64(depth))
return root, nil
}

// merkleizeBitvector computes the Merkle root of a fixed-length SSZ bitvector and collects proof nodes for targets.
//
// Parameters:
// - info: SSZ type metadata for the bitvector.
// - v: reflect.Value of the bitvector value.
// - currentGindex: generalized index (gindex) of the bitvector root.
//
// Returns:
// - [32]byte: Merkle root of the bitvector.
// - error: any error encountered during packing or merkleization.
func (pc *proofCollector) merkleizeBitvector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
bitvectorBytes := v.Bytes()
if len(bitvectorBytes) == 0 {
return [32]byte{}, errors.New("bitvector field is uninitialized (nil or empty slice)")
}

// Compute virtual leaf capacity for the bitvector.
numChunks, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}

root, err := pc.merkleizeBitvectorBody(bitvectorBytes, numChunks, currentGindex)
if err != nil {
return [32]byte{}, err
}

pc.collectLeaf(currentGindex, root)

return root, nil
}

// merkleizeBitlist computes the Merkle root of an SSZ bitlist by merkleizing its data chunks and mixing in the bit length.
//
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeBitvectorBody computes the data root (collecting required siblings under dataRootGindex), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the bitlist root if registered.
//
// Parameters:
// - info: SSZ type metadata for the bitlist.
// - v: reflect.Value of the bitlist value.
// - currentGindex: generalized index (gindex) of the bitlist root.
//
// Returns:
// - [32]byte: Merkle root of the bitlist.
// - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeBitlist(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
bi, err := info.BitlistInfo()
if err != nil {
return [32]byte{}, err
}

bitlistBytes := v.Bytes()

// Use go-bitfield to get bytes with termination bit cleared
bl := bitfield.Bitlist(bitlistBytes)
data := bl.BytesNoTrim()

// Get the bit length from bitlistInfo
bitLength := bi.Length()

// Get the chunk limit from getChunkCount
limitChunks, err := getChunkCount(info)
if err != nil {
return [32]byte{}, err
}

chunks := make([][32]byte, 2)
// Compute the length hash (little-endian uint256)
binary.LittleEndian.PutUint64(chunks[1][:8], uint64(bitLength))

dataRootGindex := currentGindex * 2
chunks[0], err = pc.merkleizeBitvectorBody(data, limitChunks, dataRootGindex)
if err != nil {
return [32]byte{}, err
}

// Handle the length mixin level (and proof bookkeeping at this level).
root := pc.mixinLengthAndCollect(currentGindex, chunks)

pc.collectLeaf(currentGindex, root)

return root, nil
}

// merkleizeVectorAndCollect merkleizes a slice of 32-byte leaf nodes into a subtree root, padding to a virtual size of 2^depth.
//
// Generalized indices (gindices): at layer i (0-based), levelBase = subtreeGeneralizedIndex << (depth-i) and node gindex = levelBase + idx.
// Proof collection: for each layer it calls collectSibling(nodeGindex, nodeHash) and stores only those gindices registered via addTarget.
//
// Parameters:
// - elements: leaf-level hashes (may be shorter than 2^depth; padding is applied with trie.ZeroHashes).
// - subtreeGeneralizedIndex: gindex of the subtree root.
// - depth: number of merkleization layers from subtree root to leaves.
//
// Returns:
// - [32]byte: Merkle root of the subtree.
func (pc *proofCollector) merkleizeVectorAndCollect(elements [][32]byte, subtreeGeneralizedIndex uint64, depth uint64) [32]byte {
// Return zerohash at depth
if len(elements) == 0 {
return trie.ZeroHashes[depth]
}
for i := range depth {
layerLen := len(elements)
oddNodeLength := layerLen%2 == 1
if oddNodeLength {
zerohash := trie.ZeroHashes[i]
elements = append(elements, zerohash)
}

levelBaseGindex := subtreeGeneralizedIndex << (depth - i)
for idx := range elements {
gindex := levelBaseGindex + uint64(idx)
pc.collectSibling(gindex, elements[idx])
pc.collectLeaf(gindex, elements[idx])
}

elements = htr.VectorizedSha256(elements)
}
return elements[0]
}
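
// Illustrative worked example (not part of the original change): with
// subtreeGeneralizedIndex = 1 and depth = 2, layer i = 0 uses
// levelBase = 1 << 2, so the four leaves sit at gindices 4..7; after one
// hashing round, layer i = 1 uses levelBase = 1 << 1, placing the two inner
// nodes at gindices 2 and 3; the final round leaves the subtree root
// (gindex 1) in elements[0].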

// mixinLengthAndCollect computes the final mix-in root for list/bitlist values:
//
// root = hash(dataRoot, lengthHash)
//
// where chunks[0] is dataRoot and chunks[1] is the 32-byte length hash.
//
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and lengthHash is the right child (lengthHashGindex = currentGindex*2+1).
// Proof collection: it calls collectSibling/collectLeaf for both child gindices; the collector stores them only if they were registered via addTarget.
//
// Parameters:
// - currentGindex: gindex of the parent node (list/bitlist root).
// - chunks: two 32-byte nodes: [dataRoot, lengthHash].
//
// Returns:
// - [32]byte: mixed-in Merkle root.
func (pc *proofCollector) mixinLengthAndCollect(currentGindex uint64, chunks [][32]byte) [32]byte {
dataRoot, lengthHash := chunks[0], chunks[1]
dataRootGindex, lengthHashGindex := currentGindex*2, currentGindex*2+1

pc.collectSibling(dataRootGindex, dataRoot)
pc.collectSibling(lengthHashGindex, lengthHash)

pc.collectLeaf(dataRootGindex, dataRoot)
pc.collectLeaf(lengthHashGindex, lengthHash)

return ssz.MixInLength(dataRoot, lengthHash[:])
}
@@ -1,531 +0,0 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"reflect"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
sszquerypb "github.com/OffchainLabs/prysm/v7/proto/ssz_query/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestProofCollector_New(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
|
||||
require.NotNil(t, pc)
|
||||
require.Equal(t, 0, len(pc.requiredSiblings))
|
||||
require.Equal(t, 0, len(pc.requiredLeaves))
|
||||
require.Equal(t, 0, len(pc.siblings))
|
||||
require.Equal(t, 0, len(pc.leaves))
|
||||
}
|
||||
|
||||
func TestProofCollector_Reset(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.requiredSiblings[3] = struct{}{}
|
||||
pc.requiredLeaves[5] = struct{}{}
|
||||
pc.siblings[3] = [32]byte{1}
|
||||
pc.leaves[5] = [32]byte{2}
|
||||
|
||||
pc.reset()
|
||||
|
||||
require.Equal(t, 0, len(pc.requiredSiblings))
|
||||
require.Equal(t, 0, len(pc.requiredLeaves))
|
||||
require.Equal(t, 0, len(pc.siblings))
|
||||
require.Equal(t, 0, len(pc.leaves))
|
||||
}
|
||||
|
||||
func TestProofCollector_AddTarget(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(5)
|
||||
|
||||
_, hasLeaf := pc.requiredLeaves[5]
|
||||
_, hasSibling4 := pc.requiredSiblings[4]
|
||||
_, hasSibling3 := pc.requiredSiblings[3]
|
||||
_, hasSibling1 := pc.requiredSiblings[1] // GI 1 is the root
|
||||
|
||||
require.Equal(t, true, hasLeaf)
|
||||
require.Equal(t, true, hasSibling4)
|
||||
require.Equal(t, true, hasSibling3)
|
||||
require.Equal(t, false, hasSibling1)
|
||||
}
|
||||
|
||||
func TestProofCollector_ToProof(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(5)
|
||||
|
||||
leaf := [32]byte{9}
|
||||
sibling4 := [32]byte{4}
|
||||
sibling3 := [32]byte{3}
|
||||
|
||||
pc.collectLeaf(5, leaf)
|
||||
pc.collectSibling(4, sibling4)
|
||||
pc.collectSibling(3, sibling3)
|
||||
|
||||
proof, err := pc.toProof()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 5, proof.Index)
|
||||
require.DeepEqual(t, leaf[:], proof.Leaf)
|
||||
require.Equal(t, 2, len(proof.Hashes))
|
||||
require.DeepEqual(t, sibling4[:], proof.Hashes[0])
|
||||
require.DeepEqual(t, sibling3[:], proof.Hashes[1])
|
||||
}
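
// Hand-rolled check (illustrative only, not part of this change): a proof
// returned by toProof verifies by folding the leaf with each sibling hash
// while halving the generalized index until reaching the root at gindex 1.
//
//	node, gi := leaf, uint64(proof.Index)
//	for _, sib := range proof.Hashes {
//		if gi%2 == 0 {
//			node = sha256.Sum256(append(node[:], sib...)) // left child: node || sibling
//		} else {
//			node = sha256.Sum256(append(sib, node[:]...)) // right child: sibling || node
//		}
//		gi /= 2
//	}
//	// node is now the root hash at gindex 1.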
|
||||
|
||||
func TestProofCollector_ToProof_NoLeaves(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
_, err := pc.toProof()
|
||||
require.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestProofCollector_CollectLeaf(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
leaf := [32]byte{7}
|
||||
|
||||
pc.collectLeaf(10, leaf)
|
||||
require.Equal(t, 0, len(pc.leaves))
|
||||
|
||||
pc.addTarget(10)
|
||||
pc.collectLeaf(10, leaf)
|
||||
stored, ok := pc.leaves[10]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, leaf, stored)
|
||||
}
|
||||
|
||||
func TestProofCollector_CollectSibling(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
hash := [32]byte{5}
|
||||
|
||||
pc.collectSibling(4, hash)
|
||||
require.Equal(t, 0, len(pc.siblings))
|
||||
|
||||
pc.addTarget(5)
|
||||
pc.collectSibling(4, hash)
|
||||
stored, ok := pc.siblings[4]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, hash, stored)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_BasicTypes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
sszType SSZType
|
||||
value any
|
||||
expected [32]byte
|
||||
}{
|
||||
{
|
||||
name: "uint8",
|
||||
sszType: Uint8,
|
||||
value: uint8(0x11),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
leaf[0] = 0x11
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "uint16",
|
||||
sszType: Uint16,
|
||||
value: uint16(0x2211),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
binary.LittleEndian.PutUint16(leaf[:2], 0x2211)
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "uint32",
|
||||
sszType: Uint32,
|
||||
value: uint32(0x44332211),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
binary.LittleEndian.PutUint32(leaf[:4], 0x44332211)
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "uint64",
|
||||
sszType: Uint64,
|
||||
value: uint64(0x8877665544332211),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
binary.LittleEndian.PutUint64(leaf[:8], 0x8877665544332211)
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "bool",
|
||||
sszType: Boolean,
|
||||
value: true,
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
leaf[0] = 1
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
gindex := uint64(3)
|
||||
pc.addTarget(gindex)
|
||||
|
||||
leaf, err := pc.merkleizeBasicType(tc.sszType, reflect.ValueOf(tc.value), gindex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, leaf)
|
||||
|
||||
stored, ok := pc.leaves[gindex]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, tc.expected, stored)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Container(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(1)
|
||||
|
||||
root, err := pc.merkleize(info, reflect.ValueOf(container), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err := container.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
|
||||
stored, ok := pc.leaves[1]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, expected, stored)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Vector(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["vector_field"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeVector(field.sszInfo, reflect.ValueOf(container.VectorField), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
serialized := make([][]byte, len(container.VectorField))
|
||||
for i, v := range container.VectorField {
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, v)
|
||||
serialized[i] = buf
|
||||
}
|
||||
chunks, err := ssz.PackByChunk(serialized)
|
||||
require.NoError(t, err)
|
||||
limit, err := getChunkCount(field.sszInfo)
|
||||
require.NoError(t, err)
|
||||
expected := ssz.MerkleizeVector(chunks, limit)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_List(t *testing.T) {
|
||||
list := []*sszquerypb.FixedNestedContainer{
|
||||
makeFixedNestedContainer(1),
|
||||
makeFixedNestedContainer(2),
|
||||
}
|
||||
container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["field_list_container"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
listInfo, err := field.sszInfo.ListInfo()
|
||||
require.NoError(t, err)
|
||||
expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Bitvector(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["bitvector64_field"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeBitvector(field.sszInfo, reflect.ValueOf(container.Bitvector64Field), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err := ssz.MerkleizeByteSliceSSZ([]byte(container.Bitvector64Field))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Bitlist(t *testing.T) {
|
||||
bitlist := bitfield.NewBitlist(16)
|
||||
bitlist.SetBitAt(3, true)
|
||||
bitlist.SetBitAt(8, true)
|
||||
|
||||
container := makeVariableTestContainer(nil, bitlist)
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["bitlist_field"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeBitlist(field.sszInfo, reflect.ValueOf(container.BitlistField), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
bitlistInfo, err := field.sszInfo.BitlistInfo()
|
||||
require.NoError(t, err)
|
||||
expected, err := ssz.BitlistRoot(bitfield.Bitlist(bitlist), bitlistInfo.Limit())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_MerkleizeVectorBody_Basic(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["vector_field"]
|
||||
vectorInfo, err := field.sszInfo.VectorInfo()
|
||||
require.NoError(t, err)
|
||||
length := len(container.VectorField)
|
||||
limit, err := getChunkCount(field.sszInfo)
|
||||
require.NoError(t, err)
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeVectorBody(vectorInfo.element, reflect.ValueOf(container.VectorField), length, limit, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
serialized := make([][]byte, len(container.VectorField))
|
||||
for i, v := range container.VectorField {
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, v)
|
||||
serialized[i] = buf
|
||||
}
|
||||
chunks, err := ssz.PackByChunk(serialized)
|
||||
require.NoError(t, err)
|
||||
expected := ssz.MerkleizeVector(chunks, limit)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_MerkleizeVectorAndCollect(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(6)
|
||||
|
||||
elements := [][32]byte{{1}, {2}}
|
||||
expected := ssz.MerkleizeVector(slices.Clone(elements), 2)
|
||||
root := pc.merkleizeVectorAndCollect(elements, 3, 1)
|
||||
|
||||
storedLeaf, hasLeaf := pc.leaves[6]
|
||||
storedSibling, hasSibling := pc.siblings[7]
|
||||
|
||||
require.Equal(t, true, hasLeaf)
|
||||
require.Equal(t, true, hasSibling)
|
||||
require.Equal(t, elements[0], storedLeaf)
|
||||
require.Equal(t, elements[1], storedSibling)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_MixinLengthAndCollect(t *testing.T) {
|
||||
list := []*sszquerypb.FixedNestedContainer{
|
||||
makeFixedNestedContainer(1),
|
||||
makeFixedNestedContainer(2),
|
||||
}
|
||||
container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["field_list_container"]
|
||||
|
||||
// Target gindex 2 (data root) - sibling at gindex 3 (length hash) should be collected
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(2)
|
||||
root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
listInfo, err := field.sszInfo.ListInfo()
|
||||
require.NoError(t, err)
|
||||
expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
|
||||
// Verify data root is collected as leaf at gindex 2
|
||||
storedLeaf, hasLeaf := pc.leaves[2]
|
||||
require.Equal(t, true, hasLeaf)
|
||||
|
||||
// Verify length hash is collected as sibling at gindex 3
|
||||
storedSibling, hasSibling := pc.siblings[3]
|
||||
require.Equal(t, true, hasSibling)
|
||||
|
||||
// Verify the root is hash(dataRoot || lengthHash)
|
||||
expectedBuf := append(storedLeaf[:], storedSibling[:]...)
|
||||
expectedRoot := sha256.Sum256(expectedBuf)
|
||||
require.Equal(t, expectedRoot, root)
|
||||
}
|
||||
|
||||
func BenchmarkOptimizedValidatorRoots(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 1000)
|
||||
for i := range validators {
|
||||
validators[i] = makeTestValidator(i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for b.Loop() {
|
||||
_, err := stateutil.OptimizedValidatorRoots(validators)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkProofCollectorMerkleize(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 1000)
|
||||
for i := range validators {
|
||||
validators[i] = makeTestValidator(i)
|
||||
}
|
||||
|
||||
info, err := AnalyzeObject(validators[0])
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for b.Loop() {
|
||||
for _, val := range validators {
|
||||
pc := newProofCollector()
|
||||
v := reflect.ValueOf(val)
|
||||
_, err := pc.merkleize(info, v, 1)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeTestValidator(i int) *ethpb.Validator {
|
||||
pubkey := make([]byte, 48)
|
||||
for j := range pubkey {
|
||||
pubkey[j] = byte(i + j)
|
||||
}
|
||||
|
||||
withdrawalCredentials := make([]byte, 32)
|
||||
for j := range withdrawalCredentials {
|
||||
withdrawalCredentials[j] = byte(255 - ((i + j) % 256))
|
||||
}
|
||||
|
||||
return ðpb.Validator{
|
||||
PublicKey: pubkey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
EffectiveBalance: uint64(32000000000 + i),
|
||||
Slashed: i%2 == 0,
|
||||
ActivationEligibilityEpoch: primitives.Epoch(i),
|
||||
ActivationEpoch: primitives.Epoch(i + 1),
|
||||
ExitEpoch: primitives.Epoch(i + 2),
|
||||
WithdrawableEpoch: primitives.Epoch(i + 3),
|
||||
}
|
||||
}
|
||||
|
||||
func makeFixedNestedContainer(value uint64) *sszquerypb.FixedNestedContainer {
|
||||
value2 := make([]byte, 32)
|
||||
for i := range value2 {
|
||||
value2[i] = byte(i)
|
||||
}
|
||||
return &sszquerypb.FixedNestedContainer{
|
||||
Value1: value,
|
||||
Value2: value2,
|
||||
}
|
||||
}
|
||||
|
||||
func makeFixedTestContainer() *sszquerypb.FixedTestContainer {
|
||||
fieldBytes32 := make([]byte, 32)
|
||||
for i := range fieldBytes32 {
|
||||
fieldBytes32[i] = byte(i)
|
||||
}
|
||||
|
||||
vectorField := make([]uint64, 24)
|
||||
for i := range vectorField {
|
||||
vectorField[i] = uint64(i)
|
||||
}
|
||||
|
||||
rows := make([][]byte, 5)
|
||||
for i := range rows {
|
||||
row := make([]byte, 32)
|
||||
for j := range row {
|
||||
row[j] = byte(i) + byte(j)
|
||||
}
|
||||
rows[i] = row
|
||||
}
|
||||
|
||||
bitvector64 := bitfield.NewBitvector64()
|
||||
bitvector64.SetBitAt(1, true)
|
||||
bitvector512 := bitfield.NewBitvector512()
|
||||
bitvector512.SetBitAt(10, true)
|
||||
|
||||
trailing := make([]byte, 56)
|
||||
for i := range trailing {
|
||||
trailing[i] = byte(i)
|
||||
}
|
||||
|
||||
return &sszquerypb.FixedTestContainer{
|
||||
FieldUint32: 1,
|
||||
FieldUint64: 2,
|
||||
FieldBool: true,
|
||||
FieldBytes32: fieldBytes32,
|
||||
Nested: makeFixedNestedContainer(3),
|
||||
VectorField: vectorField,
|
||||
TwoDimensionBytesField: rows,
|
||||
Bitvector64Field: bitvector64,
|
||||
Bitvector512Field: bitvector512,
|
||||
TrailingField: trailing,
|
||||
}
|
||||
}
|
||||
|
||||
func makeVariableTestContainer(list []*sszquerypb.FixedNestedContainer, bitlist bitfield.Bitlist) *sszquerypb.VariableTestContainer {
|
||||
leading := make([]byte, 32)
|
||||
for i := range leading {
|
||||
leading[i] = byte(i)
|
||||
}
|
||||
trailing := make([]byte, 56)
|
||||
for i := range trailing {
|
||||
trailing[i] = byte(255 - i)
|
||||
}
|
||||
|
||||
if bitlist == nil {
|
||||
bitlist = bitfield.NewBitlist(0)
|
||||
}
|
||||
|
||||
return &sszquerypb.VariableTestContainer{
|
||||
LeadingField: leading,
|
||||
FieldListContainer: list,
|
||||
BitlistField: bitlist,
|
||||
TrailingField: trailing,
|
||||
}
|
||||
}
|
||||
@@ -389,7 +389,6 @@ func TestHashTreeRoot(t *testing.T) {
|
||||
require.NoError(t, err, "HashTreeRoot should not return an error")
|
||||
expectedHashTreeRoot, err := tt.obj.HashTreeRoot()
|
||||
require.NoError(t, err, "HashTreeRoot on original object should not return an error")
|
||||
// Verify the Merkle tree root matches with the SSZ generated HashTreeRoot
|
||||
require.Equal(t, expectedHashTreeRoot, hashTreeRoot, "HashTreeRoot from sszInfo should match original object's HashTreeRoot")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,72 +0,0 @@
# Kurtosis scripts for EIP-8025

## How to run

I slightly modified [Manu's tip](https://hackmd.io/8z4thpsyQJioaU6jj0Wazw) and added the following functions to my `~/.zshrc`:

```zsh
# Kurtosis Aliases
blog() {
    docker logs -f "$(docker ps | grep cl-"$1"-prysm-geth | awk '{print $NF}')" 2>&1
}

vlog() {
    docker logs -f "$(docker ps | grep vc-"$1"-geth-prysm | awk '{print $NF}')" 2>&1
}

dora() {
    open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/dora/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}

graf() {
    open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/grafana/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}

devnet () {
    local args_file_path="./kurtosis/default.yaml"
    if [ ! -z "$1" ]; then
        args_file_path="$1"
        echo "Using custom args-file path: $args_file_path"
    else
        echo "Using default args-file path: $args_file_path"
    fi

    kurtosis clean -a &&
    bazel build //cmd/beacon-chain:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
    docker load -i bazel-bin/cmd/beacon-chain/oci_image_tarball/tarball.tar &&
    docker tag gcr.io/offchainlabs/prysm/beacon-chain prysm-bn-custom-image &&
    bazel build //cmd/validator:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
    docker load -i bazel-bin/cmd/validator/oci_image_tarball/tarball.tar &&
    docker tag gcr.io/offchainlabs/prysm/validator prysm-vc-custom-image &&
    kurtosis run github.com/ethpandaops/ethereum-package --args-file="$args_file_path" --verbosity brief &&
    dora
}

stop() {
    kurtosis clean -a
}

dps() {
    docker ps --format "table {{.ID}}\\t{{.Image}}\\t{{.Status}}\\t{{.Names}}" -a
}
```

From the project directory, you can spin up a devnet with:

```bash
$ devnet
```

Or you can specify a network-parameter YAML file:

```bash
$ devnet ./kurtosis/proof_verify.yaml
```
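
Once the devnet is up, the log aliases defined above take the participant index; for example:

```bash
$ blog 1   # follow the logs of beacon node 1
$ vlog 1   # follow the logs of validator client 1
$ stop     # tear the devnet down
```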

### Running scripts with local images

Prysm images are built and loaded into Docker automatically by the `devnet` command, but if you want to run a script with `lighthouse`:

#### `./kurtosis/interop.yaml`

- `lighthouse:local`: Please build your own image following [Lighthouse's guide](https://lighthouse-book.sigmaprime.io/installation_docker.html?highlight=docker#building-the-docker-image) on the [`kevaundray/kw/sel-alternative`](https://github.com/kevaundray/lighthouse/tree/kw/sel-alternative/) branch.
|
||||
@@ -1,16 +0,0 @@
|
||||
participants:
|
||||
- el_type: geth
|
||||
cl_type: prysm
|
||||
cl_image: prysm-bn-custom-image
|
||||
cl_extra_params:
|
||||
- --activate-zkvm
|
||||
- --zkvm-generation-proof-types=0,1
|
||||
vc_image: prysm-vc-custom-image
|
||||
count: 4
|
||||
network_params:
|
||||
seconds_per_slot: 2
|
||||
global_log_level: debug
|
||||
snooper_enabled: false
|
||||
additional_services:
|
||||
- dora
|
||||
- prometheus_grafana
|
||||
@@ -1,38 +0,0 @@
|
||||
# 3 nodes (2 from Prysm, 1 from Lighthouse) generate proofs and
|
||||
# 1 node only verifies
|
||||
participants:
|
||||
# Prysm: Proof generating nodes (nodes 1-2)
|
||||
- el_type: geth
|
||||
el_image: ethereum/client-go:latest
|
||||
cl_type: prysm
|
||||
cl_image: prysm-bn-custom-image
|
||||
cl_extra_params:
|
||||
- --activate-zkvm
|
||||
- --zkvm-generation-proof-types=0,1
|
||||
vc_image: prysm-vc-custom-image
|
||||
count: 2
|
||||
# Lighthouse: Proof generating nodes (node 3)
|
||||
- el_type: geth
|
||||
el_image: ethereum/client-go:latest
|
||||
cl_type: lighthouse
|
||||
cl_image: lighthouse:local
|
||||
cl_extra_params:
|
||||
- --activate-zkvm
|
||||
- --zkvm-generation-proof-types=0,1
|
||||
- --target-peers=3
|
||||
count: 1
|
||||
# Prysm: Proof verifying only node (node 4)
|
||||
- el_type: dummy
|
||||
cl_type: prysm
|
||||
cl_image: prysm-bn-custom-image
|
||||
cl_extra_params:
|
||||
- --activate-zkvm
|
||||
vc_image: prysm-vc-custom-image
|
||||
count: 1
|
||||
network_params:
|
||||
seconds_per_slot: 2
|
||||
global_log_level: debug
|
||||
snooper_enabled: false
|
||||
additional_services:
|
||||
- dora
|
||||
- prometheus_grafana
|
||||
@@ -1,27 +0,0 @@
|
||||
# 3 nodes generate proofs, 1 node only verifies
|
||||
participants:
|
||||
# Proof generating nodes (nodes 1-3)
|
||||
- el_type: geth
|
||||
el_image: ethereum/client-go:latest
|
||||
cl_type: prysm
|
||||
cl_image: prysm-bn-custom-image
|
||||
cl_extra_params:
|
||||
- --activate-zkvm
|
||||
- --zkvm-generation-proof-types=0,1
|
||||
vc_image: prysm-vc-custom-image
|
||||
count: 3
|
||||
# Proof verifying only node (node 4)
|
||||
- el_type: dummy
|
||||
cl_type: prysm
|
||||
cl_image: prysm-bn-custom-image
|
||||
cl_extra_params:
|
||||
- --activate-zkvm
|
||||
vc_image: prysm-vc-custom-image
|
||||
count: 1
|
||||
network_params:
|
||||
seconds_per_slot: 2
|
||||
global_log_level: debug
|
||||
snooper_enabled: false
|
||||
additional_services:
|
||||
- dora
|
||||
- prometheus_grafana
|
||||
@@ -26,21 +26,21 @@ func TestLifecycle(t *testing.T) {
|
||||
port := 1000 + rand.Intn(1000)
|
||||
prometheusService := NewService(t.Context(), fmt.Sprintf(":%d", port), nil)
|
||||
prometheusService.Start()
|
||||
// Actively wait until the service responds on /metrics (faster and less flaky than a fixed sleep)
|
||||
deadline := time.Now().Add(3 * time.Second)
|
||||
for {
|
||||
if time.Now().After(deadline) {
|
||||
t.Fatalf("metrics endpoint not ready within timeout")
|
||||
}
|
||||
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
|
||||
if err == nil {
|
||||
_ = resp.Body.Close()
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
break
|
||||
}
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Query the service to ensure it really started.
|
||||
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
|
||||
@@ -49,18 +49,18 @@ func TestLifecycle(t *testing.T) {
|
||||
|
||||
err = prometheusService.Stop()
|
||||
require.NoError(t, err)
|
||||
// Actively wait until the service stops responding on /metrics
|
||||
deadline = time.Now().Add(3 * time.Second)
|
||||
for {
|
||||
if time.Now().After(deadline) {
|
||||
t.Fatalf("metrics endpoint still reachable after timeout")
|
||||
}
|
||||
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Query the service to ensure it really stopped.
|
||||
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
|
||||
|
||||
@@ -371,11 +371,6 @@ go_library(
|
||||
"beacon_block.go",
|
||||
"cloners.go",
|
||||
"eip_7521.go",
|
||||
"execution_proof.go",
|
||||
# NOTE: ExecutionProof includes an alias type of uint8,
|
||||
# which is not supported by fastssz sszgen.
|
||||
# Temporarily managed manually.
|
||||
"execution_proof.ssz.go",
|
||||
"gloas.go",
|
||||
"log.go",
|
||||
"sync_committee_mainnet.go",
|
||||
@@ -432,7 +427,6 @@ ssz_proto_files(
|
||||
"beacon_state.proto",
|
||||
"blobs.proto",
|
||||
"data_columns.proto",
|
||||
"execution_proof.proto",
|
||||
"gloas.proto",
|
||||
"light_client.proto",
|
||||
"sync_committee.proto",
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
package eth
|
||||
|
||||
import "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
|
||||
// Copy --
|
||||
func (e *ExecutionProof) Copy() *ExecutionProof {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ExecutionProof{
|
||||
ProofId: e.ProofId,
|
||||
Slot: e.Slot,
|
||||
BlockHash: bytesutil.SafeCopyBytes(e.BlockHash),
|
||||
BlockRoot: bytesutil.SafeCopyBytes(e.BlockRoot),
|
||||
ProofData: bytesutil.SafeCopyBytes(e.ProofData),
|
||||
}
|
||||
}
|
||||
proto/prysm/v1alpha1/execution_proof.pb.go (generated, 268 lines)
@@ -1,268 +0,0 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.3
|
||||
// protoc v3.21.7
|
||||
// source: proto/prysm/v1alpha1/execution_proof.proto
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type ExecutionProof struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
ProofId github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,1,opt,name=proof_id,json=proofId,proto3" json:"proof_id,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"`
|
||||
Slot github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
|
||||
BlockHash []byte `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty" ssz-size:"32"`
|
||||
BlockRoot []byte `protobuf:"bytes,4,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
|
||||
ProofData []byte `protobuf:"bytes,5,opt,name=proof_data,json=proofData,proto3" json:"proof_data,omitempty" ssz-max:"1048576"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) Reset() {
|
||||
*x = ExecutionProof{}
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExecutionProof) ProtoMessage() {}
|
||||
|
||||
func (x *ExecutionProof) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExecutionProof.ProtoReflect.Descriptor instead.
|
||||
func (*ExecutionProof) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetProofId() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
|
||||
if x != nil {
|
||||
return x.ProofId
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(0)
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetSlot() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot {
|
||||
if x != nil {
|
||||
return x.Slot
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(0)
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetBlockHash() []byte {
|
||||
if x != nil {
|
||||
return x.BlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetBlockRoot() []byte {
|
||||
if x != nil {
|
||||
return x.BlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetProofData() []byte {
|
||||
if x != nil {
|
||||
return x.ProofData
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExecutionProofsByRootRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
|
||||
CountNeeded uint64 `protobuf:"varint,2,opt,name=count_needed,json=countNeeded,proto3" json:"count_needed,omitempty"`
|
||||
AlreadyHave []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,3,rep,packed,name=already_have,json=alreadyHave,proto3" json:"already_have,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId" ssz-max:"8"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) Reset() {
|
||||
*x = ExecutionProofsByRootRequest{}
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExecutionProofsByRootRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExecutionProofsByRootRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ExecutionProofsByRootRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) GetBlockRoot() []byte {
|
||||
if x != nil {
|
||||
return x.BlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) GetCountNeeded() uint64 {
|
||||
if x != nil {
|
||||
return x.CountNeeded
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) GetAlreadyHave() []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
|
||||
if x != nil {
|
||||
return x.AlreadyHave
|
||||
}
|
||||
return []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(nil)
|
||||
}
|
||||
|
||||
var File_proto_prysm_v1alpha1_execution_proof_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = []byte{
|
||||
0x0a, 0x2a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
|
||||
0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65,
|
||||
0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
|
||||
0x6f, 0x6f, 0x66, 0x12, 0x6b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x50, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64,
|
||||
0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44,
|
||||
0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
|
||||
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
|
||||
0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
|
||||
0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
|
||||
0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73,
|
||||
0x68, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6f,
|
||||
0x66, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0b, 0x92, 0xb5,
|
||||
0x18, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66,
|
||||
0x44, 0x61, 0x74, 0x61, 0x22, 0xe2, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72,
|
||||
0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
|
||||
0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, 0x0a, 0x0c,
|
||||
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12,
|
||||
0x78, 0x0a, 0x0c, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x68, 0x61, 0x76, 0x65, 0x18,
|
||||
0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x55, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x92, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x0b, 0x61, 0x6c,
|
||||
0x72, 0x65, 0x61, 0x64, 0x79, 0x48, 0x61, 0x76, 0x65, 0x42, 0x9d, 0x01, 0x0a, 0x19, 0x6f, 0x72,
|
||||
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
|
||||
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
|
||||
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65,
|
||||
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
|
||||
0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68,
|
||||
0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce sync.Once
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP() []byte {
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce.Do(func() {
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData)
|
||||
})
|
||||
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = []any{
|
||||
(*ExecutionProof)(nil), // 0: ethereum.eth.v1alpha1.ExecutionProof
|
||||
(*ExecutionProofsByRootRequest)(nil), // 1: ethereum.eth.v1alpha1.ExecutionProofsByRootRequest
|
||||
}
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_proto_prysm_v1alpha1_execution_proof_proto_init() }
|
||||
func file_proto_prysm_v1alpha1_execution_proof_proto_init() {
|
||||
if File_proto_prysm_v1alpha1_execution_proof_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_proto_prysm_v1alpha1_execution_proof_proto_goTypes,
|
||||
DependencyIndexes: file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs,
|
||||
MessageInfos: file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes,
|
||||
}.Build()
|
||||
File_proto_prysm_v1alpha1_execution_proof_proto = out.File
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = nil
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = nil
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = nil
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package ethereum.eth.v1alpha1;
|
||||
|
||||
import "proto/eth/ext/options.proto";
|
||||
|
||||
option csharp_namespace = "Ethereum.Eth.v1alpha1";
|
||||
option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "ExecutionProofProto";
|
||||
option java_package = "org.ethereum.eth.v1alpha1";
|
||||
option php_namespace = "Ethereum\\Eth\\v1alpha1";
|
||||
|
||||
message ExecutionProof {
|
||||
// Which proof type (zkVM+EL combination) this proof belongs to
|
||||
// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc.
|
||||
uint64 proof_id = 1 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
|
||||
];
|
||||
|
||||
// The slot of the beacon block this proof validates
|
||||
uint64 slot = 2 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
|
||||
];
|
||||
|
||||
// The block hash of the execution payload this proof validates
|
||||
bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
|
||||
// The root of the beacon block that carries the execution payload
// this proof attests to.
|
||||
bytes block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
|
||||
// The actual proof data
|
||||
bytes proof_data = 5 [ (ethereum.eth.ext.ssz_max) = "1048576" ];
|
||||
}
|
||||
|
||||
message ExecutionProofsByRootRequest {
|
||||
// The block root we need proofs for
|
||||
bytes block_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
|
||||
// The number of proofs needed
|
||||
uint64 count_needed = 2;
|
||||
|
||||
// We already have these proof IDs, so don't send them again
|
||||
repeated uint64 already_have = 3 [
|
||||
(ethereum.eth.ext.ssz_max) = "8",
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
|
||||
];
|
||||
}
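
As a rough illustration (a sketch, not code from this change) of how these messages are used together with the manually maintained SSZ codec below: construct a proof, SSZ-marshal it, and round-trip it.

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	proof := &eth.ExecutionProof{
		ProofId:   primitives.ExecutionProofId(0), // 0 = SP1+Reth in the examples above
		Slot:      primitives.Slot(123),
		BlockHash: make([]byte, 32), // ssz_size = 32: must be exactly 32 bytes
		BlockRoot: make([]byte, 32),
		ProofData: []byte{0x01, 0x02}, // variable length, up to ssz_max = 1048576 bytes
	}
	enc, err := proof.MarshalSSZ()
	if err != nil {
		panic(err)
	}
	decoded := &eth.ExecutionProof{}
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped proof for slot %d (%d bytes)\n", decoded.Slot, len(enc))
}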
|
||||
@@ -1,300 +0,0 @@
|
||||
// NOTE: This file is auto-generated by sszgen, but modified manually
|
||||
// to handle the alias type ExecutionProofId which is based on uint8.
|
||||
package eth
|
||||
|
||||
import (
|
||||
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
)
|
||||
|
||||
// MarshalSSZ ssz marshals the ExecutionProof object
|
||||
func (e *ExecutionProof) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(e)
|
||||
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the ExecutionProof object to a target array
|
||||
func (e *ExecutionProof) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = buf
|
||||
offset := int(77)
|
||||
|
||||
// Field (0) 'ProofId'
|
||||
dst = ssz.MarshalUint8(dst, uint8(e.ProofId))
|
||||
|
||||
// Field (1) 'Slot'
|
||||
dst = ssz.MarshalUint64(dst, uint64(e.Slot))
|
||||
|
||||
// Field (2) 'BlockHash'
|
||||
if size := len(e.BlockHash); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
|
||||
return
|
||||
}
|
||||
dst = append(dst, e.BlockHash...)
|
||||
|
||||
// Field (3) 'BlockRoot'
|
||||
if size := len(e.BlockRoot); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
|
||||
return
|
||||
}
|
||||
dst = append(dst, e.BlockRoot...)
|
||||
|
||||
// Offset (4) 'ProofData'
|
||||
dst = ssz.WriteOffset(dst, offset)
|
||||
offset += len(e.ProofData)
|
||||
|
||||
// Field (4) 'ProofData'
|
||||
if size := len(e.ProofData); size > 1048576 {
|
||||
err = ssz.ErrBytesLengthFn("--.ProofData", size, 1048576)
|
||||
return
|
||||
}
|
||||
dst = append(dst, e.ProofData...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the ExecutionProof object
|
||||
func (e *ExecutionProof) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size < 77 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
tail := buf
|
||||
var o4 uint64
|
||||
|
||||
// Field (0) 'ProofId'
|
||||
e.ProofId = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[0:1]))
|
||||
|
||||
// Field (1) 'Slot'
|
||||
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[1:9]))
|
||||
|
||||
// Field (2) 'BlockHash'
|
||||
if cap(e.BlockHash) == 0 {
|
||||
e.BlockHash = make([]byte, 0, len(buf[9:41]))
|
||||
}
|
||||
e.BlockHash = append(e.BlockHash, buf[9:41]...)
|
||||
|
||||
// Field (3) 'BlockRoot'
|
||||
if cap(e.BlockRoot) == 0 {
|
||||
e.BlockRoot = make([]byte, 0, len(buf[41:73]))
|
||||
}
|
||||
e.BlockRoot = append(e.BlockRoot, buf[41:73]...)
|
||||
|
||||
// Offset (4) 'ProofData'
|
||||
if o4 = ssz.ReadOffset(buf[73:77]); o4 > size {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o4 != 77 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
// Field (4) 'ProofData'
|
||||
{
|
||||
buf = tail[o4:]
|
||||
if len(buf) > 1048576 {
|
||||
return ssz.ErrBytesLength
|
||||
}
|
||||
if cap(e.ProofData) == 0 {
|
||||
e.ProofData = make([]byte, 0, len(buf))
|
||||
}
|
||||
e.ProofData = append(e.ProofData, buf...)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProof object
|
||||
func (e *ExecutionProof) SizeSSZ() (size int) {
|
||||
size = 77
|
||||
|
||||
// Field (4) 'ProofData'
|
||||
size += len(e.ProofData)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the ExecutionProof object
|
||||
func (e *ExecutionProof) HashTreeRoot() ([32]byte, error) {
|
||||
return ssz.HashWithDefaultHasher(e)
|
||||
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the ExecutionProof object with a hasher
|
||||
func (e *ExecutionProof) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
indx := hh.Index()
|
||||
|
||||
// Field (0) 'ProofId'
|
||||
hh.PutUint8(uint8(e.ProofId))
|
||||
|
||||
// Field (1) 'Slot'
|
||||
hh.PutUint64(uint64(e.Slot))
|
||||
|
||||
// Field (2) 'BlockHash'
|
||||
if size := len(e.BlockHash); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.BlockHash)
|
||||
|
||||
// Field (3) 'BlockRoot'
|
||||
if size := len(e.BlockRoot); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.BlockRoot)
|
||||
|
||||
// Field (4) 'ProofData'
|
||||
{
|
||||
elemIndx := hh.Index()
|
||||
byteLen := uint64(len(e.ProofData))
|
||||
if byteLen > 1048576 {
|
||||
err = ssz.ErrIncorrectListSize
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.ProofData)
|
||||
hh.MerkleizeWithMixin(elemIndx, byteLen, (1048576+31)/32)
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalSSZ ssz marshals the ExecutionProofsByRootRequest object
|
||||
func (e *ExecutionProofsByRootRequest) MarshalSSZ() ([]byte, error) {
|
||||
return ssz.MarshalSSZ(e)
|
||||
}
|
||||
|
||||
// MarshalSSZTo ssz marshals the ExecutionProofsByRootRequest object to a target array
|
||||
func (e *ExecutionProofsByRootRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = buf
|
||||
offset := int(44)
|
||||
|
||||
// Field (0) 'BlockRoot'
|
||||
if size := len(e.BlockRoot); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
|
||||
return
|
||||
}
|
||||
dst = append(dst, e.BlockRoot...)
|
||||
|
||||
// Field (1) 'CountNeeded'
|
||||
dst = ssz.MarshalUint64(dst, e.CountNeeded)
|
||||
|
||||
// Offset (2) 'AlreadyHave'
|
||||
dst = ssz.WriteOffset(dst, offset)
|
||||
offset += len(e.AlreadyHave) * 1
|
||||
|
||||
// Field (2) 'AlreadyHave'
|
||||
if size := len(e.AlreadyHave); size > 8 {
|
||||
err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
|
||||
return
|
||||
}
|
||||
for ii := 0; ii < len(e.AlreadyHave); ii++ {
|
||||
dst = ssz.MarshalUint8(dst, uint8(e.AlreadyHave[ii]))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalSSZ ssz unmarshals the ExecutionProofsByRootRequest object
|
||||
func (e *ExecutionProofsByRootRequest) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size < 44 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
tail := buf
|
||||
var o2 uint64
|
||||
|
||||
// Field (0) 'BlockRoot'
|
||||
if cap(e.BlockRoot) == 0 {
|
||||
e.BlockRoot = make([]byte, 0, len(buf[0:32]))
|
||||
}
|
||||
e.BlockRoot = append(e.BlockRoot, buf[0:32]...)
|
||||
|
||||
// Field (1) 'CountNeeded'
|
||||
e.CountNeeded = ssz.UnmarshallUint64(buf[32:40])
|
||||
|
||||
// Offset (2) 'AlreadyHave'
|
||||
if o2 = ssz.ReadOffset(buf[40:44]); o2 > size {
|
||||
return ssz.ErrOffset
|
||||
}
|
||||
|
||||
if o2 != 44 {
|
||||
return ssz.ErrInvalidVariableOffset
|
||||
}
|
||||
|
||||
// Field (2) 'AlreadyHave'
|
||||
{
|
||||
buf = tail[o2:]
|
||||
num, err := ssz.DivideInt2(len(buf), 1, 8)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// `primitives.ExecutionProofId` is an alias of `uint8`,
|
||||
// but we need to handle the conversion manually here
|
||||
// to call `ssz.ExtendUint8`.
|
||||
alreadyHave := make([]uint8, len(e.AlreadyHave))
|
||||
for i, v := range e.AlreadyHave {
|
||||
alreadyHave[i] = uint8(v)
|
||||
}
|
||||
alreadyHave = ssz.ExtendUint8(alreadyHave, num)
|
||||
alreadyHave2 := make([]github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId, len(alreadyHave))
|
||||
for i, v := range alreadyHave {
|
||||
alreadyHave2[i] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(v)
|
||||
}
|
||||
e.AlreadyHave = alreadyHave2
|
||||
for ii := range num {
|
||||
e.AlreadyHave[ii] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[ii*1 : (ii+1)*1]))
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProofsByRootRequest object
|
||||
func (e *ExecutionProofsByRootRequest) SizeSSZ() (size int) {
|
||||
size = 44
|
||||
|
||||
// Field (2) 'AlreadyHave'
|
||||
size += len(e.AlreadyHave) * 1
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// HashTreeRoot ssz hashes the ExecutionProofsByRootRequest object
|
||||
func (e *ExecutionProofsByRootRequest) HashTreeRoot() ([32]byte, error) {
|
||||
return ssz.HashWithDefaultHasher(e)
|
||||
}
|
||||
|
||||
// HashTreeRootWith ssz hashes the ExecutionProofsByRootRequest object with a hasher
|
||||
func (e *ExecutionProofsByRootRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
indx := hh.Index()
|
||||
|
||||
// Field (0) 'BlockRoot'
|
||||
if size := len(e.BlockRoot); size != 32 {
|
||||
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(e.BlockRoot)
|
||||
|
||||
// Field (1) 'CountNeeded'
|
||||
hh.PutUint64(e.CountNeeded)
|
||||
|
||||
// Field (2) 'AlreadyHave'
|
||||
{
|
||||
if size := len(e.AlreadyHave); size > 8 {
|
||||
err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
|
||||
return
|
||||
}
|
||||
subIndx := hh.Index()
|
||||
for _, i := range e.AlreadyHave {
|
||||
hh.AppendUint8(uint8(i))
|
||||
}
|
||||
hh.FillUpTo32()
|
||||
|
||||
numItems := uint64(len(e.AlreadyHave))
|
||||
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(8, numItems, 1))
|
||||
}
|
||||
|
||||
hh.Merkleize(indx)
|
||||
return
|
||||
}
|
||||
@@ -47,6 +47,7 @@ go_library(
|
||||
"@in_gopkg_yaml_v2//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
"@org_golang_x_sys//unix:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
cmdshared "github.com/OffchainLabs/prysm/v7/cmd"
|
||||
@@ -35,11 +36,12 @@ var _ e2etypes.BeaconNodeSet = (*BeaconNodeSet)(nil)
|
||||
// BeaconNodeSet represents set of beacon nodes.
|
||||
type BeaconNodeSet struct {
|
||||
e2etypes.ComponentRunner
|
||||
config *e2etypes.E2EConfig
|
||||
nodes []e2etypes.ComponentRunner
|
||||
enr string
|
||||
ids []string
|
||||
multiAddrs []string
|
||||
started chan struct{}
|
||||
}
|
||||
|
||||
// SetENR assigns ENR to the set of beacon nodes.
|
||||
@@ -74,8 +76,10 @@ func (s *BeaconNodeSet) Start(ctx context.Context) error {
|
||||
if s.config.UseFixedPeerIDs {
|
||||
for i := range nodes {
|
||||
s.ids = append(s.ids, nodes[i].(*BeaconNode).peerID)
|
||||
s.multiAddrs = append(s.multiAddrs, nodes[i].(*BeaconNode).multiAddr)
|
||||
}
|
||||
s.config.PeerIDs = s.ids
|
||||
s.config.PeerMultiAddrs = s.multiAddrs
|
||||
}
|
||||
// All nodes started, close channel, so that all services waiting on a set, can proceed.
|
||||
close(s.started)
|
||||
@@ -141,6 +145,14 @@ func (s *BeaconNodeSet) StopAtIndex(i int) error {
|
||||
return s.nodes[i].Stop()
|
||||
}
|
||||
|
||||
// RestartAtIndex restarts the component at the desired index.
|
||||
func (s *BeaconNodeSet) RestartAtIndex(ctx context.Context, i int) error {
|
||||
if i >= len(s.nodes) {
|
||||
return errors.Errorf("provided index exceeds slice size: %d >= %d", i, len(s.nodes))
|
||||
}
|
||||
return s.nodes[i].(*BeaconNode).Restart(ctx)
|
||||
}
|
||||
|
||||
// ComponentAtIndex returns the component at the provided index.
|
||||
func (s *BeaconNodeSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error) {
|
||||
if i >= len(s.nodes) {
|
||||
@@ -152,12 +164,14 @@ func (s *BeaconNodeSet) ComponentAtIndex(i int) (e2etypes.ComponentRunner, error
|
||||
// BeaconNode represents beacon node.
|
||||
type BeaconNode struct {
|
||||
e2etypes.ComponentRunner
|
||||
config *e2etypes.E2EConfig
|
||||
started chan struct{}
|
||||
index int
|
||||
enr string
|
||||
peerID string
|
||||
multiAddr string
|
||||
cmd *exec.Cmd
|
||||
args []string
|
||||
}
|
||||
|
||||
// NewBeaconNode creates and returns a beacon node.
|
||||
@@ -290,6 +304,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
|
||||
args = append(args, fmt.Sprintf("--%s=%s:%d", flags.MevRelayEndpoint.Name, "http://127.0.0.1", e2e.TestParams.Ports.Eth1ProxyPort+index))
|
||||
}
|
||||
args = append(args, config.BeaconFlags...)
|
||||
node.args = args
|
||||
|
||||
cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
|
||||
// Write stderr to log files.
|
||||
@@ -318,6 +333,18 @@ func (node *BeaconNode) Start(ctx context.Context) error {
|
||||
return fmt.Errorf("could not find peer id: %w", err)
|
||||
}
|
||||
node.peerID = peerId
|
||||
|
||||
// Extract QUIC multiaddr for Lighthouse to connect to this node.
|
||||
// Prysm logs: msg="Node started p2p server" multiAddr="/ip4/192.168.0.14/udp/4250/quic-v1/p2p/16Uiu2..."
|
||||
// We prefer QUIC over TCP as it's more reliable in E2E tests.
|
||||
multiAddr, err := helpers.FindFollowingTextInFile(stdOutFile, "multiAddr=\"/ip4/192.168.0.14/udp/")
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not find QUIC multiaddr: %w", err)
|
||||
}
|
||||
// The extracted text will be like: 4250/quic-v1/p2p/16Uiu2..."
|
||||
// We need to prepend "/ip4/192.168.0.14/udp/" and strip the trailing quote
|
||||
multiAddr = strings.TrimSuffix(multiAddr, "\"")
|
||||
node.multiAddr = "/ip4/192.168.0.14/udp/" + multiAddr
|
||||
}

    // Mark node as ready.
@@ -347,6 +374,96 @@ func (node *BeaconNode) Stop() error {
    return node.cmd.Process.Kill()
}

// Restart gracefully stops the beacon node and starts a new process.
// This is useful for testing resilience, as it allows the P2P layer to reinitialize
// and discover peers again (unlike SIGSTOP/SIGCONT, which breaks QUIC connections permanently).
func (node *BeaconNode) Restart(ctx context.Context) error {
    binaryPath, found := bazel.FindBinary("cmd/beacon-chain", "beacon-chain")
    if !found {
        return errors.New("beacon chain binary not found")
    }

    // First, continue the process if it's stopped (from PauseAtIndex).
    // A stopped process (SIGSTOP) cannot receive SIGTERM until continued.
    _ = node.cmd.Process.Signal(syscall.SIGCONT)

    if err := node.cmd.Process.Signal(syscall.SIGTERM); err != nil {
        return fmt.Errorf("failed to send SIGTERM: %w", err)
    }

    // Wait for the process to exit by polling. We can't call cmd.Wait() here because
    // the Start() method's goroutine is already waiting on the command, and calling
    // Wait() twice on the same process causes a "waitid: no child processes" error.
    // Instead, poll using Signal(0), which returns an error once the process no longer exists.
    processExited := false
    for range 100 {
        if err := node.cmd.Process.Signal(syscall.Signal(0)); err != nil {
            processExited = true
            break
        }
        time.Sleep(100 * time.Millisecond)
    }
    if !processExited {
        log.Warnf("Beacon node %d did not exit within 10 seconds after SIGTERM, proceeding with restart anyway", node.index)
    }
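Signal(0) is the null signal: the kernel runs its existence and permission checks but delivers nothing, so an error means the process is gone. Factored out, the polling loop amounts to the following sketch (helper name and parameters are illustrative; assumed imports: os, syscall, time):

// waitForExit polls p with the null signal until it disappears or the timeout
// elapses, returning true if the process exited in time. It relies on someone
// reaping the child (here, the cmd.Wait() goroutine started in Start()); an
// unreaped zombie would still answer Signal(0) successfully.
func waitForExit(p *os.Process, timeout, interval time.Duration) bool {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if err := p.Signal(syscall.Signal(0)); err != nil {
            return true
        }
        time.Sleep(interval)
    }
    return false
}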

    // Drop the ForceClearDB flag from the restart args so the restarted node
    // keeps its database rather than wiping it.
    restartArgs := make([]string, 0, len(node.args))
    for _, arg := range node.args {
        if !strings.Contains(arg, cmdshared.ForceClearDB.Name) {
            restartArgs = append(restartArgs, arg)
        }
    }

    stdOutFile, err := os.OpenFile(
        path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.BeaconNodeLogFileName, node.index)),
        os.O_APPEND|os.O_WRONLY,
        0644,
    )
    if err != nil {
        return fmt.Errorf("failed to open log file: %w", err)
    }
    defer func() {
        if err := stdOutFile.Close(); err != nil {
            log.WithError(err).Error("Failed to close stdout file")
        }
    }()

    cmd := exec.CommandContext(ctx, binaryPath, restartArgs...)
    stderr, err := os.OpenFile(
        path.Join(e2e.TestParams.LogPath, fmt.Sprintf("beacon_node_%d_stderr.log", node.index)),
        os.O_APPEND|os.O_WRONLY|os.O_CREATE,
        0644,
    )
    if err != nil {
        return fmt.Errorf("failed to open stderr file: %w", err)
    }
    cmd.Stderr = stderr

    log.Infof("Restarting beacon chain %d with flags: %s", node.index, strings.Join(restartArgs, " "))
    if err = cmd.Start(); err != nil {
        if closeErr := stderr.Close(); closeErr != nil {
            log.WithError(closeErr).Error("Failed to close stderr file")
        }
        return fmt.Errorf("failed to restart beacon node: %w", err)
    }
    // Close the parent's file handle after Start(). The child process has its own
    // copy of the file descriptor via fork/exec, so this won't affect its ability to write.
    if err := stderr.Close(); err != nil {
        log.WithError(err).Error("Failed to close stderr file")
    }
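The early close is safe because fork/exec duplicates open descriptors into the child, as the comment says. A standalone runnable sketch of that behavior (not part of the diff; file name and command are illustrative):

package main

import (
    "os"
    "os/exec"
)

func main() {
    f, err := os.Create("child.log")
    if err != nil {
        panic(err)
    }
    cmd := exec.Command("sh", "-c", "sleep 1; echo still writing")
    cmd.Stdout = f
    if err := cmd.Start(); err != nil {
        panic(err)
    }
    // The parent closes its copy immediately; the child's duplicated
    // descriptor stays valid, so "still writing" lands in child.log anyway.
    if err := f.Close(); err != nil {
        panic(err)
    }
    _ = cmd.Wait()
}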

    if err = helpers.WaitForTextInFile(stdOutFile, "Beacon chain gRPC server listening"); err != nil {
        return fmt.Errorf("beacon node %d failed to restart properly: %w", node.index, err)
    }

    node.cmd = cmd
    go func() {
        _ = cmd.Wait()
    }()

    return nil
}

// UnderlyingProcess returns the underlying OS process of the beacon node.
func (node *BeaconNode) UnderlyingProcess() *os.Process {
    return node.cmd.Process
}
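UnderlyingProcess exposes the raw *os.Process that pause-style controls (such as the PauseAtIndex referenced earlier) need for signal delivery. A hypothetical helper sketch; the name and duration are illustrative (assumed imports: syscall, time):

// suspendFor stops the node's process with SIGSTOP, waits, then resumes it with
// SIGCONT. As the Restart comment warns, QUIC connections may not survive this,
// which is why the graceful Restart path above exists.
func suspendFor(node *BeaconNode, d time.Duration) error {
    proc := node.UnderlyingProcess()
    if err := proc.Signal(syscall.SIGSTOP); err != nil {
        return err
    }
    time.Sleep(d)
    return proc.Signal(syscall.SIGCONT)
}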

Some files were not shown because too many files have changed in this diff.