Compare commits

..

69 Commits

Author SHA1 Message Date
Manu NALEPA
483baeedb3 Add proof endpoint 2026-02-12 17:10:23 +01:00
Manu NALEPA
6ad9c8dd32 Merge branch 'develop' into poc/optional-proofs 2026-01-29 14:55:10 +01:00
Manu NALEPA
6d9ef4f1e7 Add execution proof verification and related structures 2026-01-29 13:47:31 +01:00
Manu NALEPA
1899974ade Implement proof storage. 2026-01-28 15:11:56 +01:00
Manu NALEPA
ffdc4e67b8 Generate proofs when proposing a block (if the node is a proover) 2026-01-23 12:11:23 +01:00
Manu NALEPA
79e93d3faa Set msg.ValidatorData back in the validation function. 2026-01-23 09:17:52 +01:00
Manu NALEPA
66f63aee9c [WIP] simplify 2026-01-22 14:20:13 +01:00
Jun Song
698b6922f0 Use dummy for el_type when we want to launch zk attester node (#11) 2026-01-05 19:32:53 +09:00
Jun Song
ca228fca44 Merge branch 'develop' into poc/optional-proofs 2026-01-05 15:43:22 +09:00
Jun Song
4d6663b4de Implement exec proof service & pruning logics (#10)
* Initialize exec proof service

* Fix wrong condition for starting exec proof pool service
2025-12-26 18:35:08 +09:00
Jun Song
e713560a68 Add interop.yaml script with guide (#8) 2025-12-26 16:02:18 +09:00
Jun Song
4571e50609 Implement RPC for execution proofs & Fix broken unit tests (#9)
* Add ExecutionProofsByRootRequest struct with SSZ support

* Add skeleton for requesting execution proofs

* Check proof retention before sending the request

* Implement sendAndSaveExecutionProofs with skeleton SendExecutionProofsByRootRequest

* Nuke deprecated request alias

* Implement handler and sender without peer selection logic

* Add peer selection logic with zkvm entry key

* Fix broken tests

* Add TestZkvmEnabledPeers

* Fix stream read code for execution proof & Add unit test for handler

* Add sender test
2025-12-26 16:02:08 +09:00
Jun Song
175738919e Check whether proof generation is needed (#7)
* Check proof retention

* Check whether we already have requested execution proof or not
2025-12-24 15:57:53 +09:00
Jun Song
f1cbdc9fa6 Verify execution proofs received from gossip (#6) 2025-12-24 15:31:40 +09:00
Jun Song
156383c9c8 Merge branch 'develop' into poc/optional-proofs 2025-12-24 14:52:53 +09:00
Developer Uche
5ede7c8fe0 Merge pull request #5 from syjn99/fix/proof-gen-devnet
Skip DA check when node is able to generate proofs & Add some kurtosis scripts
2025-12-18 21:15:11 +01:00
Jun Song
3324c7b655 Add proof_verify devnet script 2025-12-16 01:01:48 +09:00
Jun Song
d477bcfa20 Add useful logs 2025-12-16 01:01:32 +09:00
Jun Song
38183471da Add default kurtosis script for proof gen devnet 2025-12-16 00:44:34 +09:00
Jun Song
3c3e2b42e9 Skip waiting for proof if it's proof generator node 2025-12-16 00:44:21 +09:00
Developer Uche
d496f7bfab Merge pull request #4 from syjn99/fix/zkvm-enr
Set zkVM ENR entry correctly if mode is enabled
2025-12-15 11:10:15 +01:00
Developer Uche
55e2663f82 Merge pull request #3 from syjn99/fix/optional-p2p
Add missing pieces regarding Gossip
2025-12-15 11:09:42 +01:00
Jun Song
5f0afd09c6 Add DA failure case 2025-12-10 17:37:04 +09:00
Jun Song
95fff68b11 Add waiting case for DA 2025-12-10 17:33:28 +09:00
Jun Song
d0bc0fcda8 Add happy case for execution proofs DA 2025-12-10 17:26:57 +09:00
Jun Song
8b2acd5f47 Add validate_execution_proof_test.go 2025-12-10 17:04:05 +09:00
Jun Song
fb071ebe20 Add execution proofs pool tests 2025-12-10 16:25:22 +09:00
Jun Song
a174d0cd53 Set zkVM entry correctly if mode is enabled 2025-12-10 16:16:16 +09:00
Jun Song
06655dcd1f Resolve build issues 2025-12-10 13:07:58 +09:00
Jun Song
c1dcf97c0c Fix mock exec proof pool 2025-12-10 12:55:32 +09:00
Jun Song
f596223096 Add blocking logic for DA in EIP-8025 2025-12-10 12:53:06 +09:00
Jun Song
a184afdfb4 Implement execution proof pool 2025-12-10 12:10:20 +09:00
Jun Song
056843bcae Register execution proof pool for sync/blockchain services 2025-12-10 12:00:32 +09:00
Jun Song
a587a9dd6e Add skeletons for pool and verifier logics 2025-12-10 11:53:08 +09:00
Jun Song
dde9dc3dd9 Mark proof as seen 2025-12-10 11:48:29 +09:00
Jun Song
960d666801 Add proof size validation 2025-12-10 11:44:22 +09:00
Jun Song
1468c20c54 Add basic validation logics for execution proof gossip 2025-12-09 23:19:56 +09:00
Jun Song
68d8988121 Use alias of BeaconBlockByRootsReq for ExecutionProofsByRoot 2025-12-09 22:37:37 +09:00
Jun Song
9ca5bf0119 Build issue with Bazel 2025-12-09 22:36:48 +09:00
Jun Song
bf8f494792 Use different gossip param weight 2025-12-09 12:57:21 +09:00
Jun Song
cab25267b5 Fix gossip subscriber match with BLSToExecutionChange 2025-12-09 12:49:33 +09:00
Jun Song
b9c23dae89 Run gazelle 2025-12-09 12:44:56 +09:00
developeruche
7944731ccf done with p2p sub-task save; SendExecutionProofByRootRequest, executionProofsByRootRPCHandler 2025-12-07 23:41:54 +01:00
developeruche
4d2a61a2e0 Merge branch 'poc/optional-proofs' of https://github.com/developeruche/prysm into poc/optional-proofs 2025-12-07 19:02:36 +01:00
developeruche
8708c198c9 gossip functionality ready save validation logic 2025-12-07 19:01:50 +01:00
Developer Uche
2857eeae6e Merge pull request #1 from syjn99/feat/dummy-proof-gen-service 2025-12-06 01:27:44 +01:00
Jun Song
4912c29d06 Generate proofs that are registered without checking 2025-12-04 18:22:09 +09:00
Jun Song
d520158510 Register dummy registry 2025-12-04 18:15:13 +09:00
Jun Song
c13d61a959 Add basic flow for proof generation 2025-12-04 18:09:08 +09:00
Jun Song
f5c61ebaea Print as hex string 2025-12-04 17:32:49 +09:00
Jun Song
ae3d465615 Add missing flag activation 2025-12-04 17:31:54 +09:00
Jun Song
f23210853d Subscribe to the block import event in proofgen service 2025-12-04 17:10:34 +09:00
developeruche
6dc49b41f2 change execution proof topic from subnet to global 2025-12-04 09:04:36 +01:00
developeruche
e56550af48 added execution proof to gossip topics 2025-12-02 06:23:44 +01:00
developeruche
20f617ecc9 enr zkvm config 2025-12-01 16:41:20 +01:00
developeruche
adb1de9caa moved proof_cache to beacon cache 2025-12-01 13:03:53 +01:00
Jun Song
2d9e6ad2c8 Add skeleton proof generation service 2025-11-29 21:35:56 +09:00
Jun Song
e8eb022145 Parse flag & Register in the global context 2025-11-29 21:24:37 +09:00
Jun Song
38be9400f1 Rename with underscores 2025-11-29 20:27:36 +09:00
Jun Song
b01e760e0a Make compatible with codebase 2025-11-29 20:25:52 +09:00
Jun Song
da4a8f1dd3 Add ExecutionProofId & ExecutionProof type 2025-11-29 19:36:42 +09:00
Jun Song
0dca170953 Merge branch 'develop' into poc/optional-proofs-2 2025-11-29 19:36:37 +09:00
developeruche
cd549abbfa added cli flags 2025-11-10 07:15:14 +01:00
developeruche
28a661518e lastest consensus-type, zkvm-execution-layer 2025-11-08 17:07:43 +01:00
developeruche
4ab5888c4c add registry proof gen/verification 2025-11-07 21:20:57 +01:00
developeruche
0d818bc687 add proof gen n verify interfaces 2025-11-07 15:47:35 +01:00
developeruche
0e90a0f2d8 add proof cache 2025-11-07 10:40:00 +01:00
developeruche
2de069d543 add config 2025-11-07 10:38:09 +01:00
developeruche
50e88045bb add consensus types 2025-11-05 10:13:19 +01:00
113 changed files with 4523 additions and 1468 deletions

View File

@@ -27,6 +27,7 @@ go_library(
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"receive_proof.go",
"service.go",
"setup_forkchoice.go",
"tracked_proposer.go",
@@ -49,6 +50,7 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -226,6 +227,14 @@ func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
}
}
// WithProofStorage sets the proof storage backend for the blockchain service.
// Execution proofs accepted via ReceiveProof are persisted to this storage.
func WithProofStorage(p *filesystem.ProofStorage) Option {
	return func(s *Service) error {
		s.proofStorage = p
		return nil
	}
}
// WithSyncChecker sets the sync checker for the blockchain service.
func WithSyncChecker(checker Checker) Option {
return func(s *Service) error {
@@ -266,3 +275,10 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}
// WithOperationNotifier sets the operation feed notifier on the blockchain
// service configuration.
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
	return func(s *Service) error {
		s.cfg.OperationNotifier = operationNotifier
		return nil
	}
}

View File

@@ -113,6 +113,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}
@@ -661,10 +662,17 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}
root := roBlock.Root()
blockVersion := block.Version()
root, blockVersion := roBlock.Root(), roBlock.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
if err := s.areExecutionProofsAvailable(ctx, roBlock); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}
if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}
return nil
}
if blockVersion >= version.Deneb {
@@ -674,6 +682,77 @@ func (s *Service) isDataAvailable(
return nil
}
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, roBlock consensusblocks.ROBlock) error {
	// Return early if zkVM features are disabled (no need to check for execution proofs).
	// NOTE(review): the original comment also promised an early return when proof
	// generation is enabled ("we will generate proofs ourselves"), but no such check
	// exists below — confirm whether prover nodes need to skip this wait.
	if !features.Get().EnableZkvm {
		return nil
	}
	root, slot := roBlock.Root(), roBlock.Block().Slot()
	requiredProofCount := params.BeaconConfig().MinProofsRequired
	// Shadow the package-level logger with one carrying the block context.
	log := log.WithFields(logrus.Fields{
		"root":               fmt.Sprintf("%#x", root),
		"slot":               slot,
		"requiredProofCount": requiredProofCount,
	})
	// Subscribe to newly stored execution proofs BEFORE the count check below,
	// so a proof saved between the check and the subscription cannot be missed.
	subscription, identChan := s.proofStorage.Subscribe()
	defer subscription.Unsubscribe()
	// Return early if we already have enough proofs.
	if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
		log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
		return nil
	}
	// Compute the start of the next slot, used to warn about DA checks that
	// cross over into the next slot; helpful for debugging.
	nextSlot, err := slots.StartTime(s.genesisTime, roBlock.Block().Slot()+1)
	if err != nil {
		return fmt.Errorf("start time: %w", err)
	}
	// Avoid logging if DA check is called after next slot start.
	if nextSlot.After(time.Now()) {
		// Fires once at the slot boundary; stopped on return via the deferred Stop.
		timer := time.AfterFunc(time.Until(nextSlot), func() {
			actualCount := uint64(s.proofStorage.Summary(root).Count())
			if actualCount >= requiredProofCount {
				return
			}
			log.WithField("proofsRetrieved", actualCount).Warning("Execution proofs still missing at slot end")
		})
		defer timer.Stop()
	}
	// Some proofs are missing; wait for them.
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case proofIdent := <-identChan:
			// Skip if the proof is for a different block.
			if proofIdent.BlockRoot != root {
				continue
			}
			// Return if we have enough proofs.
			if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
				log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
				return nil
			}
		}
	}
}
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
@@ -810,14 +889,7 @@ func (s *Service) areDataColumnsAvailable(
}
case <-ctx.Done():
var missingIndices any = "all"
missingIndicesCount := len(missing)
if missingIndicesCount < fieldparams.NumberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, helpers.SortedPrettySliceFromMap(missing))
}
}
}

View File

@@ -60,6 +60,12 @@ type DataColumnReceiver interface {
ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
}
// ProofReceiver interface defines the methods of chain service for receiving new
// execution proofs.
type ProofReceiver interface {
	// ReceiveProof ingests a single already-verified execution proof.
	ReceiveProof(blocks.VerifiedROSignedExecutionProof) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)

View File

@@ -0,0 +1,15 @@
package blockchain
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/pkg/errors"
)
// ReceiveProof saves an execution proof to storage. The argument type
// (VerifiedROSignedExecutionProof) indicates the proof was verified upstream;
// no additional validation is performed here.
func (s *Service) ReceiveProof(proof blocks.VerifiedROSignedExecutionProof) error {
	if err := s.proofStorage.Save([]blocks.VerifiedROSignedExecutionProof{proof}); err != nil {
		return errors.Wrap(err, "save proof")
	}
	return nil
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -64,6 +65,7 @@ type Service struct {
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
proofStorage *filesystem.ProofStorage
slasherEnabled bool
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
@@ -86,6 +88,7 @@ type config struct {
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
OperationNotifier operation.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State

View File

@@ -75,6 +75,7 @@ type ChainService struct {
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
DataColumns []blocks.VerifiedRODataColumn
Proofs []blocks.VerifiedROSignedExecutionProof
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
}
@@ -757,6 +758,12 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
return nil
}
// ReceiveProof implements the same method in chain service.
// The mock records each received proof in c.Proofs so tests can assert on it.
func (c *ChainService) ReceiveProof(proof blocks.VerifiedROSignedExecutionProof) error {
	c.Proofs = append(c.Proofs, proof)
	return nil
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -46,6 +46,9 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// ExecutionProofReceived is sent after an execution proof object has been received from gossip or RPC.
ExecutionProofReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,6 +80,11 @@ type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
	// ExecutionProof is the verified execution proof that was received.
	ExecutionProof *blocks.VerifiedROSignedExecutionProof
}
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob

View File

@@ -7,7 +7,6 @@ go_library(
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
"withdrawal.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
visibility = ["//visibility:public"],
@@ -39,7 +38,6 @@ go_test(
"payload_attestation_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
"withdrawal_test.go",
],
embed = [":go_default_library"],
deps = [

View File

@@ -114,32 +114,17 @@ func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
}
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
selected := make([]primitives.ValidatorIndex, 0, fieldparams.PTCSize)
var i uint64
for uint64(len(selected)) < fieldparams.PTCSize {
if ctx.Err() != nil {
return nil, ctx.Err()
}
for committeeIndex := primitives.CommitteeIndex(0); committeeIndex < primitives.CommitteeIndex(committeesPerSlot); committeeIndex++ {
if uint64(len(selected)) >= fieldparams.PTCSize {
break
}
committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, committeeIndex)
if err != nil {
return nil, errors.Wrapf(err, "failed to get beacon committee %d", committeeIndex)
}
selected, i, err = selectByBalanceFill(ctx, st, committee, seed, selected, i)
if err != nil {
return nil, errors.Wrapf(err, "failed to sample beacon committee %d", committeeIndex)
}
for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
if err != nil {
return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
}
out = append(out, committee...)
}
return selected, nil
return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
}
// ptcSeed computes the seed for the payload timeliness committee.
@@ -163,39 +148,33 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
func selectByBalanceFill(
ctx context.Context,
st state.ReadOnlyBeaconState,
candidates []primitives.ValidatorIndex,
seed [32]byte,
selected []primitives.ValidatorIndex,
i uint64,
) ([]primitives.ValidatorIndex, uint64, error) {
func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
if len(candidates) == 0 {
return nil, errors.New("no candidates for balance weighted selection")
}
hashFunc := hash.CustomSHA256Hasher()
// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
var buf [40]byte
copy(buf[:], seed[:])
maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
for _, idx := range candidates {
selected := make([]primitives.ValidatorIndex, 0, count)
total := uint64(len(candidates))
for i := uint64(0); uint64(len(selected)) < count; i++ {
if ctx.Err() != nil {
return nil, i, ctx.Err()
return nil, ctx.Err()
}
idx := candidates[i%total]
ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
if err != nil {
return nil, i, err
return nil, err
}
if ok {
selected = append(selected, idx)
}
if uint64(len(selected)) == fieldparams.PTCSize {
break
}
i++
}
return selected, i, nil
return selected, nil
}
// acceptByBalance determines if a validator is accepted based on its effective balance.

View File

@@ -1,105 +0,0 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)
// ProcessWithdrawals applies withdrawals to the state for Gloas.
//
// Spec v1.7.0-alpha.1 (pseudocode):
//
// def process_withdrawals(
//
// state: BeaconState,
// # [Modified in Gloas:EIP7732]
// # Removed `payload`
//
// ) -> None:
//
// # [New in Gloas:EIP7732]
// # Return early if the parent block is empty
// if not is_parent_block_full(state):
// return
//
// # Get expected withdrawals
// expected = get_expected_withdrawals(state)
//
// # Apply expected withdrawals
// apply_withdrawals(state, expected.withdrawals)
//
// # Update withdrawals fields in the state
// update_next_withdrawal_index(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_payload_expected_withdrawals(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_builder_pending_withdrawals(state, expected.processed_builder_withdrawals_count)
// update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
// # [New in Gloas:EIP7732]
// update_next_withdrawal_builder_index(state, expected.processed_builders_sweep_count)
// update_next_withdrawal_validator_index(state, expected.withdrawals)
func ProcessWithdrawals(st state.BeaconState) error {
full, err := st.IsParentBlockFull()
if err != nil {
return errors.Wrap(err, "could not get parent block full status")
}
if !full {
return nil
}
expected, err := st.ExpectedWithdrawalsGloas()
if err != nil {
return errors.Wrap(err, "could not get expected withdrawals")
}
if err := st.DecreaseWithdrawalBalances(expected.Withdrawals); err != nil {
return errors.Wrap(err, "could not decrease withdrawal balances")
}
if len(expected.Withdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(expected.Withdrawals[len(expected.Withdrawals)-1].Index + 1); err != nil {
return errors.Wrap(err, "could not set next withdrawal index")
}
}
err = st.SetPayloadExpectedWithdrawals(expected.Withdrawals)
if err != nil {
return errors.Wrap(err, "could not set payload expected withdrawals")
}
err = st.DequeueBuilderPendingWithdrawals(expected.ProcessedBuilderWithdrawalsCount)
if err != nil {
return errors.Wrap(err, "unable to dequeue builder pending withdrawals from state")
}
if err := st.DequeuePendingPartialWithdrawals(expected.ProcessedPartialWithdrawalsCount); err != nil {
return errors.Wrap(err, "unable to dequeue partial withdrawals from state")
}
err = st.SetNextWithdrawalBuilderIndex(expected.NextWithdrawalBuilderIndex)
if err != nil {
return errors.Wrap(err, "could not set next withdrawal builder index")
}
var nextValidatorIndex primitives.ValidatorIndex
if uint64(len(expected.Withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
if err != nil {
return errors.Wrap(err, "could not get next withdrawal validator index")
}
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expected.Withdrawals[len(expected.Withdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
}
}
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
return errors.Wrap(err, "could not set next withdrawal validator index")
}
return nil
}

View File

@@ -1,34 +0,0 @@
package gloas
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProcessWithdrawals_ParentBlockNotFull(t *testing.T) {
state, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{})
require.NoError(t, err)
st := &withdrawalsState{BeaconState: state}
require.NoError(t, ProcessWithdrawals(st))
require.Equal(t, false, st.expectedCalled)
}
type withdrawalsState struct {
state.BeaconState
expectedCalled bool
decreaseCalled bool
}
func (w *withdrawalsState) IsParentBlockFull() (bool, error) {
return false, nil
}
func (w *withdrawalsState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
w.expectedCalled = true
return state.ExpectedWithdrawalsGloasResult{}, nil
}

View File

@@ -7,7 +7,6 @@ go_library(
"cache.go",
"data_column.go",
"data_column_cache.go",
"doc.go",
"iteration.go",
"layout.go",
"layout_by_epoch.go",
@@ -15,6 +14,8 @@ go_library(
"log.go",
"metrics.go",
"mock.go",
"proof.go",
"proof_cache.go",
"pruner.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem",
@@ -30,6 +31,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

View File

@@ -1,104 +0,0 @@
package filesystem
// nolint:dupword
/*
Data column sidecars storage documentation
==========================================
File organisation
-----------------
- The first byte represents the version of the file structure (up to 0xff = 255).
We set it to 0x01.
Note: This is not strictly needed, but it will help a lot if, in the future,
we want to modify the file structure.
- The next 4 bytes represents the size of a SSZ encoded data column sidecar.
(See the `Computation of the maximum size of a DataColumnSidecar` section to a description
of how this value is computed).
- The next 128 bytes represent the index in the file of a given column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (from 0 to 127) represent the index of the data column.
This sentinel bit is needed to distinguish between the column with index 0 and no column.
Example: If the column with index 5 is in the 3th position in the file, then indices[5] = 0x80 + 0x03 = 0x83.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
|------------------------------------------|------------------------------------------------------------------------------------|
| Byte offset | Description |
|------------------------------------------|------------------------------------------------------------------------------------|
| 0 | version (1 byte) | sszEncodedDataColumnSidecarSize (4 bytes) | indices (128 bytes) |
|133 + 0*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 1*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 2*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
| ... | ... |
|133 + 127*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|------------------------------------------|------------------------------------------------------------------------------------|
Each file is named after the block root where the data columns were data columns are committed to.
Example: `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
Database organisation
---------------------
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by the 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
Example:
data-columns
├── 0
│   ├── 3638
│   │   ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│   │   ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│   │   ├── ...
│   │   ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│   │   └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│   └── 3639
│      ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│      ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│      ├── ...
│      ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│      └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│   ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│   ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│   ├── ...
│   ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│   └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
   ├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
   ├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
   ├── ...
   ├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
   └── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
Computation of the maximum size of a DataColumnSidecar
------------------------------------------------------
https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar
class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
- index: 2 bytes (ColumnIndex)
- `column`: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 64 (FIELD_ELEMENTS_PER_CELL) * 32 bytes (BYTES_PER_FIELD_ELEMENT) = 8,388,608 bytes
- kzg_commitments: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGCommitment) = 196,608 bytes
- kzg_proofs: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGProof) = 196,608 bytes
- signed_block_header: 2 bytes (Slot) + 2 bytes (ValidatorIndex) + 3 * 2 bytes (Root) + 96 bytes (BLSSignature) = 106 bytes
- kzg_commitments_inclusion_proof: 4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) * 32 bytes = 128 bytes
TOTAL: 8,782,060 bytes = 70,256,480 bits
log(70,256,480) / log(2) ~= 26.07
==> 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.
The maximum size of an SSZ encoded data column can be 2**32 bits = 536,879,912 bytes,
which left a room of 536,879,912 bytes - 8,782,060 bytes ~= 503 mega bytes to store the extra data needed by SSZ encoding (which is more than enough.)
*/

View File

@@ -0,0 +1,197 @@
# Filesystem storage documentation
This document describes the file formats and database organization for storing data column sidecars and execution proofs.
---
# Data column sidecars
## File organisation
- The first byte represents the version of the file structure (up to `0xff = 255`).
We set it to `0x01`.
_(Note: This is not strictly needed, but it will help a lot if, in the future, we want to modify the file structure.)_
- The next 4 bytes represents the size of a SSZ encoded data column sidecar.
(See the [Computation of the maximum size of a DataColumnSidecar](#computation-of-the-maximum-size-of-a-datacolumnsidecar) section for a description
of how this value is computed).
- The next 128 bytes represent the index in the file of a given column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (from 0 to 127) represent the index of the data column.
This sentinel bit is needed to distinguish between the column with index 0 and no column.
**Example:** If the column with index 5 is in the 3rd position in the file, then `indices[5] = 0x80 + 0x03 = 0x83`.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
### File layout
| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| sszEncodedDataColumnSidecarSize (4 bytes) \| indices (128 bytes)` |
| `133 + 0×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 1×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 2×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| ... | ... |
| `133 + 127×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
Each file is named after the block root where the data columns are committed to.
**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
## Database organisation
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
### Example directory structure
```
data-columns
├── 0
│ ├── 3638
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│ │ ├── ...
│ │ ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│ │ └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│ └── 3639
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│ ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│ ├── ...
│ ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│ └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│ ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│ ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│ ├── ...
│ ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│ └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
├── ...
├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
└── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
```
## Computation of the maximum size of a `DataColumnSidecar`
Reference: [Ethereum Consensus Specs - Fulu DAS Core](https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar)
```python
class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
```
### Size breakdown
| Field | Calculation | Size |
|-------|-------------|------|
| `index` | `ColumnIndex` | `2 bytes` |
| `column` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 64 (FIELD_ELEMENTS_PER_CELL) × 32 bytes (BYTES_PER_FIELD_ELEMENT)` | `8,388,608 bytes` |
| `kzg_commitments` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGCommitment)` | `196,608 bytes` |
| `kzg_proofs` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGProof)` | `196,608 bytes` |
| `signed_block_header` | `2 bytes (Slot) + 2 bytes (ValidatorIndex) + 3 × 2 bytes (Root) + 96 bytes (BLSSignature)` | `106 bytes` |
| `kzg_commitments_inclusion_proof` | `4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) × 32 bytes` | `128 bytes` |
**TOTAL:** `8,782,060 bytes = 70,256,480 bits`
```
log(70,256,480) / log(2) ≈ 26.07
```
**Conclusion:** 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.
The maximum size of an SSZ encoded data column can be `2³² bits = 536,870,912 bytes`,
which leaves room of `536,870,912 bytes - 8,782,060 bytes ≈ 503 megabytes` to store the extra data needed by SSZ encoding (which is more than enough).
---
# Execution proofs
## File organisation
Unlike data column sidecars (which have a fixed size per block), execution proofs have variable sizes.
To handle this, we use an offset table that stores the position and size of each proof.
- The first byte represents the version of the file structure (up to `0xff = 255`).
We set it to `0x01`.
- The next 64 bytes represent the offset table with 8 slots (one per proof type).
Each slot contains:
- 4 bytes for the offset (relative to end of header)
- 4 bytes for the size of the SSZ-encoded proof
If the size is 0, the proof is not present.
- The rest of the file contains the SSZ encoded proofs, stored contiguously.
### File layout
| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| offsetTable (64 bytes)` |
| `65 + offsetTable[0].offset` | `sszEncodedProof (offsetTable[0].size bytes)` |
| `65 + offsetTable[1].offset` | `sszEncodedProof (offsetTable[1].size bytes)` |
| ... | ... |
| `65 + offsetTable[7].offset` | `sszEncodedProof (offsetTable[7].size bytes)` |
**Header size:** 1 (version) + 64 (offset table) = **65 bytes**
### Offset table entry format
Each slot in the offset table (8 bytes per slot):
- `offset` (4 bytes, big-endian): Offset from end of header where proof data begins
- `size` (4 bytes, big-endian): Size of the SSZ-encoded proof in bytes
**Note:** Offsets are relative to the end of the header (byte 65), not the start of the file.
This maximizes the usable range of the 4-byte offset field.
### Reading a proof with `proofID=N (O(1) access)`
1. Read header (65 bytes)
2. Check slot N: if `size == 0`, proof not present
3. Seek to `(65 + offset)`, read `size` bytes, SSZ unmarshal
Each file is named after the block root.
**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
## Database Organisation
SSZ encoded execution proofs are stored following the same `by-epoch` layout as data column sidecars.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
### Example Directory Structure
```
proofs
├── 0
│ ├── 100
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│ │ └── ...
│ └── 101
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│ └── ...
└── 1
└── 4096
├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
└── ...
```

View File

@@ -70,4 +70,36 @@ var (
Name: "data_column_prune_latency",
Help: "Latency of data column prune operations in milliseconds",
})
// Proofs
//
// All latency metrics below are recorded in milliseconds.
proofSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "proof_storage_save_latency",
	Help:    "Latency of proof storage save operations in milliseconds",
	Buckets: []float64{3, 5, 7, 9, 11, 13, 20, 50},
})
// Observed once per successful Get call.
proofFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "proof_storage_get_latency",
	Help:    "Latency of proof storage get operations in milliseconds",
	Buckets: []float64{3, 5, 7, 9, 11, 13},
})
// Incremented by the number of cache entries evicted during a prune pass.
proofPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
	Name: "proof_pruned",
	Help: "Number of proof files pruned.",
})
// NOTE(review): proofWrittenCounter and proofDiskCount are declared here but
// not updated anywhere in this chunk — confirm they are maintained elsewhere.
proofWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
	Name: "proof_written",
	Help: "Number of proof files written",
})
proofDiskCount = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "proof_disk_count",
	Help: "Approximate number of proof files in storage",
})
// Observed around the fsync performed after writing a proof file.
proofFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "proof_file_sync_latency",
	Help: "Latency of sync operations when saving proofs in milliseconds",
})
// Observed once per prune pass (including early-exit passes).
proofPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "proof_prune_latency",
	Help: "Latency of proof prune operations in milliseconds",
})
)

View File

@@ -144,3 +144,45 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}
// Proofs
// ------
// NewEphemeralProofStorage should only be used for tests.
// The instance of ProofStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralProofStorage(t testing.TB, opts ...ProofStorageOption) *ProofStorage {
	memFs := afero.NewMemMapFs()
	return NewWarmedEphemeralProofStorageUsingFs(t, memFs, opts...)
}
// NewEphemeralProofStorageAndFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the ProofStorage API.
func NewEphemeralProofStorageAndFs(t testing.TB, opts ...ProofStorageOption) (afero.Fs, *ProofStorage) {
	memFs := afero.NewMemMapFs()
	storage := NewWarmedEphemeralProofStorageUsingFs(t, memFs, opts...)
	return memFs, storage
}
// NewEphemeralProofStorageUsingFs creates a ProofStorage backed by the provided filesystem.
// Default options are placed first so that caller-supplied options can override them.
func NewEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
	// Defaults first, user opts last so callers can override the defaults.
	options := append([]ProofStorageOption{
		WithProofRetentionEpochs(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest),
		WithProofFs(fs),
	}, opts...)

	storage, err := NewProofStorage(context.Background(), options...)
	if err != nil {
		t.Fatal(err)
	}
	return storage
}
// NewWarmedEphemeralProofStorageUsingFs creates a ProofStorage with a warmed cache.
func NewWarmedEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
	storage := NewEphemeralProofStorageUsingFs(t, fs, opts...)
	storage.WarmCache()
	return storage
}

View File

@@ -0,0 +1,952 @@
package filesystem
import (
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/async"
"github.com/OffchainLabs/prysm/v7/async/event"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/io/file"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/spf13/afero"
)
// Constants describing the on-disk proof-file layout:
// version (1 byte) | offset table (8 slots x 8 bytes = 64 bytes) | SSZ proofs.
const (
	proofVersion         = 0x01
	proofVersionSize     = 1                                       // bytes
	maxProofTypes        = 8                                       // ExecutionProofId max value (EXECUTION_PROOF_TYPE_COUNT)
	proofOffsetSize      = 4                                       // bytes for offset (uint32)
	proofSizeSize        = 4                                       // bytes for size (uint32)
	proofSlotSize        = proofOffsetSize + proofSizeSize         // 8 bytes per slot
	proofOffsetTableSize = maxProofTypes * proofSlotSize           // 64 bytes
	proofHeaderSize      = proofVersionSize + proofOffsetTableSize // 65 bytes
	// File extension shared with data column sidecar files.
	proofsFileExtension = "sszs"
	// Interval between background prune passes (see NewProofStorage).
	proofPrunePeriod = 1 * time.Minute
)
// Sentinel errors returned by ProofStorage operations.
var (
	errProofIDTooLarge        = errors.New("proof ID too large")
	errWrongProofBytesWritten = errors.New("wrong number of bytes written")
	errWrongProofVersion      = errors.New("wrong proof version")
	errWrongProofBytesRead    = errors.New("wrong number of bytes read")
	errNoProofBasePath        = errors.New("ProofStorage base path not specified in init")
	// NOTE(review): errProofAlreadyExists is not referenced anywhere in this
	// chunk — confirm it is used elsewhere before assuming it is needed.
	errProofAlreadyExists = errors.New("proof already exists")
)
type (
	// ProofIdent is a unique identifier for a proof.
	ProofIdent struct {
		BlockRoot [fieldparams.RootLength]byte
		Epoch     primitives.Epoch
		ProofType uint8
	}

	// ProofsIdent is a collection of unique identifiers for proofs.
	// All listed proof types share the same block root and epoch.
	ProofsIdent struct {
		BlockRoot  [fieldparams.RootLength]byte
		Epoch      primitives.Epoch
		ProofTypes []uint8
	}

	// ProofStorage is the concrete implementation of the filesystem backend for saving and retrieving ExecutionProofs.
	ProofStorage struct {
		base            string           // root directory for the OS-backed filesystem (set via WithProofBasePath)
		retentionEpochs primitives.Epoch // proofs older than (highest stored epoch - retentionEpochs) get pruned
		fs              afero.Fs         // filesystem abstraction; in-memory in tests (WithProofFs)
		cache           *proofCache      // in-memory index of which proofs exist on disk
		proofFeed       *event.Feed      // notifies subscribers when new proofs are saved
		pruneMu         sync.RWMutex     // taken exclusively by prune/WarmCache/Clear, shared by Save/Get/Remove
		mu              sync.Mutex       // protects muChans
		muChans         map[[fieldparams.RootLength]byte]*proofMuChan
	}

	// ProofStorageOption is a functional option for configuring a ProofStorage.
	ProofStorageOption func(*ProofStorage) error

	// proofMuChan pairs a per-block-root file mutex with a channel used to
	// hand proof batches to the goroutine currently holding the file lock.
	proofMuChan struct {
		mu      *sync.RWMutex
		toStore chan []blocks.VerifiedROSignedExecutionProof
	}

	// proofSlotEntry represents the offset and size for a proof in the file.
	// The offset is relative to the end of the 65-byte header; size == 0
	// means the proof is absent.
	proofSlotEntry struct {
		offset uint32
		size   uint32
	}

	// proofOffsetTable is the offset table with 8 slots indexed by proofID.
	proofOffsetTable [maxProofTypes]proofSlotEntry

	// proofFileMetadata contains metadata extracted from a proof file path.
	proofFileMetadata struct {
		period    uint64
		epoch     primitives.Epoch
		blockRoot [fieldparams.RootLength]byte
	}
)
// WithProofBasePath is a required option that sets the base path of proof storage.
func WithProofBasePath(base string) ProofStorageOption {
	return func(storage *ProofStorage) error {
		storage.base = base
		return nil
	}
}
// WithProofRetentionEpochs is an option that changes the number of epochs proofs will be persisted.
func WithProofRetentionEpochs(e primitives.Epoch) ProofStorageOption {
	return func(storage *ProofStorage) error {
		storage.retentionEpochs = e
		return nil
	}
}
// WithProofFs allows the afero.Fs implementation to be customized.
// Used by tests to substitute an in-memory filesystem.
func WithProofFs(fs afero.Fs) ProofStorageOption {
	return func(storage *ProofStorage) error {
		storage.fs = fs
		return nil
	}
}
// NewProofStorage creates a new instance of the ProofStorage object.
// It applies the functional options, falls back to an OS-backed filesystem
// rooted at the configured base path when none was injected, and starts a
// background prune loop bound to ctx.
func NewProofStorage(ctx context.Context, opts ...ProofStorageOption) (*ProofStorage, error) {
	storage := &ProofStorage{
		proofFeed: new(event.Feed),
		muChans:   make(map[[fieldparams.RootLength]byte]*proofMuChan),
	}

	// Apply the functional options in order; the first failure aborts construction.
	for _, opt := range opts {
		if err := opt(storage); err != nil {
			return nil, fmt.Errorf("failed to create proof storage: %w", err)
		}
	}

	// When no filesystem was injected (tests use WithProofFs), build an
	// OS-backed filesystem rooted at the base path.
	if storage.fs == nil {
		if storage.base == "" {
			return nil, errNoProofBasePath
		}
		storage.base = path.Clean(storage.base)
		if err := file.MkdirAll(storage.base); err != nil {
			return nil, fmt.Errorf("failed to create proof storage at %s: %w", storage.base, err)
		}
		storage.fs = afero.NewBasePathFs(afero.NewOsFs(), storage.base)
	}

	storage.cache = newProofCache()

	// Periodically prune expired proofs until ctx is cancelled. The prune
	// pass holds pruneMu exclusively.
	async.RunEvery(ctx, proofPrunePeriod, func() {
		storage.pruneMu.Lock()
		defer storage.pruneMu.Unlock()
		storage.prune()
	})

	return storage, nil
}
// WarmCache warms the cache of the proof filesystem.
// It walks the full on-disk layout (period directory -> epoch directory ->
// .sszs files), reads each file's header, and records every present proof in
// the in-memory cache, then finishes with a prune pass. pruneMu is held
// exclusively for the whole walk, blocking Save/Get/Remove until done.
func (ps *ProofStorage) WarmCache() {
	start := time.Now()
	log.Info("Proof filesystem cache warm-up started")
	ps.pruneMu.Lock()
	defer ps.pruneMu.Unlock()
	// List all period directories
	periodFileInfos, err := afero.ReadDir(ps.fs, ".")
	if err != nil {
		log.WithError(err).Error("Error reading top directory during proof warm cache")
		return
	}
	// Iterate through periods; non-directory entries at the top level are skipped.
	for _, periodFileInfo := range periodFileInfos {
		if !periodFileInfo.IsDir() {
			continue
		}
		periodPath := periodFileInfo.Name()
		// List all epoch directories in this period
		epochFileInfos, err := afero.ReadDir(ps.fs, periodPath)
		if err != nil {
			log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during proof warm cache")
			continue
		}
		// Iterate through epochs; errors on one epoch do not abort the others.
		for _, epochFileInfo := range epochFileInfos {
			if !epochFileInfo.IsDir() {
				continue
			}
			epochPath := path.Join(periodPath, epochFileInfo.Name())
			// List all .sszs files in this epoch
			files, err := ps.listProofEpochFiles(epochPath)
			if err != nil {
				log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during proof warm cache")
				continue
			}
			// Process all files in this epoch in parallel
			ps.processProofEpochFiles(files)
		}
	}
	// Prune the cache and the filesystem
	ps.prune()
	totalElapsed := time.Since(start)
	log.WithField("elapsed", totalElapsed).Info("Proof filesystem cache warm-up complete")
}
// listProofEpochFiles lists all .sszs files in an epoch directory.
// Sub-directories and files with other extensions are ignored.
func (ps *ProofStorage) listProofEpochFiles(epochPath string) ([]string, error) {
	entries, err := afero.ReadDir(ps.fs, epochPath)
	if err != nil {
		return nil, fmt.Errorf("read epoch directory: %w", err)
	}

	suffix := "." + proofsFileExtension
	result := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		if name := entry.Name(); strings.HasSuffix(name, suffix) {
			result = append(result, path.Join(epochPath, name))
		}
	}
	return result, nil
}
// processProofEpochFiles processes all proof files in an epoch in parallel,
// waiting for every worker to finish before returning. Failures are logged
// per-file and do not abort the remaining files.
func (ps *ProofStorage) processProofEpochFiles(files []string) {
	wg := sync.WaitGroup{}
	for _, fp := range files {
		wg.Go(func() {
			if err := ps.processProofFile(fp); err != nil {
				log.WithError(err).WithField("file", fp).Error("Error processing proof file during warm cache")
			}
		})
	}
	wg.Wait()
}
// processProofFile processes a single .sszs proof file for cache warming:
// it derives the block root and epoch from the path, decodes the file's
// offset table, and records every present proof in the cache.
func (ps *ProofStorage) processProofFile(filePath string) error {
	// Derive period/epoch/root from the path itself.
	meta, err := extractProofFileMetadata(filePath)
	if err != nil {
		return fmt.Errorf("extract proof file metadata: %w", err)
	}

	f, err := ps.fs.Open(filePath)
	if err != nil {
		return fmt.Errorf("open file: %w", err)
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil {
			log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during proof warm cache")
		}
	}()

	// Decode the offset table from the header.
	offsetTable, _, err := ps.readHeader(f)
	if err != nil {
		return fmt.Errorf("read header: %w", err)
	}

	// Record every present proof (non-zero size) in the cache.
	for proofID := range offsetTable {
		if offsetTable[proofID].size == 0 {
			continue
		}
		ps.cache.set(ProofIdent{
			BlockRoot: meta.blockRoot,
			Epoch:     meta.epoch,
			ProofType: uint8(proofID),
		})
	}
	return nil
}
// Summary returns the ProofStorageSummary for a given root.
func (ps *ProofStorage) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
	summary := ps.cache.Summary(root)
	return summary
}
// Save saves execution proofs into the database.
// The proofs must all belong to the same block (same block root).
// Every proof type is validated before anything is written, the whole batch
// is persisted to the filesystem exactly once, the cache is updated, and
// subscribers are notified via the proof feed.
func (ps *ProofStorage) Save(proofs []blocks.VerifiedROSignedExecutionProof) error {
	startTime := time.Now()

	if len(proofs) == 0 {
		return nil
	}

	// Safely retrieve the block root and the epoch.
	first := proofs[0]
	blockRoot := first.BlockRoot()
	epoch := first.Epoch()

	// Validate all proof types up front so an invalid proof ID cannot leave
	// a partially-saved batch behind.
	proofTypes := make([]uint8, 0, len(proofs))
	for _, proof := range proofs {
		proofType := proof.Message.ProofType[0]
		if proofType >= maxProofTypes {
			return errProofIDTooLarge
		}
		proofTypes = append(proofTypes, proofType)
	}

	// Save the whole batch in the filesystem once (not once per proof).
	if err := ps.saveFilesystem(blockRoot, epoch, proofs); err != nil {
		return fmt.Errorf("save filesystem: %w", err)
	}

	// Compute the proofs ident.
	proofsIdent := ProofsIdent{BlockRoot: blockRoot, Epoch: epoch, ProofTypes: proofTypes}

	// Set proofs in the cache.
	ps.cache.setMultiple(proofsIdent)

	// Notify the proof feed.
	ps.proofFeed.Send(proofsIdent)

	proofSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	return nil
}
// saveFilesystem saves proofs into the database.
// This function expects all proofs to belong to the same block.
func (ps *ProofStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, proofs []blocks.VerifiedROSignedExecutionProof) error {
	// Compute the file path.
	filePath := proofFilePath(root, epoch)

	ps.pruneMu.RLock()
	defer ps.pruneMu.RUnlock()

	// Hand the batch to whichever goroutine ends up holding the file lock,
	// then acquire the lock ourselves.
	fileMu, toStore := ps.fileMutexChan(root)
	toStore <- proofs
	fileMu.Lock()
	defer fileMu.Unlock()

	// Dispatch on whether the proof file already exists.
	exists, err := afero.Exists(ps.fs, filePath)
	if err != nil {
		return fmt.Errorf("afero exists: %w", err)
	}

	if !exists {
		if err := ps.saveProofNewFile(filePath, toStore); err != nil {
			return fmt.Errorf("save proof new file: %w", err)
		}
		return nil
	}

	if err := ps.saveProofExistingFile(filePath, toStore); err != nil {
		return fmt.Errorf("save proof existing file: %w", err)
	}
	return nil
}
// Subscribe subscribes to the proof feed.
// It returns the subscription and a 1-size buffer channel to receive proof idents.
func (ps *ProofStorage) Subscribe() (event.Subscription, <-chan ProofsIdent) {
	ch := make(chan ProofsIdent, 1)
	return ps.proofFeed.Subscribe(ch), ch
}
// Get retrieves signed execution proofs from the database.
// If one of the requested proofs is not found, it is just skipped.
// If proofIDs is nil, then all stored proofs are returned.
func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint8) ([]*ethpb.SignedExecutionProof, error) {
	ps.pruneMu.RLock()
	defer ps.pruneMu.RUnlock()

	fileMu, _ := ps.fileMutexChan(root)
	fileMu.RLock()
	defer fileMu.RUnlock()

	startTime := time.Now()

	// Build all proofIDs if none are provided.
	if proofIDs == nil {
		proofIDs = make([]uint8, maxProofTypes)
		for i := range proofIDs {
			proofIDs[i] = uint8(i)
		}
	}

	summary, ok := ps.cache.get(root)
	if !ok {
		// Nothing found in db. Exit early.
		return nil, nil
	}

	// Check if any requested proofID exists.
	if !slices.ContainsFunc(proofIDs, summary.HasProof) {
		return nil, nil
	}

	// Compute the file path.
	filePath := proofFilePath(root, summary.epoch)

	// Open the proof file. The local is named `f` to avoid shadowing the
	// imported `file` package.
	f, err := ps.fs.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("proof file open: %w", err)
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil {
			log.WithError(closeErr).WithField("file", filePath).Error("Error closing proof file")
		}
	}()

	// Read the header.
	offsetTable, _, err := ps.readHeader(f)
	if err != nil {
		return nil, fmt.Errorf("read header: %w", err)
	}

	// Retrieve proofs from the file.
	proofs := make([]*ethpb.SignedExecutionProof, 0, len(proofIDs))
	for _, proofID := range proofIDs {
		if proofID >= maxProofTypes {
			continue
		}

		entry := offsetTable[proofID]

		// Skip if the proof is not saved.
		if entry.size == 0 {
			continue
		}

		// Seek to the proof offset (offset is relative to end of header).
		if _, err := f.Seek(proofHeaderSize+int64(entry.offset), io.SeekStart); err != nil {
			return nil, fmt.Errorf("seek: %w", err)
		}

		// Read the SSZ encoded proof. io.ReadFull returns a non-nil error
		// whenever fewer than len(sszProof) bytes are read, so no separate
		// length check is needed.
		sszProof := make([]byte, entry.size)
		if _, err := io.ReadFull(f, sszProof); err != nil {
			return nil, fmt.Errorf("read proof: %w", err)
		}

		// Unmarshal the signed proof.
		proof := new(ethpb.SignedExecutionProof)
		if err := proof.UnmarshalSSZ(sszProof); err != nil {
			return nil, fmt.Errorf("unmarshal proof: %w", err)
		}

		proofs = append(proofs, proof)
	}

	proofFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	return proofs, nil
}
// Remove deletes all proofs for a given root.
func (ps *ProofStorage) Remove(blockRoot [fieldparams.RootLength]byte) error {
	ps.pruneMu.RLock()
	defer ps.pruneMu.RUnlock()

	fileMu, _ := ps.fileMutexChan(blockRoot)
	fileMu.Lock()
	defer fileMu.Unlock()

	// Without a cache entry there is nothing stored for this root.
	summary, found := ps.cache.get(blockRoot)
	if !found {
		return nil
	}

	// Drop the cache entry first, then the backing file.
	ps.cache.evict(blockRoot)
	if err := ps.fs.Remove(proofFilePath(blockRoot, summary.epoch)); err != nil {
		return fmt.Errorf("remove: %w", err)
	}
	return nil
}
// Clear deletes all files on the filesystem.
func (ps *ProofStorage) Clear() error {
	ps.pruneMu.Lock()
	defer ps.pruneMu.Unlock()

	entries, err := listDir(ps.fs, ".")
	if err != nil {
		return fmt.Errorf("list dir: %w", err)
	}

	// Wipe the cache, then every top-level entry on disk.
	ps.cache.clear()
	for _, entry := range entries {
		if err := ps.fs.RemoveAll(entry); err != nil {
			return fmt.Errorf("remove all: %w", err)
		}
	}
	return nil
}
// saveProofNewFile saves proofs to a brand-new file.
// It drains the input channel, builds the header (version byte + offset
// table) and the concatenated SSZ-encoded proofs in memory, then writes the
// whole file with a single Write followed by a Sync.
// If no valid, non-duplicate proof is pulled from the channel, no file is created.
func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []blocks.VerifiedROSignedExecutionProof) (err error) {
	// Initialize the offset table.
	var offsetTable proofOffsetTable
	var sszEncodedProofs []byte
	currentOffset := uint32(0)

	for {
		proofs := pullProofChan(inputProofs)
		if len(proofs) == 0 {
			break
		}
		for _, proof := range proofs {
			proofType := proof.Message.ProofType[0]
			if proofType >= maxProofTypes {
				continue
			}
			// Skip if already in offset table (duplicate).
			if offsetTable[proofType].size != 0 {
				continue
			}
			// SSZ encode the full signed proof.
			sszProof, err := proof.SignedExecutionProof.MarshalSSZ()
			if err != nil {
				return fmt.Errorf("marshal proof SSZ: %w", err)
			}
			proofSize := uint32(len(sszProof))
			// Update offset table.
			offsetTable[proofType] = proofSlotEntry{offset: currentOffset, size: proofSize}
			// Append SSZ encoded proof.
			sszEncodedProofs = append(sszEncodedProofs, sszProof...)
			currentOffset += proofSize
		}
	}

	if len(sszEncodedProofs) == 0 {
		// Nothing to save.
		return nil
	}

	// Create directory structure.
	dir := filepath.Dir(filePath)
	if err := ps.fs.MkdirAll(dir, directoryPermissions()); err != nil {
		return fmt.Errorf("mkdir all: %w", err)
	}

	// The local is named `f` to avoid shadowing the imported `file` package.
	f, err := ps.fs.Create(filePath)
	if err != nil {
		return fmt.Errorf("create proof file: %w", err)
	}
	defer func() {
		// Surface the close error only if nothing else failed first.
		closeErr := f.Close()
		if closeErr != nil && err == nil {
			err = closeErr
		}
	}()

	// Build the file content: version byte | offset table | SSZ proofs.
	countToWrite := proofHeaderSize + len(sszEncodedProofs)
	content := make([]byte, 0, countToWrite)
	content = append(content, byte(proofVersion))
	content = append(content, encodeOffsetTable(offsetTable)...)
	content = append(content, sszEncodedProofs...)

	countWritten, err := f.Write(content)
	if err != nil {
		return fmt.Errorf("write: %w", err)
	}
	if countWritten != countToWrite {
		return errWrongProofBytesWritten
	}

	syncStart := time.Now()
	if err := f.Sync(); err != nil {
		return fmt.Errorf("sync: %w", err)
	}
	proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
	return nil
}
// saveProofExistingFile saves proofs to an existing file.
// New proofs are appended at the end of the file and the offset table is
// rewritten in place; proofs whose slot is already occupied are skipped.
func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan []blocks.VerifiedROSignedExecutionProof) (err error) {
	// Open the file for read/write. The local is named `f` to avoid
	// shadowing the imported `file` package.
	f, err := ps.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
	if err != nil {
		return fmt.Errorf("open proof file: %w", err)
	}
	defer func() {
		// Surface the close error only if nothing else failed first.
		closeErr := f.Close()
		if closeErr != nil && err == nil {
			err = closeErr
		}
	}()

	// Read current header.
	offsetTable, fileSize, err := ps.readHeader(f)
	if err != nil {
		return fmt.Errorf("read header: %w", err)
	}

	var sszEncodedProofs []byte
	// Offsets are relative to the end of the header.
	currentOffset := uint32(fileSize - proofHeaderSize)
	modified := false

	for {
		proofs := pullProofChan(inputProofs)
		if len(proofs) == 0 {
			break
		}
		for _, proof := range proofs {
			proofType := proof.Message.ProofType[0]
			if proofType >= maxProofTypes {
				continue
			}
			// Skip if proof already exists.
			if offsetTable[proofType].size != 0 {
				continue
			}
			// SSZ encode the full signed proof.
			sszProof, err := proof.SignedExecutionProof.MarshalSSZ()
			if err != nil {
				return fmt.Errorf("marshal proof SSZ: %w", err)
			}
			proofSize := uint32(len(sszProof))
			// Update offset table.
			offsetTable[proofType] = proofSlotEntry{offset: currentOffset, size: proofSize}
			// Append SSZ encoded proof.
			sszEncodedProofs = append(sszEncodedProofs, sszProof...)
			currentOffset += proofSize
			modified = true
		}
	}

	if !modified {
		return nil
	}

	// Write updated offset table back to file (at position 1, after version byte).
	encodedTable := encodeOffsetTable(offsetTable)
	count, err := f.WriteAt(encodedTable, int64(proofVersionSize))
	if err != nil {
		return fmt.Errorf("write offset table: %w", err)
	}
	if count != proofOffsetTableSize {
		return errWrongProofBytesWritten
	}

	// Append the SSZ encoded proofs to the end of the file.
	count, err = f.WriteAt(sszEncodedProofs, fileSize)
	if err != nil {
		return fmt.Errorf("write SSZ encoded proofs: %w", err)
	}
	if count != len(sszEncodedProofs) {
		return errWrongProofBytesWritten
	}

	syncStart := time.Now()
	if err := f.Sync(); err != nil {
		return fmt.Errorf("sync: %w", err)
	}
	proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
	return nil
}
// readHeader reads and validates the file header and returns the decoded
// offset table together with the total file size.
// The file size is computed as header size + sum of all proof sizes, which
// matches how saveProofNewFile/saveProofExistingFile write proofs
// contiguously with no gaps.
// The parameter is named `f` to avoid shadowing the imported `file` package.
func (ps *ProofStorage) readHeader(f afero.File) (proofOffsetTable, int64, error) {
	var header [proofHeaderSize]byte
	countRead, err := f.ReadAt(header[:], 0)
	if err != nil {
		return proofOffsetTable{}, 0, fmt.Errorf("read at: %w", err)
	}
	if countRead != proofHeaderSize {
		return proofOffsetTable{}, 0, errWrongProofBytesRead
	}

	// Check version.
	fileVersion := int(header[0])
	if fileVersion != proofVersion {
		return proofOffsetTable{}, 0, errWrongProofVersion
	}

	// Decode offset table and compute file size.
	var offsetTable proofOffsetTable
	fileSize := int64(proofHeaderSize)
	for i := range offsetTable {
		pos := proofVersionSize + i*proofSlotSize
		offsetTable[i].offset = binary.BigEndian.Uint32(header[pos : pos+proofOffsetSize])
		offsetTable[i].size = binary.BigEndian.Uint32(header[pos+proofOffsetSize : pos+proofSlotSize])
		fileSize += int64(offsetTable[i].size)
	}

	return offsetTable, fileSize, nil
}
// prune cleans the cache, the filesystem and mutexes.
// It removes everything at or below (highest stored epoch - retentionEpochs):
// whole period directories strictly below the cutoff period, and individual
// epoch directories within the cutoff period itself.
// Callers must hold ps.pruneMu exclusively (see NewProofStorage and WarmCache).
func (ps *ProofStorage) prune() {
	startTime := time.Now()
	defer func() {
		proofPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	}()
	highestStoredEpoch := ps.cache.HighestEpoch()
	// Check if we need to prune.
	if highestStoredEpoch < ps.retentionEpochs {
		return
	}
	highestEpochToPrune := highestStoredEpoch - ps.retentionEpochs
	highestPeriodToPrune := proofPeriod(highestEpochToPrune)
	// Prune the cache. If nothing was evicted, the filesystem is left
	// untouched as well.
	prunedCount := ps.cache.pruneUpTo(highestEpochToPrune)
	if prunedCount == 0 {
		return
	}
	proofPrunedCounter.Add(float64(prunedCount))
	// Prune the filesystem. Errors on individual entries are logged and
	// skipped so one bad directory does not block the rest of the pass.
	periodFileInfos, err := afero.ReadDir(ps.fs, ".")
	if err != nil {
		log.WithError(err).Error("Error encountered while reading top directory during proof prune")
		return
	}
	for _, periodFileInfo := range periodFileInfos {
		periodStr := periodFileInfo.Name()
		period, err := strconv.ParseUint(periodStr, 10, 64)
		if err != nil {
			log.WithError(err).Errorf("Error encountered while parsing period %s", periodStr)
			continue
		}
		if period < highestPeriodToPrune {
			// Remove everything lower than highest period to prune.
			if err := ps.fs.RemoveAll(periodStr); err != nil {
				log.WithError(err).Error("Error encountered while removing period directory during proof prune")
			}
			continue
		}
		if period > highestPeriodToPrune {
			// Do not remove anything higher than highest period to prune.
			continue
		}
		// period == highestPeriodToPrune: only epoch directories at or below
		// the cutoff epoch are removed from this period.
		epochFileInfos, err := afero.ReadDir(ps.fs, periodStr)
		if err != nil {
			log.WithError(err).Error("Error encountered while reading epoch directory during proof prune")
			continue
		}
		for _, epochFileInfo := range epochFileInfos {
			epochStr := epochFileInfo.Name()
			epochDir := path.Join(periodStr, epochStr)
			epoch, err := strconv.ParseUint(epochStr, 10, 64)
			if err != nil {
				log.WithError(err).Errorf("Error encountered while parsing epoch %s", epochStr)
				continue
			}
			if primitives.Epoch(epoch) > highestEpochToPrune {
				continue
			}
			if err := ps.fs.RemoveAll(epochDir); err != nil {
				log.WithError(err).Error("Error encountered while removing epoch directory during proof prune")
				continue
			}
		}
	}
	// NOTE(review): this drops the per-root mutex/channel entries for ALL
	// roots, not only the pruned ones. It appears safe only because pruneMu
	// is held exclusively here while every user of muChans holds
	// pruneMu.RLock — confirm that invariant holds for any new caller.
	ps.mu.Lock()
	defer ps.mu.Unlock()
	clear(ps.muChans)
}
// fileMutexChan returns the file mutex and channel for a given block root,
// lazily creating the entry the first time a root is seen.
//
// The returned channel is buffered with capacity 1 so a producer can hand off
// one batch of proofs without blocking.
//
// Fix: the original duplicated the identical `return mc.mu, mc.toStore`
// statement in both the miss branch and the fall-through; the early return is
// removed so there is a single exit point.
func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []blocks.VerifiedROSignedExecutionProof) {
	ps.mu.Lock()
	defer ps.mu.Unlock()
	mc, ok := ps.muChans[root]
	if !ok {
		mc = &proofMuChan{
			mu:      new(sync.RWMutex),
			toStore: make(chan []blocks.VerifiedROSignedExecutionProof, 1),
		}
		ps.muChans[root] = mc
	}
	return mc.mu, mc.toStore
}
// pullProofChan drains inputProofs without blocking and returns every pending
// batch flattened into a single slice. It stops as soon as the channel is
// empty, so it never waits for new sends.
func pullProofChan(inputProofs chan []blocks.VerifiedROSignedExecutionProof) []blocks.VerifiedROSignedExecutionProof {
	collected := make([]blocks.VerifiedROSignedExecutionProof, 0, maxProofTypes)
	for drained := false; !drained; {
		select {
		case batch := <-inputProofs:
			collected = append(collected, batch...)
		default:
			drained = true
		}
	}
	return collected
}
// proofFilePath builds the file path in database for a given root and epoch.
// The resulting layout is <period>/<epoch>/<0x-prefixed-root>.<extension>,
// always joined with forward slashes.
func proofFilePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string {
	periodDir := fmt.Sprintf("%d", proofPeriod(epoch))
	epochDir := fmt.Sprintf("%d", epoch)
	fileName := fmt.Sprintf("%#x.%s", root, proofsFileExtension)
	return path.Join(periodDir, epochDir, fileName)
}
// extractProofFileMetadata extracts the metadata from a proof file path of the
// form <period>/<epoch>/<0x-root>.<extension>, as produced by proofFilePath.
//
// Fix: proofFilePath always joins components with forward slashes
// (path.Join), but the original split on filepath.Separator, which is `\` on
// Windows — so slash-built paths would never split into 3 parts there. The
// path is now normalized with filepath.ToSlash and split on "/", which
// handles both OS-native and slash-separated inputs on every platform.
func extractProofFileMetadata(path string) (*proofFileMetadata, error) {
	// Normalize to forward slashes so the split is identical on Windows and Unix.
	parts := strings.Split(filepath.ToSlash(path), "/")
	if len(parts) != 3 {
		return nil, fmt.Errorf("unexpected proof file %s", path)
	}
	period, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("failed to parse period from %s: %w", path, err)
	}
	epoch, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("failed to parse epoch from %s: %w", path, err)
	}
	// The file name must be exactly "<0x-root>.<extension>".
	partsRoot := strings.Split(parts[2], ".")
	if len(partsRoot) != 2 {
		return nil, fmt.Errorf("failed to parse root from %s", path)
	}
	blockRootString := partsRoot[0]
	// "0x" prefix plus two hex characters per root byte.
	if len(blockRootString) != 2+2*fieldparams.RootLength || blockRootString[:2] != "0x" {
		return nil, fmt.Errorf("unexpected proof file name %s", path)
	}
	if partsRoot[1] != proofsFileExtension {
		return nil, fmt.Errorf("unexpected extension %s", path)
	}
	blockRootSlice, err := hex.DecodeString(blockRootString[2:])
	if err != nil {
		return nil, fmt.Errorf("decode string from %s: %w", path, err)
	}
	var blockRoot [fieldparams.RootLength]byte
	copy(blockRoot[:], blockRootSlice)
	result := &proofFileMetadata{period: period, epoch: primitives.Epoch(epoch), blockRoot: blockRoot}
	return result, nil
}
// proofPeriod computes the period of a given epoch.
// A period groups MinEpochsForDataColumnSidecarsRequest consecutive epochs and
// is the name of the top-level storage directory used by proofFilePath and prune.
func proofPeriod(epoch primitives.Epoch) uint64 {
	return uint64(epoch / params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
}
// encodeOffsetTable serializes the offset table into a fresh byte slice of
// proofOffsetTableSize bytes. Each slot holds a big-endian uint32 offset
// followed by a big-endian uint32 size, mirroring the decode path.
func encodeOffsetTable(table proofOffsetTable) []byte {
	encoded := make([]byte, proofOffsetTableSize)
	for i := range table {
		base := i * proofSlotSize
		binary.BigEndian.PutUint32(encoded[base:base+proofOffsetSize], table[i].offset)
		binary.BigEndian.PutUint32(encoded[base+proofOffsetSize:base+proofSlotSize], table[i].size)
	}
	return encoded
}

View File

@@ -0,0 +1,206 @@
package filesystem
import (
"slices"
"sync"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// ProofStorageSummary represents cached information about the proofs on disk for each root the cache knows about.
// The zero value is valid and reports no proofs.
type ProofStorageSummary struct {
	epoch      primitives.Epoch // epoch of the block the proofs belong to
	proofTypes map[uint8]bool   // set of stored proof type IDs (values are always true; nil when empty)
}
// HasProof returns true if the proof with the given proofID is available in the filesystem.
//
// Fix: the explicit nil-map guard was redundant — reading from a nil map in Go
// is well-defined and yields the zero value with ok == false, so the lookup
// alone covers the zero-value summary.
func (s ProofStorageSummary) HasProof(proofID uint8) bool {
	_, ok := s.proofTypes[proofID]
	return ok
}
// Count returns the number of available proofs.
// len of a nil map is 0, so the zero-value summary reports zero proofs.
func (s ProofStorageSummary) Count() int {
	return len(s.proofTypes)
}
// All returns all stored proofIDs sorted in ascending order.
// The zero-value summary yields nil (not an empty slice).
func (s ProofStorageSummary) All() []uint8 {
	if s.proofTypes == nil {
		return nil
	}
	ids := make([]uint8, 0, len(s.proofTypes))
	for id := range s.proofTypes {
		ids = append(ids, id)
	}
	slices.Sort(ids)
	return ids
}
// proofCache is an in-memory index of the proofs stored on disk, keyed by
// block root. All fields are guarded by mu.
type proofCache struct {
	mu                 sync.RWMutex
	proofCount         float64                                              // running total of cached proofs, mirrored into the proofDiskCount gauge
	lowestCachedEpoch  primitives.Epoch                                     // lowest epoch seen; FarFutureEpoch when the cache is empty
	highestCachedEpoch primitives.Epoch                                     // highest epoch seen; zero when the cache is empty
	cache              map[[fieldparams.RootLength]byte]ProofStorageSummary // per-root summaries
}
// newProofCache builds an empty proofCache. lowestCachedEpoch starts at
// FarFutureEpoch so the first inserted epoch always becomes the new minimum.
func newProofCache() *proofCache {
	pc := &proofCache{
		lowestCachedEpoch: params.BeaconConfig().FarFutureEpoch,
		cache:             make(map[[fieldparams.RootLength]byte]ProofStorageSummary),
	}
	return pc
}
// Summary returns the ProofStorageSummary for `root`.
// The ProofStorageSummary can be used to check for the presence of proofs based on proofID.
// An unknown root yields the zero-value summary.
func (pc *proofCache) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
	pc.mu.RLock()
	summary := pc.cache[root]
	pc.mu.RUnlock()
	return summary
}
// HighestEpoch returns the highest cached epoch (zero when nothing has been
// cached yet).
func (pc *proofCache) HighestEpoch() primitives.Epoch {
	pc.mu.RLock()
	epoch := pc.highestCachedEpoch
	pc.mu.RUnlock()
	return epoch
}
// set adds a proof to the cache.
// The summary's epoch is refreshed even when the proof type is already known;
// epoch bounds, the disk gauge and the written counter are only updated when
// a genuinely new proof type is recorded.
func (pc *proofCache) set(ident ProofIdent) {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	summary := pc.cache[ident.BlockRoot]
	if summary.proofTypes == nil {
		summary.proofTypes = make(map[uint8]bool)
	}
	summary.epoch = ident.Epoch

	if _, alreadyKnown := summary.proofTypes[ident.ProofType]; alreadyKnown {
		// Duplicate proof type: persist the refreshed epoch only.
		pc.cache[ident.BlockRoot] = summary
		return
	}

	summary.proofTypes[ident.ProofType] = true
	pc.cache[ident.BlockRoot] = summary
	pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
	pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)
	pc.proofCount++
	proofDiskCount.Set(pc.proofCount)
	proofWrittenCounter.Inc()
}
// setMultiple adds multiple proofs to the cache.
// Proof types already present are skipped; epoch bounds and metrics are only
// touched when at least one new proof type was recorded.
func (pc *proofCache) setMultiple(ident ProofsIdent) {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	summary := pc.cache[ident.BlockRoot]
	if summary.proofTypes == nil {
		summary.proofTypes = make(map[uint8]bool)
	}
	summary.epoch = ident.Epoch

	added := 0
	for _, id := range ident.ProofTypes {
		if _, seen := summary.proofTypes[id]; seen {
			continue
		}
		summary.proofTypes[id] = true
		added++
	}
	if added == 0 {
		// Only the refreshed epoch needs persisting.
		pc.cache[ident.BlockRoot] = summary
		return
	}

	pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
	pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)
	pc.cache[ident.BlockRoot] = summary
	pc.proofCount += float64(added)
	proofDiskCount.Set(pc.proofCount)
	proofWrittenCounter.Add(float64(added))
}
// get returns the ProofStorageSummary for the given block root.
// If the root is not in the cache, the second return value will be false.
func (pc *proofCache) get(blockRoot [fieldparams.RootLength]byte) (ProofStorageSummary, bool) {
	pc.mu.RLock()
	summary, found := pc.cache[blockRoot]
	pc.mu.RUnlock()
	return summary, found
}
// evict removes the ProofStorageSummary for the given block root from the
// cache and returns how many proofs were dropped with it. Unknown roots
// return 0 and leave the cache untouched.
func (pc *proofCache) evict(blockRoot [fieldparams.RootLength]byte) int {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	summary, found := pc.cache[blockRoot]
	if !found {
		return 0
	}
	delete(pc.cache, blockRoot)

	dropped := len(summary.proofTypes)
	if dropped > 0 {
		pc.proofCount -= float64(dropped)
		proofDiskCount.Set(pc.proofCount)
	}
	return dropped
}
// pruneUpTo removes all entries from the cache up to the given target epoch included.
// It returns the number of pruned proofs and, whenever anything was pruned,
// recomputes the lowest/highest cached epoch bounds from the survivors.
func (pc *proofCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
	pc.mu.Lock()
	defer pc.mu.Unlock()

	var pruned uint64
	survivorLowest := params.BeaconConfig().FarFutureEpoch
	survivorHighest := primitives.Epoch(0)
	// Deleting from a map while ranging over it is safe in Go.
	for root, summary := range pc.cache {
		if summary.epoch <= targetEpoch {
			pruned += uint64(len(summary.proofTypes))
			delete(pc.cache, root)
			continue
		}
		survivorLowest = min(survivorLowest, summary.epoch)
		survivorHighest = max(survivorHighest, summary.epoch)
	}
	if pruned == 0 {
		return 0
	}
	pc.lowestCachedEpoch = survivorLowest
	pc.highestCachedEpoch = survivorHighest
	pc.proofCount -= float64(pruned)
	proofDiskCount.Set(pc.proofCount)
	return pruned
}
// clear removes all entries from the cache.
// Implemented as a prune up to FarFutureEpoch so every entry matches, reusing
// the same bookkeeping (epoch bounds, metrics) as pruneUpTo.
func (pc *proofCache) clear() uint64 {
	return pc.pruneUpTo(params.BeaconConfig().FarFutureEpoch)
}

View File

@@ -123,6 +123,8 @@ type BeaconNode struct {
BlobStorageOptions []filesystem.BlobStorageOption
DataColumnStorage *filesystem.DataColumnStorage
DataColumnStorageOptions []filesystem.DataColumnStorageOption
ProofStorage *filesystem.ProofStorage
ProofStorageOptions []filesystem.ProofStorageOption
verifyInitWaiter *verification.InitializerWaiter
lhsp *verification.LazyHeadStateProvider
syncChecker *initialsync.SyncChecker
@@ -227,6 +229,15 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, errors.Wrap(err, "could not clear data column storage")
}
if beacon.ProofStorage == nil {
proofStorage, err := filesystem.NewProofStorage(cliCtx.Context, beacon.ProofStorageOptions...)
if err != nil {
return nil, errors.Wrap(err, "new proof storage")
}
beacon.ProofStorage = proofStorage
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
@@ -747,11 +758,13 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithSyncComplete(syncComplete),
blockchain.WithBlobStorage(b.BlobStorage),
blockchain.WithDataColumnStorage(b.DataColumnStorage),
blockchain.WithProofStorage(b.ProofStorage),
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
blockchain.WithOperationNotifier(b),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -836,6 +849,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithStateNotifier(b),
regularsync.WithBlobStorage(b.BlobStorage),
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithExecutionProofStorage(b.ProofStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
@@ -962,6 +976,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
ProofReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,

View File

@@ -35,6 +35,13 @@ func WithBuilderFlagOptions(opts []builder.Option) Option {
}
}
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithBlobStorage sets the BlobStorage backend for the BeaconNode
func WithBlobStorage(bs *filesystem.BlobStorage) Option {
return func(bn *BeaconNode) error {
@@ -52,13 +59,6 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
}
}
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {
@@ -75,3 +75,20 @@ func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Opt
return nil
}
}
// WithProofStorage sets the ProofStorage backend for the BeaconNode.
func WithProofStorage(bs *filesystem.ProofStorage) Option {
	return func(bn *BeaconNode) error {
		bn.ProofStorage = bs
		return nil
	}
}
// WithProofStorageOption appends 1 or more filesystem.ProofStorageOption on the beacon node,
// to be used when initializing proof storage.
func WithProofStorageOption(opt ...filesystem.ProofStorageOption) Option {
	return func(bn *BeaconNode) error {
		bn.ProofStorageOptions = append(bn.ProofStorageOptions, opt...)
		return nil
	}
}

View File

@@ -166,6 +166,7 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -589,6 +589,11 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}
if features.Get().EnableZkvm {
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
localNode.Set(zkvmKeyEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

View File

@@ -25,6 +25,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -243,12 +244,19 @@ func TestCreateLocalNode(t *testing.T) {
name string
cfg *Config
expectedError bool
zkvmEnabled bool
}{
{
name: "valid config",
cfg: &Config{},
expectedError: false,
},
{
name: "valid config with zkVM enabled",
cfg: &Config{},
expectedError: false,
zkvmEnabled: true,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
@@ -273,6 +281,15 @@ func TestCreateLocalNode(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.zkvmEnabled {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
t.Cleanup(func() {
resetCfg()
})
}
// Define ports. Use unique ports since this test validates ENR content.
const (
udpPort = 3100
@@ -348,6 +365,14 @@ func TestCreateLocalNode(t *testing.T) {
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, custodyRequirement, *custodyGroupCount)
// Check zkVM enabled key if applicable.
if tt.zkvmEnabled {
zkvmEnabled := new(bool)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
}
})
}
}

View File

@@ -52,6 +52,9 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// executionProofWeight specifies the scoring weight that we apply to
// our execution proof topic.
executionProofWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -145,6 +148,8 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
case strings.Contains(topic, GossipExecutionProofMessage):
return defaultExecutionProofTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -510,6 +515,28 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}
func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: executionProofWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}
func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,

View File

@@ -25,6 +25,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
ExecutionProofSubnetTopicFormat: func() proto.Message { return &ethpb.ExecutionProof{} },
}
// GossipTopicMappings is a function to return the assigned data type

View File

@@ -602,6 +602,33 @@ func (p *Status) All() []peer.ID {
return pids
}
// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState != Connected {
continue
}
if peerData.Enr == nil {
continue
}
var enabled bool
entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
if err := peerData.Enr.Load(entry); err != nil {
continue
}
if enabled {
peers = append(peers, pid)
}
}
return peers
}
// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
p.store.Lock()

View File

@@ -1341,3 +1341,75 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
p.SetConnectionState(id, state)
return id
}
func TestZkvmEnabledPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 1,
},
},
})
// Create peer 1: Connected, zkVM enabled
pid1 := addPeer(t, p, peers.Connected)
record1 := new(enr.Record)
zkvmEnabled := true
record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record1, pid1, nil, network.DirOutbound)
p.SetConnectionState(pid1, peers.Connected)
// Create peer 2: Connected, zkVM disabled
pid2 := addPeer(t, p, peers.Connected)
record2 := new(enr.Record)
zkvmDisabled := false
record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
p.Add(record2, pid2, nil, network.DirOutbound)
p.SetConnectionState(pid2, peers.Connected)
// Create peer 3: Connected, zkVM enabled
pid3 := addPeer(t, p, peers.Connected)
record3 := new(enr.Record)
record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record3, pid3, nil, network.DirOutbound)
p.SetConnectionState(pid3, peers.Connected)
// Create peer 4: Disconnected, zkVM enabled (should not be included)
pid4 := addPeer(t, p, peers.Disconnected)
record4 := new(enr.Record)
record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record4, pid4, nil, network.DirOutbound)
p.SetConnectionState(pid4, peers.Disconnected)
// Create peer 5: Connected, no ENR (should not be included)
pid5 := addPeer(t, p, peers.Connected)
p.Add(nil, pid5, nil, network.DirOutbound)
p.SetConnectionState(pid5, peers.Connected)
// Create peer 6: Connected, no zkVM key in ENR (should not be included)
pid6 := addPeer(t, p, peers.Connected)
record6 := new(enr.Record)
record6.Set(enr.WithEntry("other_key", "other_value"))
p.Add(record6, pid6, nil, network.DirOutbound)
p.SetConnectionState(pid6, peers.Connected)
// Get zkVM enabled peers
zkvmPeers := p.ZkvmEnabledPeers()
// Should return only pid1 and pid3 (connected peers with zkVM enabled)
assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")
// Verify the returned peers are correct
zkvmPeerMap := make(map[peer.ID]bool)
for _, pid := range zkvmPeers {
zkvmPeerMap[pid] = true
}
assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
}

View File

@@ -67,6 +67,9 @@ const (
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
ExecutionProofsByRootName = "/execution_proofs_by_root"
)
const (
@@ -106,6 +109,9 @@ const (
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -170,6 +176,9 @@ var (
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
// ExecutionProofsByRoot v1 Message
RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}
// Maps all registered protocol prefixes.
@@ -193,6 +202,7 @@ var (
LightClientOptimisticUpdateName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
ExecutionProofsByRootName: true,
}
// Maps all the RPC messages which are to updated in altair.

View File

@@ -36,6 +36,7 @@ var (
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
zkvmEnabledKeyEnrKey = params.BeaconNetworkConfig().ZkvmEnabledKey
)
// The value used with the subnet, in order

View File

@@ -46,6 +46,8 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipExecutionProofMessage is the name for the execution proof message type.
GossipExecutionProofMessage = "execution_proof"
// Topic Formats
//
@@ -75,6 +77,8 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (PoC only have one global topic)
)
// topic is a struct representing a single gossipsub topic.
@@ -158,6 +162,7 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(fulu, future, empty, GossipExecutionProofMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}

View File

@@ -38,6 +38,7 @@ go_library(
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/light-client:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",
"//beacon-chain/rpc/eth/prover:go_default_library",
"//beacon-chain/rpc/eth/rewards:go_default_library",
"//beacon-chain/rpc/eth/validator:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/events"
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/node"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/prover"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/rewards"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/validator"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
@@ -98,6 +99,7 @@ func (s *Service) endpoints(
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater, blocker, coreService)...)
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(stater, coreService)...)
endpoints = append(endpoints, s.proverEndpoints()...)
if features.Get().EnableLightClient {
endpoints = append(endpoints, s.lightClientEndpoints()...)
@@ -1288,3 +1290,22 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
},
}
}
func (*Service) proverEndpoints() []endpoint {
server := &prover.Server{}
const namespace = "prover"
return []endpoint{
{
template: "/eth/v1/prover/execution_proofs",
name: namespace + ".SubmitExecutionProof",
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitExecutionProof,
methods: []string{http.MethodPost},
},
}
}

View File

@@ -84,7 +84,6 @@ func TestGetSpec(t *testing.T) {
config.FuluForkVersion = []byte("FuluForkVersion")
config.FuluForkEpoch = 109
config.GloasForkEpoch = 110
config.MaxBuildersPerWithdrawalsSweep = 112
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -179,6 +178,11 @@ func TestGetSpec(t *testing.T) {
config.BuilderPaymentThresholdNumerator = 104
config.BuilderPaymentThresholdDenominator = 105
// EIP-8025
config.MaxProofDataBytes = 200
config.MinEpochsForExecutionProofRequests = 201
config.MinProofsRequired = 202
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
config.DomainBeaconProposer = dbp
@@ -221,7 +225,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 193, len(data))
assert.Equal(t, 192, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -303,8 +307,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "109", v)
case "GLOAS_FORK_EPOCH":
assert.Equal(t, "110", v)
case "MAX_BUILDERS_PER_WITHDRAWALS_SWEEP":
assert.Equal(t, "112", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":
@@ -613,6 +615,12 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "102", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "103", v)
case "MAX_PROOF_DATA_BYTES":
assert.Equal(t, "200", v)
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
assert.Equal(t, "201", v)
case "MIN_PROOFS_REQUIRED":
assert.Equal(t, "202", v)
case "BUILDER_PAYMENT_THRESHOLD_NUMERATOR":
assert.Equal(t, "104", v)
case "BUILDER_PAYMENT_THRESHOLD_DENOMINATOR":

View File

@@ -0,0 +1,17 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"log.go",
"server.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/prover",
visibility = ["//visibility:public"],
deps = [
"//monitoring/tracing/trace:go_default_library",
"//network/httputil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,39 @@
package prover
import (
"encoding/json"
"io"
"net/http"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
)
// SubmitExecutionProof handles POST requests to /eth/v1/prover/execution_proofs.
// It receives execution proofs from provers and logs them.
// The body must be non-empty, well-formed JSON; on success it responds 200
// with no payload.
func (s *Server) SubmitExecutionProof(w http.ResponseWriter, r *http.Request) {
	_, span := trace.StartSpan(r.Context(), "prover.SubmitExecutionProof")
	defer span.End()
	body, err := io.ReadAll(r.Body)
	if err != nil {
		httputil.HandleError(w, "Could not read request body: "+err.Error(), http.StatusBadRequest)
		return
	}
	if len(body) == 0 {
		httputil.HandleError(w, "No data submitted", http.StatusBadRequest)
		return
	}
	// Decode only to validate that the body is well-formed JSON; the parsed
	// fields are not otherwise used. NOTE(review): the proof is neither
	// persisted nor forwarded here — presumably a PoC placeholder; confirm.
	var proof map[string]any
	if err := json.Unmarshal(body, &proof); err != nil {
		httputil.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
		return
	}
	log.Info("Received execution proof")
	w.WriteHeader(http.StatusOK)
}

View File

@@ -0,0 +1,5 @@
package prover
import "github.com/sirupsen/logrus"
var log = logrus.WithField("package", "beacon-chain/rpc/eth/prover")

View File

@@ -0,0 +1,5 @@
// Package prover defines handlers for the prover API endpoints.
package prover
// Server defines a server implementation for the prover API endpoints.
type Server struct{}

View File

@@ -42,6 +42,7 @@ go_library(
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/electra:go_default_library",

View File

@@ -70,6 +70,7 @@ type Server struct {
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ProofReceiver blockchain.ProofReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher

View File

@@ -90,6 +90,7 @@ type Config struct {
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ProofReceiver blockchain.ProofReceiver
ExecutionChainService execution.Chain
ChainStartFetcher execution.ChainStartFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -240,6 +241,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
DataColumnReceiver: s.cfg.DataColumnReceiver,
ProofReceiver: s.cfg.ProofReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,

View File

@@ -3,7 +3,6 @@ package state
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
@@ -14,10 +13,6 @@ type writeOnlyGloasFields interface {
RotateBuilderPendingPayments() error
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error
DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error
DequeueBuilderPendingWithdrawals(num uint64) error
SetNextWithdrawalBuilderIndex(idx primitives.BuilderIndex) error
}
type readOnlyGloasFields interface {
@@ -26,15 +21,4 @@ type readOnlyGloasFields interface {
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
LatestBlockHash() ([32]byte, error)
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
IsParentBlockFull() (bool, error)
ExpectedWithdrawalsGloas() (ExpectedWithdrawalsGloasResult, error)
}
// ExpectedWithdrawalsGloasResult bundles the expected withdrawals and related counters
// for the Gloas fork to avoid positional return mistakes.
type ExpectedWithdrawalsGloasResult struct {
Withdrawals []*enginev1.Withdrawal
ProcessedBuilderWithdrawalsCount uint64
ProcessedPartialWithdrawalsCount uint64
NextWithdrawalBuilderIndex primitives.BuilderIndex
}

View File

@@ -1,17 +1,13 @@
package state_native
import (
"bytes"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// LatestBlockHash returns the hash of the latest execution block.
@@ -151,220 +147,3 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
return b.builderPendingPaymentsVal(), nil
}
// IsParentBlockFull reports whether the last committed payload bid was
// fulfilled with a payload, which can only happen when both the beacon block
// and the payload were present. It must be called on a beacon state before
// processing the execution payload bid in the block.
//
// Spec v1.7.0-alpha.2 (pseudocode):
//
//	def is_parent_block_full(state: BeaconState) -> bool:
//	    return state.latest_execution_payload_bid.block_hash == state.latest_block_hash
func (b *BeaconState) IsParentBlockFull() (bool, error) {
	if b.version < version.Gloas {
		return false, errNotSupported("IsParentBlockFull", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	bid := b.latestExecutionPayloadBid
	if bid == nil {
		// No bid committed yet, so the parent cannot be full.
		return false, nil
	}
	return bytes.Equal(bid.BlockHash, b.latestBlockHash), nil
}
// ExpectedWithdrawalsGloas returns the withdrawals a proposer must pack into
// the next block applied to the current state. Validators also use it to check
// that an execution payload carried the right number of withdrawals.
//
// It follows spec v1.7.0-alpha.1 get_expected_withdrawals: builder pending
// withdrawals are appended first, then pending partial withdrawals, then the
// builders sweep, and finally the validators sweep. The results are bundled in
// an ExpectedWithdrawalsGloasResult to avoid positional return mistakes.
func (b *BeaconState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
	if b.version < version.Gloas {
		return state.ExpectedWithdrawalsGloasResult{}, errNotSupported("ExpectedWithdrawalsGloas", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	ws := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)
	idx := b.nextWithdrawalIndex

	// [New in Gloas:EIP7732] builder pending withdrawals come first.
	idx, builderCount, err := b.appendBuilderWithdrawals(idx, &ws)
	if err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}
	idx, partialCount, err := b.appendPendingPartialWithdrawals(idx, &ws)
	if err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}
	// [New in Gloas:EIP7732] the builders sweep also yields the resume index.
	idx, nextBuilder, err := b.appendBuildersSweepWithdrawals(idx, &ws)
	if err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}
	if err := b.appendValidatorsSweepWithdrawals(idx, &ws); err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}

	return state.ExpectedWithdrawalsGloasResult{
		Withdrawals:                      ws,
		ProcessedBuilderWithdrawalsCount: builderCount,
		ProcessedPartialWithdrawalsCount: partialCount,
		NextWithdrawalBuilderIndex:       nextBuilder,
	}, nil
}
// appendBuilderWithdrawals drains builder pending withdrawals into the
// caller's slice, returning the updated withdrawal index and the number of
// entries processed. It errors if the caller's slice already exceeds the
// per-payload budget (MaxWithdrawalsPerPayload - 1, one slot being reserved).
//
// Mirrors spec v1.7.0-alpha.2 get_builder_withdrawals.
func (b *BeaconState) appendBuilderWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
	limit := int(params.BeaconConfig().MaxWithdrawalsPerPayload - 1)
	ws := *withdrawals
	if len(ws) > limit {
		return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(ws), limit)
	}

	processed := uint64(0)
	for _, pending := range b.builderPendingWithdrawals {
		if len(ws) >= limit {
			// Per-payload budget exhausted; remaining entries stay queued.
			break
		}
		ws = append(ws, &enginev1.Withdrawal{
			Index:          withdrawalIndex,
			ValidatorIndex: pending.BuilderIndex.ToValidatorIndex(),
			Address:        pending.FeeRecipient,
			Amount:         uint64(pending.Amount),
		})
		withdrawalIndex++
		processed++
	}

	*withdrawals = ws
	return withdrawalIndex, processed, nil
}
// appendBuildersSweepWithdrawals appends builders-sweep withdrawals to the
// caller's slice, returning the updated withdrawal index and the builder index
// at which the next sweep should resume. At most
// min(len(builders), MaxBuildersPerWithdrawalsSweep) builders are examined,
// wrapping around the builder registry, and the combined slice never exceeds
// MaxWithdrawalsPerPayload - 1 entries.
//
// Mirrors spec v1.7.0-alpha.2 get_builders_sweep_withdrawals.
func (b *BeaconState) appendBuildersSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, primitives.BuilderIndex, error) {
	cfg := params.BeaconConfig()
	limit := int(cfg.MaxWithdrawalsPerPayload - 1)
	ws := *withdrawals
	if len(ws) > limit {
		return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(ws), limit)
	}

	epoch := slots.ToEpoch(b.slot)
	sweepBound := min(len(b.builders), int(cfg.MaxBuildersPerWithdrawalsSweep))
	idx := b.nextWithdrawalBuilderIndex

	for range sweepBound {
		if len(ws) >= limit {
			break
		}
		builder := b.builders[idx]
		// Only builders that are withdrawable at the current epoch and hold a
		// positive balance produce a sweep withdrawal; others are skipped but
		// still advance the cursor.
		if builder != nil && builder.WithdrawableEpoch <= epoch && builder.Balance > 0 {
			ws = append(ws, &enginev1.Withdrawal{
				Index:          withdrawalIndex,
				ValidatorIndex: idx.ToValidatorIndex(),
				Address:        builder.ExecutionAddress,
				Amount:         uint64(builder.Balance),
			})
			withdrawalIndex++
		}
		idx = primitives.BuilderIndex((uint64(idx) + 1) % uint64(len(b.builders)))
	}

	*withdrawals = ws
	return withdrawalIndex, idx, nil
}

View File

@@ -1,27 +1,26 @@
package state_native
package state_native_test
import (
"bytes"
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestLatestBlockHash(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
st, _ := util.DeterministicGenesisState(t, 1)
_, err := st.LatestBlockHash()
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns zero hash when unset", func(t *testing.T) {
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{})
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{})
require.NoError(t, err)
got, err := st.LatestBlockHash()
@@ -34,7 +33,7 @@ func TestLatestBlockHash(t *testing.T) {
var want [32]byte
copy(want[:], hashBytes)
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
LatestBlockHash: hashBytes,
})
require.NoError(t, err)
@@ -47,14 +46,17 @@ func TestLatestBlockHash(t *testing.T) {
func TestBuilderPubkey(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
_, err := st.BuilderPubkey(0)
stIface, _ := util.DeterministicGenesisState(t, 1)
native, ok := stIface.(*state_native.BeaconState)
require.Equal(t, true, ok)
_, err := native.BuilderPubkey(0)
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns pubkey copy", func(t *testing.T) {
pubkey := bytes.Repeat([]byte{0xAA}, 48)
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Pubkey: pubkey,
@@ -78,12 +80,12 @@ func TestBuilderPubkey(t *testing.T) {
})
t.Run("out of range returns error", func(t *testing.T) {
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{},
})
require.NoError(t, err)
st := stIface.(*BeaconState)
st := stIface.(*state_native.BeaconState)
_, err = st.BuilderPubkey(1)
require.ErrorContains(t, "out of range", err)
})
@@ -91,7 +93,7 @@ func TestBuilderPubkey(t *testing.T) {
func TestBuilderHelpers(t *testing.T) {
t.Run("is active builder", func(t *testing.T) {
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: 10,
@@ -118,7 +120,7 @@ func TestBuilderHelpers(t *testing.T) {
},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2},
}
stInactive, err := InitializeFromProtoGloas(stProto)
stInactive, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
active, err = stInactive.IsActiveBuilder(0)
@@ -127,7 +129,7 @@ func TestBuilderHelpers(t *testing.T) {
})
t.Run("can builder cover bid", func(t *testing.T) {
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: primitives.Gwei(params.BeaconConfig().MinDepositAmount + 50),
@@ -145,7 +147,7 @@ func TestBuilderHelpers(t *testing.T) {
})
require.NoError(t, err)
st := stIface.(*BeaconState)
st := stIface.(*state_native.BeaconState)
ok, err := st.CanBuilderCoverBid(0, 20)
require.NoError(t, err)
require.Equal(t, true, ok)
@@ -157,245 +159,10 @@ func TestBuilderHelpers(t *testing.T) {
}
func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
stIface, err := InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
st := stIface.(*BeaconState)
st := stIface.(*state_native.BeaconState)
_, err = st.BuilderPendingPayments()
require.ErrorContains(t, "BuilderPendingPayments", err)
}
// TestIsParentBlockFull covers the version guard, the nil-bid case, and both
// hash-comparison outcomes of IsParentBlockFull.
func TestIsParentBlockFull(t *testing.T) {
	t.Run("returns error before gloas", func(t *testing.T) {
		pre := &BeaconState{version: version.Fulu}
		_, err := pre.IsParentBlockFull()
		require.ErrorContains(t, "is not supported", err)
	})

	t.Run("returns false when bid is nil", func(t *testing.T) {
		st := &BeaconState{version: version.Gloas}
		full, err := st.IsParentBlockFull()
		require.NoError(t, err)
		require.Equal(t, false, full)
	})

	t.Run("returns true when hashes match", func(t *testing.T) {
		h := bytes.Repeat([]byte{0xAB}, 32)
		st := &BeaconState{
			version:                   version.Gloas,
			latestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{BlockHash: h},
			latestBlockHash:           h,
		}
		full, err := st.IsParentBlockFull()
		require.NoError(t, err)
		require.Equal(t, true, full)
	})

	t.Run("returns false when hashes differ", func(t *testing.T) {
		bidHash := bytes.Repeat([]byte{0xAB}, 32)
		stateHash := bytes.Repeat([]byte{0xCD}, 32)
		st := &BeaconState{
			version:                   version.Gloas,
			latestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{BlockHash: bidHash},
			latestBlockHash:           stateHash,
		}
		full, err := st.IsParentBlockFull()
		require.NoError(t, err)
		require.Equal(t, false, full)
	})
}
// TestAppendBuilderWithdrawals exercises appendBuilderWithdrawals: the
// prior-length guard, the happy path, and the per-payload limit handling.
func TestAppendBuilderWithdrawals(t *testing.T) {
	t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
		st := &BeaconState{}
		// Budget is one less than MaxWithdrawalsPerPayload.
		limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
		withdrawals := make([]*enginev1.Withdrawal, limit+1)
		nextIndex, processed, err := st.appendBuilderWithdrawals(5, &withdrawals)
		require.ErrorContains(t, "exceeds limit", err)
		// On error: index unchanged, nothing processed, slice untouched.
		require.Equal(t, uint64(5), nextIndex)
		require.Equal(t, uint64(0), processed)
		require.Equal(t, int(limit+1), len(withdrawals))
	})
	t.Run("appends builder withdrawals and increments index", func(t *testing.T) {
		st := &BeaconState{
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
				{BuilderIndex: 1, FeeRecipient: []byte{0x01}, Amount: 11},
				{BuilderIndex: 2, FeeRecipient: []byte{0x02}, Amount: 22},
				{BuilderIndex: 3, FeeRecipient: []byte{0x03}, Amount: 33},
			},
		}
		// One pre-existing withdrawal; the three pending entries follow it.
		withdrawals := []*enginev1.Withdrawal{
			{Index: 7, ValidatorIndex: 9, Address: []byte{0xAA}, Amount: 99},
		}
		nextIndex, processed, err := st.appendBuilderWithdrawals(10, &withdrawals)
		require.NoError(t, err)
		// Index advances by one per appended withdrawal: 10 -> 13.
		require.Equal(t, uint64(13), nextIndex)
		require.Equal(t, uint64(3), processed)
		require.Equal(t, 4, len(withdrawals))
		require.DeepEqual(t, &enginev1.Withdrawal{
			Index:          10,
			ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(),
			Address:        []byte{0x01},
			Amount:         11,
		}, withdrawals[1])
		require.DeepEqual(t, &enginev1.Withdrawal{
			Index:          11,
			ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
			Address:        []byte{0x02},
			Amount:         22,
		}, withdrawals[2])
		require.DeepEqual(t, &enginev1.Withdrawal{
			Index:          12,
			ValidatorIndex: primitives.BuilderIndex(3).ToValidatorIndex(),
			Address:        []byte{0x03},
			Amount:         33,
		}, withdrawals[3])
	})
	t.Run("respects per-payload limit", func(t *testing.T) {
		limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
		st := &BeaconState{
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
				{BuilderIndex: 4, FeeRecipient: []byte{0x04}, Amount: 44},
				{BuilderIndex: 5, FeeRecipient: []byte{0x05}, Amount: 55},
			},
		}
		// Only one free slot remains, so only the first pending entry fits.
		withdrawals := make([]*enginev1.Withdrawal, limit-1)
		nextIndex, processed, err := st.appendBuilderWithdrawals(20, &withdrawals)
		require.NoError(t, err)
		require.Equal(t, uint64(21), nextIndex)
		require.Equal(t, uint64(1), processed)
		require.Equal(t, int(limit), len(withdrawals))
		require.DeepEqual(t, &enginev1.Withdrawal{
			Index:          20,
			ValidatorIndex: primitives.BuilderIndex(4).ToValidatorIndex(),
			Address:        []byte{0x04},
			Amount:         44,
		}, withdrawals[len(withdrawals)-1])
	})
	t.Run("does not append when already at limit", func(t *testing.T) {
		limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
		if limit == 0 {
			t.Skip("withdrawals limit too small")
		}
		st := &BeaconState{
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
				{BuilderIndex: 6, FeeRecipient: []byte{0x06}, Amount: 66},
			},
		}
		// Slice is exactly at the limit: no appends, index unchanged.
		withdrawals := make([]*enginev1.Withdrawal, limit)
		nextIndex, processed, err := st.appendBuilderWithdrawals(30, &withdrawals)
		require.NoError(t, err)
		require.Equal(t, uint64(30), nextIndex)
		require.Equal(t, uint64(0), processed)
		require.Equal(t, int(limit), len(withdrawals))
	})
}
// TestAppendBuildersSweepWithdrawals exercises appendBuildersSweepWithdrawals:
// the prior-length guard, eligibility filtering, the sweep bound, and the
// per-payload limit.
func TestAppendBuildersSweepWithdrawals(t *testing.T) {
	t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
		st := &BeaconState{}
		limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
		withdrawals := make([]*enginev1.Withdrawal, limit+1)
		nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(5, &withdrawals)
		require.ErrorContains(t, "exceeds limit", err)
		// On error: index unchanged, zero-value builder index, slice untouched.
		require.Equal(t, uint64(5), nextIndex)
		require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
		require.Equal(t, int(limit+1), len(withdrawals))
	})
	t.Run("appends eligible builders, skips ineligible", func(t *testing.T) {
		epoch := primitives.Epoch(3)
		// Sweep starts at builder 2 and wraps: 2 is eligible; 0 has zero
		// balance; 1 is not yet withdrawable. Cursor returns to 2.
		st := &BeaconState{
			slot:                       slots.UnsafeEpochStart(epoch),
			nextWithdrawalBuilderIndex: 2,
			builders: []*ethpb.Builder{
				{ExecutionAddress: []byte{0x01}, Balance: 0, WithdrawableEpoch: epoch},
				{ExecutionAddress: []byte{0x02}, Balance: 10, WithdrawableEpoch: epoch + 1},
				{ExecutionAddress: []byte{0x03}, Balance: 20, WithdrawableEpoch: epoch},
			},
		}
		withdrawals := []*enginev1.Withdrawal{}
		nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(100, &withdrawals)
		require.NoError(t, err)
		require.Equal(t, uint64(101), nextIndex)
		require.Equal(t, primitives.BuilderIndex(2), nextBuilderIndex)
		require.Equal(t, 1, len(withdrawals))
		require.DeepEqual(t, &enginev1.Withdrawal{
			Index:          100,
			ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
			Address:        []byte{0x03},
			Amount:         20,
		}, withdrawals[0])
	})
	t.Run("respects max builders per sweep", func(t *testing.T) {
		cfg := params.BeaconConfig()
		max := int(cfg.MaxBuildersPerWithdrawalsSweep)
		epoch := primitives.Epoch(1)
		// More builders than the sweep bound, all eligible.
		builders := make([]*ethpb.Builder, max+2)
		for i := range builders {
			builders[i] = &ethpb.Builder{
				ExecutionAddress:  []byte{byte(i + 1)},
				Balance:           1,
				WithdrawableEpoch: epoch,
			}
		}
		// Start at the last index to exercise wrap-around.
		start := len(builders) - 1
		st := &BeaconState{
			slot:                       slots.UnsafeEpochStart(epoch),
			nextWithdrawalBuilderIndex: primitives.BuilderIndex(start),
			builders:                   builders,
		}
		withdrawals := []*enginev1.Withdrawal{}
		nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(7, &withdrawals)
		require.NoError(t, err)
		limit := int(cfg.MaxWithdrawalsPerPayload - 1)
		// Either the sweep bound or the payload budget may be the binding cap.
		expectedCount := min(max, limit)
		require.Equal(t, uint64(7)+uint64(expectedCount), nextIndex)
		require.Equal(t, expectedCount, len(withdrawals))
		expectedNext := primitives.BuilderIndex((uint64(start) + uint64(expectedCount)) % uint64(len(builders)))
		require.Equal(t, expectedNext, nextBuilderIndex)
	})
	t.Run("stops when payload limit reached", func(t *testing.T) {
		cfg := params.BeaconConfig()
		limit := cfg.MaxWithdrawalsPerPayload - 1
		if limit < 1 {
			t.Skip("withdrawals limit too small")
		}
		epoch := primitives.Epoch(2)
		builders := []*ethpb.Builder{
			{ExecutionAddress: []byte{0x0A}, Balance: 3, WithdrawableEpoch: epoch},
			{ExecutionAddress: []byte{0x0B}, Balance: 4, WithdrawableEpoch: epoch},
		}
		st := &BeaconState{
			slot:                       slots.UnsafeEpochStart(epoch),
			nextWithdrawalBuilderIndex: 0,
			builders:                   builders,
		}
		// Slice already at the budget: nothing appended, index unchanged.
		withdrawals := make([]*enginev1.Withdrawal, limit)
		nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(20, &withdrawals)
		require.NoError(t, err)
		require.Equal(t, uint64(20), nextIndex)
		require.Equal(t, int(limit), len(withdrawals))
		require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
	})
}

View File

@@ -133,22 +133,11 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
return withdrawalIndex, 0, nil
}
cfg := params.BeaconConfig()
withdrawalsLimit := min(
len(*withdrawals)+int(cfg.MaxPendingPartialsPerWithdrawalsSweep),
int(cfg.MaxWithdrawalsPerPayload-1),
)
if len(*withdrawals) > withdrawalsLimit {
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(*withdrawals), withdrawalsLimit)
}
ws := *withdrawals
epoch := slots.ToEpoch(b.slot)
var processedPartialWithdrawalsCount uint64
for _, w := range b.pendingPartialWithdrawals {
isWithdrawable := w.WithdrawableEpoch <= epoch
hasReachedLimit := len(ws) >= withdrawalsLimit
if !isWithdrawable || hasReachedLimit {
if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
break
}
@@ -160,7 +149,7 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
if err != nil {
return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
}
hasSufficientEffectiveBalance := v.EffectiveBalance() >= cfg.MinActivationBalance
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
var totalWithdrawn uint64
for _, wi := range ws {
if wi.ValidatorIndex == w.Index {
@@ -171,9 +160,9 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
if err != nil {
return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
}
hasExcessBalance := balance > cfg.MinActivationBalance
if v.ExitEpoch() == cfg.FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-cfg.MinActivationBalance, w.Amount)
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
ws = append(ws, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: w.Index,
@@ -194,7 +183,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
validatorIndex := b.nextWithdrawalValidatorIndex
validatorsLen := b.validatorsLen()
epoch := slots.ToEpoch(b.slot)
bound := min(validatorsLen, int(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep))
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
for range bound {
val, err := b.validatorAtIndexReadOnly(validatorIndex)
if err != nil {
@@ -233,7 +222,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
})
withdrawalIndex++
}
if len(ws) == int(params.BeaconConfig().MaxWithdrawalsPerPayload) {
if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
break
}
validatorIndex += 1

View File

@@ -1,7 +1,6 @@
package state_native
import (
"errors"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
@@ -9,10 +8,8 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
pkgerrors "github.com/pkg/errors"
)
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
@@ -164,154 +161,3 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
// SetPayloadExpectedWithdrawals stores the expected withdrawals for the next payload.
// A nil slice is accepted and clears the field. The slice is retained without
// copying, so the caller must not mutate it after handing it over.
func (b *BeaconState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
	// The field only exists from Gloas onward.
	if b.version < version.Gloas {
		return errNotSupported("SetPayloadExpectedWithdrawals", b.version)
	}
	b.lock.Lock()
	defer b.lock.Unlock()
	b.payloadExpectedWithdrawals = withdrawals
	// Mark dirty so the change is picked up by the next hashing pass.
	b.markFieldAsDirty(types.PayloadExpectedWithdrawals)
	return nil
}
// DequeueBuilderPendingWithdrawals removes processed builder withdrawals from
// the front of the queue. Dequeuing zero entries is a no-op; dequeuing more
// entries than are queued is an error.
func (b *BeaconState) DequeueBuilderPendingWithdrawals(n uint64) error {
	if b.version < version.Gloas {
		return errNotSupported("DequeueBuilderPendingWithdrawals", b.version)
	}
	if n == 0 {
		return nil
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	if uint64(len(b.builderPendingWithdrawals)) < n {
		return errors.New("cannot dequeue more builder withdrawals than are in the queue")
	}
	// Copy-on-write: detach from any other state sharing this slice before
	// mutating it.
	if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
		detached := make([]*ethpb.BuilderPendingWithdrawal, len(b.builderPendingWithdrawals))
		copy(detached, b.builderPendingWithdrawals)
		b.builderPendingWithdrawals = detached
		b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
		b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
	}
	b.builderPendingWithdrawals = b.builderPendingWithdrawals[n:]
	b.markFieldAsDirty(types.BuilderPendingWithdrawals)
	// The queue changed shape, so the field's trie must be rebuilt.
	b.rebuildTrie[types.BuilderPendingWithdrawals] = true
	return nil
}
// SetNextWithdrawalBuilderIndex sets the next builder index for the withdrawals sweep.
// The index records where the builders sweep resumes in the next payload.
func (b *BeaconState) SetNextWithdrawalBuilderIndex(index primitives.BuilderIndex) error {
	// Gloas introduced the builders sweep; earlier forks have no such field.
	if b.version < version.Gloas {
		return errNotSupported("SetNextWithdrawalBuilderIndex", b.version)
	}
	b.lock.Lock()
	defer b.lock.Unlock()
	b.nextWithdrawalBuilderIndex = index
	// Mark dirty so the change is picked up by the next hashing pass.
	b.markFieldAsDirty(types.NextWithdrawalBuilderIndex)
	return nil
}
// DecreaseWithdrawalBalances applies withdrawal balance decreases for
// validators and builders. The state lock is held for the full batch to avoid
// lock churn. A nil entry is an error; zero-amount entries are skipped.
// Builder-encoded validator indices are routed to the builder registry, all
// others to the validator balances list.
func (b *BeaconState) DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error {
	if b.version < version.Gloas {
		return errNotSupported("DecreaseWithdrawalBalances", b.version)
	}
	if len(withdrawals) == 0 {
		return nil
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	var dirtyBalances, dirtyBuilders []uint64
	for _, w := range withdrawals {
		if w == nil {
			return errors.New("withdrawal is nil")
		}
		if w.Amount == 0 {
			// Nothing to subtract; no dirty tracking either.
			continue
		}
		if w.ValidatorIndex.IsBuilderIndex() {
			bIdx := w.ValidatorIndex.ToBuilderIndex()
			if err := b.decreaseBuilderBalanceLockFree(bIdx, w.Amount); err != nil {
				return err
			}
			dirtyBuilders = append(dirtyBuilders, uint64(bIdx))
			continue
		}
		current, err := b.balanceAtIndex(w.ValidatorIndex)
		if err != nil {
			return err
		}
		updated := decreaseBalanceWithVal(current, w.Amount)
		if err := b.balancesMultiValue.UpdateAt(b, uint64(w.ValidatorIndex), updated); err != nil {
			return pkgerrors.Wrap(err, "could not update balances")
		}
		dirtyBalances = append(dirtyBalances, uint64(w.ValidatorIndex))
	}

	// Only flag the fields that were actually touched.
	if len(dirtyBalances) > 0 {
		b.markFieldAsDirty(types.Balances)
		b.addDirtyIndices(types.Balances, dirtyBalances)
	}
	if len(dirtyBuilders) > 0 {
		b.markFieldAsDirty(types.Builders)
		b.addDirtyIndices(types.Builders, dirtyBuilders)
	}
	return nil
}
// decreaseBuilderBalanceLockFree lowers a builder's balance by amount,
// flooring at zero. The caller must already hold the state write lock.
//
// NOTE(review): the copy-on-write below clones only the slice of pointers; the
// *ethpb.Builder elements stay shared with other state copies, so the in-place
// Balance mutation is visible to them — confirm this is intended.
func (b *BeaconState) decreaseBuilderBalanceLockFree(builderIndex primitives.BuilderIndex, amount uint64) error {
	i := uint64(builderIndex)
	if i >= uint64(len(b.builders)) {
		return fmt.Errorf("builder index %d out of range (len=%d)", builderIndex, len(b.builders))
	}
	if b.sharedFieldReferences[types.Builders].Refs() > 1 {
		detached := make([]*ethpb.Builder, len(b.builders))
		copy(detached, b.builders)
		b.builders = detached
		b.sharedFieldReferences[types.Builders].MinusRef()
		b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
	}
	builder := b.builders[i]
	current := uint64(builder.Balance)
	if amount >= current {
		builder.Balance = 0
		return nil
	}
	builder.Balance = primitives.Gwei(current - amount)
	return nil
}
// decreaseBalanceWithVal returns currBalance reduced by delta, saturating at
// zero instead of underflowing the unsigned subtraction.
func decreaseBalanceWithVal(currBalance, delta uint64) uint64 {
	if currBalance <= delta {
		return 0
	}
	return currBalance - delta
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -230,235 +229,6 @@ func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
require.ErrorContains(t, "RotateBuilderPendingPayments", err)
}
// TestSetPayloadExpectedWithdrawals covers the version guard, the nil-input
// case, and replacement of an existing value, checking dirty-field tracking.
func TestSetPayloadExpectedWithdrawals(t *testing.T) {
	t.Run("previous fork returns expected error", func(t *testing.T) {
		pre := &BeaconState{version: version.Fulu}
		err := pre.SetPayloadExpectedWithdrawals([]*enginev1.Withdrawal{})
		require.ErrorContains(t, "SetPayloadExpectedWithdrawals", err)
	})

	t.Run("allows nil input and marks dirty", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
		}
		require.NoError(t, st.SetPayloadExpectedWithdrawals(nil))
		require.Equal(t, true, st.payloadExpectedWithdrawals == nil)
		require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
	})

	t.Run("sets and marks dirty", func(t *testing.T) {
		st := &BeaconState{
			version:                    version.Gloas,
			dirtyFields:                make(map[types.FieldIndex]bool),
			payloadExpectedWithdrawals: []*enginev1.Withdrawal{{Index: 1}, {Index: 2}},
		}
		replacement := []*enginev1.Withdrawal{{Index: 3}}
		require.NoError(t, st.SetPayloadExpectedWithdrawals(replacement))
		require.DeepEqual(t, replacement, st.payloadExpectedWithdrawals)
		require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
	})
}
// TestDecreaseWithdrawalBalances exercises DecreaseWithdrawalBalances: the
// fork gate, input validation, the zero-clamped balance updates for both
// validator and builder withdrawals, and the dirty-field/dirty-index
// bookkeeping used for trie rebuilds.
func TestDecreaseWithdrawalBalances(t *testing.T) {
	// Pre-Gloas states must reject the call.
	t.Run("previous fork returns expected error", func(t *testing.T) {
		st := &BeaconState{version: version.Fulu}
		err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{{}})
		require.ErrorContains(t, "DecreaseWithdrawalBalances", err)
	})
	// A nil entry inside the slice is rejected outright.
	t.Run("rejects nil withdrawal", func(t *testing.T) {
		st := &BeaconState{version: version.Gloas}
		err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{nil})
		require.ErrorContains(t, "withdrawal is nil", err)
	})
	// An empty input must leave all dirty-tracking maps untouched.
	t.Run("no-op on empty input", func(t *testing.T) {
		st := &BeaconState{
			version:      version.Gloas,
			dirtyFields:  make(map[types.FieldIndex]bool),
			dirtyIndices: make(map[types.FieldIndex][]uint64),
			rebuildTrie:  make(map[types.FieldIndex]bool),
		}
		require.NoError(t, st.DecreaseWithdrawalBalances(nil))
		require.Equal(t, 0, len(st.dirtyFields))
		require.Equal(t, 0, len(st.dirtyIndices))
	})
	// Mixed validator/builder withdrawals: amounts larger than the current
	// balance clamp to zero (validator 2: 300-400 -> 0; builder 0:
	// 1000-2000 -> 0), and every touched index is recorded in order.
	t.Run("updates validator and builder balances and tracks dirty indices", func(t *testing.T) {
		st := &BeaconState{
			version:      version.Gloas,
			dirtyFields:  make(map[types.FieldIndex]bool),
			dirtyIndices: make(map[types.FieldIndex][]uint64),
			rebuildTrie:  make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.Builders: stateutil.NewRef(1),
			},
			balancesMultiValue: NewMultiValueBalances([]uint64{100, 200, 300}),
			builders: []*ethpb.Builder{
				{Balance: 1000},
				{Balance: 50},
			},
		}
		withdrawals := []*enginev1.Withdrawal{
			{ValidatorIndex: primitives.ValidatorIndex(1), Amount: 20},
			{ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(), Amount: 30},
			{ValidatorIndex: primitives.ValidatorIndex(2), Amount: 400},
			{ValidatorIndex: primitives.BuilderIndex(0).ToValidatorIndex(), Amount: 2000},
			{ValidatorIndex: primitives.ValidatorIndex(0), Amount: 0},
		}
		require.NoError(t, st.DecreaseWithdrawalBalances(withdrawals))
		require.DeepEqual(t, []uint64{100, 180, 0}, st.Balances())
		require.Equal(t, primitives.Gwei(0), st.builders[0].Balance)
		require.Equal(t, primitives.Gwei(20), st.builders[1].Balance)
		require.Equal(t, true, st.dirtyFields[types.Balances])
		require.Equal(t, true, st.dirtyFields[types.Builders])
		// Dirty indices follow the withdrawal order, not numeric order.
		require.DeepEqual(t, []uint64{1, 2}, st.dirtyIndices[types.Balances])
		require.DeepEqual(t, []uint64{1, 0}, st.dirtyIndices[types.Builders])
	})
	// An out-of-range builder index fails and must not leave any partial
	// dirty-tracking state behind for the Builders field.
	t.Run("returns error on builder index out of range", func(t *testing.T) {
		st := &BeaconState{
			version:      version.Gloas,
			dirtyFields:  make(map[types.FieldIndex]bool),
			dirtyIndices: make(map[types.FieldIndex][]uint64),
			rebuildTrie:  make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.Builders: stateutil.NewRef(1),
			},
			builders: []*ethpb.Builder{{Balance: 5}},
		}
		err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{
			{ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(), Amount: 1},
		})
		require.ErrorContains(t, "out of range", err)
		require.Equal(t, false, st.dirtyFields[types.Builders])
		require.Equal(t, 0, len(st.dirtyIndices[types.Builders]))
	})
}
// TestDequeueBuilderPendingWithdrawals exercises DequeueBuilderPendingWithdrawals:
// the fork gate, bounds checking, the zero-count fast path, dirty/rebuild
// bookkeeping, and copy-on-write semantics for the shared withdrawal slice.
func TestDequeueBuilderPendingWithdrawals(t *testing.T) {
	// Pre-Gloas states must reject the call.
	t.Run("previous fork returns expected error", func(t *testing.T) {
		st := &BeaconState{version: version.Fulu}
		err := st.DequeueBuilderPendingWithdrawals(1)
		require.ErrorContains(t, "DequeueBuilderPendingWithdrawals", err)
	})
	// Asking for more entries than exist fails and leaves the queue and
	// dirty-tracking untouched.
	t.Run("returns error when dequeueing more than length", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: stateutil.NewRef(1),
			},
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
		}
		err := st.DequeueBuilderPendingWithdrawals(2)
		require.ErrorContains(t, "cannot dequeue more builder withdrawals", err)
		require.Equal(t, 1, len(st.builderPendingWithdrawals))
		require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
	})
	// Dequeueing zero entries is a no-op: no dirty flag, no trie rebuild.
	t.Run("no-op on zero", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: stateutil.NewRef(1),
			},
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
		}
		require.NoError(t, st.DequeueBuilderPendingWithdrawals(0))
		require.Equal(t, 1, len(st.builderPendingWithdrawals))
		require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
		require.Equal(t, false, st.rebuildTrie[types.BuilderPendingWithdrawals])
	})
	// Removing from the front keeps the remaining entries, marks the field
	// dirty, and schedules a trie rebuild.
	t.Run("dequeues and marks dirty", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: stateutil.NewRef(1),
			},
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
				{Amount: 1},
				{Amount: 2},
				{Amount: 3},
			},
			rebuildTrie: make(map[types.FieldIndex]bool),
		}
		require.NoError(t, st.DequeueBuilderPendingWithdrawals(2))
		require.Equal(t, 1, len(st.builderPendingWithdrawals))
		require.Equal(t, primitives.Gwei(3), st.builderPendingWithdrawals[0].Amount)
		require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
		require.Equal(t, true, st.rebuildTrie[types.BuilderPendingWithdrawals])
	})
	// Two states sharing the slice (refcount 2): dequeueing on st1 must not
	// mutate st2's view, and afterwards each state holds its own reference.
	t.Run("copy-on-write preserves shared state", func(t *testing.T) {
		sharedRef := stateutil.NewRef(2)
		sharedWithdrawals := []*ethpb.BuilderPendingWithdrawal{
			{Amount: 1},
			{Amount: 2},
			{Amount: 3},
		}
		st1 := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: sharedRef,
			},
			builderPendingWithdrawals: sharedWithdrawals,
			rebuildTrie:               make(map[types.FieldIndex]bool),
		}
		st2 := &BeaconState{
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: sharedRef,
			},
			builderPendingWithdrawals: sharedWithdrawals,
		}
		require.NoError(t, st1.DequeueBuilderPendingWithdrawals(2))
		require.Equal(t, primitives.Gwei(3), st1.builderPendingWithdrawals[0].Amount)
		require.Equal(t, 3, len(st2.builderPendingWithdrawals))
		require.Equal(t, primitives.Gwei(1), st2.builderPendingWithdrawals[0].Amount)
		require.Equal(t, uint(1), st1.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
		require.Equal(t, uint(1), st2.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
	})
}
// TestSetNextWithdrawalBuilderIndex covers the fork gate and the happy path
// of SetNextWithdrawalBuilderIndex.
func TestSetNextWithdrawalBuilderIndex(t *testing.T) {
	// Pre-Gloas states must reject the setter.
	t.Run("previous fork returns expected error", func(t *testing.T) {
		state := &BeaconState{version: version.Fulu}
		require.ErrorContains(t, "SetNextWithdrawalBuilderIndex", state.SetNextWithdrawalBuilderIndex(1))
	})
	// On a Gloas state the index is stored and the field is marked dirty.
	t.Run("sets and marks dirty", func(t *testing.T) {
		state := &BeaconState{
			version:     version.Gloas,
			dirtyFields: map[types.FieldIndex]bool{},
		}
		require.NoError(t, state.SetNextWithdrawalBuilderIndex(7))
		require.Equal(t, primitives.BuilderIndex(7), state.nextWithdrawalBuilderIndex)
		require.Equal(t, true, state.dirtyFields[types.NextWithdrawalBuilderIndex])
	})
}
func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
wd := &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),

View File

@@ -31,6 +31,7 @@ go_library(
"rpc_chunked_response.go",
"rpc_data_column_sidecars_by_range.go",
"rpc_data_column_sidecars_by_root.go",
"rpc_execution_proofs_by_root_topic.go",
"rpc_goodbye.go",
"rpc_light_client.go",
"rpc_metadata.go",
@@ -46,6 +47,7 @@ go_library(
"subscriber_blob_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_execution_proofs.go",
"subscriber_handlers.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
@@ -57,6 +59,7 @@ go_library(
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_execution_proof.go",
"validate_light_client.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
@@ -126,6 +129,7 @@ go_library(
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",

View File

@@ -167,17 +167,25 @@ func WithStateNotifier(n statefeed.Notifier) Option {
}
// WithBlobStorage gives the sync package direct access to BlobStorage.
func WithBlobStorage(b *filesystem.BlobStorage) Option {
func WithBlobStorage(storage *filesystem.BlobStorage) Option {
return func(s *Service) error {
s.cfg.blobStorage = b
s.cfg.blobStorage = storage
return nil
}
}
// WithDataColumnStorage gives the sync package direct access to DataColumnStorage.
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
func WithDataColumnStorage(storage *filesystem.DataColumnStorage) Option {
return func(s *Service) error {
s.cfg.dataColumnStorage = b
s.cfg.dataColumnStorage = storage
return nil
}
}
// WithExecutionProofStorage gives the sync package direct access to ProofStorage.
func WithExecutionProofStorage(storage *filesystem.ProofStorage) Option {
	return func(s *Service) error {
		s.cfg.proofStorage = storage
		return nil
	}
}

View File

@@ -259,6 +259,10 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
return errors.Wrap(err, "request and save missing data column sidecars")
}
if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return nil
}

View File

@@ -100,6 +100,10 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
// DataColumnSidecarsByRangeV1
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */);
// ExecutionProofsByRootV1
topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs
// General topic for all rpc requests.
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)

View File

@@ -17,7 +17,7 @@ import (
func TestNewRateLimiter(t *testing.T) {
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
}
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

View File

@@ -51,6 +51,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
p2p.RPCExecutionProofsByRootTopicV1: s.executionProofsByRootRPCHandler, // Added in Fulu
}, nil
}

View File

@@ -11,11 +11,13 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -87,9 +89,84 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
return errors.Wrap(err, "request and save missing data columns")
}
if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return err
}
// requestAndSaveMissingExecutionProofs fetches any missing execution proofs
// for the given blocks from peers and persists them to proof storage.
// Blocks are processed sequentially; the first failure aborts the remainder.
// Ranging over an empty slice is a no-op, so no explicit length check is needed.
func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
	// TODO: Parallelize requests for multiple blocks.
	for _, blk := range blks {
		if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
			// Include the failing block root so the caller's wrapped error
			// identifies which block the request was for.
			return fmt.Errorf("send and save execution proofs for block %#x: %w", blk.Root(), err)
		}
	}
	return nil
}
// sendAndSaveExecutionProofs requests execution proofs for the given block
// from a zkVM-enabled peer and saves them to proof storage. It is a no-op
// when the zkVM feature is disabled, when the block is outside the proof
// retention period, or when enough proofs are already stored for the block.
func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
	if !features.Get().EnableZkvm {
		return nil
	}
	// Skip blocks that fall outside the proof retention period.
	blockEpoch := slots.ToEpoch(block.Block().Slot())
	currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
	if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
		return nil
	}
	// Skip if we already hold at least the minimum required number of proofs.
	// TODO: All should return the same type ExecutionProofId.
	root := block.Root()
	proofStorage := s.cfg.proofStorage
	storedIds := proofStorage.Summary(root).All()
	count := uint64(len(storedIds))
	if count >= params.BeaconConfig().MinProofsRequired {
		return nil
	}
	request := &ethpb.ExecutionProofsByRootRequest{
		BlockRoot: root[:],
	}
	zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
	if len(zkvmEnabledPeers) == 0 {
		// Static message: errors.New instead of fmt.Errorf with no verbs.
		return errors.New("no zkVM enabled peers available to request execution proofs")
	}
	// TODO: For simplicity, just pick the first peer for now.
	// In the future, we can implement better peer selection logic.
	pid := zkvmEnabledPeers[0]
	proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, request, blockEpoch)
	if err != nil {
		return fmt.Errorf("send execution proofs by root request: %w", err)
	}
	// TODO: Verify really instead of blindly converting to verified sidecars.
	verifiedProofs := make([]blocks.VerifiedROSignedExecutionProof, 0, len(proofs))
	for _, proof := range proofs {
		verifiedProofs = append(verifiedProofs, blocks.NewVerifiedROSignedExecutionProof(proof))
	}
	// Save the proofs into storage.
	if err := proofStorage.Save(verifiedProofs); err != nil {
		return fmt.Errorf("proof storage save: %w", err)
	}
	return nil
}
// requestAndSaveMissingDataColumns checks if the data columns are missing for the given block.
// If so, requests them and saves them to the storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {

View File

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -182,3 +183,21 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return nil
}
// WriteExecutionProofChunk writes one signed execution proof chunk to the
// stream: a success response code, the fork-digest context bytes derived from
// the proof's slot, then the encoded signed execution proof.
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, slot primitives.Slot, proof *ethpb.SignedExecutionProof) error {
	// Success response code first.
	if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
		return errors.Wrap(err, "stream write")
	}
	// Context bytes: the fork digest for the slot's epoch.
	forkDigest := params.ForkDigest(slots.ToEpoch(slot))
	if err := writeContextToStream(forkDigest[:], stream); err != nil {
		return errors.Wrap(err, "write context to stream")
	}
	// Finally, the signed execution proof itself.
	if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
		return errors.Wrap(err, "encode with max length")
	}
	return nil
}

View File

@@ -0,0 +1,89 @@
package sync
import (
"context"
"errors"
"fmt"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/sirupsen/logrus"
)
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
defer span.End()
_, cancel := context.WithTimeout(ctx, ttfbTimeout)
defer cancel()
req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
if !ok {
return errors.New("message is not type ExecutionProofsByRootRequest")
}
remotePeer := stream.Conn().RemotePeer()
SetRPCStreamDeadlines(stream)
// Validate request
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
}
blockRoot := bytesutil.ToBytes32(req.BlockRoot)
log := log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", blockRoot),
"peer": remotePeer.String(),
})
s.rateLimiter.add(stream, 1)
defer closeStream(stream, log)
// Retrieve the slot corresponding to the block root.
roSignedBeaconBlock, err := s.cfg.beaconDB.Block(ctx, blockRoot)
if err != nil {
return fmt.Errorf("fetch block from db: %w", err)
}
if roSignedBeaconBlock == nil {
return fmt.Errorf("block not found for root %#x", blockRoot)
}
roBeaconBlock := roSignedBeaconBlock.Block()
if roBeaconBlock == nil {
return fmt.Errorf("beacon block is nil for root %#x", blockRoot)
}
slot := roBeaconBlock.Slot()
// Get proofs from execution proof pool
summary := s.cfg.proofStorage.Summary(blockRoot)
if summary.Count() == 0 {
return nil
}
// Load all proofs at once
proofs, err := s.cfg.proofStorage.Get(blockRoot, nil)
if err != nil {
return fmt.Errorf("proof storage get: %w", err)
}
// Send proofs
for _, proof := range proofs {
// Write proof to stream
SetStreamWriteDeadline(stream, defaultWriteDuration)
if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), slot, proof); err != nil {
log.WithError(err).Debug("Could not send execution proof")
s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
return err
}
}
log.WithField("proofCount", len(proofs)).Debug("Responded to execution proofs by root request")
return nil
}

View File

@@ -21,6 +21,7 @@ import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
goPeer "github.com/libp2p/go-libp2p/core/peer"
@@ -828,3 +829,98 @@ func DataColumnSidecarsByRangeRequest(columns []uint64, start, end primitives.Sl
Columns: columns,
}, nil
}
// ----------------
// Execution proofs
// ----------------

// SendExecutionProofsByRootRequest sends a request for execution proofs by
// root to the given peer and returns the execution proofs it streams back.
// A nil request is a no-op and returns no proofs.
func SendExecutionProofsByRootRequest(
	ctx context.Context,
	clock blockchain.TemporalOracle,
	p2pProvider p2p.P2P,
	pid peer.ID,
	request *ethpb.ExecutionProofsByRootRequest,
	blockEpoch primitives.Epoch,
) ([]blocks.ROSignedExecutionProof, error) {
	// Nothing to request.
	if request == nil {
		return nil, nil
	}
	// Resolve the RPC topic for the current fork.
	topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
	if err != nil {
		return nil, fmt.Errorf("topic from message: %w", err)
	}
	log.WithFields(logrus.Fields{
		"topic":     topic,
		"blockRoot": fmt.Sprintf("%#x", request.BlockRoot),
	}).Debug("Sending execution proofs by root request")
	// Open the request stream.
	stream, err := p2pProvider.Send(ctx, request, topic, pid)
	if err != nil {
		return nil, fmt.Errorf("send: %w", err)
	}
	defer closeStream(stream, log)
	// Read proof chunks until EOF or the cap is reached.
	// TODO: Use MAX_EXECUTION_PROOFS_PER_PAYLOAD instead of 4.
	// TODO: Verify that the peer does not send more than MAX_EXECUTION_PROOFS_PER_PAYLOAD proofs, and downscore if it does.
	const maxProofs = 4
	proofs := make([]blocks.ROSignedExecutionProof, 0, maxProofs)
	for range maxProofs {
		proof, err := readChunkedExecutionProof(stream, p2pProvider, request.BlockRoot, blockEpoch)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("read chunked execution proof: %w", err)
		}
		proofs = append(proofs, *proof)
	}
	return proofs, nil
}
// readChunkedExecutionProof reads a single chunked execution proof from the
// stream: the status code, the context bytes (fork digest), then the encoded
// signed execution proof, which it wraps as a read-only proof bound to the
// given block root and epoch. An io.EOF from the status read signals that the
// peer has no further chunks.
// TODO: Add validation here
// TODO: Add msgVersion check with ctxMap
func readChunkedExecutionProof(
	stream libp2pcore.Stream,
	encoding p2p.EncodingProvider,
	blockRoot []byte,
	blockEpoch primitives.Epoch,
) (*blocks.ROSignedExecutionProof, error) {
	// Read the status code from the stream; a non-zero code carries an error
	// message from the peer.
	statusCode, errMessage, err := ReadStatusCode(stream, encoding.Encoding())
	if err != nil {
		return nil, fmt.Errorf("read status code: %w", err)
	}
	if statusCode != 0 {
		return nil, errors.New(errMessage)
	}
	// Read context bytes (fork digest).
	if _, err := readContextFromStream(stream); err != nil {
		return nil, fmt.Errorf("read context from stream: %w", err)
	}
	// Decode the execution proof from the stream.
	proof := new(ethpb.SignedExecutionProof)
	if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
		return nil, fmt.Errorf("decode execution proof: %w", err)
	}
	// Create a read-only execution proof from the proof.
	roProof, err := blocks.NewROSignedExecutionProof(proof, bytesutil.ToBytes32(blockRoot), blockEpoch)
	return &roProof, err
}

View File

@@ -70,6 +70,7 @@ const (
seenProposerSlashingSize = 100
badBlockSize = 1000
syncMetricsInterval = 10 * time.Second
seenExecutionProofSize = 100
)
var (
@@ -109,6 +110,7 @@ type config struct {
stateNotifier statefeed.Notifier
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
proofStorage *filesystem.ProofStorage
batchVerifierLimit int
}
@@ -117,6 +119,7 @@ type blockchainService interface {
blockchain.BlockReceiver
blockchain.BlobReceiver
blockchain.DataColumnReceiver
blockchain.ProofReceiver
blockchain.HeadFetcher
blockchain.FinalizationFetcher
blockchain.ForkFetcher
@@ -146,9 +149,12 @@ type Service struct {
rateLimiter *limiter
seenBlockLock sync.RWMutex
seenBlockCache *lru.Cache
seenNewPayloadRequestCache *lru.Cache
seenBlobLock sync.RWMutex
seenBlobCache *lru.Cache
seenDataColumnCache *slotAwareCache
seenProofCache *lru.Cache
seenValidProofCache *lru.Cache
seenAggregatedAttestationLock sync.RWMutex
seenAggregatedAttestationCache *lru.Cache
seenUnAggregatedAttestationLock sync.RWMutex
@@ -173,6 +179,7 @@ type Service struct {
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newColumnsVerifier verification.NewDataColumnsVerifier
newSignedExecutionProofsVerifier verification.NewSignedExecutionProofsVerifier
columnSidecarsExecSingleFlight singleflight.Group
reconstructionSingleFlight singleflight.Group
availableBlocker coverage.AvailableBlocker
@@ -250,6 +257,12 @@ func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verifi
}
}
// newExecutionProofsVerifierFromInitializer adapts a verification.Initializer
// into the NewSignedExecutionProofsVerifier factory consumed by the sync
// service, mirroring the blob and data-column verifier adapters above.
func newExecutionProofsVerifierFromInitializer(ini *verification.Initializer) verification.NewSignedExecutionProofsVerifier {
	return func(proofs []blocks.ROSignedExecutionProof, reqs []verification.Requirement) verification.SignedExecutionProofsVerifier {
		return ini.NewExecutionProofsVerifier(proofs, reqs)
	}
}
// Start the regular sync service.
func (s *Service) Start() {
v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
@@ -259,6 +272,7 @@ func (s *Service) Start() {
}
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
s.newSignedExecutionProofsVerifier = newExecutionProofsVerifierFromInitializer(v)
go s.verifierRoutine()
go s.startDiscoveryAndSubscriptions()
@@ -348,6 +362,8 @@ func (s *Service) initCaches() {
s.seenBlockCache = lruwrpr.New(seenBlockSize)
s.seenBlobCache = lruwrpr.New(seenBlockSize * params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra)
s.seenDataColumnCache = newSlotAwareCache(seenDataColumnSize)
s.seenProofCache = lruwrpr.New(seenBlockSize * 8 * 128) // TODO: Replace 8 with the actual max number of proofs per block and 128 with the maximal estimated prover count.
s.seenValidProofCache = lruwrpr.New(seenBlockSize * 8) // TODO: Replace 8 with the actual max number of proofs per block.
s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)
@@ -357,6 +373,7 @@ func (s *Service) initCaches() {
s.seenAttesterSlashingCache = make(map[uint64]bool)
s.seenProposerSlashingCache = lruwrpr.New(seenProposerSlashingSize)
s.badBlockCache = lruwrpr.New(badBlockSize)
s.seenNewPayloadRequestCache = lruwrpr.New(seenBlockSize)
}
func (s *Service) waitForChainStart() {

View File

@@ -329,6 +329,17 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
getSubnetsRequiringPeers: s.allDataColumnSubnets,
})
})
if features.Get().EnableZkvm {
s.spawn(func() {
s.subscribe(
p2p.ExecutionProofSubnetTopicFormat,
s.validateExecutionProof,
s.executionProofSubscriber,
nse,
)
})
}
}
return true
}

View File

@@ -18,6 +18,7 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/io/file"
engine "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
@@ -34,13 +35,12 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return err
}
s.setSeenBlockIndexSlot(signed.Block().Slot(), signed.Block().ProposerIndex())
block := signed.Block()
s.setSeenBlockIndexSlot(block.Slot(), block.ProposerIndex())
root, err := block.HashTreeRoot()
if err != nil {
return err
return fmt.Errorf("hash tree root: %w", err)
}
roBlock, err := blocks.NewROBlockWithRoot(signed, root)
@@ -48,6 +48,11 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return errors.Wrap(err, "new ro block with root")
}
// Cache the new payload request hash tree root corresponding to this block.
if err := s.cacheNewPayloadRequestRoot(roBlock); err != nil {
return fmt.Errorf("cacheNewPayloadRequestRoot: %w", err)
}
go func() {
if err := s.processSidecarsFromExecutionFromBlock(ctx, roBlock); err != nil {
log.WithError(err).WithFields(logrus.Fields{
@@ -84,6 +89,94 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return nil
}
// cacheNewPayloadRequestRoot rebuilds the NewPayloadRequest corresponding to
// the given block, hashes it, and caches the mapping from that hash to the
// block's root and epoch so that gossiped execution proofs (keyed by
// new-payload-request root) can later be matched back to the block.
func (s *Service) cacheNewPayloadRequestRoot(roBlock blocks.ROBlock) error {
	block := roBlock.Block()
	body := block.Body()
	execution, err := body.Execution()
	if err != nil {
		return fmt.Errorf("execution: %w", err)
	}
	transactions, err := execution.Transactions()
	if err != nil {
		return fmt.Errorf("transactions: %w", err)
	}
	withdrawals, err := execution.Withdrawals()
	if err != nil {
		return fmt.Errorf("withdrawals: %w", err)
	}
	blobGasUsed, err := execution.BlobGasUsed()
	if err != nil {
		return fmt.Errorf("blob gas used: %w", err)
	}
	excessBlobGas, err := execution.ExcessBlobGas()
	if err != nil {
		return fmt.Errorf("excess blob gas: %w", err)
	}
	executionPayload := &engine.ExecutionPayloadDeneb{
		ParentHash:    execution.ParentHash(),
		FeeRecipient:  execution.FeeRecipient(),
		StateRoot:     execution.StateRoot(),
		ReceiptsRoot:  execution.ReceiptsRoot(),
		LogsBloom:     execution.LogsBloom(),
		PrevRandao:    execution.PrevRandao(),
		BlockNumber:   execution.BlockNumber(),
		GasLimit:      execution.GasLimit(),
		GasUsed:       execution.GasUsed(),
		Timestamp:     execution.Timestamp(),
		ExtraData:     execution.ExtraData(),
		BaseFeePerGas: execution.BaseFeePerGas(),
		BlockHash:     execution.BlockHash(),
		Transactions:  transactions,
		Withdrawals:   withdrawals,
		BlobGasUsed:   blobGasUsed,
		ExcessBlobGas: excessBlobGas,
	}
	kzgCommitments, err := body.BlobKzgCommitments()
	if err != nil {
		return fmt.Errorf("blob kzg commitments: %w", err)
	}
	// Fix: allocate with zero length and full capacity. The original used
	// make([][]byte, len(kzgCommitments)) and then appended, which produced a
	// slice of 2*len entries whose first half was nil.
	versionedHashes := make([][]byte, 0, len(kzgCommitments))
	for _, kzgCommitment := range kzgCommitments {
		versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(kzgCommitment)
		versionedHashes = append(versionedHashes, versionedHash[:])
	}
	parentBlockRoot := block.ParentRoot()
	executionRequests, err := body.ExecutionRequests()
	if err != nil {
		return fmt.Errorf("execution requests: %w", err)
	}
	newPayloadRequest := engine.NewPayloadRequest{
		ExecutionPayload:  executionPayload,
		VersionedHashes:   versionedHashes,
		ParentBlockRoot:   parentBlockRoot[:],
		ExecutionRequests: executionRequests,
	}
	newPayloadRequestRoot, err := newPayloadRequest.HashTreeRoot()
	if err != nil {
		return fmt.Errorf("hash tree root: %w", err)
	}
	// Renamed from "rootEpoch" to avoid shadowing the rootEpoch type.
	re := rootEpoch{
		root:  roBlock.Root(),
		epoch: slots.ToEpoch(block.Slot()),
	}
	s.setSeenNewPayloadRequest(newPayloadRequestRoot, re)
	return nil
}
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) error {

View File

@@ -0,0 +1,36 @@
package sync
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
// executionProofSubscriber processes a gossiped, already-validated signed
// execution proof: it marks the proof as seen, hands it to the chain service,
// and notifies operation-feed subscribers.
func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
	verifiedProof, ok := msg.(blocks.VerifiedROSignedExecutionProof)
	if !ok {
		return errors.Errorf("incorrect type of message received, wanted %T but got %T", blocks.VerifiedROSignedExecutionProof{}, msg)
	}
	// Insert the execution proof into the pool
	s.setSeenValidProof(&verifiedProof.ROSignedExecutionProof)
	// Save the proof to storage.
	if err := s.cfg.chain.ReceiveProof(verifiedProof); err != nil {
		return errors.Wrap(err, "receive proof")
	}
	// Notify subscribers about the new execution proof
	event := &feed.Event{
		Type: opfeed.ExecutionProofReceived,
		Data: &opfeed.ExecutionProofReceivedData{
			ExecutionProof: &verifiedProof,
		},
	}
	s.cfg.operationNotifier.OperationFeed().Send(event)
	return nil
}

View File

@@ -38,6 +38,11 @@ var (
ErrSlashingSignatureFailure = errors.New("proposer slashing signature verification failed")
)
// rootEpoch pairs a beacon block root with an epoch. It is the value stored
// in seenNewPayloadRequestCache, keyed by new-payload-request root, so that
// gossiped execution proofs can be mapped back to the block they prove.
type rootEpoch struct {
	root [32]byte
	epoch primitives.Epoch
}
// validateBeaconBlockPubSub checks that the incoming block has a valid BLS signature.
// Blocks that have already been seen are ignored. If the BLS signature is any valid signature,
// this method rebroadcasts the message.
@@ -472,6 +477,25 @@ func (s *Service) setSeenBlockIndexSlot(slot primitives.Slot, proposerIdx primit
s.seenBlockCache.Add(string(b), true)
}
// hasSeenNewPayloadRequest reports whether the given new-payload-request root
// was previously cached, returning the associated block root/epoch when found.
// A cache entry of an unexpected type is logged and treated as a miss.
func (s *Service) hasSeenNewPayloadRequest(newPayloadRequestRoot [32]byte) (bool, rootEpoch) {
	cached, found := s.seenNewPayloadRequestCache.Get(newPayloadRequestRoot)
	if !found {
		return false, rootEpoch{}
	}
	entry, isRootEpoch := cached.(rootEpoch)
	if !isRootEpoch {
		log.Error("Cannot cast value to rootEpoch")
		return false, rootEpoch{}
	}
	return true, entry
}
// setSeenNewPayloadRequest caches the block root/epoch pair under the given
// new-payload-request root for later lookup by hasSeenNewPayloadRequest.
func (s *Service) setSeenNewPayloadRequest(newPayloadRequestRoot [32]byte, re rootEpoch) {
	s.seenNewPayloadRequestCache.Add(newPayloadRequestRoot, re)
}
// Returns true if the block is marked as a bad block.
func (s *Service) hasBadBlock(root [32]byte) bool {
if features.BlacklistedBlock(root) {

View File

@@ -0,0 +1,179 @@
package sync
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// validateExecutionProof is the gossip validation pipeline for
// SignedExecutionProof messages. It decodes the message, deduplicates it
// against previously seen proofs, runs the gossip verification requirements,
// and on success attaches the verified proof to msg.ValidatorData for
// downstream handlers.
// NOTE(review): ctx is unused in this body — presumably required by the
// pubsub validator signature; confirm.
func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
	// Always accept our own messages.
	if pid == s.cfg.p2p.PeerID() {
		return pubsub.ValidationAccept, nil
	}

	// Ignore messages during initial sync.
	if s.cfg.initialSync.Syncing() {
		return pubsub.ValidationIgnore, nil
	}

	// Reject messages with a nil topic.
	if msg.Topic == nil {
		return pubsub.ValidationReject, p2p.ErrInvalidTopic
	}

	// Decode the message, reject if it fails.
	m, err := s.decodePubsubMessage(msg)
	if err != nil {
		log.WithError(err).Error("Failed to decode message")
		return pubsub.ValidationReject, err
	}

	// Reject messages that are not of the expected type.
	signedExecutionProof, ok := m.(*ethpb.SignedExecutionProof)
	if !ok {
		log.WithField("message", m).Error("Message is not of type *ethpb.SignedExecutionProof")
		return pubsub.ValidationReject, errWrongMessage
	}

	executionProof := signedExecutionProof.Message

	// [IGNORE] The proof's corresponding new payload request
	// (identified by `proof.message.public_input.new_payload_request_root`)
	// has been seen (via gossip or non-gossip sources)
	// (a client MAY queue proofs for processing once the new payload request is
	// retrieved).
	newPayloadRequestRoot := bytesutil.ToBytes32(executionProof.PublicInput.NewPayloadRequestRoot)
	ok, blockRootEpoch := s.hasSeenNewPayloadRequest(newPayloadRequestRoot)
	if !ok {
		return pubsub.ValidationIgnore, nil
	}
	blockRoot, blockEpoch := blockRootEpoch.root, blockRootEpoch.epoch

	// Convert to ROSignedExecutionProof.
	roSignedProof, err := blocks.NewROSignedExecutionProof(signedExecutionProof, blockRoot, blockEpoch)
	if err != nil {
		return pubsub.ValidationReject, err
	}

	// [IGNORE] The proof is the first proof received for the tuple
	// `(proof.message.public_input.new_payload_request_root, proof.message.proof_type, proof.prover_pubkey)`
	// -- i.e. the first valid or invalid proof for `proof.message.proof_type` from `proof.prover_pubkey`.
	if s.hasSeenProof(&roSignedProof) {
		return pubsub.ValidationIgnore, nil
	}

	// Mark the proof as seen regardless of whether it is valid or not,
	// to prevent processing multiple proofs with the same
	// (new payload request root, proof type, prover pubkey) tuple.
	defer s.setSeenProof(&roSignedProof)

	// Create the verifier with gossip requirements.
	verifier := s.newSignedExecutionProofsVerifier([]blocks.ROSignedExecutionProof{roSignedProof}, verification.GossipSignedExecutionProofRequirements)

	// Run verifications.
	// [REJECT] `proof.prover_pubkey` is associated with an active validator.
	if err := verifier.IsFromActiveValidator(); err != nil {
		return pubsub.ValidationReject, err
	}

	// [REJECT] `proof.signature` is valid with respect to the prover's public key.
	if err := verifier.ValidProverSignature(); err != nil {
		return pubsub.ValidationReject, err
	}

	// [REJECT] `proof.message.proof_data` is non-empty.
	if err := verifier.ProofDataNonEmpty(); err != nil {
		return pubsub.ValidationReject, err
	}

	// [REJECT] `proof.message.proof_data` is not larger than MAX_PROOF_SIZE.
	if err := verifier.ProofDataNotTooLarge(); err != nil {
		return pubsub.ValidationReject, err
	}

	// [REJECT] `proof.message` is a valid execution proof.
	if err := verifier.ProofVerified(); err != nil {
		return pubsub.ValidationReject, err
	}

	// [IGNORE] The proof is the first proof received for the tuple
	// `(proof.message.public_input.new_payload_request_root, proof.message.proof_type)`
	// -- i.e. the first valid proof for `proof.message.proof_type` from any prover.
	// NOTE(review): setSeenValidProof is never called in this function — confirm
	// it is invoked after successful processing elsewhere, otherwise this check
	// can never fire.
	if s.hasSeenValidProof(&roSignedProof) {
		return pubsub.ValidationIgnore, nil
	}

	// Get verified proofs.
	verifiedProofs, err := verifier.VerifiedROSignedExecutionProofs()
	if err != nil {
		return pubsub.ValidationIgnore, err
	}

	log.WithFields(logrus.Fields{
		"blockRoot": fmt.Sprintf("%#x", roSignedProof.BlockRoot()),
		"type":      roSignedProof.Message.ProofType,
	}).Debug("Accepted execution proof")

	// Set validator data to the verified proof.
	// Exactly one proof was passed to the verifier, so index 0 is safe here.
	msg.ValidatorData = verifiedProofs[0]

	return pubsub.ValidationAccept, nil
}
// hasSeenProof returns true if the proof with the same new payload request root,
// proof type and prover pubkey has been seen before, false otherwise.
func (s *Service) hasSeenProof(roSignedProof *blocks.ROSignedExecutionProof) bool {
	_, seen := s.seenProofCache.Get(computeProofCacheKey(roSignedProof))
	return seen
}
// setSeenProof marks the proof with the given new payload request root,
// proof type and prover pubkey as seen before.
func (s *Service) setSeenProof(roSignedProof *blocks.ROSignedExecutionProof) {
	s.seenProofCache.Add(computeProofCacheKey(roSignedProof), true)
}
// hasSeenValidProof returns true if a proof with the same new payload request
// root and proof type has been seen before, false otherwise.
func (s *Service) hasSeenValidProof(roSignedProof *blocks.ROSignedExecutionProof) bool {
	key := computeValidProofCacheKey(roSignedProof)
	_, ok := s.seenValidProofCache.Get(key)
	return ok
}

// setSeenValidProof marks a proof with the given new payload request root and
// proof type as seen before.
func (s *Service) setSeenValidProof(roSignedProof *blocks.ROSignedExecutionProof) {
	key := computeValidProofCacheKey(roSignedProof)
	s.seenValidProofCache.Add(key, true)
}

// computeProofCacheKey builds the per-prover dedup cache key:
// new payload request root || proof type || prover pubkey.
func computeProofCacheKey(roSignedProof *blocks.ROSignedExecutionProof) []byte {
	executionProof := roSignedProof.Message
	// Capacity 81 assumes a 32-byte root, 1-byte proof type and 48-byte
	// pubkey — TODO confirm proof type length; append grows the key if not.
	key := make([]byte, 0, 81)
	key = append(key, executionProof.PublicInput.NewPayloadRequestRoot...)
	key = append(key, executionProof.ProofType...)
	key = append(key, roSignedProof.ProverPubkey...)
	return key
}

// computeValidProofCacheKey builds the any-prover dedup cache key:
// new payload request root || proof type. It takes a pointer for consistency
// with computeProofCacheKey and to avoid copying the struct.
func computeValidProofCacheKey(roSignedProof *blocks.ROSignedExecutionProof) []byte {
	executionProof := roSignedProof.Message
	// Capacity 33 assumes a 32-byte root and 1-byte proof type — TODO confirm.
	key := make([]byte, 0, 33)
	key = append(key, executionProof.PublicInput.NewPayloadRequestRoot...)
	key = append(key, executionProof.ProofType...)
	return key
}

View File

@@ -8,6 +8,7 @@ go_library(
"cache.go",
"data_column.go",
"error.go",
"execution_proof.go",
"fake.go",
"filesystem.go",
"initializer.go",

View File

@@ -30,6 +30,13 @@ const (
// Data columns specific.
RequireValidFields
RequireCorrectSubnet
// Execution proof specific.
RequireActiveValidator
RequireValidProverSignature
RequireProofDataNonEmpty
RequireProofDataNotTooLarge
RequireProofVerified
)
var allBlobSidecarRequirements = []Requirement{

View File

@@ -81,6 +81,20 @@ var (
errDataColumnVerificationImplementationFault = errors.New("could not verify blob data or create a valid VerifiedROBlob")
)
var (
	// ErrProofInvalid is joined with all other execution proof verification errors.
	ErrProofInvalid = AsVerificationFailure(errors.New("invalid execution proof"))
	// ErrProofDataEmpty means RequireProofDataNonEmpty failed.
	ErrProofDataEmpty = errors.Join(ErrProofInvalid, errors.New("proof data is empty"))
	// ErrProofSizeTooLarge means RequireProofDataNotTooLarge failed.
	ErrProofSizeTooLarge = errors.Join(ErrProofInvalid, errors.New("proof data exceeds maximum size"))
	// errProofsInvalid is a general error for proof verification failures.
	errProofsInvalid = errors.New("execution proofs failed verification")
)
// VerificationMultiError is a custom error that can be used to access individual verification failures.
type VerificationMultiError struct {
r *results

View File

@@ -0,0 +1,129 @@
package verification
import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/pkg/errors"
)
// GossipSignedExecutionProofRequirements defines the set of requirements that SignedExecutionProofs received on gossip
// must satisfy in order to upgrade an ROSignedExecutionProof to a VerifiedROSignedExecutionProof.
var GossipSignedExecutionProofRequirements = []Requirement{
	RequireActiveValidator,
	RequireValidProverSignature,
	RequireProofDataNonEmpty,
	RequireProofDataNotTooLarge,
	RequireProofVerified,
}
// ROSignedExecutionProofsVerifier verifies execution proofs.
type ROSignedExecutionProofsVerifier struct {
	*sharedResources
	results *results                        // per-requirement verification outcomes
	proofs  []blocks.ROSignedExecutionProof // the proofs under verification
}

// Compile-time check that the verifier satisfies the interface.
var _ SignedExecutionProofsVerifier = &ROSignedExecutionProofsVerifier{}
// VerifiedROSignedExecutionProofs "upgrades" wrapped ROSignedExecutionProofs to VerifiedROSignedExecutionProofs.
// If any of the verifications ran against the proofs failed, or some required verifications
// were not run, an error will be returned.
func (v *ROSignedExecutionProofsVerifier) VerifiedROSignedExecutionProofs() ([]blocks.VerifiedROSignedExecutionProof, error) {
	if !v.results.allSatisfied() {
		return nil, v.results.errors(errProofsInvalid)
	}
	verified := make([]blocks.VerifiedROSignedExecutionProof, len(v.proofs))
	for i, proof := range v.proofs {
		verified[i] = blocks.NewVerifiedROSignedExecutionProof(proof)
	}
	return verified, nil
}
// SatisfyRequirement allows the caller to assert that a requirement has been satisfied.
// It records a nil result for the requirement without running any check.
func (v *ROSignedExecutionProofsVerifier) SatisfyRequirement(req Requirement) {
	v.recordResult(req, nil)
}
// recordResult stores the outcome of a requirement check. A nil pointer or a
// pointer to a nil error both record success.
func (v *ROSignedExecutionProofsVerifier) recordResult(req Requirement, err *error) {
	if err != nil && *err != nil {
		v.results.record(req, *err)
		return
	}
	v.results.record(req, nil)
}
// IsFromActiveValidator checks the gossip [REJECT] condition that the prover
// pubkey is associated with an active validator.
// NOTE(review): currently a stub that always records success — the actual
// check is not yet implemented.
func (v *ROSignedExecutionProofsVerifier) IsFromActiveValidator() (err error) {
	if ok, err := v.results.cached(RequireActiveValidator); ok {
		return err
	}
	defer v.recordResult(RequireActiveValidator, &err)
	// TODO: To implement
	return nil
}
// ValidProverSignature checks the gossip [REJECT] condition that the proof
// signature is valid with respect to the prover's public key.
// NOTE(review): currently a stub that always records success — the actual
// signature verification is not yet implemented.
func (v *ROSignedExecutionProofsVerifier) ValidProverSignature() (err error) {
	if ok, err := v.results.cached(RequireValidProverSignature); ok {
		return err
	}
	defer v.recordResult(RequireValidProverSignature, &err)
	// TODO: To implement
	return nil
}
// ProofDataNonEmpty verifies that every wrapped proof carries non-empty proof data.
// The result is cached so repeated calls do not re-run the check.
func (v *ROSignedExecutionProofsVerifier) ProofDataNonEmpty() (err error) {
	if ok, err := v.results.cached(RequireProofDataNonEmpty); ok {
		return err
	}
	defer v.recordResult(RequireProofDataNonEmpty, &err)
	for i := range v.proofs {
		if len(v.proofs[i].Message.ProofData) == 0 {
			return proofErrBuilder(ErrProofDataEmpty)
		}
	}
	return nil
}
// ProofDataNotTooLarge verifies that no wrapped proof's data exceeds
// MaxProofDataBytes from the beacon config. The result is cached.
func (v *ROSignedExecutionProofsVerifier) ProofDataNotTooLarge() (err error) {
	if ok, err := v.results.cached(RequireProofDataNotTooLarge); ok {
		return err
	}
	defer v.recordResult(RequireProofDataNotTooLarge, &err)
	limit := params.BeaconConfig().MaxProofDataBytes
	for i := range v.proofs {
		if uint64(len(v.proofs[i].Message.ProofData)) > limit {
			return proofErrBuilder(ErrProofSizeTooLarge)
		}
	}
	return nil
}
// ProofVerified performs zkVM proof verification.
// Currently a no-op that always records success; will be implemented when
// actual proof verification is added.
func (v *ROSignedExecutionProofsVerifier) ProofVerified() (err error) {
	if ok, err := v.results.cached(RequireProofVerified); ok {
		return err
	}
	defer v.recordResult(RequireProofVerified, &err)
	// TODO: To implement
	return nil
}
// proofErrBuilder wraps baseErr with the generic errProofsInvalid message,
// preserving baseErr as the cause so callers can still match the specific
// failure.
func proofErrBuilder(baseErr error) error {
	return errors.Wrap(baseErr, errProofsInvalid.Error())
}

View File

@@ -86,6 +86,16 @@ func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColu
}
}
// NewExecutionProofsVerifier creates an ExecutionProofsVerifier for a slice of
// execution proofs, with the given set of requirements.
func (ini *Initializer) NewExecutionProofsVerifier(proofs []blocks.ROSignedExecutionProof, reqs []Requirement) *ROSignedExecutionProofsVerifier {
	verifier := &ROSignedExecutionProofsVerifier{
		sharedResources: ini.shared,
		results:         newResults(reqs...),
		proofs:          proofs,
	}
	return verifier
}
// InitializerWaiter provides an Initializer once all dependent resources are ready
// via the WaitForInitializer method.
type InitializerWaiter struct {

View File

@@ -54,3 +54,19 @@ type DataColumnsVerifier interface {
// NewDataColumnsVerifier is a function signature that can be used to mock a setup where a
// column verifier can be easily initialized.
type NewDataColumnsVerifier func(dataColumns []blocks.RODataColumn, reqs []Requirement) DataColumnsVerifier
// SignedExecutionProofsVerifier defines the methods implemented by ROSignedExecutionProofsVerifier.
type SignedExecutionProofsVerifier interface {
	// VerifiedROSignedExecutionProofs upgrades the wrapped proofs once all requirements are satisfied.
	VerifiedROSignedExecutionProofs() ([]blocks.VerifiedROSignedExecutionProof, error)
	// SatisfyRequirement records a requirement as satisfied without running its check.
	SatisfyRequirement(Requirement)
	IsFromActiveValidator() error
	ValidProverSignature() error
	ProofDataNonEmpty() error
	ProofDataNotTooLarge() error
	ProofVerified() error
}

// NewSignedExecutionProofsVerifier is a function signature that can be used to mock a setup where an
// execution proofs verifier can be easily initialized.
type NewSignedExecutionProofsVerifier func(proofs []blocks.ROSignedExecutionProof, reqs []Requirement) SignedExecutionProofsVerifier

View File

@@ -29,6 +29,12 @@ func (r Requirement) String() string {
return "RequireSidecarKzgProofVerified"
case RequireSidecarProposerExpected:
return "RequireSidecarProposerExpected"
case RequireValidFields:
return "RequireValidFields"
case RequireCorrectSubnet:
return "RequireCorrectSubnet"
case RequireProofVerified:
return "RequireProofVerified"
default:
return unknownRequirementName
}

View File

@@ -1,3 +0,0 @@
### Added
- Added README for maintaining specrefs.

View File

@@ -1,3 +0,0 @@
### Added
- The ability to download the nightly reference tests from a specific day.

View File

@@ -1,2 +0,0 @@
### Changed
- Sample PTC per committee to reduce allocations.

View File

@@ -1,2 +0,0 @@
### Added
- add modified process withdrawals for gloas

View File

@@ -1,6 +1,8 @@
package flags
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/urfave/cli/v2"
)
@@ -17,9 +19,18 @@ var (
Value: uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
Aliases: []string{"extend-blob-retention-epoch"},
}
ExecutionProofRetentionEpochFlag = &cli.Uint64Flag{
Name: "execution-proof-retention-epochs",
Usage: fmt.Sprintf(
"Override the default execution proof retention period (measured in epochs). The node will exit with an error at startup if the value is less than the default of %d epochs.",
params.BeaconConfig().MinEpochsForExecutionProofRequests,
),
Value: uint64(params.BeaconConfig().MinEpochsForExecutionProofRequests),
}
)
var Flags = []cli.Flag{
BackfillOldestSlot,
BlobRetentionEpochFlag,
ExecutionProofRetentionEpochFlag,
}

View File

@@ -32,6 +32,10 @@ var (
Name: "data-column-path",
Usage: "Location for data column storage. Default location will be a 'data-columns' directory next to the beacon db.",
}
ExecutionProofStoragePathFlag = &cli.PathFlag{
Name: "execution-proof-path",
Usage: "Location for execution proof storage. Default location will be a 'execution-proofs' directory next to the beacon db.",
}
)
// Flags is the list of CLI flags for configuring blob storage.
@@ -39,6 +43,7 @@ var Flags = []cli.Flag{
BlobStoragePathFlag,
BlobStorageLayout,
DataColumnStoragePathFlag,
ExecutionProofStoragePathFlag,
}
func layoutOptions() string {
@@ -71,9 +76,10 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
if err != nil {
return nil, errors.Wrap(err, "detecting blob storage layout")
}
if layout == filesystem.LayoutNameFlat {
log.Warnf("Existing '%s' blob storage layout detected. Consider setting the flag --%s=%s for faster startup and more reliable pruning. Setting this flag will automatically migrate your existing blob storage to the newer layout on the next restart.",
if layout == filesystem.LayoutNameFlat {
log.Warnf(
"Existing '%s' blob storage layout detected. Consider setting the flag --%s=%s for faster startup and more reliable pruning. Setting this flag will automatically migrate your existing blob storage to the newer layout on the next restart.",
filesystem.LayoutNameFlat, BlobStorageLayout.Name, filesystem.LayoutNameByEpoch)
}
blobStorageOptions := node.WithBlobStorageOptions(
@@ -92,7 +98,17 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
filesystem.WithDataColumnBasePath(dataColumnStoragePath(c)),
)
opts := []node.Option{blobStorageOptions, dataColumnStorageOption}
executionProofRetentionEpoch, err := executionProofRetentionEpoch(c)
if err != nil {
return nil, errors.Wrap(err, "execution proof retention epoch")
}
proofStorageOption := node.WithProofStorageOption(
filesystem.WithProofRetentionEpochs(executionProofRetentionEpoch),
filesystem.WithProofBasePath(executionProofStoragePath(c)),
)
opts := []node.Option{blobStorageOptions, dataColumnStorageOption, proofStorageOption}
return opts, nil
}
@@ -164,6 +180,17 @@ func dataColumnStoragePath(c *cli.Context) string {
return dataColumnsPath
}
// TODO: Create a generic function for these storage path getters.
// executionProofStoragePath returns the configured execution proof storage
// directory, defaulting to an "execution-proofs" directory under the data dir.
func executionProofStoragePath(c *cli.Context) string {
	if p := c.Path(ExecutionProofStoragePathFlag.Name); p != "" {
		return p
	}
	// Default: an "execution-proofs" subdirectory of the node data directory.
	return filepath.Join(c.String(cmd.DataDirFlag.Name), "execution-proofs")
}
var errInvalidBlobRetentionEpochs = errors.New("value is smaller than spec minimum")
// blobRetentionEpoch returns the spec default MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
@@ -204,6 +231,26 @@ func dataColumnRetentionEpoch(cliCtx *cli.Context) (primitives.Epoch, error) {
return customValue, nil
}
// executionProofRetentionEpoch returns the spec default MIN_EPOCHS_FOR_EXECUTION_PROOFS_REQUEST
// or a user-specified flag overriding this value. If a user-specified override is
// smaller than the spec default, an error will be returned.
// TODO: Create a generic function for these retention epoch getters.
func executionProofRetentionEpoch(cliCtx *cli.Context) (primitives.Epoch, error) {
	specMinimum := params.BeaconConfig().MinEpochsForExecutionProofRequests
	if !cliCtx.IsSet(das.ExecutionProofRetentionEpochFlag.Name) {
		return specMinimum, nil
	}
	override := primitives.Epoch(cliCtx.Uint64(das.ExecutionProofRetentionEpochFlag.Name))
	// Reject overrides below the spec minimum.
	if override < specMinimum {
		return specMinimum, errors.Wrapf(errInvalidBlobRetentionEpochs, "%s=%d, spec=%d", das.ExecutionProofRetentionEpochFlag.Name, override, specMinimum)
	}
	return override, nil
}
func init() {
BlobStorageLayout.Action = validateLayoutFlag
}

View File

@@ -49,6 +49,7 @@ type Flags struct {
DisableDutiesV2 bool // DisableDutiesV2 sets validator client to use the get Duties endpoint
EnableWeb bool // EnableWeb enables the webui on the validator client
EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
EnableZkvm bool // EnableZkvm enables zkVM related features.
// Logging related toggles.
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
@@ -304,6 +305,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
}
}
if ctx.IsSet(EnableZkvmFlag.Name) {
logEnabled(EnableZkvmFlag)
cfg.EnableZkvm = true
}
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)
return nil

View File

@@ -220,6 +220,17 @@ var (
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
// Activate ZKVM execution proof mode
EnableZkvmFlag = &cli.BoolFlag{
Name: "activate-zkvm",
Usage: `
Activates ZKVM execution proof mode. Enables the node to subscribe to the
execution_proof gossip topic, receive and verify execution proofs from peers,
and advertise zkVM support in its ENR for peer discovery.
Use --zkvm-generation-proof-types to specify which proof types this node
should generate (optional - nodes can verify without generating).
`,
}
)
// devModeFlags holds list of flags that are set when development mode is on.
@@ -284,6 +295,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
forceHeadFlag,
blacklistRoots,
enableHashtree,
EnableZkvmFlag,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -130,7 +130,6 @@ type BeaconChainConfig struct {
MaxWithdrawalsPerPayload uint64 `yaml:"MAX_WITHDRAWALS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalsPerPayload defines the maximum number of withdrawals in a block.
MaxBlsToExecutionChanges uint64 `yaml:"MAX_BLS_TO_EXECUTION_CHANGES" spec:"true"` // MaxBlsToExecutionChanges defines the maximum number of BLS-to-execution-change objects in a block.
MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
MaxBuildersPerWithdrawalsSweep uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxBuildersPerWithdrawalsSweep bounds the size of the builder withdrawals sweep per slot.
// BLS domain values.
DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.
@@ -324,6 +323,11 @@ type BeaconChainConfig struct {
// Blobs Values
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" spec:"true"`
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes uint64 `yaml:"MAX_PROOF_DATA_BYTES" spec:"true"` // MaxProofDataBytes is the maximum number of bytes for execution proof data.
MinProofsRequired uint64 `yaml:"MIN_PROOFS_REQUIRED" spec:"true"` // MinProofsRequired is the minimum number of execution proofs required for a block to be considered valid.
MinEpochsForExecutionProofRequests primitives.Epoch `yaml:"MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS" spec:"true"` // MinEpochsForExecutionProofRequests is the minimum number of epochs the node will keep the execution proofs for.
// Deprecated_MaxBlobsPerBlock defines the max blobs that could exist in a block.
// Deprecated: This field is no longer supported. Avoid using it.
DeprecatedMaxBlobsPerBlock int `yaml:"MAX_BLOBS_PER_BLOCK" spec:"true"`
@@ -747,6 +751,20 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
}
// WithinExecutionProofPeriod checks if the given epoch is within the execution proof retention period.
// This is used to determine whether execution proofs should be requested or generated for blocks at the given epoch.
// Returns true if the epoch is at or after the retention boundary (Fulu fork epoch or proof retention epoch).
func WithinExecutionProofPeriod(epoch, current primitives.Epoch) bool {
	cfg := BeaconConfig()
	// MinEpochsForExecutionProofRequests is already a primitives.Epoch, so the
	// previous explicit conversions were redundant. Guard against underflow
	// for epochs earlier than the retention window.
	var retentionStart primitives.Epoch
	if current >= cfg.MinEpochsForExecutionProofRequests {
		retentionStart = current - cfg.MinEpochsForExecutionProofRequests
	}
	return epoch >= primitives.MaxEpoch(cfg.FuluForkEpoch, retentionStart)
}
// EpochsDuration returns the time duration of the given number of epochs.
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
return SlotsDuration(SlotsForEpochs(count, b), b)

View File

@@ -194,7 +194,6 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
fmt.Sprintf("SHARD_COMMITTEE_PERIOD: %d", cfg.ShardCommitteePeriod),
fmt.Sprintf("MIN_VALIDATOR_WITHDRAWABILITY_DELAY: %d", cfg.MinValidatorWithdrawabilityDelay),
fmt.Sprintf("MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxValidatorsPerWithdrawalsSweep),
fmt.Sprintf("MAX_BUILDERS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxBuildersPerWithdrawalsSweep),
fmt.Sprintf("MAX_SEED_LOOKAHEAD: %d", cfg.MaxSeedLookahead),
fmt.Sprintf("EJECTION_BALANCE: %d", cfg.EjectionBalance),
fmt.Sprintf("MIN_PER_EPOCH_CHURN_LIMIT: %d", cfg.MinPerEpochChurnLimit),

View File

@@ -40,6 +40,7 @@ var mainnetNetworkConfig = &NetworkConfig{
AttSubnetKey: "attnets",
SyncCommsSubnetKey: "syncnets",
CustodyGroupCountKey: "cgc",
ZkvmEnabledKey: "zkvm",
MinimumPeersInSubnetSearch: 20,
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
BootstrapNodes: []string{
@@ -175,7 +176,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxWithdrawalsPerPayload: 16,
MaxBlsToExecutionChanges: 16,
MaxValidatorsPerWithdrawalsSweep: 16384,
MaxBuildersPerWithdrawalsSweep: 16384,
// BLS domain values.
DomainBeaconProposer: bytesutil.Uint32ToBytes4(0x00000000),
@@ -372,6 +372,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxBlobsPerBlock: 21,
},
},
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes: 1_048_576, // 1 MiB
MinProofsRequired: 2,
MinEpochsForExecutionProofRequests: 2,
}
// MainnetTestConfig provides a version of the mainnet config that has a different name

View File

@@ -11,6 +11,7 @@ type NetworkConfig struct {
AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield.
SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield.
CustodyGroupCountKey string // CustodyGroupsCountKey is the ENR key of the custody group count.
ZkvmEnabledKey string // ZkvmEnabledKey is the ENR key of whether zkVM mode is enabled or not.
MinimumPeersInSubnetSearch uint64 // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search.
// Chain Network Config

View File

@@ -94,7 +94,6 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
require.DeepEqual(t, expected.MaxDeposits, actual.MaxDeposits)
require.DeepEqual(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits)
require.DeepEqual(t, expected.MaxWithdrawalsPerPayload, actual.MaxWithdrawalsPerPayload)
require.DeepEqual(t, expected.MaxBuildersPerWithdrawalsSweep, actual.MaxBuildersPerWithdrawalsSweep)
require.DeepEqual(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer)
require.DeepEqual(t, expected.DomainRandao, actual.DomainRandao)
require.DeepEqual(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester)

View File

@@ -13,6 +13,7 @@ go_library(
"roblob.go",
"roblock.go",
"rodatacolumn.go",
"roexecutionproof.go",
"setters.go",
"signed_execution_bid.go",
"types.go",

View File

@@ -0,0 +1,91 @@
package blocks
import (
"errors"
"fmt"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
var (
errNilExecutionProof = errors.New("execution proof is nil")
errEmptyProverPubkey = errors.New("prover pubkey is empty")
errEmptyProofData = errors.New("proof data is empty")
errEmptyNewPayloadRequestRoot = errors.New("new payload request root is empty")
)
// ROSignedExecutionProof represents a read-only signed execution proof
// together with the block root and epoch it was constructed with.
type ROSignedExecutionProof struct {
	*ethpb.SignedExecutionProof
	blockRoot [fieldparams.RootLength]byte // block root supplied at construction
	epoch     primitives.Epoch             // epoch supplied at construction
}
// roSignedExecutionProofNilCheck validates that the signed execution proof and
// its required fields are present and non-empty. It returns a descriptive
// error for the first missing field found.
func roSignedExecutionProofNilCheck(sep *ethpb.SignedExecutionProof) error {
	if sep == nil {
		return errNilExecutionProof
	}
	if len(sep.ProverPubkey) == 0 {
		return errEmptyProverPubkey
	}
	ep := sep.Message
	// Guard against a nil message or public input before dereferencing them —
	// the previous code would panic on `ep.ProofData` / `ep.PublicInput.…`
	// when either pointer was nil (e.g. a malformed gossip message).
	if ep == nil || ep.PublicInput == nil {
		return errNilExecutionProof
	}
	if len(ep.ProofData) == 0 {
		return errEmptyProofData
	}
	if len(ep.PublicInput.NewPayloadRequestRoot) == 0 {
		return errEmptyNewPayloadRequestRoot
	}
	return nil
}
// NewROSignedExecutionProof creates a new ROSignedExecutionProof with the given
// signed proof, block root and epoch. It returns an error if the proof fails
// the nil/empty-field checks.
func NewROSignedExecutionProof(
	signedExecutionProof *ethpb.SignedExecutionProof,
	root [fieldparams.RootLength]byte,
	epoch primitives.Epoch,
) (ROSignedExecutionProof, error) {
	if err := roSignedExecutionProofNilCheck(signedExecutionProof); err != nil {
		return ROSignedExecutionProof{}, fmt.Errorf("ro signed execution proof nil check: %w", err)
	}
	roSignedExecutionProof := ROSignedExecutionProof{
		SignedExecutionProof: signedExecutionProof,
		blockRoot:            root,
		epoch:                epoch,
	}
	return roSignedExecutionProof, nil
}
// BlockRoot returns the block root the execution proof was constructed with.
func (p *ROSignedExecutionProof) BlockRoot() [fieldparams.RootLength]byte {
	return p.blockRoot
}

// Epoch returns the epoch the execution proof was constructed with.
func (p *ROSignedExecutionProof) Epoch() primitives.Epoch {
	return p.epoch
}
// // ProofType returns the proof type of the execution proof.
// func (p *ROExecutionProof) ProofType() primitives.ProofType {
// return p.ExecutionProof.ProofType
// }
// VerifiedROSignedExecutionProof represents an ROSignedExecutionProof that has
// undergone full verification.
type VerifiedROSignedExecutionProof struct {
	ROSignedExecutionProof
}

// NewVerifiedROSignedExecutionProof "upgrades" an ROSignedExecutionProof to a
// VerifiedROSignedExecutionProof. This method should only be used by the
// verification package.
func NewVerifiedROSignedExecutionProof(ro ROSignedExecutionProof) VerifiedROSignedExecutionProof {
	return VerifiedROSignedExecutionProof{ROSignedExecutionProof: ro}
}

View File

@@ -13,16 +13,6 @@ var _ fssz.Unmarshaler = (*BuilderIndex)(nil)
// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64
// ToValidatorIndex sets the builder flag on a builder index.
//
// Spec v1.6.1 (pseudocode):
// def convert_builder_index_to_validator_index(builder_index: BuilderIndex) -> ValidatorIndex:
//
// return ValidatorIndex(builder_index | BUILDER_INDEX_FLAG)
func (b BuilderIndex) ToValidatorIndex() ValidatorIndex {
return ValidatorIndex(uint64(b) | BuilderIndexFlag)
}
// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(b)

View File

@@ -10,34 +10,9 @@ var _ fssz.HashRoot = (ValidatorIndex)(0)
var _ fssz.Marshaler = (*ValidatorIndex)(nil)
var _ fssz.Unmarshaler = (*ValidatorIndex)(nil)
// BuilderIndexFlag marks a ValidatorIndex as a BuilderIndex when the bit is set.
//
// Spec v1.6.1: BUILDER_INDEX_FLAG.
const BuilderIndexFlag uint64 = 1 << 40
// ValidatorIndex in eth2.
type ValidatorIndex uint64
// IsBuilderIndex returns true when the BuilderIndex flag is set on the validator index.
//
// Spec v1.6.1 (pseudocode):
// def is_builder_index(validator_index: ValidatorIndex) -> bool:
//
// return (validator_index & BUILDER_INDEX_FLAG) != 0
func (v ValidatorIndex) IsBuilderIndex() bool {
return uint64(v)&BuilderIndexFlag != 0
}
// ToBuilderIndex strips the builder flag from a validator index.
//
// Spec v1.6.1 (pseudocode):
// def convert_validator_index_to_builder_index(validator_index: ValidatorIndex) -> BuilderIndex:
//
// return BuilderIndex(validator_index & ~BUILDER_INDEX_FLAG)
func (v ValidatorIndex) ToBuilderIndex() BuilderIndex {
return BuilderIndex(uint64(v) & ^BuilderIndexFlag)
}
// Div divides validator index by x.
// This method panics if dividing by zero!
func (v ValidatorIndex) Div(x uint64) ValidatorIndex {

View File

@@ -33,32 +33,3 @@ func TestValidatorIndex_Casting(t *testing.T) {
}
})
}
func TestValidatorIndex_BuilderIndexFlagConversions(t *testing.T) {
base := uint64(42)
unflagged := ValidatorIndex(base)
if unflagged.IsBuilderIndex() {
t.Fatalf("expected unflagged validator index to not be a builder index")
}
if got, want := unflagged.ToBuilderIndex(), BuilderIndex(base); got != want {
t.Fatalf("unexpected builder index: got %d want %d", got, want)
}
flagged := ValidatorIndex(base | BuilderIndexFlag)
if !flagged.IsBuilderIndex() {
t.Fatalf("expected flagged validator index to be a builder index")
}
if got, want := flagged.ToBuilderIndex(), BuilderIndex(base); got != want {
t.Fatalf("unexpected builder index: got %d want %d", got, want)
}
builder := BuilderIndex(base)
roundTrip := builder.ToValidatorIndex()
if !roundTrip.IsBuilderIndex() {
t.Fatalf("expected round-tripped validator index to be a builder index")
}
if got, want := roundTrip.ToBuilderIndex(), builder; got != want {
t.Fatalf("unexpected round-trip builder index: got %d want %d", got, want)
}
}

72
kurtosis/README.md Normal file
View File

@@ -0,0 +1,72 @@
# Kurtosis scripts for EIP-8025
## How to run
I slightly modified [Manu's tip](https://hackmd.io/8z4thpsyQJioaU6jj0Wazw) by adding the following helper functions to my `~/.zshrc`.
```zsh
# Kurtosis Aliases
blog() {
docker logs -f "$(docker ps | grep cl-"$1"-prysm-geth | awk '{print $NF}')" 2>&1
}
vlog() {
docker logs -f "$(docker ps | grep vc-"$1"-geth-prysm | awk '{print $NF}')" 2>&1
}
dora() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/dora/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
graf() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/grafana/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
devnet () {
local args_file_path="./kurtosis/default.yaml"
if [ ! -z "$1" ]; then
args_file_path="$1"
echo "Using custom args-file path: $args_file_path"
else
echo "Using default args-file path: $args_file_path"
fi
kurtosis clean -a &&
bazel build //cmd/beacon-chain:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/beacon-chain/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/beacon-chain prysm-bn-custom-image &&
bazel build //cmd/validator:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/validator/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/validator prysm-vc-custom-image &&
kurtosis run github.com/ethpandaops/ethereum-package --args-file="$args_file_path" --verbosity brief &&
dora
}
stop() {
kurtosis clean -a
}
dps() {
docker ps --format "table {{.ID}}\\t{{.Image}}\\t{{.Status}}\\t{{.Names}}" -a
}
```
From the project root directory, you can spin up a devnet with:
```bash
$ devnet
```
Or you can specify the network parameter YAML file like:
```bash
$ devnet ./kurtosis/proof_verify.yaml
```
### Running scripts with local images
Prysm images are built and loaded automatically by the `devnet` command, but Lighthouse images must be provided manually if you want to run a script that uses `lighthouse`:
#### `./kurtosis/interop.yaml`
- `lighthouse:local`: Please build your own image following [Lighthouse's guide](https://lighthouse-book.sigmaprime.io/installation_docker.html?highlight=docker#building-the-docker-image) on [`kevaundray/kw/sel-alternative`](https://github.com/kevaundray/lighthouse/tree/kw/sel-alternative/) branch.

16
kurtosis/default.yaml Normal file
View File

@@ -0,0 +1,16 @@
participants:
  # Four Prysm/geth nodes, all generating execution proofs of types 0 and 1.
  - el_type: geth
    cl_type: prysm
    # Locally built beacon-node image (loaded by the `devnet` helper; see kurtosis/README.md).
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
    # Locally built validator-client image.
    vc_image: prysm-vc-custom-image
    count: 4
network_params:
  # Short slots to speed up local iteration.
  seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
  - dora
  - prometheus_grafana

38
kurtosis/interop.yaml Normal file
View File

@@ -0,0 +1,38 @@
# 3 nodes (2 from Prysm, 1 from Lighthouse) generate proofs and
# 1 node only verifies
participants:
  # Prysm: Proof generating nodes (nodes 1-2)
  - el_type: geth
    el_image: ethereum/client-go:latest
    cl_type: prysm
    # Locally built images; see kurtosis/README.md.
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
    vc_image: prysm-vc-custom-image
    count: 2
  # Lighthouse: Proof generating nodes (node 3)
  - el_type: geth
    el_image: ethereum/client-go:latest
    cl_type: lighthouse
    # Must be built locally; see "Running scripts with local images" in kurtosis/README.md.
    cl_image: lighthouse:local
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
      - --target-peers=3
    count: 1
  # Prysm: Proof verifying only node (node 4)
  # el_type `dummy`: no execution client is launched for the verify-only node.
  - el_type: dummy
    cl_type: prysm
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
    vc_image: prysm-vc-custom-image
    count: 1
network_params:
  seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
  - dora
  - prometheus_grafana

View File

@@ -0,0 +1,27 @@
# 3 nodes generate proofs, 1 node only verifies
participants:
  # Proof generating nodes (nodes 1-3)
  - el_type: geth
    el_image: ethereum/client-go:latest
    cl_type: prysm
    # Locally built images; see kurtosis/README.md.
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
    vc_image: prysm-vc-custom-image
    count: 3
  # Proof verifying only node (node 4)
  # el_type `dummy`: no execution client is launched for the verify-only node.
  - el_type: dummy
    cl_type: prysm
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
    vc_image: prysm-vc-custom-image
    count: 1
network_params:
  seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
  - dora
  - prometheus_grafana

View File

@@ -51,6 +51,7 @@ ssz_gen_marshal(
"DepositRequest",
"ConsolidationRequest",
"ExecutionRequests",
"NewPayloadRequest",
],
)

View File

@@ -3580,3 +3580,211 @@ func (b *BlobsBundleV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the NewPayloadRequest object.
// It allocates a buffer of SizeSSZ() bytes and delegates to MarshalSSZTo.
func (n *NewPayloadRequest) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(n)
}
// MarshalSSZTo ssz marshals the NewPayloadRequest object to a target array.
//
// Layout: a 44-byte fixed section (4-byte offsets for fields 0, 1 and 3 plus
// the 32-byte ParentBlockRoot) followed by the variable-length fields in
// field order. NOTE: as a side effect, nil ExecutionPayload/ExecutionRequests
// are replaced with fresh zero values (standard fastssz-generated behavior).
func (n *NewPayloadRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	// Running offset into the variable section; starts right after the
	// 44-byte fixed section.
	offset := int(44)
	// Offset (0) 'ExecutionPayload'
	dst = ssz.WriteOffset(dst, offset)
	if n.ExecutionPayload == nil {
		n.ExecutionPayload = new(ExecutionPayloadDeneb)
	}
	offset += n.ExecutionPayload.SizeSSZ()
	// Offset (1) 'VersionedHashes' (each hash is 32 bytes)
	dst = ssz.WriteOffset(dst, offset)
	offset += len(n.VersionedHashes) * 32
	// Field (2) 'ParentBlockRoot' — fixed 32 bytes, written inline in the fixed section.
	if size := len(n.ParentBlockRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.ParentBlockRoot", size, 32)
		return
	}
	dst = append(dst, n.ParentBlockRoot...)
	// Offset (3) 'ExecutionRequests'
	dst = ssz.WriteOffset(dst, offset)
	if n.ExecutionRequests == nil {
		n.ExecutionRequests = new(ExecutionRequests)
	}
	offset += n.ExecutionRequests.SizeSSZ()
	// Field (0) 'ExecutionPayload' — variable section starts here.
	if dst, err = n.ExecutionPayload.MarshalSSZTo(dst); err != nil {
		return
	}
	// Field (1) 'VersionedHashes' — list of up to 4096 32-byte hashes.
	if size := len(n.VersionedHashes); size > 4096 {
		err = ssz.ErrListTooBigFn("--.VersionedHashes", size, 4096)
		return
	}
	for ii := 0; ii < len(n.VersionedHashes); ii++ {
		if size := len(n.VersionedHashes[ii]); size != 32 {
			err = ssz.ErrBytesLengthFn("--.VersionedHashes[ii]", size, 32)
			return
		}
		dst = append(dst, n.VersionedHashes[ii]...)
	}
	// Field (3) 'ExecutionRequests'
	if dst, err = n.ExecutionRequests.MarshalSSZTo(dst); err != nil {
		return
	}
	return
}
// UnmarshalSSZ ssz unmarshals the NewPayloadRequest object.
//
// The buffer must contain a 44-byte fixed section (see MarshalSSZTo) with
// monotonically increasing offsets; each variable field is decoded from its
// offset-delimited slice of the buffer.
func (n *NewPayloadRequest) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size < 44 {
		return ssz.ErrSize
	}
	tail := buf
	var o0, o1, o3 uint64
	// Offset (0) 'ExecutionPayload'
	if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
		return ssz.ErrOffset
	}
	// The first variable field must start immediately after the fixed section.
	if o0 != 44 {
		return ssz.ErrInvalidVariableOffset
	}
	// Offset (1) 'VersionedHashes'
	if o1 = ssz.ReadOffset(buf[4:8]); o1 > size || o0 > o1 {
		return ssz.ErrOffset
	}
	// Field (2) 'ParentBlockRoot' — fixed 32 bytes read straight from the fixed section.
	// NOTE(review): fastssz's append pattern appends to any pre-existing
	// contents; callers are expected to decode into a fresh object.
	if cap(n.ParentBlockRoot) == 0 {
		n.ParentBlockRoot = make([]byte, 0, len(buf[8:40]))
	}
	n.ParentBlockRoot = append(n.ParentBlockRoot, buf[8:40]...)
	// Offset (3) 'ExecutionRequests'
	if o3 = ssz.ReadOffset(buf[40:44]); o3 > size || o1 > o3 {
		return ssz.ErrOffset
	}
	// Field (0) 'ExecutionPayload' occupies [o0, o1).
	{
		buf = tail[o0:o1]
		if n.ExecutionPayload == nil {
			n.ExecutionPayload = new(ExecutionPayloadDeneb)
		}
		if err = n.ExecutionPayload.UnmarshalSSZ(buf); err != nil {
			return err
		}
	}
	// Field (1) 'VersionedHashes' occupies [o1, o3): a multiple of 32 bytes, at most 4096 entries.
	{
		buf = tail[o1:o3]
		num, err := ssz.DivideInt2(len(buf), 32, 4096)
		if err != nil {
			return err
		}
		n.VersionedHashes = make([][]byte, num)
		for ii := 0; ii < num; ii++ {
			if cap(n.VersionedHashes[ii]) == 0 {
				n.VersionedHashes[ii] = make([]byte, 0, len(buf[ii*32:(ii+1)*32]))
			}
			n.VersionedHashes[ii] = append(n.VersionedHashes[ii], buf[ii*32:(ii+1)*32]...)
		}
	}
	// Field (3) 'ExecutionRequests' occupies the remainder [o3, size).
	{
		buf = tail[o3:]
		if n.ExecutionRequests == nil {
			n.ExecutionRequests = new(ExecutionRequests)
		}
		if err = n.ExecutionRequests.UnmarshalSSZ(buf); err != nil {
			return err
		}
	}
	return err
}
// SizeSSZ returns the ssz encoded size in bytes for the NewPayloadRequest object.
// The 44-byte base covers the three 4-byte offsets plus the 32-byte
// ParentBlockRoot. NOTE: as a side effect, nil ExecutionPayload and
// ExecutionRequests are replaced with fresh zero values (fastssz behavior).
func (n *NewPayloadRequest) SizeSSZ() (size int) {
	size = 44
	// Field (0) 'ExecutionPayload'
	if n.ExecutionPayload == nil {
		n.ExecutionPayload = new(ExecutionPayloadDeneb)
	}
	size += n.ExecutionPayload.SizeSSZ()
	// Field (1) 'VersionedHashes' — 32 bytes per hash.
	size += len(n.VersionedHashes) * 32
	// Field (3) 'ExecutionRequests'
	if n.ExecutionRequests == nil {
		n.ExecutionRequests = new(ExecutionRequests)
	}
	size += n.ExecutionRequests.SizeSSZ()
	return
}
// HashTreeRoot ssz hashes the NewPayloadRequest object.
// Delegates to HashTreeRootWith using the package's default hasher pool.
func (n *NewPayloadRequest) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(n)
}
// HashTreeRootWith ssz hashes the NewPayloadRequest object with a hasher.
//
// NOTE(review): unlike MarshalSSZTo/SizeSSZ, this does not default nil
// ExecutionPayload/ExecutionRequests, so hashing a partially nil object
// would panic — confirm callers always populate both fields.
func (n *NewPayloadRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()
	// Field (0) 'ExecutionPayload'
	if err = n.ExecutionPayload.HashTreeRootWith(hh); err != nil {
		return
	}
	// Field (1) 'VersionedHashes' — merkleized as a list with limit 4096.
	{
		if size := len(n.VersionedHashes); size > 4096 {
			err = ssz.ErrListTooBigFn("--.VersionedHashes", size, 4096)
			return
		}
		subIndx := hh.Index()
		for _, i := range n.VersionedHashes {
			if len(i) != 32 {
				err = ssz.ErrBytesLength
				return
			}
			hh.Append(i)
		}
		numItems := uint64(len(n.VersionedHashes))
		hh.MerkleizeWithMixin(subIndx, numItems, 4096)
	}
	// Field (2) 'ParentBlockRoot' — fixed 32-byte vector.
	if size := len(n.ParentBlockRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.ParentBlockRoot", size, 32)
		return
	}
	hh.PutBytes(n.ParentBlockRoot)
	// Field (3) 'ExecutionRequests'
	if err = n.ExecutionRequests.HashTreeRootWith(hh); err != nil {
		return
	}
	hh.Merkleize(indx)
	return
}

View File

@@ -295,4 +295,5 @@ message BlobAndProofV2 {
(ethereum.eth.ext.ssz_size) = "48",
(ethereum.eth.ext.ssz_max) = "max_cell_proofs_length.size"
];
}
}

View File

@@ -10,6 +10,7 @@ import (
reflect "reflect"
sync "sync"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
@@ -97,43 +98,134 @@ func (x *ExecutionBundleFulu) GetExecutionRequests() [][]byte {
return nil
}
// NewPayloadRequest carries the parameters of an engine-API newPayload call
// (payload, versioned hashes, parent block root and execution requests) as a
// single SSZ-taggable message. Generated by protoc-gen-go; do not edit by hand.
type NewPayloadRequest struct {
	state            protoimpl.MessageState `protogen:"open.v1"`
	ExecutionPayload *ExecutionPayloadDeneb `protobuf:"bytes,1,opt,name=execution_payload,json=executionPayload,proto3" json:"execution_payload,omitempty"`
	// VersionedHashes is a list of up to 4096 entries, 32 bytes each (see ssz tags).
	VersionedHashes [][]byte `protobuf:"bytes,2,rep,name=versioned_hashes,json=versionedHashes,proto3" json:"versioned_hashes,omitempty" ssz-max:"4096" ssz-size:"?,32"`
	// ParentBlockRoot is a fixed 32-byte root.
	ParentBlockRoot   []byte             `protobuf:"bytes,3,opt,name=parent_block_root,json=parentBlockRoot,proto3" json:"parent_block_root,omitempty" ssz-size:"32"`
	ExecutionRequests *ExecutionRequests `protobuf:"bytes,4,opt,name=execution_requests,json=executionRequests,proto3" json:"execution_requests,omitempty"`
	unknownFields     protoimpl.UnknownFields
	sizeCache         protoimpl.SizeCache
}
// Reset restores x to its zero state. (protoc-generated boilerplate.)
func (x *NewPayloadRequest) Reset() {
	*x = NewPayloadRequest{}
	mi := &file_proto_engine_v1_fulu_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NewPayloadRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NewPayloadRequest) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API.
func (x *NewPayloadRequest) ProtoReflect() protoreflect.Message {
	mi := &file_proto_engine_v1_fulu_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewPayloadRequest.ProtoReflect.Descriptor instead.
func (*NewPayloadRequest) Descriptor() ([]byte, []int) {
	return file_proto_engine_v1_fulu_proto_rawDescGZIP(), []int{1}
}

// GetExecutionPayload returns the payload, or nil on a nil receiver.
func (x *NewPayloadRequest) GetExecutionPayload() *ExecutionPayloadDeneb {
	if x != nil {
		return x.ExecutionPayload
	}
	return nil
}

// GetVersionedHashes returns the versioned hashes, or nil on a nil receiver.
func (x *NewPayloadRequest) GetVersionedHashes() [][]byte {
	if x != nil {
		return x.VersionedHashes
	}
	return nil
}

// GetParentBlockRoot returns the parent block root, or nil on a nil receiver.
func (x *NewPayloadRequest) GetParentBlockRoot() []byte {
	if x != nil {
		return x.ParentBlockRoot
	}
	return nil
}

// GetExecutionRequests returns the execution requests, or nil on a nil receiver.
func (x *NewPayloadRequest) GetExecutionRequests() *ExecutionRequests {
	if x != nil {
		return x.ExecutionRequests
	}
	return nil
}
var File_proto_engine_v1_fulu_proto protoreflect.FileDescriptor
var file_proto_engine_v1_fulu_proto_rawDesc = []byte{
0x0a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x66, 0x75, 0x6c, 0x75, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65, 0x74,
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31,
0x1a, 0x26, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x67, 0x69,
0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x02, 0x0a, 0x13, 0x45, 0x78, 0x65,
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x75,
0x12, 0x43, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62, 0x52, 0x07, 0x70, 0x61,
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x62,
0x6c, 0x6f, 0x62, 0x73, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x21, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64,
0x6c, 0x65, 0x56, 0x32, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c,
0x65, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6f, 0x76, 0x65, 0x72,
0x72, 0x69, 0x64, 0x65, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
0x28, 0x08, 0x52, 0x15, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
0x64, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x78, 0x65,
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18,
0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x8e, 0x01, 0x0a, 0x16, 0x6f, 0x72, 0x67,
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65,
0x2e, 0x76, 0x31, 0x42, 0x0c, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74,
0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa,
0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c,
0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f,
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65,
0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x72, 0x61, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x02, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x75, 0x12, 0x43, 0x0a, 0x07,
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x62, 0x73,
0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x32,
0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x36, 0x0a,
0x17, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15,
0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x42, 0x75,
0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
0x0c, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x73, 0x22, 0xb2, 0x02, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x78,
0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75,
0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62,
0x52, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f,
0x61, 0x64, 0x12, 0x3b, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f,
0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5,
0x18, 0x04, 0x3f, 0x2c, 0x33, 0x32, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0f,
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12,
0x32, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f,
0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02,
0x33, 0x32, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52,
0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x25, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x8e, 0x01, 0x0a, 0x16, 0x6f, 0x72,
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x72, 0x61, 0x50, 0x72, 0x6f,
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70,
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e,
0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31,
0xaa, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69,
0x6e, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
0x5c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
@@ -148,20 +240,24 @@ func file_proto_engine_v1_fulu_proto_rawDescGZIP() []byte {
return file_proto_engine_v1_fulu_proto_rawDescData
}
var file_proto_engine_v1_fulu_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_proto_engine_v1_fulu_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_engine_v1_fulu_proto_goTypes = []any{
(*ExecutionBundleFulu)(nil), // 0: ethereum.engine.v1.ExecutionBundleFulu
(*ExecutionPayloadDeneb)(nil), // 1: ethereum.engine.v1.ExecutionPayloadDeneb
(*BlobsBundleV2)(nil), // 2: ethereum.engine.v1.BlobsBundleV2
(*NewPayloadRequest)(nil), // 1: ethereum.engine.v1.NewPayloadRequest
(*ExecutionPayloadDeneb)(nil), // 2: ethereum.engine.v1.ExecutionPayloadDeneb
(*BlobsBundleV2)(nil), // 3: ethereum.engine.v1.BlobsBundleV2
(*ExecutionRequests)(nil), // 4: ethereum.engine.v1.ExecutionRequests
}
var file_proto_engine_v1_fulu_proto_depIdxs = []int32{
1, // 0: ethereum.engine.v1.ExecutionBundleFulu.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
2, // 1: ethereum.engine.v1.ExecutionBundleFulu.blobs_bundle:type_name -> ethereum.engine.v1.BlobsBundleV2
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
2, // 0: ethereum.engine.v1.ExecutionBundleFulu.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
3, // 1: ethereum.engine.v1.ExecutionBundleFulu.blobs_bundle:type_name -> ethereum.engine.v1.BlobsBundleV2
2, // 2: ethereum.engine.v1.NewPayloadRequest.execution_payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
4, // 3: ethereum.engine.v1.NewPayloadRequest.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_proto_engine_v1_fulu_proto_init() }
@@ -170,13 +266,14 @@ func file_proto_engine_v1_fulu_proto_init() {
return
}
file_proto_engine_v1_execution_engine_proto_init()
file_proto_engine_v1_electra_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_engine_v1_fulu_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},

View File

@@ -2,7 +2,9 @@ syntax = "proto3";
package ethereum.engine.v1;
import "proto/eth/ext/options.proto";
import "proto/engine/v1/execution_engine.proto";
import "proto/engine/v1/electra.proto";
option csharp_namespace = "Ethereum.Engine.V1";
option go_package = "github.com/prysmaticlabs/prysm/v5/proto/engine/v1;enginev1";
@@ -18,3 +20,15 @@ message ExecutionBundleFulu {
bool should_override_builder = 4;
repeated bytes execution_requests = 5;
}
// NewPayloadRequest bundles the inputs of an engine newPayload call so the
// whole structure can be SSZ-encoded and hashed.
message NewPayloadRequest {
  ExecutionPayloadDeneb execution_payload = 1;
  // Up to 4096 versioned hashes, 32 bytes each.
  repeated bytes versioned_hashes = 2 [
    (ethereum.eth.ext.ssz_size) = "?,32",
    (ethereum.eth.ext.ssz_max) = "4096"
  ];
  // Fixed 32-byte parent block root.
  bytes parent_block_root = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
  ExecutionRequests execution_requests = 4;
}

View File

@@ -192,6 +192,7 @@ ssz_fulu_objs = [
"SignedBeaconBlockContentsFulu",
"SignedBeaconBlockFulu",
"SignedBlindedBeaconBlockFulu",
"SignedExecutionProof",
]
ssz_gloas_objs = [
@@ -371,6 +372,7 @@ go_library(
"beacon_block.go",
"cloners.go",
"eip_7521.go",
"execution_proof.go",
"gloas.go",
"log.go",
"sync_committee_mainnet.go",
@@ -427,6 +429,7 @@ ssz_proto_files(
"beacon_state.proto",
"blobs.proto",
"data_columns.proto",
"execution_proof.proto",
"gloas.proto",
"light_client.proto",
"sync_committee.proto",

View File

@@ -0,0 +1,18 @@
package eth
import "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
// Copy returns a deep copy of the execution proof so that the clone and the
// original share no mutable state. A nil receiver yields nil, and a nil
// PublicInput is preserved as nil rather than dereferenced.
func (e *ExecutionProof) Copy() *ExecutionProof {
	if e == nil {
		return nil
	}
	c := &ExecutionProof{
		ProofData: bytesutil.SafeCopyBytes(e.ProofData),
		// ProofType is a byte slice: copy it too so the clone does not
		// alias the original's backing array.
		ProofType: bytesutil.SafeCopyBytes(e.ProofType),
	}
	// Guard against a nil PublicInput — the previous version dereferenced
	// e.PublicInput unconditionally and panicked on partially built proofs.
	if e.PublicInput != nil {
		c.PublicInput = &PublicInput{
			NewPayloadRequestRoot: bytesutil.SafeCopyBytes(e.PublicInput.NewPayloadRequestRoot),
		}
	}
	return c
}

338
proto/prysm/v1alpha1/execution_proof.pb.go generated Executable file
View File

@@ -0,0 +1,338 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc v3.21.7
// source: proto/prysm/v1alpha1/execution_proof.proto
package eth
import (
reflect "reflect"
sync "sync"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// SignedExecutionProof is an ExecutionProof together with the prover's
// 48-byte public key and 96-byte signature (sizes per the ssz tags).
// Generated by protoc-gen-go; do not edit by hand.
type SignedExecutionProof struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Message       *ExecutionProof        `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
	ProverPubkey  []byte                 `protobuf:"bytes,2,opt,name=prover_pubkey,json=proverPubkey,proto3" json:"prover_pubkey,omitempty" ssz-size:"48"`
	Signature     []byte                 `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state. (protoc-generated boilerplate.)
func (x *SignedExecutionProof) Reset() {
	*x = SignedExecutionProof{}
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *SignedExecutionProof) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*SignedExecutionProof) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API.
func (x *SignedExecutionProof) ProtoReflect() protoreflect.Message {
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SignedExecutionProof.ProtoReflect.Descriptor instead.
func (*SignedExecutionProof) Descriptor() ([]byte, []int) {
	return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{0}
}

// GetMessage returns the wrapped proof, or nil on a nil receiver.
func (x *SignedExecutionProof) GetMessage() *ExecutionProof {
	if x != nil {
		return x.Message
	}
	return nil
}

// GetProverPubkey returns the prover public key, or nil on a nil receiver.
func (x *SignedExecutionProof) GetProverPubkey() []byte {
	if x != nil {
		return x.ProverPubkey
	}
	return nil
}

// GetSignature returns the signature, or nil on a nil receiver.
func (x *SignedExecutionProof) GetSignature() []byte {
	if x != nil {
		return x.Signature
	}
	return nil
}
// ExecutionProof holds opaque proof bytes (up to 307200 per the ssz tag),
// a proof type, and the public input the proof commits to.
// Generated by protoc-gen-go; do not edit by hand.
type ExecutionProof struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	ProofData     []byte                 `protobuf:"bytes,1,opt,name=proof_data,json=proofData,proto3" json:"proof_data,omitempty" ssz-max:"307200"`
	ProofType     []byte                 `protobuf:"bytes,2,opt,name=proof_type,json=proofType,proto3" json:"proof_type,omitempty" ssz-max:"1"`
	PublicInput   *PublicInput           `protobuf:"bytes,3,opt,name=public_input,json=publicInput,proto3" json:"public_input,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state. (protoc-generated boilerplate.)
func (x *ExecutionProof) Reset() {
	*x = ExecutionProof{}
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ExecutionProof) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionProof) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API.
func (x *ExecutionProof) ProtoReflect() protoreflect.Message {
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ExecutionProof.ProtoReflect.Descriptor instead.
func (*ExecutionProof) Descriptor() ([]byte, []int) {
	return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{1}
}

// GetProofData returns the proof bytes, or nil on a nil receiver.
func (x *ExecutionProof) GetProofData() []byte {
	if x != nil {
		return x.ProofData
	}
	return nil
}

// GetProofType returns the proof type bytes, or nil on a nil receiver.
func (x *ExecutionProof) GetProofType() []byte {
	if x != nil {
		return x.ProofType
	}
	return nil
}

// GetPublicInput returns the public input, or nil on a nil receiver.
func (x *ExecutionProof) GetPublicInput() *PublicInput {
	if x != nil {
		return x.PublicInput
	}
	return nil
}
// PublicInput is the value an execution proof commits to: the 32-byte
// hash tree root of a NewPayloadRequest.
// Generated by protoc-gen-go; do not edit by hand.
type PublicInput struct {
	state                 protoimpl.MessageState `protogen:"open.v1"`
	NewPayloadRequestRoot []byte                 `protobuf:"bytes,1,opt,name=new_payload_request_root,json=newPayloadRequestRoot,proto3" json:"new_payload_request_root,omitempty" ssz-size:"32"`
	unknownFields         protoimpl.UnknownFields
	sizeCache             protoimpl.SizeCache
}

// Reset restores x to its zero state. (protoc-generated boilerplate.)
func (x *PublicInput) Reset() {
	*x = PublicInput{}
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *PublicInput) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*PublicInput) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API.
func (x *PublicInput) ProtoReflect() protoreflect.Message {
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PublicInput.ProtoReflect.Descriptor instead.
func (*PublicInput) Descriptor() ([]byte, []int) {
	return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{2}
}

// GetNewPayloadRequestRoot returns the committed root, or nil on a nil receiver.
func (x *PublicInput) GetNewPayloadRequestRoot() []byte {
	if x != nil {
		return x.NewPayloadRequestRoot
	}
	return nil
}
// ExecutionProofsByRootRequest is the RPC request for fetching execution
// proofs by a 32-byte beacon block root.
// Generated by protoc-gen-go; do not edit by hand.
type ExecutionProofsByRootRequest struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	BlockRoot     []byte                 `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state. (protoc-generated boilerplate.)
func (x *ExecutionProofsByRootRequest) Reset() {
	*x = ExecutionProofsByRootRequest{}
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ExecutionProofsByRootRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionProofsByRootRequest) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API.
func (x *ExecutionProofsByRootRequest) ProtoReflect() protoreflect.Message {
	mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ExecutionProofsByRootRequest.ProtoReflect.Descriptor instead.
func (*ExecutionProofsByRootRequest) Descriptor() ([]byte, []int) {
	return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{3}
}

// GetBlockRoot returns the requested block root, or nil on a nil receiver.
func (x *ExecutionProofsByRootRequest) GetBlockRoot() []byte {
	if x != nil {
		return x.BlockRoot
	}
	return nil
}
var File_proto_prysm_v1alpha1_execution_proof_proto protoreflect.FileDescriptor
// rawDesc holds the wire-encoded FileDescriptorProto for
// execution_proof.proto (messages: SignedExecutionProof, ExecutionProof,
// PublicInput, ExecutionProofsByRootRequest). Machine-generated data; do not
// edit by hand.
var file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = []byte{
0x0a, 0x2a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74,
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65,
0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75,
0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x3f, 0x0a, 0x07, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x74, 0x68,
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f,
0x66, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2b, 0x0a, 0x0d, 0x70, 0x72,
0x6f, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x65,
0x72, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02,
0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xa8, 0x01,
0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66,
0x12, 0x29, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x92, 0xb5, 0x18, 0x06, 0x33, 0x30, 0x37, 0x32, 0x30, 0x30,
0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x44, 0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x0a, 0x70,
0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42,
0x05, 0x92, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x54, 0x79, 0x70,
0x65, 0x12, 0x45, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x70, 0x75,
0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x0b, 0x70, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x4e, 0x0a, 0x0b, 0x50, 0x75, 0x62, 0x6c,
0x69, 0x63, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x18, 0x6e, 0x65, 0x77, 0x5f, 0x70,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72,
0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
0x32, 0x52, 0x15, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x45, 0x0a, 0x1c, 0x45, 0x78, 0x65, 0x63,
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f,
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63,
0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5,
0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x42,
0x9d, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x45,
0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x50, 0x72, 0x6f,
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79,
0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa,
0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
// rawDescOnce guards the one-time gzip compression of the descriptor.
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce sync.Once
// rawDescData starts as the raw descriptor and is replaced in place by
// its gzipped form on first call to rawDescGZIP.
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc
)
// rawDescGZIP returns the gzipped file descriptor, compressing it exactly
// once and caching the result for subsequent Descriptor() calls.
func file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP() []byte {
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce.Do(func() {
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData)
})
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData
}
var file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
// goTypes maps descriptor message indices to their generated Go types.
var file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = []any{
(*SignedExecutionProof)(nil), // 0: ethereum.eth.v1alpha1.SignedExecutionProof
(*ExecutionProof)(nil), // 1: ethereum.eth.v1alpha1.ExecutionProof
(*PublicInput)(nil), // 2: ethereum.eth.v1alpha1.PublicInput
(*ExecutionProofsByRootRequest)(nil), // 3: ethereum.eth.v1alpha1.ExecutionProofsByRootRequest
}
// depIdxs encodes cross-message field dependencies as indices into goTypes,
// followed by sub-list boundaries used by the type builder.
var file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = []int32{
1, // 0: ethereum.eth.v1alpha1.SignedExecutionProof.message:type_name -> ethereum.eth.v1alpha1.ExecutionProof
2, // 1: ethereum.eth.v1alpha1.ExecutionProof.public_input:type_name -> ethereum.eth.v1alpha1.PublicInput
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_execution_proof_proto_init() }
// file_proto_prysm_v1alpha1_execution_proof_proto_init builds the file
// descriptor and binds the generated Go types to it. It is idempotent: a
// non-nil File_… means initialization already ran.
func file_proto_prysm_v1alpha1_execution_proof_proto_init() {
if File_proto_prysm_v1alpha1_execution_proof_proto != nil {
return
}
// Anchor type used only to derive this package's import path via reflection.
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_prysm_v1alpha1_execution_proof_proto_goTypes,
DependencyIndexes: file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs,
MessageInfos: file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes,
}.Build()
File_proto_prysm_v1alpha1_execution_proof_proto = out.File
// Release build-time tables so they can be garbage collected.
file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = nil
file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = nil
file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = nil
}

Some files were not shown because too many files have changed in this diff Show More