Compare commits

...

1 Commit

Author       SHA1        Message             Date
Manu NALEPA  483baeedb3  Add proof endpoint  2026-02-12 17:10:23 +01:00
58 changed files with 1849 additions and 1752 deletions

View File

@@ -76,7 +76,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -15,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -691,7 +690,7 @@ func (s *Service) isDataAvailable(
func (s *Service) areExecutionProofsAvailable(ctx context.Context, roBlock consensusblocks.ROBlock) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if proof generation is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
if !features.Get().EnableZkvm {
return nil
}

View File

@@ -63,7 +63,7 @@ type DataColumnReceiver interface {
// ProofReceiver interface defines the methods of chain service for receiving new
// execution proofs
type ProofReceiver interface {
ReceiveProof(proof *ethpb.ExecutionProof) error
ReceiveProof(blocks.VerifiedROSignedExecutionProof) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.

View File

@@ -1,13 +1,13 @@
package blockchain
import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/pkg/errors"
)
// ReceiveProof saves an execution proof to storage.
func (s *Service) ReceiveProof(proof *ethpb.ExecutionProof) error {
if err := s.proofStorage.Save([]*ethpb.ExecutionProof{proof}); err != nil {
func (s *Service) ReceiveProof(proof blocks.VerifiedROSignedExecutionProof) error {
if err := s.proofStorage.Save([]blocks.VerifiedROSignedExecutionProof{proof}); err != nil {
return errors.Wrap(err, "save proof")
}

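ReceiveProof now accepts only proofs that have already been verified. Below is a minimal caller-side sketch, assuming it sits next to code that imports the consensus-types/blocks package and that the proof has passed gossip or RPC verification; wrapAndReceive is a hypothetical helper name.

// wrapAndReceive promotes an already-verified read-only signed proof to its
// verified wrapper and hands it to any ProofReceiver (sketch only).
func wrapAndReceive(receiver ProofReceiver, roProof blocks.ROSignedExecutionProof) error {
	verified := blocks.NewVerifiedROSignedExecutionProof(roProof)
	return receiver.ReceiveProof(verified)
}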
View File

@@ -212,8 +212,7 @@ func (s *Service) Start() {
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
go s.spawnProcessAttestationsRoutine()
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
}

View File

@@ -75,7 +75,7 @@ type ChainService struct {
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
DataColumns []blocks.VerifiedRODataColumn
Proofs []*ethpb.ExecutionProof
Proofs []blocks.VerifiedROSignedExecutionProof
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
}
@@ -759,7 +759,7 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
}
// ReceiveProof implements the same method in chain service
func (c *ChainService) ReceiveProof(proof *ethpb.ExecutionProof) error {
func (c *ChainService) ReceiveProof(proof blocks.VerifiedROSignedExecutionProof) error {
c.Proofs = append(c.Proofs, proof)
return nil
}

View File

@@ -82,7 +82,7 @@ type BLSToExecutionChangeReceivedData struct {
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
ExecutionProof *ethpb.ExecutionProof
ExecutionProof *blocks.VerifiedROSignedExecutionProof
}
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.

View File

@@ -54,7 +54,6 @@ go_test(
"iteration_test.go",
"layout_test.go",
"migration_test.go",
"proof_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],

View File

@@ -20,10 +20,10 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/io/file"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/spf13/afero"
)
@@ -54,14 +54,14 @@ type (
ProofIdent struct {
BlockRoot [fieldparams.RootLength]byte
Epoch primitives.Epoch
ProofID uint64
ProofType uint8
}
// ProofsIdent is a collection of unique identifiers for proofs.
ProofsIdent struct {
BlockRoot [fieldparams.RootLength]byte
Epoch primitives.Epoch
ProofIDs []uint64
BlockRoot [fieldparams.RootLength]byte
Epoch primitives.Epoch
ProofTypes []uint8
}
// ProofStorage is the concrete implementation of the filesystem backend for saving and retrieving ExecutionProofs.
@@ -82,7 +82,7 @@ type (
proofMuChan struct {
mu *sync.RWMutex
toStore chan []*ethpb.ExecutionProof
toStore chan []blocks.VerifiedROSignedExecutionProof
}
// proofSlotEntry represents the offset and size for a proof in the file.
@@ -295,7 +295,7 @@ func (ps *ProofStorage) processProofFile(filePath string) error {
proofIdent := ProofIdent{
BlockRoot: fileMetadata.blockRoot,
Epoch: fileMetadata.epoch,
ProofID: uint64(proofID),
ProofType: uint8(proofID),
}
ps.cache.set(proofIdent)
@@ -310,56 +310,44 @@ func (ps *ProofStorage) Summary(root [fieldparams.RootLength]byte) ProofStorageS
}
// Save saves execution proofs into the database.
func (ps *ProofStorage) Save(proofs []*ethpb.ExecutionProof) error {
// The proofs must all belong to the same block (same block root).
func (ps *ProofStorage) Save(proofs []blocks.VerifiedROSignedExecutionProof) error {
startTime := time.Now()
if len(proofs) == 0 {
return nil
}
proofsByRoot := make(map[[fieldparams.RootLength]byte][]*ethpb.ExecutionProof)
// Safely retrieve the block root and the epoch.
first := proofs[0]
blockRoot := first.BlockRoot()
epoch := first.Epoch()
// Group proofs by root.
proofTypes := make([]uint8, 0, len(proofs))
for _, proof := range proofs {
// Check if the proof ID is valid.
proofID := uint64(proof.ProofId)
if proofID >= maxProofTypes {
proofType := proof.Message.ProofType[0]
if proofType >= maxProofTypes {
return errProofIDTooLarge
}
// Extract block root from proof.
var blockRoot [fieldparams.RootLength]byte
copy(blockRoot[:], proof.BlockRoot)
// Group proofs by root.
proofsByRoot[blockRoot] = append(proofsByRoot[blockRoot], proof)
}
for blockRoot, proofsForRoot := range proofsByRoot {
// Compute epoch from slot.
epoch := slots.ToEpoch(proofsForRoot[0].Slot)
// Save proofs in the filesystem.
if err := ps.saveFilesystem(blockRoot, epoch, proofsForRoot); err != nil {
if err := ps.saveFilesystem(proof.BlockRoot(), proof.Epoch(), proofs); err != nil {
return fmt.Errorf("save filesystem: %w", err)
}
// Get all proof IDs.
proofIDs := make([]uint64, 0, len(proofsForRoot))
for _, proof := range proofsForRoot {
proofIDs = append(proofIDs, uint64(proof.ProofId))
}
// Compute the proofs ident.
proofsIdent := ProofsIdent{BlockRoot: blockRoot, Epoch: epoch, ProofIDs: proofIDs}
// Set proofs in the cache.
ps.cache.setMultiple(proofsIdent)
// Notify the proof feed.
ps.proofFeed.Send(proofsIdent)
proofTypes = append(proofTypes, proof.Message.ProofType[0])
}
// Compute the proofs ident.
proofsIdent := ProofsIdent{BlockRoot: blockRoot, Epoch: epoch, ProofTypes: proofTypes}
// Set proofs in the cache.
ps.cache.setMultiple(proofsIdent)
// Notify the proof feed.
ps.proofFeed.Send(proofsIdent)
proofSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
@@ -367,7 +355,7 @@ func (ps *ProofStorage) Save(proofs []*ethpb.ExecutionProof) error {
// saveFilesystem saves proofs into the database.
// This function expects all proofs to belong to the same block.
func (ps *ProofStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, proofs []*ethpb.ExecutionProof) error {
func (ps *ProofStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, proofs []blocks.VerifiedROSignedExecutionProof) error {
// Compute the file path.
filePath := proofFilePath(root, epoch)
@@ -409,10 +397,10 @@ func (ps *ProofStorage) Subscribe() (event.Subscription, <-chan ProofsIdent) {
return subscription, identsChan
}
// Get retrieves execution proofs from the database.
// Get retrieves signed execution proofs from the database.
// If one of the requested proofs is not found, it is just skipped.
// If proofIDs is nil, then all stored proofs are returned.
func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint64) ([]*ethpb.ExecutionProof, error) {
func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint8) ([]*ethpb.SignedExecutionProof, error) {
ps.pruneMu.RLock()
defer ps.pruneMu.RUnlock()
@@ -424,9 +412,9 @@ func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint64
// Build all proofIDs if none are provided.
if proofIDs == nil {
proofIDs = make([]uint64, maxProofTypes)
proofIDs = make([]uint8, maxProofTypes)
for i := range proofIDs {
proofIDs[i] = uint64(i)
proofIDs[i] = uint8(i)
}
}
@@ -462,7 +450,7 @@ func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint64
}
// Retrieve proofs from the file.
proofs := make([]*ethpb.ExecutionProof, 0, len(proofIDs))
proofs := make([]*ethpb.SignedExecutionProof, 0, len(proofIDs))
for _, proofID := range proofIDs {
if proofID >= maxProofTypes {
continue
@@ -490,8 +478,8 @@ func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint64
return nil, errWrongProofBytesRead
}
// Unmarshal the proof.
proof := new(ethpb.ExecutionProof)
// Unmarshal the signed proof.
proof := new(ethpb.SignedExecutionProof)
if err := proof.UnmarshalSSZ(sszProof); err != nil {
return nil, fmt.Errorf("unmarshal proof: %w", err)
}
@@ -553,7 +541,7 @@ func (ps *ProofStorage) Clear() error {
}
// saveProofNewFile saves proofs to a new file.
func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []*ethpb.ExecutionProof) (err error) {
func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []blocks.VerifiedROSignedExecutionProof) (err error) {
// Initialize the offset table.
var offsetTable proofOffsetTable
@@ -567,18 +555,18 @@ func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []*et
}
for _, proof := range proofs {
proofID := uint64(proof.ProofId)
if proofID >= maxProofTypes {
proofType := proof.Message.ProofType[0]
if proofType >= maxProofTypes {
continue
}
// Skip if already in offset table (duplicate).
if offsetTable[proofID].size != 0 {
if offsetTable[proofType].size != 0 {
continue
}
// SSZ encode the proof.
sszProof, err := proof.MarshalSSZ()
// SSZ encode the full signed proof.
sszProof, err := proof.SignedExecutionProof.MarshalSSZ()
if err != nil {
return fmt.Errorf("marshal proof SSZ: %w", err)
}
@@ -586,7 +574,7 @@ func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []*et
proofSize := uint32(len(sszProof))
// Update offset table.
offsetTable[proofID] = proofSlotEntry{
offsetTable[proofType] = proofSlotEntry{
offset: currentOffset,
size: proofSize,
}
@@ -651,7 +639,7 @@ func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []*et
}
// saveProofExistingFile saves proofs to an existing file.
func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan []*ethpb.ExecutionProof) (err error) {
func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan []blocks.VerifiedROSignedExecutionProof) (err error) {
// Open the file for read/write.
file, err := ps.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
if err != nil {
@@ -682,18 +670,18 @@ func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan
}
for _, proof := range proofs {
proofID := uint64(proof.ProofId)
if proofID >= maxProofTypes {
proofType := proof.Message.ProofType[0]
if proofType >= maxProofTypes {
continue
}
// Skip if proof already exists.
if offsetTable[proofID].size != 0 {
if offsetTable[proofType].size != 0 {
continue
}
// SSZ encode the proof.
sszProof, err := proof.MarshalSSZ()
// SSZ encode the full signed proof.
sszProof, err := proof.SignedExecutionProof.MarshalSSZ()
if err != nil {
return fmt.Errorf("marshal proof SSZ: %w", err)
}
@@ -701,7 +689,7 @@ func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan
proofSize := uint32(len(sszProof))
// Update offset table.
offsetTable[proofID] = proofSlotEntry{
offsetTable[proofType] = proofSlotEntry{
offset: currentOffset,
size: proofSize,
}
@@ -863,7 +851,7 @@ func (ps *ProofStorage) prune() {
}
// fileMutexChan returns the file mutex and channel for a given block root.
func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []*ethpb.ExecutionProof) {
func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []blocks.VerifiedROSignedExecutionProof) {
ps.mu.Lock()
defer ps.mu.Unlock()
@@ -871,7 +859,7 @@ func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.
if !ok {
mc = &proofMuChan{
mu: new(sync.RWMutex),
toStore: make(chan []*ethpb.ExecutionProof, 1),
toStore: make(chan []blocks.VerifiedROSignedExecutionProof, 1),
}
ps.muChans[root] = mc
return mc.mu, mc.toStore
@@ -881,8 +869,8 @@ func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.
}
// pullProofChan pulls proofs from the input channel until it is empty.
func pullProofChan(inputProofs chan []*ethpb.ExecutionProof) []*ethpb.ExecutionProof {
proofs := make([]*ethpb.ExecutionProof, 0, maxProofTypes)
func pullProofChan(inputProofs chan []blocks.VerifiedROSignedExecutionProof) []blocks.VerifiedROSignedExecutionProof {
proofs := make([]blocks.VerifiedROSignedExecutionProof, 0, maxProofTypes)
for {
select {

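Save (rewritten above) now expects every proof in a batch to share the same block root, deriving the root and epoch from the first element, and it publishes a single ProofsIdent carrying the stored ProofTypes. A minimal usage sketch, assuming it lives in the same package as ProofStorage and that the caller has already grouped its verified proofs by block; saveAndWait is a hypothetical helper.

// saveAndWait stores one block's verified proofs and returns the ProofsIdent
// notification published by the storage (sketch only, assumes a non-empty batch).
func saveAndWait(ps *ProofStorage, proofs []blocks.VerifiedROSignedExecutionProof) (ProofsIdent, error) {
	if len(proofs) == 0 {
		return ProofsIdent{}, nil
	}

	sub, idents := ps.Subscribe()
	defer sub.Unsubscribe()

	if err := ps.Save(proofs); err != nil {
		return ProofsIdent{}, fmt.Errorf("save proofs: %w", err)
	}

	// BlockRoot, Epoch and ProofTypes describe what was just persisted.
	return <-idents, nil
}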
View File

@@ -11,35 +11,35 @@ import (
// ProofStorageSummary represents cached information about the proofs on disk for each root the cache knows about.
type ProofStorageSummary struct {
epoch primitives.Epoch
proofIDs map[uint64]bool
epoch primitives.Epoch
proofTypes map[uint8]bool
}
// HasProof returns true if the proof with the given proofID is available in the filesystem.
func (s ProofStorageSummary) HasProof(proofID uint64) bool {
if s.proofIDs == nil {
func (s ProofStorageSummary) HasProof(proofID uint8) bool {
if s.proofTypes == nil {
return false
}
_, ok := s.proofIDs[proofID]
_, ok := s.proofTypes[proofID]
return ok
}
// Count returns the number of available proofs.
func (s ProofStorageSummary) Count() int {
return len(s.proofIDs)
return len(s.proofTypes)
}
// All returns all stored proofIDs sorted in ascending order.
func (s ProofStorageSummary) All() []uint64 {
if s.proofIDs == nil {
func (s ProofStorageSummary) All() []uint8 {
if s.proofTypes == nil {
return nil
}
ids := make([]uint64, 0, len(s.proofIDs))
for id := range s.proofIDs {
ids = append(ids, id)
proofTypes := make([]uint8, 0, len(s.proofTypes))
for proofType := range s.proofTypes {
proofTypes = append(proofTypes, proofType)
}
slices.Sort(ids)
return ids
slices.Sort(proofTypes)
return proofTypes
}
type proofCache struct {
@@ -80,17 +80,17 @@ func (pc *proofCache) set(ident ProofIdent) {
defer pc.mu.Unlock()
summary := pc.cache[ident.BlockRoot]
if summary.proofIDs == nil {
summary.proofIDs = make(map[uint64]bool)
if summary.proofTypes == nil {
summary.proofTypes = make(map[uint8]bool)
}
summary.epoch = ident.Epoch
if _, exists := summary.proofIDs[ident.ProofID]; exists {
if _, exists := summary.proofTypes[ident.ProofType]; exists {
pc.cache[ident.BlockRoot] = summary
return
}
summary.proofIDs[ident.ProofID] = true
summary.proofTypes[ident.ProofType] = true
pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)
@@ -107,17 +107,17 @@ func (pc *proofCache) setMultiple(ident ProofsIdent) {
defer pc.mu.Unlock()
summary := pc.cache[ident.BlockRoot]
if summary.proofIDs == nil {
summary.proofIDs = make(map[uint64]bool)
if summary.proofTypes == nil {
summary.proofTypes = make(map[uint8]bool)
}
summary.epoch = ident.Epoch
addedCount := 0
for _, proofID := range ident.ProofIDs {
if _, exists := summary.proofIDs[proofID]; exists {
for _, proofID := range ident.ProofTypes {
if _, exists := summary.proofTypes[proofID]; exists {
continue
}
summary.proofIDs[proofID] = true
summary.proofTypes[proofID] = true
addedCount++
}
@@ -156,7 +156,7 @@ func (pc *proofCache) evict(blockRoot [fieldparams.RootLength]byte) int {
return 0
}
deleted := len(summary.proofIDs)
deleted := len(summary.proofTypes)
delete(pc.cache, blockRoot)
if deleted > 0 {
@@ -185,7 +185,7 @@ func (pc *proofCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
}
if epoch <= targetEpoch {
prunedCount += uint64(len(summary.proofIDs))
prunedCount += uint64(len(summary.proofTypes))
delete(pc.cache, blockRoot)
}
}

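With proof identifiers now expressed as uint8 proof types, the cached summary answers per-type availability questions. A small sketch, assuming it lives in the same package; missingProofTypes is a hypothetical helper.

// missingProofTypes returns the wanted proof types that are not yet stored
// for the given block root (sketch only).
func missingProofTypes(ps *ProofStorage, root [fieldparams.RootLength]byte, wanted []uint8) []uint8 {
	summary := ps.Summary(root)

	missing := make([]uint8, 0, len(wanted))
	for _, proofType := range wanted {
		if !summary.HasProof(proofType) {
			missing = append(missing, proofType)
		}
	}

	return missing
}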
View File

@@ -1,407 +0,0 @@
package filesystem
import (
"encoding/binary"
"os"
"testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/spf13/afero"
)
func createTestProof(t *testing.T, slot primitives.Slot, proofID uint64, blockRoot [32]byte) *ethpb.ExecutionProof {
t.Helper()
return &ethpb.ExecutionProof{
ProofId: primitives.ExecutionProofId(proofID),
Slot: slot,
BlockHash: make([]byte, 32),
BlockRoot: blockRoot[:],
ProofData: []byte("test proof data for proofID " + string(rune('0'+proofID))),
}
}
// assertProofsEqual compares two proofs by comparing their SSZ-encoded bytes.
func assertProofsEqual(t *testing.T, expected, actual *ethpb.ExecutionProof) {
t.Helper()
expectedSSZ, err := expected.MarshalSSZ()
require.NoError(t, err)
actualSSZ, err := actual.MarshalSSZ()
require.NoError(t, err)
require.DeepEqual(t, expectedSSZ, actualSSZ)
}
func TestNewProofStorage(t *testing.T) {
ctx := t.Context()
t.Run("No base path", func(t *testing.T) {
_, err := NewProofStorage(ctx)
require.ErrorIs(t, err, errNoProofBasePath)
})
t.Run("Nominal", func(t *testing.T) {
dir := t.TempDir()
storage, err := NewProofStorage(ctx, WithProofBasePath(dir))
require.NoError(t, err)
require.Equal(t, dir, storage.base)
})
}
func TestProofSaveAndGet(t *testing.T) {
t.Run("proof ID too large", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
proof := &ethpb.ExecutionProof{
ProofId: primitives.ExecutionProofId(maxProofTypes), // too large
Slot: 1,
BlockHash: make([]byte, 32),
BlockRoot: make([]byte, 32),
ProofData: []byte("test"),
}
err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
require.ErrorIs(t, err, errProofIDTooLarge)
})
t.Run("save empty slice", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
err := proofStorage.Save([]*ethpb.ExecutionProof{})
require.NoError(t, err)
})
t.Run("save and get single proof", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
proof := createTestProof(t, 32, 2, blockRoot)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
require.NoError(t, err)
// Check summary
summary := proofStorage.Summary(blockRoot)
require.Equal(t, true, summary.HasProof(2))
require.Equal(t, false, summary.HasProof(0))
require.Equal(t, false, summary.HasProof(1))
require.Equal(t, 1, summary.Count())
// Get the proof
proofs, err := proofStorage.Get(blockRoot, []uint64{2})
require.NoError(t, err)
require.Equal(t, 1, len(proofs))
assertProofsEqual(t, proof, proofs[0])
})
t.Run("save and get multiple proofs", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
// Save first proof
proof1 := createTestProof(t, 32, 0, blockRoot)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
require.NoError(t, err)
// Save second proof (should append to existing file)
proof2 := createTestProof(t, 32, 3, blockRoot)
err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
require.NoError(t, err)
// Save third proof
proof3 := createTestProof(t, 32, 7, blockRoot)
err = proofStorage.Save([]*ethpb.ExecutionProof{proof3})
require.NoError(t, err)
// Check summary
summary := proofStorage.Summary(blockRoot)
require.Equal(t, true, summary.HasProof(0))
require.Equal(t, false, summary.HasProof(1))
require.Equal(t, false, summary.HasProof(2))
require.Equal(t, true, summary.HasProof(3))
require.Equal(t, false, summary.HasProof(4))
require.Equal(t, false, summary.HasProof(5))
require.Equal(t, false, summary.HasProof(6))
require.Equal(t, true, summary.HasProof(7))
require.Equal(t, 3, summary.Count())
// Get all proofs
proofs, err := proofStorage.Get(blockRoot, nil)
require.NoError(t, err)
require.Equal(t, 3, len(proofs))
// Get specific proofs
proofs, err = proofStorage.Get(blockRoot, []uint64{0, 3})
require.NoError(t, err)
require.Equal(t, 2, len(proofs))
assertProofsEqual(t, proof1, proofs[0])
assertProofsEqual(t, proof2, proofs[1])
})
t.Run("duplicate proof is ignored", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
proof := createTestProof(t, 32, 2, blockRoot)
// Save first time
err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
require.NoError(t, err)
// Save same proof again (should be silently ignored)
err = proofStorage.Save([]*ethpb.ExecutionProof{proof})
require.NoError(t, err)
// Check count
summary := proofStorage.Summary(blockRoot)
require.Equal(t, 1, summary.Count())
// Get the proof
proofs, err := proofStorage.Get(blockRoot, nil)
require.NoError(t, err)
require.Equal(t, 1, len(proofs))
})
t.Run("get non-existent root", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
proofs, err := proofStorage.Get([fieldparams.RootLength]byte{1}, []uint64{0, 1, 2})
require.NoError(t, err)
require.Equal(t, 0, len(proofs))
})
t.Run("get non-existent proofIDs", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
proof := createTestProof(t, 32, 2, blockRoot)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
require.NoError(t, err)
// Try to get proofIDs that don't exist
proofs, err := proofStorage.Get(blockRoot, []uint64{0, 1, 3, 4})
require.NoError(t, err)
require.Equal(t, 0, len(proofs))
})
}
func TestProofRemove(t *testing.T) {
t.Run("remove non-existent", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
err := proofStorage.Remove([fieldparams.RootLength]byte{1})
require.NoError(t, err)
})
t.Run("remove existing", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
proof1 := createTestProof(t, 32, 0, blockRoot1)
proof2 := createTestProof(t, 64, 1, blockRoot2)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
require.NoError(t, err)
err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
require.NoError(t, err)
// Remove first proof
err = proofStorage.Remove(blockRoot1)
require.NoError(t, err)
// Check first proof is gone
summary := proofStorage.Summary(blockRoot1)
require.Equal(t, 0, summary.Count())
proofs, err := proofStorage.Get(blockRoot1, nil)
require.NoError(t, err)
require.Equal(t, 0, len(proofs))
// Check second proof still exists
summary = proofStorage.Summary(blockRoot2)
require.Equal(t, 1, summary.Count())
proofs, err = proofStorage.Get(blockRoot2, nil)
require.NoError(t, err)
require.Equal(t, 1, len(proofs))
})
}
func TestProofClear(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
proof1 := createTestProof(t, 32, 0, blockRoot1)
proof2 := createTestProof(t, 64, 1, blockRoot2)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
require.NoError(t, err)
err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
require.NoError(t, err)
// Clear all
err = proofStorage.Clear()
require.NoError(t, err)
// Check both are gone
summary := proofStorage.Summary(blockRoot1)
require.Equal(t, 0, summary.Count())
summary = proofStorage.Summary(blockRoot2)
require.Equal(t, 0, summary.Count())
}
func TestProofWarmCache(t *testing.T) {
fs, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
// Save proofs
proof1a := createTestProof(t, 32, 0, blockRoot1)
proof1b := createTestProof(t, 32, 3, blockRoot1)
proof2 := createTestProof(t, 64, 5, blockRoot2)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof1a})
require.NoError(t, err)
err = proofStorage.Save([]*ethpb.ExecutionProof{proof1b})
require.NoError(t, err)
err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
require.NoError(t, err)
// Verify files exist
files, err := afero.ReadDir(fs, "0/1")
require.NoError(t, err)
require.Equal(t, 1, len(files))
files, err = afero.ReadDir(fs, "0/2")
require.NoError(t, err)
require.Equal(t, 1, len(files))
// Create a new storage with the same filesystem
proofStorage2 := NewEphemeralProofStorageUsingFs(t, fs)
// Before warm cache, cache should be empty
summary := proofStorage2.Summary(blockRoot1)
require.Equal(t, 0, summary.Count())
// Warm cache
proofStorage2.WarmCache()
// After warm cache, cache should be populated
summary = proofStorage2.Summary(blockRoot1)
require.Equal(t, 2, summary.Count())
require.Equal(t, true, summary.HasProof(0))
require.Equal(t, true, summary.HasProof(3))
summary = proofStorage2.Summary(blockRoot2)
require.Equal(t, 1, summary.Count())
require.Equal(t, true, summary.HasProof(5))
}
func TestProofSubscribe(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
sub, ch := proofStorage.Subscribe()
defer sub.Unsubscribe()
blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
proof := createTestProof(t, 32, 2, blockRoot)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
require.NoError(t, err)
// Should receive notification
ident := <-ch
require.Equal(t, blockRoot, ident.BlockRoot)
require.DeepEqual(t, []uint64{2}, ident.ProofIDs)
require.Equal(t, primitives.Epoch(1), ident.Epoch)
}
func TestProofReadHeader(t *testing.T) {
t.Run("wrong version", func(t *testing.T) {
_, proofStorage := NewEphemeralProofStorageAndFs(t)
blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
proof := createTestProof(t, 32, 0, blockRoot)
err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
require.NoError(t, err)
// Get the file path
filePath := proofFilePath(blockRoot, 1)
// Alter the version
file, err := proofStorage.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
require.NoError(t, err)
_, err = file.Write([]byte{42}) // wrong version
require.NoError(t, err)
// Try to read header
_, _, err = proofStorage.readHeader(file)
require.ErrorIs(t, err, errWrongProofVersion)
err = file.Close()
require.NoError(t, err)
})
}
func TestEncodeOffsetTable(t *testing.T) {
var table proofOffsetTable
table[0] = proofSlotEntry{offset: 0, size: 100}
table[3] = proofSlotEntry{offset: 100, size: 200}
table[7] = proofSlotEntry{offset: 300, size: 300}
encoded := encodeOffsetTable(table)
require.Equal(t, proofOffsetTableSize, len(encoded))
// Decode manually and verify
var decoded proofOffsetTable
for i := range decoded {
pos := i * proofSlotSize
decoded[i].offset = binary.BigEndian.Uint32(encoded[pos : pos+proofOffsetSize])
decoded[i].size = binary.BigEndian.Uint32(encoded[pos+proofOffsetSize : pos+proofSlotSize])
}
require.Equal(t, table, decoded)
}
func TestProofFilePath(t *testing.T) {
blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
epoch := primitives.Epoch(100)
path := proofFilePath(blockRoot, epoch)
require.Equal(t, "0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.sszs", path)
}
func TestExtractProofFileMetadata(t *testing.T) {
t.Run("valid path", func(t *testing.T) {
path := "0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.sszs"
metadata, err := extractProofFileMetadata(path)
require.NoError(t, err)
expectedRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
require.Equal(t, uint64(0), metadata.period)
require.Equal(t, primitives.Epoch(100), metadata.epoch)
require.Equal(t, expectedRoot, metadata.blockRoot)
})
t.Run("invalid path - wrong number of parts", func(t *testing.T) {
_, err := extractProofFileMetadata("invalid/path.sszs")
require.ErrorContains(t, "unexpected proof file", err)
})
t.Run("invalid path - wrong extension", func(t *testing.T) {
_, err := extractProofFileMetadata("0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.txt")
require.ErrorContains(t, "unexpected extension", err)
})
}

View File

@@ -38,6 +38,7 @@ go_library(
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/light-client:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",
"//beacon-chain/rpc/eth/prover:go_default_library",
"//beacon-chain/rpc/eth/rewards:go_default_library",
"//beacon-chain/rpc/eth/validator:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/events"
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/node"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/prover"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/rewards"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/validator"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
@@ -98,6 +99,7 @@ func (s *Service) endpoints(
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater, blocker, coreService)...)
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(stater, coreService)...)
endpoints = append(endpoints, s.proverEndpoints()...)
if features.Get().EnableLightClient {
endpoints = append(endpoints, s.lightClientEndpoints()...)
@@ -1288,3 +1290,22 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
},
}
}
func (*Service) proverEndpoints() []endpoint {
server := &prover.Server{}
const namespace = "prover"
return []endpoint{
{
template: "/eth/v1/prover/execution_proofs",
name: namespace + ".SubmitExecutionProof",
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitExecutionProof,
methods: []string{http.MethodPost},
},
}
}

View File

@@ -0,0 +1,17 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"log.go",
"server.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/prover",
visibility = ["//visibility:public"],
deps = [
"//monitoring/tracing/trace:go_default_library",
"//network/httputil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,39 @@
package prover
import (
"encoding/json"
"io"
"net/http"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
)
// SubmitExecutionProof handles POST requests to /eth/v1/prover/execution_proofs.
// It receives execution proofs from provers and logs them.
func (s *Server) SubmitExecutionProof(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "prover.SubmitExecutionProof")
defer span.End()
body, err := io.ReadAll(r.Body)
if err != nil {
httputil.HandleError(w, "Could not read request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(body) == 0 {
httputil.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
// Parse the JSON to extract fields for logging
var proof map[string]any
if err := json.Unmarshal(body, &proof); err != nil {
httputil.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
log.Info("Received execution proof")
w.WriteHeader(http.StatusOK)
}

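The handler accepts any non-empty JSON body and, for now, only logs that a proof was received. A hedged sketch of the prover side of the call; the host and port (3500 is assumed as the beacon node's default HTTP API port) and the payload fields are placeholders, since the endpoint does not yet decode into a concrete proof type.

// Sketch: submit a JSON-encoded execution proof to the new prover endpoint.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder fields: the handler currently only requires valid, non-empty JSON.
	payload := []byte(`{"proof_type": "1", "block_root": "0x...", "proof_data": "0x..."}`)

	resp, err := http.Post("http://localhost:3500/eth/v1/prover/execution_proofs", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
}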
View File

@@ -0,0 +1,5 @@
package prover
import "github.com/sirupsen/logrus"
var log = logrus.WithField("package", "beacon-chain/rpc/eth/prover")

View File

@@ -0,0 +1,5 @@
// Package prover defines handlers for the prover API endpoints.
package prover
// Server defines a server implementation for the prover API endpoints.
type Server struct{}

View File

@@ -19,7 +19,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -322,91 +321,38 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
}
var wg errgroup.Group
blockBroadcastDone := make(chan bool)
var wg sync.WaitGroup
errChan := make(chan error, 1)
wg.Go(func() error {
if err := vs.broadcastReceiveBlock(ctx, blockBroadcastDone, block, root); err != nil {
return fmt.Errorf("broadcast receive block: %w", err)
wg.Add(1)
go func() {
if err := vs.broadcastReceiveBlock(ctx, &wg, block, root); err != nil {
errChan <- errors.Wrap(err, "broadcast/receive block failed")
return
}
errChan <- nil
}()
return nil
})
wg.Wait()
wg.Go(func() error {
if err := vs.broadcastAndReceiveSidecars(ctx, blockBroadcastDone, block, root, blobSidecars, dataColumnSidecars); err != nil {
return fmt.Errorf("broadcast and receive sidecars: %w", err)
}
return nil
})
if err := wg.Wait(); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block/sidecars: %v", err)
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
}
if err := <-errChan; err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
}
// Generate and broadcast execution proofs.
go vs.generateAndBroadcastExecutionProofs(ctx, rob)
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
// TODO: This is a duplicate from the same function in the sync package.
func (vs *Server) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
const delay = 2 * time.Second
proofTypes := flags.Get().ProofGenerationTypes
if len(proofTypes) == 0 {
return
}
var wg errgroup.Group
for _, proofType := range proofTypes {
wg.Go(func() error {
execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
if err != nil {
return fmt.Errorf("generate exec proof: %w", err)
}
if err := vs.P2P.Broadcast(ctx, execProof); err != nil {
return fmt.Errorf("broadcast exec proof: %w", err)
}
// Save the proof to storage.
if vs.ProofReceiver != nil {
if err := vs.ProofReceiver.ReceiveProof(execProof); err != nil {
return fmt.Errorf("receive proof: %w", err)
}
}
return nil
})
}
if err := wg.Wait(); err != nil {
log.WithError(err).Error("Failed to generate and broadcast execution proofs")
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", roBlock.Root()),
"slot": roBlock.Block().Slot(),
"count": len(proofTypes),
}).Debug("Generated and broadcasted execution proofs")
}
// broadcastAndReceiveSidecars broadcasts and receives sidecars.
func (vs *Server) broadcastAndReceiveSidecars(
ctx context.Context,
blockBroadcastDone <-chan bool,
block interfaces.SignedBeaconBlock,
root [fieldparams.RootLength]byte,
blobSidecars []*ethpb.BlobSidecar,
dataColumnSidecars []blocks.RODataColumn,
) error {
// Wait for block broadcast to complete before broadcasting sidecars.
<-blockBroadcastDone
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
@@ -488,14 +434,11 @@ func (vs *Server) handleUnblindedBlock(
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
// It closes the blockBroadcastDone channel once broadcasting is complete (but before receiving the block).
func (vs *Server) broadcastReceiveBlock(ctx context.Context, blockBroadcastDone chan<- bool, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, block, root); err != nil {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, wg, block, root); err != nil {
return errors.Wrap(err, "broadcast block")
}
close(blockBroadcastDone)
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
@@ -508,7 +451,9 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, blockBroadcastDone
return nil
}
func (vs *Server) broadcastBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
defer wg.Done()
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -764,57 +709,3 @@ func blobsAndProofs(req *ethpb.GenericSignedBeaconBlock) ([][]byte, [][]byte, er
return nil, nil, errors.Errorf("unknown request type provided: %T", req)
}
}
// generateExecProof returns a dummy execution proof after the specified delay.
// TODO: This is a duplicate from the same function in the sync package.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
// Simulate proof generation work
time.Sleep(delay)
// Create a dummy proof with some deterministic data
block := roBlock.Block()
if block == nil {
return nil, errors.New("nil block")
}
body := block.Body()
if body == nil {
return nil, errors.New("nil block body")
}
executionData, err := body.Execution()
if err != nil {
return nil, fmt.Errorf("execution: %w", err)
}
if executionData == nil {
return nil, errors.New("nil execution data")
}
hash, err := executionData.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("hash tree root: %w", err)
}
proofData := []byte{
0xFF, // Magic byte for dummy proof
byte(proofID),
// Include some payload hash bytes
hash[0],
hash[1],
hash[2],
hash[3],
}
blockRoot := roBlock.Root()
proof := &ethpb.ExecutionProof{
ProofId: proofID,
Slot: block.Slot(),
BlockHash: hash[:],
BlockRoot: blockRoot[:],
ProofData: proofData,
}
return proof, nil
}

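The rewritten ProposeBeaconBlock path above broadcasts the block in a goroutine, uses a sync.WaitGroup so that sidecar broadcasting only starts once the block itself is on the wire, and collects the block path's error afterwards from a one-element channel. A minimal sketch of that handoff, with hypothetical broadcastBlock and broadcastSidecars stand-ins (the block broadcaster is expected to call wg.Done once its wire write completes, mirroring the defer in vs.broadcastBlock).

// proposeOrdering shows the broadcast ordering used above (sketch only;
// requires the context and sync packages).
func proposeOrdering(
	ctx context.Context,
	broadcastBlock func(context.Context, *sync.WaitGroup) error,
	broadcastSidecars func(context.Context) error,
) error {
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	wg.Add(1)
	go func() {
		// The callee signals wg.Done as soon as the block hits the wire,
		// then continues (block receive/import) before reporting its error.
		errChan <- broadcastBlock(ctx, &wg)
	}()

	// Sidecars must not be gossiped before the block broadcast completes.
	wg.Wait()
	if err := broadcastSidecars(ctx); err != nil {
		return err
	}

	return <-errChan
}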
View File

@@ -14,7 +14,6 @@ go_library(
"decode_pubsub.go",
"doc.go",
"error.go",
"exec_proofs.go",
"fork_watcher.go",
"fuzz_exports.go", # keep
"log.go",
@@ -130,6 +129,7 @@ go_library(
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",

View File

@@ -1,65 +0,0 @@
package sync
import (
"fmt"
"time"
"errors"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// generateExecProof returns a dummy execution proof after the specified delay.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
// Simulate proof generation work
time.Sleep(delay)
// Create a dummy proof with some deterministic data
block := roBlock.Block()
if block == nil {
return nil, errors.New("nil block")
}
body := block.Body()
if body == nil {
return nil, errors.New("nil block body")
}
executionData, err := body.Execution()
if err != nil {
return nil, fmt.Errorf("execution: %w", err)
}
if executionData == nil {
return nil, errors.New("nil execution data")
}
hash, err := executionData.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("hash tree root: %w", err)
}
proofData := []byte{
0xFF, // Magic byte for dummy proof
byte(proofID),
// Include some payload hash bytes
hash[0],
hash[1],
hash[2],
hash[3],
}
blockRoot := roBlock.Root()
proof := &ethpb.ExecutionProof{
ProofId: proofID,
Slot: block.Slot(),
BlockHash: hash[:],
BlockRoot: blockRoot[:],
ProofData: proofData,
}
return proof, nil
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
@@ -134,16 +133,9 @@ func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.R
return nil
}
alreadyHave := make([]primitives.ExecutionProofId, 0, len(storedIds))
for _, id := range storedIds {
alreadyHave = append(alreadyHave, primitives.ExecutionProofId(id))
}
// Construct request
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: root[:],
CountNeeded: params.BeaconConfig().MinProofsRequired - count,
AlreadyHave: alreadyHave,
request := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: root[:],
}
// Call SendExecutionProofByRootRequest
@@ -155,13 +147,20 @@ func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.R
// TODO: For simplicity, just pick the first peer for now.
// In the future, we can implement better peer selection logic.
pid := zkvmEnabledPeers[0]
proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, request, blockEpoch)
if err != nil {
return fmt.Errorf("send execution proofs by root request: %w", err)
}
// TODO: Verify really instead of blindly converting to verified sidecars.
verifiedProofs := make([]blocks.VerifiedROSignedExecutionProof, 0, len(proofs))
for _, proof := range proofs {
verifiedProof := blocks.NewVerifiedROSignedExecutionProof(proof)
verifiedProofs = append(verifiedProofs, verifiedProof)
}
// Save the proofs into storage.
if err := proofStorage.Save(proofs); err != nil {
if err := proofStorage.Save(verifiedProofs); err != nil {
return fmt.Errorf("proof storage save: %w", err)
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -183,17 +184,17 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return nil
}
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, slot primitives.Slot, proof *ethpb.SignedExecutionProof) error {
// Success response code.
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return errors.Wrap(err, "stream write")
}
ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
ctxBytes := params.ForkDigest(slots.ToEpoch(slot))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return errors.Wrap(err, "write context to stream")
}
// Execution proof.
// Signed execution proof.
if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
return errors.Wrap(err, "encode with max length")
}

View File

@@ -4,136 +4,14 @@ import (
"context"
"errors"
"fmt"
"io"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns fetched execution proofs, if any.
func SendExecutionProofsByRootRequest(
ctx context.Context,
clock blockchain.TemporalOracle,
p2pProvider p2p.P2P,
pid peer.ID,
req *ethpb.ExecutionProofsByRootRequest,
) ([]*ethpb.ExecutionProof, error) {
// Validate request
if req.CountNeeded == 0 {
return nil, errors.New("count_needed must be greater than 0")
}
topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
if err != nil {
return nil, err
}
log.WithFields(logrus.Fields{
"topic": topic,
"block_root": bytesutil.ToBytes32(req.BlockRoot),
"count": req.CountNeeded,
"already": len(req.AlreadyHave),
}).Debug("Sending execution proofs by root request")
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
defer closeStream(stream, log)
// Read execution proofs from stream
proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
for _, id := range req.AlreadyHave {
alreadyHaveSet[id] = struct{}{}
}
for i := uint64(0); i < req.CountNeeded; i++ {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
// Validate proof
if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
return nil, err
}
proofs = append(proofs, proof)
}
return proofs, nil
}
// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
func ReadChunkedExecutionProof(
stream libp2pcore.Stream,
encoding p2p.EncodingProvider,
isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
// Read status code for each chunk (like data columns, not like blocks)
code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
if err != nil {
return nil, err
}
if code != 0 {
return nil, errors.New(errMsg)
}
// Read context bytes (fork digest)
_, err = readContextFromStream(stream)
if err != nil {
return nil, fmt.Errorf("read context from stream: %w", err)
}
// Decode the proof
proof := &ethpb.ExecutionProof{}
if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
return nil, err
}
return proof, nil
}
// validateExecutionProof validates a received execution proof against the request.
func validateExecutionProof(
proof *ethpb.ExecutionProof,
req *ethpb.ExecutionProofsByRootRequest,
alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
// Check block root matches
proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
reqRoot := bytesutil.ToBytes32(req.BlockRoot)
if proofRoot != reqRoot {
return fmt.Errorf("proof block root %#x does not match requested root %#x",
proofRoot, reqRoot)
}
// Check we didn't already have this proof
if _, ok := alreadyHaveSet[proof.ProofId]; ok {
return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
}
// Check proof ID is valid (within max range)
if !proof.ProofId.IsValid() {
return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
}
return nil
}
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
@@ -155,74 +33,57 @@ func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any,
return err
}
// Penalize peers that send invalid requests.
if err := validateExecutionProofsByRootRequest(req); err != nil {
s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return fmt.Errorf("validate execution proofs by root request: %w", err)
}
blockRoot := bytesutil.ToBytes32(req.BlockRoot)
log := log.WithFields(logrus.Fields{
"blockroot": fmt.Sprintf("%#x", blockRoot),
"neededCount": req.CountNeeded,
"alreadyHave": req.AlreadyHave,
"peer": remotePeer.String(),
"blockRoot": fmt.Sprintf("%#x", blockRoot),
"peer": remotePeer.String(),
})
s.rateLimiter.add(stream, 1)
defer closeStream(stream, log)
// Get proofs from execution proof pool
summary := s.cfg.proofStorage.Summary(blockRoot)
// Filter out not requested proofs
alreadyHave := make(map[primitives.ExecutionProofId]bool)
for _, id := range req.AlreadyHave {
alreadyHave[id] = true
// Retrieve the slot corresponding to the block root.
roSignedBeaconBlock, err := s.cfg.beaconDB.Block(ctx, blockRoot)
if err != nil {
return fmt.Errorf("fetch block from db: %w", err)
}
// Determine which proofs to fetch (not already had by requester)
proofIDsToFetch := make([]uint64, 0, len(summary.All()))
for _, proofId := range summary.All() {
if !alreadyHave[primitives.ExecutionProofId(proofId)] {
proofIDsToFetch = append(proofIDsToFetch, proofId)
}
if roSignedBeaconBlock == nil {
return fmt.Errorf("block not found for root %#x", blockRoot)
}
roBeaconBlock := roSignedBeaconBlock.Block()
if roBeaconBlock == nil {
return fmt.Errorf("beacon block is nil for root %#x", blockRoot)
}
slot := roBeaconBlock.Slot()
// Get proofs from execution proof pool
summary := s.cfg.proofStorage.Summary(blockRoot)
if summary.Count() == 0 {
return nil
}
// Load all proofs at once
proofs, err := s.cfg.proofStorage.Get(blockRoot, proofIDsToFetch)
proofs, err := s.cfg.proofStorage.Get(blockRoot, nil)
if err != nil {
return fmt.Errorf("proof storage get: %w", err)
}
// Send proofs
sentCount := uint64(0)
for _, proof := range proofs {
if sentCount >= req.CountNeeded {
break
}
// Write proof to stream
SetStreamWriteDeadline(stream, defaultWriteDuration)
if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), slot, proof); err != nil {
log.WithError(err).Debug("Could not send execution proof")
s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
return err
}
sentCount++
}
log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")
log.WithField("proofCount", len(proofs)).Debug("Responded to execution proofs by root request")
return nil
}
func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
if req.CountNeeded == 0 {
return errors.New("count_needed must be greater than 0")
}
return nil
}

View File

@@ -21,6 +21,7 @@ import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
goPeer "github.com/libp2p/go-libp2p/core/peer"
@@ -828,3 +829,98 @@ func DataColumnSidecarsByRangeRequest(columns []uint64, start, end primitives.Sl
Columns: columns,
}, nil
}
// ----------------
// Execution proofs
// ----------------
// SendExecutionProofsByRootRequest sends a request for execution proofs by root
// and returns the fetched execution proofs.
func SendExecutionProofsByRootRequest(
ctx context.Context,
clock blockchain.TemporalOracle,
p2pProvider p2p.P2P,
pid peer.ID,
request *ethpb.ExecutionProofsByRootRequest,
blockEpoch primitives.Epoch,
) ([]blocks.ROSignedExecutionProof, error) {
// Return early if nothing to request.
if request == nil {
return nil, nil
}
// Build the topic.
topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
if err != nil {
return nil, fmt.Errorf("topic from message: %w", err)
}
log.WithFields(logrus.Fields{
"topic": topic,
"blockRoot": fmt.Sprintf("%#x", request.BlockRoot),
}).Debug("Sending execution proofs by root request")
// Send the request.
stream, err := p2pProvider.Send(ctx, request, topic, pid)
if err != nil {
return nil, fmt.Errorf("send: %w", err)
}
defer closeStream(stream, log)
// Read execution proofs from stream
// TODO: Set capacity to MAX_EXECUTION_PROOFS_PER_PAYLOAD
proofs := make([]blocks.ROSignedExecutionProof, 0, 4)
// TODO: Use MAX_EXECUTION_PROOFS_PER_PAYLOAD instead of 4.
// TODO: Verify that the peer does not send more than MAX_EXECUTION_PROOFS_PER_PAYLOAD proofs, and downscore if it does.
for range 4 {
proof, err := readChunkedExecutionProof(stream, p2pProvider, request.BlockRoot, blockEpoch)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, fmt.Errorf("read chunked execution proof: %w", err)
}
proofs = append(proofs, *proof)
}
return proofs, nil
}
// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
// TODO: Add validation here
// TODO: Add msgVersion check with ctxMap
func readChunkedExecutionProof(
stream libp2pcore.Stream,
encoding p2p.EncodingProvider,
blockRoot []byte,
blockEpoch primitives.Epoch,
) (*blocks.ROSignedExecutionProof, error) {
// Read the status code from the stream.
statusCode, errMessage, err := ReadStatusCode(stream, encoding.Encoding())
if err != nil {
return nil, fmt.Errorf("read status code: %w", err)
}
if statusCode != 0 {
return nil, errors.New(errMessage)
}
// Read context bytes (fork digest)
_, err = readContextFromStream(stream)
if err != nil {
return nil, fmt.Errorf("read context from stream: %w", err)
}
// Decode the execution proof from the stream.
proof := new(ethpb.SignedExecutionProof)
if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
return nil, fmt.Errorf("decode execution proof: %w", err)
}
// Create a read-only execution proof from the proof.
roProof, err := blocks.NewROSignedExecutionProof(proof, bytesutil.ToBytes32(blockRoot), blockEpoch)
return &roProof, err
}

View File

@@ -149,10 +149,12 @@ type Service struct {
rateLimiter *limiter
seenBlockLock sync.RWMutex
seenBlockCache *lru.Cache
seenNewPayloadRequestCache *lru.Cache
seenBlobLock sync.RWMutex
seenBlobCache *lru.Cache
seenDataColumnCache *slotAwareCache
seenProofCache *slotAwareCache
seenProofCache *lru.Cache
seenValidProofCache *lru.Cache
seenAggregatedAttestationLock sync.RWMutex
seenAggregatedAttestationCache *lru.Cache
seenUnAggregatedAttestationLock sync.RWMutex
@@ -177,7 +179,7 @@ type Service struct {
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newColumnsVerifier verification.NewDataColumnsVerifier
newProofsVerifier verification.NewExecutionProofsVerifier
newSignedExecutionProofsVerifier verification.NewSignedExecutionProofsVerifier
columnSidecarsExecSingleFlight singleflight.Group
reconstructionSingleFlight singleflight.Group
availableBlocker coverage.AvailableBlocker
@@ -239,6 +241,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
r.subHandler = newSubTopicHandler()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
r.initCaches()
return r
}
@@ -254,8 +257,8 @@ func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verifi
}
}
func newExecutionProofsVerifierFromInitializer(ini *verification.Initializer) verification.NewExecutionProofsVerifier {
return func(proofs []blocks.ROExecutionProof, reqs []verification.Requirement) verification.ExecutionProofsVerifier {
func newExecutionProofsVerifierFromInitializer(ini *verification.Initializer) verification.NewSignedExecutionProofsVerifier {
return func(proofs []blocks.ROSignedExecutionProof, reqs []verification.Requirement) verification.SignedExecutionProofsVerifier {
return ini.NewExecutionProofsVerifier(proofs, reqs)
}
}
@@ -269,7 +272,7 @@ func (s *Service) Start() {
}
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
s.newProofsVerifier = newExecutionProofsVerifierFromInitializer(v)
s.newSignedExecutionProofsVerifier = newExecutionProofsVerifierFromInitializer(v)
go s.verifierRoutine()
go s.startDiscoveryAndSubscriptions()
@@ -359,7 +362,8 @@ func (s *Service) initCaches() {
s.seenBlockCache = lruwrpr.New(seenBlockSize)
s.seenBlobCache = lruwrpr.New(seenBlockSize * params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra)
s.seenDataColumnCache = newSlotAwareCache(seenDataColumnSize)
s.seenProofCache = newSlotAwareCache(seenExecutionProofSize)
s.seenProofCache = lruwrpr.New(seenBlockSize * 8 * 128) // TODO: Replace 8 with the actual max number of proofs per block and 128 with the maximum estimated prover count.
s.seenValidProofCache = lruwrpr.New(seenBlockSize * 8) // TODO: Replace 8 with the actual max number of proofs per block.
s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)
@@ -369,6 +373,7 @@ func (s *Service) initCaches() {
s.seenAttesterSlashingCache = make(map[uint64]bool)
s.seenProposerSlashingCache = lruwrpr.New(seenProposerSlashingSize)
s.badBlockCache = lruwrpr.New(badBlockSize)
s.seenNewPayloadRequestCache = lruwrpr.New(seenBlockSize)
}
func (s *Service) waitForChainStart() {

View File

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -19,11 +18,11 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/io/file"
engine "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
)
@@ -36,13 +35,12 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return err
}
s.setSeenBlockIndexSlot(signed.Block().Slot(), signed.Block().ProposerIndex())
block := signed.Block()
s.setSeenBlockIndexSlot(block.Slot(), block.ProposerIndex())
root, err := block.HashTreeRoot()
if err != nil {
return err
return fmt.Errorf("hash tree root: %w", err)
}
roBlock, err := blocks.NewROBlockWithRoot(signed, root)
@@ -50,6 +48,11 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return errors.Wrap(err, "new ro block with root")
}
// Cache the new payload request hash tree root corresponding to this block.
if err := s.cacheNewPayloadRequestRoot(roBlock); err != nil {
return fmt.Errorf("cacheNewPayloadRequestRoot: %w", err)
}
go func() {
if err := s.processSidecarsFromExecutionFromBlock(ctx, roBlock); err != nil {
log.WithError(err).WithFields(logrus.Fields{
@@ -79,11 +82,6 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return err
}
// We use the service context to ensure this context is not cancelled
// when the current function returns.
// TODO: Do not broadcast proofs for blocks we have already seen.
go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
return errors.Wrap(err, "process pending atts for block")
}
@@ -91,45 +89,92 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return nil
}
func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
const delay = 2 * time.Second
proofTypes := flags.Get().ProofGenerationTypes
func (s *Service) cacheNewPayloadRequestRoot(roBlock blocks.ROBlock) error {
block := roBlock.Block()
body := block.Body()
// Exit early if proof generation is disabled.
if len(proofTypes) == 0 {
return
execution, err := body.Execution()
if err != nil {
return fmt.Errorf("execution: %w", err)
}
var wg errgroup.Group
for _, proofType := range proofTypes {
wg.Go(func() error {
execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
if err != nil {
return fmt.Errorf("generate exec proof: %w", err)
}
if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
return fmt.Errorf("broadcast exec proof: %w", err)
}
if err := s.cfg.chain.ReceiveProof(execProof); err != nil {
return errors.Wrap(err, "receive proof")
}
return nil
})
transactions, err := execution.Transactions()
if err != nil {
return fmt.Errorf("transactions: %w", err)
}
if err := wg.Wait(); err != nil {
log.WithError(err).Error("Failed to generate and broadcast execution proofs")
withdrawals, err := execution.Withdrawals()
if err != nil {
return fmt.Errorf("withdrawals: %w", err)
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", roBlock.Root()),
"slot": roBlock.Block().Slot(),
"count": len(proofTypes),
}).Debug("Generated and broadcasted execution proofs")
blobGasUsed, err := execution.BlobGasUsed()
if err != nil {
return fmt.Errorf("blob gas used: %w", err)
}
excessBlobGas, err := execution.ExcessBlobGas()
if err != nil {
return fmt.Errorf("excess blob gas: %w", err)
}
executionPayload := &engine.ExecutionPayloadDeneb{
ParentHash: execution.ParentHash(),
FeeRecipient: execution.FeeRecipient(),
StateRoot: execution.StateRoot(),
ReceiptsRoot: execution.ReceiptsRoot(),
LogsBloom: execution.LogsBloom(),
PrevRandao: execution.PrevRandao(),
BlockNumber: execution.BlockNumber(),
GasLimit: execution.GasLimit(),
GasUsed: execution.GasUsed(),
Timestamp: execution.Timestamp(),
ExtraData: execution.ExtraData(),
BaseFeePerGas: execution.BaseFeePerGas(),
BlockHash: execution.BlockHash(),
Transactions: transactions,
Withdrawals: withdrawals,
BlobGasUsed: blobGasUsed,
ExcessBlobGas: excessBlobGas,
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return fmt.Errorf("blob kzg commitments: %w", err)
}
versionedHashes := make([][]byte, 0, len(kzgCommitments))
for _, kzgCommitment := range kzgCommitments {
versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(kzgCommitment)
versionedHashes = append(versionedHashes, versionedHash[:])
}
parentBlockRoot := block.ParentRoot()
executionRequests, err := body.ExecutionRequests()
if err != nil {
return fmt.Errorf("execution requests: %w", err)
}
newPayloadRequest := engine.NewPayloadRequest{
ExecutionPayload: executionPayload,
VersionedHashes: versionedHashes,
ParentBlockRoot: parentBlockRoot[:],
ExecutionRequests: executionRequests,
}
rootEpoch := rootEpoch{
root: roBlock.Root(),
epoch: slots.ToEpoch(block.Slot()),
}
newPayloadRequestRoot, err := newPayloadRequest.HashTreeRoot()
if err != nil {
return fmt.Errorf("hash tree root: %w", err)
}
s.setSeenNewPayloadRequest(newPayloadRequestRoot, rootEpoch)
return nil
}
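A compact sketch of the dedup round-trip this caching enables, assuming the Service fields and helpers introduced elsewhere in this change; the helper name cacheAndResolveNewPayloadRequestRoot is hypothetical and only groups the two sides for illustration.
func (s *Service) cacheAndResolveNewPayloadRequestRoot(
    npr *engine.NewPayloadRequest,
    blockRoot [32]byte,
    blockSlot primitives.Slot,
) ([32]byte, primitives.Epoch, error) {
    // Block processing side: hash the request and remember which block and epoch it belongs to.
    npRoot, err := npr.HashTreeRoot()
    if err != nil {
        return [32]byte{}, 0, fmt.Errorf("hash tree root: %w", err)
    }
    s.setSeenNewPayloadRequest(npRoot, rootEpoch{root: blockRoot, epoch: slots.ToEpoch(blockSlot)})
    // Proof gossip side: a proof advertising the same new payload request root
    // resolves back to the originating block root and epoch.
    ok, re := s.hasSeenNewPayloadRequest(npRoot)
    if !ok {
        return [32]byte{}, 0, errors.New("new payload request root not cached")
    }
    return re.root, re.epoch, nil
}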
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,

View File

@@ -11,16 +11,16 @@ import (
)
func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
verifiedProof, ok := msg.(blocks.VerifiedROExecutionProof)
verifiedRoSignedExecutionProof, ok := msg.(blocks.VerifiedROSignedExecutionProof)
if !ok {
return errors.Errorf("incorrect type of message received, wanted %T but got %T", blocks.VerifiedROExecutionProof{}, msg)
return errors.Errorf("incorrect type of message received, wanted %T but got %T", blocks.VerifiedROSignedExecutionProof{}, msg)
}
// Insert the execution proof into the pool
s.setSeenProof(verifiedProof.Slot(), verifiedProof.BlockRoot(), verifiedProof.ProofId())
s.setSeenValidProof(&verifiedRoSignedExecutionProof.ROSignedExecutionProof)
// Save the proof to storage.
if err := s.cfg.chain.ReceiveProof(verifiedProof.ExecutionProof); err != nil {
if err := s.cfg.chain.ReceiveProof(verifiedRoSignedExecutionProof); err != nil {
return errors.Wrap(err, "receive proof")
}
@@ -28,7 +28,7 @@ func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message)
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.ExecutionProofReceived,
Data: &opfeed.ExecutionProofReceivedData{
ExecutionProof: verifiedProof.ExecutionProof,
ExecutionProof: &verifiedRoSignedExecutionProof,
},
})

View File

@@ -38,6 +38,11 @@ var (
ErrSlashingSignatureFailure = errors.New("proposer slashing signature verification failed")
)
type rootEpoch struct {
root [32]byte
epoch primitives.Epoch
}
// validateBeaconBlockPubSub checks that the incoming block has a valid BLS signature.
// Blocks that have already been seen are ignored. If the BLS signature is any valid signature,
// this method rebroadcasts the message.
@@ -472,6 +477,25 @@ func (s *Service) setSeenBlockIndexSlot(slot primitives.Slot, proposerIdx primit
s.seenBlockCache.Add(string(b), true)
}
func (s *Service) hasSeenNewPayloadRequest(newPayloadRequestRoot [32]byte) (bool, rootEpoch) {
v, ok := s.seenNewPayloadRequestCache.Get(newPayloadRequestRoot)
if !ok {
return false, rootEpoch{}
}
re, ok := v.(rootEpoch)
if !ok {
log.Error("Cannot cast value to rootEpoch")
return false, rootEpoch{}
}
return true, re
}
func (s *Service) setSeenNewPayloadRequest(newPayloadRequestRoot [32]byte, re rootEpoch) {
s.seenNewPayloadRequestCache.Add(newPayloadRequestRoot, re)
}
// Returns true if the block is marked as a bad block.
func (s *Service) hasBadBlock(root [32]byte) bool {
if features.BlacklistedBlock(root) {

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -39,47 +38,90 @@ func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *
}
// Reject messages that are not of the expected type.
executionProof, ok := m.(*ethpb.ExecutionProof)
signedExecutionProof, ok := m.(*ethpb.SignedExecutionProof)
if !ok {
log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
log.WithField("message", m).Error("Message is not of type *ethpb.SignedExecutionProof")
return pubsub.ValidationReject, errWrongMessage
}
// Convert to ROExecutionProof.
roProof, err := blocks.NewROExecutionProof(executionProof)
executionProof := signedExecutionProof.Message
// [IGNORE] The proof's corresponding new payload request
// (identified by `proof.message.public_input.new_payload_request_root`)
// has been seen (via gossip or non-gossip sources)
// (a client MAY queue proofs for processing once the new payload request is
// retrieved).
newPayloadRequestRoot := bytesutil.ToBytes32(executionProof.PublicInput.NewPayloadRequestRoot)
ok, blockRootEpoch := s.hasSeenNewPayloadRequest(newPayloadRequestRoot)
if !ok {
return pubsub.ValidationIgnore, nil
}
blockRoot, blockEpoch := blockRootEpoch.root, blockRootEpoch.epoch
// Convert to ROSignedExecutionProof.
roSignedProof, err := blocks.NewROSignedExecutionProof(signedExecutionProof, blockRoot, blockEpoch)
if err != nil {
return pubsub.ValidationReject, err
}
// Check if the proof has already been seen.
if s.hasSeenProof(roProof.BlockRoot(), roProof.ProofId()) {
// [IGNORE] The proof is the first proof received for the tuple
// `(proof.message.public_input.new_payload_request_root, proof.message.proof_type, proof.prover_pubkey)`
// -- i.e. the first valid or invalid proof for `proof.message.proof_type` from `proof.prover_pubkey`.
if s.hasSeenProof(&roSignedProof) {
return pubsub.ValidationIgnore, nil
}
// Mark the proof as seen regardless of whether it is valid or not,
// to prevent processing multiple proofs with the same
// (new payload request root, proof type, prover pubkey) tuple.
defer s.setSeenProof(&roSignedProof)
// Create the verifier with gossip requirements.
verifier := s.newProofsVerifier([]blocks.ROExecutionProof{roProof}, verification.GossipExecutionProofRequirements)
verifier := s.newSignedExecutionProofsVerifier([]blocks.ROSignedExecutionProof{roSignedProof}, verification.GossipSignedExecutionProofRequirements)
// Run verifications.
if err := verifier.NotFromFutureSlot(); err != nil {
// [REJECT] `proof.prover_pubkey` is associated with an active validator.
if err := verifier.IsFromActiveValidator(); err != nil {
return pubsub.ValidationReject, err
}
if err := verifier.ProofSizeLimits(); err != nil {
// [REJECT] `proof.signature` is valid with respect to the prover's public key.
if err := verifier.ValidProverSignature(); err != nil {
return pubsub.ValidationReject, err
}
// [REJECT] `proof.message.proof_data` is non-empty.
if err := verifier.ProofDataNonEmpty(); err != nil {
return pubsub.ValidationReject, err
}
// [REJECT] `proof.message.proof_data` is not larger than MAX_PROOF_SIZE.
if err := verifier.ProofDataNotTooLarge(); err != nil {
return pubsub.ValidationReject, err
}
// [REJECT] `proof.message` is a valid execution proof.
if err := verifier.ProofVerified(); err != nil {
return pubsub.ValidationReject, err
}
// [IGNORE] The proof is the first proof received for the tuple
// `(proof.message.public_input.new_payload_request_root, proof.message.proof_type)`
// -- i.e. the first valid proof for `proof.message.proof_type` from any prover.
if s.hasSeenValidProof(&roSignedProof) {
return pubsub.ValidationIgnore, nil
}
// Get verified proofs.
verifiedProofs, err := verifier.VerifiedROExecutionProofs()
verifiedProofs, err := verifier.VerifiedROSignedExecutionProofs()
if err != nil {
return pubsub.ValidationIgnore, err
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", roProof.BlockRoot()),
"slot": roProof.Slot(),
"id": roProof.ProofId(),
"blockRoot": fmt.Sprintf("%#x", roSignedProof.BlockRoot()),
"type": roSignedProof.Message.ProofType,
}).Debug("Accepted execution proof")
// Set validator data to the verified proof.
@@ -87,24 +129,51 @@ func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *
return pubsub.ValidationAccept, nil
}
// hasSeenProof returns true if the proof with the same block root and proof ID has been seen before.
func (s *Service) hasSeenProof(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
key := computeProofCacheKey(blockRoot, proofId)
_, seen := s.seenProofCache.Get(key)
return seen
// hasSeenProof returns true if the proof with the same new payload request root, proof type and prover pubkey has been seen before, false otherwise.
func (s *Service) hasSeenProof(roSignedProof *blocks.ROSignedExecutionProof) bool {
key := computeProofCacheKey(roSignedProof)
_, ok := s.seenProofCache.Get(key)
return ok
}
// setSeenProof marks the proof with the given block root and proof ID as seen.
func (s *Service) setSeenProof(slot primitives.Slot, blockRoot [32]byte, proofId primitives.ExecutionProofId) {
key := computeProofCacheKey(blockRoot, proofId)
s.seenProofCache.Add(slot, key, true)
// setSeenProof marks the proof with the given new payload request root, proof type and prover pubkey as seen.
func (s *Service) setSeenProof(roSignedProof *blocks.ROSignedExecutionProof) {
key := computeProofCacheKey(roSignedProof)
s.seenProofCache.Add(key, true)
}
func computeProofCacheKey(blockRoot [32]byte, proofId primitives.ExecutionProofId) string {
// hasSeenValidProof returns true if a proof with the same new payload request root and proof type has been seen before, false otherwise.
func (s *Service) hasSeenValidProof(roSignedProof *blocks.ROSignedExecutionProof) bool {
key := computeValidProofCacheKey(*roSignedProof)
_, ok := s.seenValidProofCache.Get(key)
return ok
}
// setSeenValidProof marks a proof with the given new payload request root and proof type as seen.
func (s *Service) setSeenValidProof(roSignedProof *blocks.ROSignedExecutionProof) {
key := computeValidProofCacheKey(*roSignedProof)
s.seenValidProofCache.Add(key, true)
}
func computeProofCacheKey(roSignedProof *blocks.ROSignedExecutionProof) string {
executionProof := roSignedProof.Message
key := make([]byte, 0, 81)
key = append(key, executionProof.PublicInput.NewPayloadRequestRoot...)
key = append(key, executionProof.ProofType...)
key = append(key, roSignedProof.ProverPubkey...)
return string(key)
}
func computeValidProofCacheKey(roSignedProof blocks.ROSignedExecutionProof) string {
executionProof := roSignedProof.Message
key := make([]byte, 0, 33)
key = append(key, executionProof.PublicInput.NewPayloadRequestRoot...)
key = append(key, executionProof.ProofType...)
key = append(key, blockRoot[:]...)
key = append(key, bytesutil.Bytes1(uint64(proofId))...)
return string(key)
return string(key)
}
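A short sketch, assuming the Service caches above, of how the two keys layer: the per-prover key gates re-processing of duplicates, while the valid-proof key keeps only the first accepted proof per proof type. The helper name shouldProcessProof is illustrative and not part of the change.
func (s *Service) shouldProcessProof(p *blocks.ROSignedExecutionProof) bool {
    // First tier: ignore anything already received from this prover for this
    // (new payload request root, proof type) pair, valid or not.
    if s.hasSeenProof(p) {
        return false
    }
    s.setSeenProof(p)
    // Second tier: once any prover has supplied a valid proof of this type for
    // this new payload request, later proofs of the same type are ignored.
    if s.hasSeenValidProof(p) {
        return false
    }
    return true
}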

View File

@@ -32,7 +32,10 @@ const (
RequireCorrectSubnet
// Execution proof specific.
RequireProofSizeLimits
RequireActiveValidator
RequireValidProverSignature
RequireProofDataNonEmpty
RequireProofDataNotTooLarge
RequireProofVerified
)

View File

@@ -85,6 +85,9 @@ var (
// ErrProofInvalid is joined with all other execution proof verification errors.
ErrProofInvalid = AsVerificationFailure(errors.New("invalid execution proof"))
// ErrProofDataEmpty means RequireProofDataNonEmpty failed.
ErrProofDataEmpty = errors.Join(ErrProofInvalid, errors.New("proof data is empty"))
// ErrProofSizeTooLarge means RequireProofDataNotTooLarge failed.
ErrProofSizeTooLarge = errors.Join(ErrProofInvalid, errors.New("proof data exceeds maximum size"))

View File

@@ -6,46 +6,48 @@ import (
"github.com/pkg/errors"
)
// GossipExecutionProofRequirements defines the set of requirements that ExecutionProofs received on gossip
// must satisfy in order to upgrade an ROExecutionProof to a VerifiedROExecutionProof.
var GossipExecutionProofRequirements = []Requirement{
RequireNotFromFutureSlot,
RequireProofSizeLimits,
// GossipSignedExecutionProofRequirements defines the set of requirements that SignedExecutionProofs received on gossip
// must satisfy in order to upgrade an ROSignedExecutionProof to a VerifiedROSignedExecutionProof.
var GossipSignedExecutionProofRequirements = []Requirement{
RequireActiveValidator,
RequireValidProverSignature,
RequireProofDataNonEmpty,
RequireProofDataNotTooLarge,
RequireProofVerified,
}
// ROExecutionProofsVerifier verifies execution proofs.
type ROExecutionProofsVerifier struct {
// ROSignedExecutionProofsVerifier verifies execution proofs.
type ROSignedExecutionProofsVerifier struct {
*sharedResources
results *results
proofs []blocks.ROExecutionProof
proofs []blocks.ROSignedExecutionProof
}
var _ ExecutionProofsVerifier = &ROExecutionProofsVerifier{}
var _ SignedExecutionProofsVerifier = &ROSignedExecutionProofsVerifier{}
// VerifiedROExecutionProofs "upgrades" wrapped ROExecutionProofs to VerifiedROExecutionProofs.
// VerifiedROSignedExecutionProofs "upgrades" wrapped ROSignedExecutionProofs to VerifiedROSignedExecutionProofs.
// If any of the verifications run against the proofs failed, or some required verifications
// were not run, an error will be returned.
func (v *ROExecutionProofsVerifier) VerifiedROExecutionProofs() ([]blocks.VerifiedROExecutionProof, error) {
func (v *ROSignedExecutionProofsVerifier) VerifiedROSignedExecutionProofs() ([]blocks.VerifiedROSignedExecutionProof, error) {
if !v.results.allSatisfied() {
return nil, v.results.errors(errProofsInvalid)
}
verifiedProofs := make([]blocks.VerifiedROExecutionProof, 0, len(v.proofs))
verifiedSignedProofs := make([]blocks.VerifiedROSignedExecutionProof, 0, len(v.proofs))
for _, proof := range v.proofs {
verifiedProof := blocks.NewVerifiedROExecutionProof(proof)
verifiedProofs = append(verifiedProofs, verifiedProof)
verifiedProof := blocks.NewVerifiedROSignedExecutionProof(proof)
verifiedSignedProofs = append(verifiedSignedProofs, verifiedProof)
}
return verifiedProofs, nil
return verifiedSignedProofs, nil
}
// SatisfyRequirement allows the caller to assert that a requirement has been satisfied.
func (v *ROExecutionProofsVerifier) SatisfyRequirement(req Requirement) {
func (v *ROSignedExecutionProofsVerifier) SatisfyRequirement(req Requirement) {
v.recordResult(req, nil)
}
func (v *ROExecutionProofsVerifier) recordResult(req Requirement, err *error) {
func (v *ROSignedExecutionProofsVerifier) recordResult(req Requirement, err *error) {
if err == nil || *err == nil {
v.results.record(req, nil)
return
@@ -53,51 +55,55 @@ func (v *ROExecutionProofsVerifier) recordResult(req Requirement, err *error) {
v.results.record(req, *err)
}
// NotFromFutureSlot verifies that the execution proof is not from a future slot.
func (v *ROExecutionProofsVerifier) NotFromFutureSlot() (err error) {
if ok, err := v.results.cached(RequireNotFromFutureSlot); ok {
func (v *ROSignedExecutionProofsVerifier) IsFromActiveValidator() (err error) {
if ok, err := v.results.cached(RequireActiveValidator); ok {
return err
}
defer v.recordResult(RequireNotFromFutureSlot, &err)
defer v.recordResult(RequireActiveValidator, &err)
currentSlot := v.clock.CurrentSlot()
now := v.clock.Now()
maximumGossipClockDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
// TODO: Implement the active validator check.
return nil
}
func (v *ROSignedExecutionProofsVerifier) ValidProverSignature() (err error) {
if ok, err := v.results.cached(RequireValidProverSignature); ok {
return err
}
defer v.recordResult(RequireValidProverSignature, &err)
// TODO: Implement prover signature verification.
return nil
}
func (v *ROSignedExecutionProofsVerifier) ProofDataNonEmpty() (err error) {
if ok, err := v.results.cached(RequireProofDataNonEmpty); ok {
return err
}
defer v.recordResult(RequireProofDataNonEmpty, &err)
for _, proof := range v.proofs {
proofSlot := proof.Slot()
if currentSlot == proofSlot {
continue
}
earliestStart, err := v.clock.SlotStart(proofSlot)
if err != nil {
return proofErrBuilder(errors.Wrap(err, "failed to determine slot start time from clock"))
}
earliestStart = earliestStart.Add(-maximumGossipClockDisparity)
if now.Before(earliestStart) {
return proofErrBuilder(errFromFutureSlot)
if len(proof.Message.ProofData) == 0 {
return proofErrBuilder(ErrProofDataEmpty)
}
}
return nil
}
// ProofSizeLimits verifies that the execution proof data does not exceed the maximum allowed size.
func (v *ROExecutionProofsVerifier) ProofSizeLimits() (err error) {
if ok, err := v.results.cached(RequireProofSizeLimits); ok {
func (v *ROSignedExecutionProofsVerifier) ProofDataNotTooLarge() (err error) {
if ok, err := v.results.cached(RequireProofDataNotTooLarge); ok {
return err
}
defer v.recordResult(RequireProofSizeLimits, &err)
defer v.recordResult(RequireProofDataNotTooLarge, &err)
maxProofDataBytes := params.BeaconConfig().MaxProofDataBytes
for _, proof := range v.proofs {
if uint64(len(proof.ProofData)) > maxProofDataBytes {
if uint64(len(proof.Message.ProofData)) > maxProofDataBytes {
return proofErrBuilder(ErrProofSizeTooLarge)
}
}
@@ -107,15 +113,14 @@ func (v *ROExecutionProofsVerifier) ProofSizeLimits() (err error) {
// ProofVerified performs zkVM proof verification.
// Currently a no-op; it will be implemented when actual proof verification is added.
func (v *ROExecutionProofsVerifier) ProofVerified() (err error) {
func (v *ROSignedExecutionProofsVerifier) ProofVerified() (err error) {
if ok, err := v.results.cached(RequireProofVerified); ok {
return err
}
defer v.recordResult(RequireProofVerified, &err)
// For now, all proofs are considered valid.
// TODO: Implement actual zkVM proof verification.
// TODO: Implement actual zkVM proof verification.
return nil
}
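A minimal sketch of driving the verifier end to end with the gossip requirement set, mirroring the order used in validateExecutionProof; the wrapper name verifyGossipProofs is assumed for illustration and is not part of this change.
func verifyGossipProofs(
    newVerifier NewSignedExecutionProofsVerifier,
    proofs []blocks.ROSignedExecutionProof,
) ([]blocks.VerifiedROSignedExecutionProof, error) {
    v := newVerifier(proofs, GossipSignedExecutionProofRequirements)
    // Run every requirement in the gossip set; each call records its result.
    if err := v.IsFromActiveValidator(); err != nil {
        return nil, err
    }
    if err := v.ValidProverSignature(); err != nil {
        return nil, err
    }
    if err := v.ProofDataNonEmpty(); err != nil {
        return nil, err
    }
    if err := v.ProofDataNotTooLarge(); err != nil {
        return nil, err
    }
    if err := v.ProofVerified(); err != nil {
        return nil, err
    }
    // Only succeeds if every required verification was run and satisfied.
    return v.VerifiedROSignedExecutionProofs()
}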

View File

@@ -88,8 +88,8 @@ func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColu
// NewExecutionProofsVerifier creates an ExecutionProofsVerifier for a slice of execution proofs,
// with the given set of requirements.
func (ini *Initializer) NewExecutionProofsVerifier(proofs []blocks.ROExecutionProof, reqs []Requirement) *ROExecutionProofsVerifier {
return &ROExecutionProofsVerifier{
func (ini *Initializer) NewExecutionProofsVerifier(proofs []blocks.ROSignedExecutionProof, reqs []Requirement) *ROSignedExecutionProofsVerifier {
return &ROSignedExecutionProofsVerifier{
sharedResources: ini.shared,
proofs: proofs,
results: newResults(reqs...),

View File

@@ -55,16 +55,18 @@ type DataColumnsVerifier interface {
// column verifier can be easily initialized.
type NewDataColumnsVerifier func(dataColumns []blocks.RODataColumn, reqs []Requirement) DataColumnsVerifier
// ExecutionProofsVerifier defines the methods implemented by ROExecutionProofsVerifier.
type ExecutionProofsVerifier interface {
VerifiedROExecutionProofs() ([]blocks.VerifiedROExecutionProof, error)
// SignedExecutionProofsVerifier defines the methods implemented by ROSignedExecutionProofsVerifier.
type SignedExecutionProofsVerifier interface {
VerifiedROSignedExecutionProofs() ([]blocks.VerifiedROSignedExecutionProof, error)
SatisfyRequirement(Requirement)
NotFromFutureSlot() error
ProofSizeLimits() error
IsFromActiveValidator() error
ValidProverSignature() error
ProofDataNonEmpty() error
ProofDataNotTooLarge() error
ProofVerified() error
}
// NewExecutionProofsVerifier is a function signature that can be used to mock a setup where an
// NewSignedExecutionProofsVerifier is a function signature that can be used to mock a setup where an
// execution proofs verifier can be easily initialized.
type NewExecutionProofsVerifier func(proofs []blocks.ROExecutionProof, reqs []Requirement) ExecutionProofsVerifier
type NewSignedExecutionProofsVerifier func(proofs []blocks.ROSignedExecutionProof, reqs []Requirement) SignedExecutionProofsVerifier

View File

@@ -33,8 +33,6 @@ func (r Requirement) String() string {
return "RequireValidFields"
case RequireCorrectSubnet:
return "RequireCorrectSubnet"
case RequireProofSizeLimits:
return "RequireProofSizeLimits"
case RequireProofVerified:
return "RequireProofVerified"
default:

View File

@@ -20,7 +20,6 @@ go_library(
"//cmd:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",

View File

@@ -368,12 +368,4 @@ var (
Usage: "Disables the engine_getBlobsV2 usage.",
Hidden: true,
}
// ZKVM Generation Proof Type
ZkvmGenerationProofTypeFlag = &cli.IntSliceFlag{
Name: "zkvm-generation-proof-types",
Usage: `
Comma-separated list of proof type IDs to generate
(e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth).
Optional - nodes can verify proofs without generating them.`,
}
)

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v7/cmd"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
@@ -29,7 +28,6 @@ type GlobalFlags struct {
DataColumnBatchLimit int
DataColumnBatchLimitBurstFactor int
StateDiffExponents []int
ProofGenerationTypes []primitives.ExecutionProofId
}
var globalConfig *GlobalFlags
@@ -92,19 +90,6 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
}
}
// zkVM Proof Generation Types
proofTypes := make([]primitives.ExecutionProofId, 0, len(ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name)))
for _, t := range ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name) {
proofTypes = append(proofTypes, primitives.ExecutionProofId(t))
}
cfg.ProofGenerationTypes = proofTypes
if features.Get().EnableZkvm {
if err := validateZkvmProofGenerationTypes(cfg.ProofGenerationTypes); err != nil {
return fmt.Errorf("validate Zkvm proof generation types: %w", err)
}
}
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
@@ -156,13 +141,3 @@ func validateStateDiffExponents(exponents []int) error {
}
return nil
}
// validateZkvmProofGenerationTypes validates the provided proof IDs.
func validateZkvmProofGenerationTypes(types []primitives.ExecutionProofId) error {
for _, t := range types {
if t >= primitives.EXECUTION_PROOF_TYPE_COUNT {
return fmt.Errorf("invalid zkvm proof generation type: %d; valid types are between 0 and %d", t, primitives.EXECUTION_PROOF_TYPE_COUNT-1)
}
}
return nil
}

View File

@@ -162,7 +162,6 @@ var appFlags = []cli.Flag{
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.DisableEphemeralLogFile,
flags.ZkvmGenerationProofTypeFlag,
}
func init() {

View File

@@ -78,11 +78,10 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
}
if layout == filesystem.LayoutNameFlat {
log.Warningf(
log.Warnf(
"Existing '%s' blob storage layout detected. Consider setting the flag --%s=%s for faster startup and more reliable pruning. Setting this flag will automatically migrate your existing blob storage to the newer layout on the next restart.",
filesystem.LayoutNameFlat, BlobStorageLayout.Name, filesystem.LayoutNameByEpoch)
}
blobStorageOptions := node.WithBlobStorageOptions(
filesystem.WithBlobRetentionEpochs(blobRetentionEpoch),
filesystem.WithBasePath(blobPath),

View File

@@ -234,12 +234,6 @@ var appHelpFlagGroups = []flagGroup{
flags.SetGCPercent,
},
},
{
Name: "zkvm",
Flags: []cli.Flag{
flags.ZkvmGenerationProofTypeFlag,
},
},
}
func init() {

View File

@@ -2,98 +2,90 @@ package blocks
import (
"errors"
"fmt"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
var (
errNilExecutionProof = errors.New("execution proof is nil")
errEmptyBlockRoot = errors.New("block root is empty")
errInvalidBlockRootSize = errors.New("block root has invalid size")
errInvalidBlockHashSize = errors.New("block hash has invalid size")
errNilExecutionProof = errors.New("execution proof is nil")
errEmptyProverPubkey = errors.New("prover pubkey is empty")
errEmptyProofData = errors.New("proof data is empty")
errEmptyNewPayloadRequestRoot = errors.New("new payload request root is empty")
)
// ROExecutionProof represents a read-only execution proof with its block root.
type ROExecutionProof struct {
*ethpb.ExecutionProof
type ROSignedExecutionProof struct {
*ethpb.SignedExecutionProof
blockRoot [fieldparams.RootLength]byte
epoch primitives.Epoch
}
func roExecutionProofNilCheck(ep *ethpb.ExecutionProof) error {
if ep == nil {
func roSignedExecutionProofNilCheck(sep *ethpb.SignedExecutionProof) error {
if sep == nil {
return errNilExecutionProof
}
if len(ep.BlockRoot) == 0 {
return errEmptyBlockRoot
if len(sep.ProverPubkey) == 0 {
return errEmptyProverPubkey
}
if len(ep.BlockRoot) != fieldparams.RootLength {
return errInvalidBlockRootSize
ep := sep.Message
if len(ep.ProofData) == 0 {
return errEmptyProofData
}
if len(ep.BlockHash) != fieldparams.RootLength {
return errInvalidBlockHashSize
if len(ep.PublicInput.NewPayloadRequestRoot) == 0 {
return errEmptyNewPayloadRequestRoot
}
return nil
}
// NewROExecutionProof creates a new ROExecutionProof from the given ExecutionProof.
// The block root is extracted from the ExecutionProof's BlockRoot field.
func NewROExecutionProof(ep *ethpb.ExecutionProof) (ROExecutionProof, error) {
if err := roExecutionProofNilCheck(ep); err != nil {
return ROExecutionProof{}, err
// NewROSignedExecutionProof creates a new ROSignedExecutionProof with a given block root and epoch.
func NewROSignedExecutionProof(
signedExecutionProof *ethpb.SignedExecutionProof,
root [fieldparams.RootLength]byte,
epoch primitives.Epoch,
) (ROSignedExecutionProof, error) {
if err := roSignedExecutionProofNilCheck(signedExecutionProof); err != nil {
return ROSignedExecutionProof{}, fmt.Errorf("ro signed execution proof nil check: %w", err)
}
return ROExecutionProof{
ExecutionProof: ep,
blockRoot: bytesutil.ToBytes32(ep.BlockRoot),
}, nil
}
// NewROExecutionProofWithRoot creates a new ROExecutionProof with a given root.
func NewROExecutionProofWithRoot(ep *ethpb.ExecutionProof, root [fieldparams.RootLength]byte) (ROExecutionProof, error) {
if err := roExecutionProofNilCheck(ep); err != nil {
return ROExecutionProof{}, err
roSignedExecutionProof := ROSignedExecutionProof{
SignedExecutionProof: signedExecutionProof,
blockRoot: root,
epoch: epoch,
}
return ROExecutionProof{
ExecutionProof: ep,
blockRoot: root,
}, nil
return roSignedExecutionProof, nil
}
// BlockRoot returns the block root of the execution proof.
func (p *ROExecutionProof) BlockRoot() [fieldparams.RootLength]byte {
func (p *ROSignedExecutionProof) BlockRoot() [fieldparams.RootLength]byte {
return p.blockRoot
}
// Slot returns the slot of the execution proof.
func (p *ROExecutionProof) Slot() primitives.Slot {
return p.ExecutionProof.Slot
// Epoch returns the epoch of the execution proof.
func (p *ROSignedExecutionProof) Epoch() primitives.Epoch {
return p.epoch
}
// ProofId returns the proof ID of the execution proof.
func (p *ROExecutionProof) ProofId() primitives.ExecutionProofId {
return p.ExecutionProof.ProofId
}
// BlockHash returns the block hash of the execution proof.
func (p *ROExecutionProof) BlockHash() [32]byte {
return bytesutil.ToBytes32(p.ExecutionProof.BlockHash)
}
// // ProofType returns the proof type of the execution proof.
// func (p *ROExecutionProof) ProofType() primitives.ProofType {
// return p.ExecutionProof.ProofType
// }
// VerifiedROExecutionProof represents an ROExecutionProof that has undergone full verification.
type VerifiedROExecutionProof struct {
ROExecutionProof
type VerifiedROSignedExecutionProof struct {
ROSignedExecutionProof
}
// NewVerifiedROExecutionProof "upgrades" an ROExecutionProof to a VerifiedROExecutionProof.
// This method should only be used by the verification package.
func NewVerifiedROExecutionProof(ro ROExecutionProof) VerifiedROExecutionProof {
return VerifiedROExecutionProof{ROExecutionProof: ro}
func NewVerifiedROSignedExecutionProof(ro ROSignedExecutionProof) VerifiedROSignedExecutionProof {
return VerifiedROSignedExecutionProof{ROSignedExecutionProof: ro}
}
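From a caller's perspective, a hypothetical construction sketch showing the fields roSignedExecutionProofNilCheck expects to be populated before the wrapper is built; all values here are placeholders and the helper name is illustrative.
func buildROSignedExecutionProof(
    proofBytes []byte,
    newPayloadRequestRoot [32]byte,
    proverPubkey [48]byte,
    signature [96]byte,
    blockRoot [32]byte,
    blockEpoch primitives.Epoch,
) (blocks.ROSignedExecutionProof, error) {
    sp := &ethpb.SignedExecutionProof{
        Message: &ethpb.ExecutionProof{
            ProofData:   proofBytes, // must be non-empty
            PublicInput: &ethpb.PublicInput{NewPayloadRequestRoot: newPayloadRequestRoot[:]}, // must be non-empty
        },
        ProverPubkey: proverPubkey[:], // must be non-empty
        Signature:    signature[:],
    }
    // The block root and epoch come from the cached new payload request lookup.
    return blocks.NewROSignedExecutionProof(sp, blockRoot, blockEpoch)
}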

View File

@@ -11,7 +11,6 @@ go_library(
"domain.go",
"epoch.go",
"execution_address.go",
"execution_proof_id.go",
"kzg.go",
"payload_id.go",
"slot.go",
@@ -37,7 +36,6 @@ go_test(
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",
"execution_proof_id_test.go",
"slot_test.go",
"sszbytes_test.go",
"sszuint64_test.go",

View File

@@ -1,64 +0,0 @@
package primitives
import (
"fmt"
fssz "github.com/prysmaticlabs/fastssz"
)
var _ fssz.HashRoot = (ExecutionProofId)(0)
var _ fssz.Marshaler = (*ExecutionProofId)(nil)
var _ fssz.Unmarshaler = (*ExecutionProofId)(nil)
// Number of execution proofs
// Each proof represents a different zkVM+EL combination
//
// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future
const EXECUTION_PROOF_TYPE_COUNT = 8
// ExecutionProofId identifies which zkVM/proof system a proof belongs to.
type ExecutionProofId uint8
func (id *ExecutionProofId) IsValid() bool {
return uint8(*id) < EXECUTION_PROOF_TYPE_COUNT
}
// HashTreeRoot --
func (id ExecutionProofId) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(id)
}
// HashTreeRootWith --
func (id ExecutionProofId) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint8(uint8(id))
return nil
}
// UnmarshalSSZ --
func (id *ExecutionProofId) UnmarshalSSZ(buf []byte) error {
if len(buf) != id.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", id.SizeSSZ(), len(buf))
}
*id = ExecutionProofId(fssz.UnmarshallUint8(buf))
return nil
}
// MarshalSSZTo --
func (id *ExecutionProofId) MarshalSSZTo(buf []byte) ([]byte, error) {
marshalled, err := id.MarshalSSZ()
if err != nil {
return nil, err
}
return append(buf, marshalled...), nil
}
// MarshalSSZ --
func (id *ExecutionProofId) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint8([]byte{}, uint8(*id))
return marshalled, nil
}
// SizeSSZ --
func (id *ExecutionProofId) SizeSSZ() int {
return 1
}

View File

@@ -1,73 +0,0 @@
package primitives_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
func TestExecutionProofId_IsValid(t *testing.T) {
tests := []struct {
name string
id primitives.ExecutionProofId
valid bool
}{
{
name: "valid proof id 0",
id: 0,
valid: true,
},
{
name: "valid proof id 1",
id: 1,
valid: true,
},
{
name: "valid proof id 7 (max valid)",
id: 7,
valid: true,
},
{
name: "invalid proof id 8 (at limit)",
id: 8,
valid: false,
},
{
name: "invalid proof id 255",
id: 255,
valid: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.id.IsValid(); got != tt.valid {
t.Errorf("ExecutionProofId.IsValid() = %v, want %v", got, tt.valid)
}
})
}
}
func TestExecutionProofId_Casting(t *testing.T) {
id := primitives.ExecutionProofId(5)
t.Run("uint8", func(t *testing.T) {
if uint8(id) != 5 {
t.Errorf("Casting to uint8 failed: got %v, want 5", uint8(id))
}
})
t.Run("from uint8", func(t *testing.T) {
var x uint8 = 7
if primitives.ExecutionProofId(x) != 7 {
t.Errorf("Casting from uint8 failed: got %v, want 7", primitives.ExecutionProofId(x))
}
})
t.Run("int", func(t *testing.T) {
var x = 3
if primitives.ExecutionProofId(x) != 3 {
t.Errorf("Casting from int failed: got %v, want 3", primitives.ExecutionProofId(x))
}
})
}

View File

@@ -51,6 +51,7 @@ ssz_gen_marshal(
"DepositRequest",
"ConsolidationRequest",
"ExecutionRequests",
"NewPayloadRequest",
],
)

View File

@@ -3580,3 +3580,211 @@ func (b *BlobsBundleV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the NewPayloadRequest object
func (n *NewPayloadRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(n)
}
// MarshalSSZTo ssz marshals the NewPayloadRequest object to a target array
func (n *NewPayloadRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(44)
// Offset (0) 'ExecutionPayload'
dst = ssz.WriteOffset(dst, offset)
if n.ExecutionPayload == nil {
n.ExecutionPayload = new(ExecutionPayloadDeneb)
}
offset += n.ExecutionPayload.SizeSSZ()
// Offset (1) 'VersionedHashes'
dst = ssz.WriteOffset(dst, offset)
offset += len(n.VersionedHashes) * 32
// Field (2) 'ParentBlockRoot'
if size := len(n.ParentBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.ParentBlockRoot", size, 32)
return
}
dst = append(dst, n.ParentBlockRoot...)
// Offset (3) 'ExecutionRequests'
dst = ssz.WriteOffset(dst, offset)
if n.ExecutionRequests == nil {
n.ExecutionRequests = new(ExecutionRequests)
}
offset += n.ExecutionRequests.SizeSSZ()
// Field (0) 'ExecutionPayload'
if dst, err = n.ExecutionPayload.MarshalSSZTo(dst); err != nil {
return
}
// Field (1) 'VersionedHashes'
if size := len(n.VersionedHashes); size > 4096 {
err = ssz.ErrListTooBigFn("--.VersionedHashes", size, 4096)
return
}
for ii := 0; ii < len(n.VersionedHashes); ii++ {
if size := len(n.VersionedHashes[ii]); size != 32 {
err = ssz.ErrBytesLengthFn("--.VersionedHashes[ii]", size, 32)
return
}
dst = append(dst, n.VersionedHashes[ii]...)
}
// Field (3) 'ExecutionRequests'
if dst, err = n.ExecutionRequests.MarshalSSZTo(dst); err != nil {
return
}
return
}
// UnmarshalSSZ ssz unmarshals the NewPayloadRequest object
func (n *NewPayloadRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 44 {
return ssz.ErrSize
}
tail := buf
var o0, o1, o3 uint64
// Offset (0) 'ExecutionPayload'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 44 {
return ssz.ErrInvalidVariableOffset
}
// Offset (1) 'VersionedHashes'
if o1 = ssz.ReadOffset(buf[4:8]); o1 > size || o0 > o1 {
return ssz.ErrOffset
}
// Field (2) 'ParentBlockRoot'
if cap(n.ParentBlockRoot) == 0 {
n.ParentBlockRoot = make([]byte, 0, len(buf[8:40]))
}
n.ParentBlockRoot = append(n.ParentBlockRoot, buf[8:40]...)
// Offset (3) 'ExecutionRequests'
if o3 = ssz.ReadOffset(buf[40:44]); o3 > size || o1 > o3 {
return ssz.ErrOffset
}
// Field (0) 'ExecutionPayload'
{
buf = tail[o0:o1]
if n.ExecutionPayload == nil {
n.ExecutionPayload = new(ExecutionPayloadDeneb)
}
if err = n.ExecutionPayload.UnmarshalSSZ(buf); err != nil {
return err
}
}
// Field (1) 'VersionedHashes'
{
buf = tail[o1:o3]
num, err := ssz.DivideInt2(len(buf), 32, 4096)
if err != nil {
return err
}
n.VersionedHashes = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(n.VersionedHashes[ii]) == 0 {
n.VersionedHashes[ii] = make([]byte, 0, len(buf[ii*32:(ii+1)*32]))
}
n.VersionedHashes[ii] = append(n.VersionedHashes[ii], buf[ii*32:(ii+1)*32]...)
}
}
// Field (3) 'ExecutionRequests'
{
buf = tail[o3:]
if n.ExecutionRequests == nil {
n.ExecutionRequests = new(ExecutionRequests)
}
if err = n.ExecutionRequests.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the NewPayloadRequest object
func (n *NewPayloadRequest) SizeSSZ() (size int) {
size = 44
// Field (0) 'ExecutionPayload'
if n.ExecutionPayload == nil {
n.ExecutionPayload = new(ExecutionPayloadDeneb)
}
size += n.ExecutionPayload.SizeSSZ()
// Field (1) 'VersionedHashes'
size += len(n.VersionedHashes) * 32
// Field (3) 'ExecutionRequests'
if n.ExecutionRequests == nil {
n.ExecutionRequests = new(ExecutionRequests)
}
size += n.ExecutionRequests.SizeSSZ()
return
}
// HashTreeRoot ssz hashes the NewPayloadRequest object
func (n *NewPayloadRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(n)
}
// HashTreeRootWith ssz hashes the NewPayloadRequest object with a hasher
func (n *NewPayloadRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'ExecutionPayload'
if err = n.ExecutionPayload.HashTreeRootWith(hh); err != nil {
return
}
// Field (1) 'VersionedHashes'
{
if size := len(n.VersionedHashes); size > 4096 {
err = ssz.ErrListTooBigFn("--.VersionedHashes", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range n.VersionedHashes {
if len(i) != 32 {
err = ssz.ErrBytesLength
return
}
hh.Append(i)
}
numItems := uint64(len(n.VersionedHashes))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (2) 'ParentBlockRoot'
if size := len(n.ParentBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.ParentBlockRoot", size, 32)
return
}
hh.PutBytes(n.ParentBlockRoot)
// Field (3) 'ExecutionRequests'
if err = n.ExecutionRequests.HashTreeRootWith(hh); err != nil {
return
}
hh.Merkleize(indx)
return
}
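A brief usage sketch of the generated SSZ surface; decodeAndRootNewPayloadRequest is a hypothetical helper assumed to live alongside the generated code. Note the 44-byte fixed prefix: three 4-byte offsets plus the 32-byte parent_block_root.
func decodeAndRootNewPayloadRequest(encoded []byte) (*NewPayloadRequest, [32]byte, error) {
    npr := &NewPayloadRequest{}
    if err := npr.UnmarshalSSZ(encoded); err != nil {
        return nil, [32]byte{}, fmt.Errorf("unmarshal ssz: %w", err)
    }
    // This root is what execution proofs commit to via
    // public_input.new_payload_request_root on the gossip path.
    root, err := npr.HashTreeRoot()
    if err != nil {
        return nil, [32]byte{}, fmt.Errorf("hash tree root: %w", err)
    }
    return npr, root, nil
}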

View File

@@ -295,4 +295,5 @@ message BlobAndProofV2 {
(ethereum.eth.ext.ssz_size) = "48",
(ethereum.eth.ext.ssz_max) = "max_cell_proofs_length.size"
];
}
}

View File

@@ -10,6 +10,7 @@ import (
reflect "reflect"
sync "sync"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
@@ -97,43 +98,134 @@ func (x *ExecutionBundleFulu) GetExecutionRequests() [][]byte {
return nil
}
type NewPayloadRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
ExecutionPayload *ExecutionPayloadDeneb `protobuf:"bytes,1,opt,name=execution_payload,json=executionPayload,proto3" json:"execution_payload,omitempty"`
VersionedHashes [][]byte `protobuf:"bytes,2,rep,name=versioned_hashes,json=versionedHashes,proto3" json:"versioned_hashes,omitempty" ssz-max:"4096" ssz-size:"?,32"`
ParentBlockRoot []byte `protobuf:"bytes,3,opt,name=parent_block_root,json=parentBlockRoot,proto3" json:"parent_block_root,omitempty" ssz-size:"32"`
ExecutionRequests *ExecutionRequests `protobuf:"bytes,4,opt,name=execution_requests,json=executionRequests,proto3" json:"execution_requests,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewPayloadRequest) Reset() {
*x = NewPayloadRequest{}
mi := &file_proto_engine_v1_fulu_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewPayloadRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewPayloadRequest) ProtoMessage() {}
func (x *NewPayloadRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_engine_v1_fulu_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewPayloadRequest.ProtoReflect.Descriptor instead.
func (*NewPayloadRequest) Descriptor() ([]byte, []int) {
return file_proto_engine_v1_fulu_proto_rawDescGZIP(), []int{1}
}
func (x *NewPayloadRequest) GetExecutionPayload() *ExecutionPayloadDeneb {
if x != nil {
return x.ExecutionPayload
}
return nil
}
func (x *NewPayloadRequest) GetVersionedHashes() [][]byte {
if x != nil {
return x.VersionedHashes
}
return nil
}
func (x *NewPayloadRequest) GetParentBlockRoot() []byte {
if x != nil {
return x.ParentBlockRoot
}
return nil
}
func (x *NewPayloadRequest) GetExecutionRequests() *ExecutionRequests {
if x != nil {
return x.ExecutionRequests
}
return nil
}
var File_proto_engine_v1_fulu_proto protoreflect.FileDescriptor
var file_proto_engine_v1_fulu_proto_rawDesc = []byte{
0x0a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x66, 0x75, 0x6c, 0x75, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65, 0x74,
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31,
0x1a, 0x26, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x67, 0x69,
0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x02, 0x0a, 0x13, 0x45, 0x78, 0x65,
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x75,
0x12, 0x43, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62, 0x52, 0x07, 0x70, 0x61,
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x62,
0x6c, 0x6f, 0x62, 0x73, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x21, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64,
0x6c, 0x65, 0x56, 0x32, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c,
0x65, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6f, 0x76, 0x65, 0x72,
0x72, 0x69, 0x64, 0x65, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
0x28, 0x08, 0x52, 0x15, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
0x64, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x78, 0x65,
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18,
0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x8e, 0x01, 0x0a, 0x16, 0x6f, 0x72, 0x67,
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65,
0x2e, 0x76, 0x31, 0x42, 0x0c, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74,
0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa,
0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c,
0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f,
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65,
0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x72, 0x61, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x02, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x75, 0x12, 0x43, 0x0a, 0x07,
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x62, 0x73,
0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x32,
0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x36, 0x0a,
0x17, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15,
0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x42, 0x75,
0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
0x0c, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x73, 0x22, 0xb2, 0x02, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x78,
0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75,
0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62,
0x52, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f,
0x61, 0x64, 0x12, 0x3b, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f,
0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5,
0x18, 0x04, 0x3f, 0x2c, 0x33, 0x32, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0f,
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12,
0x32, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f,
0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02,
0x33, 0x32, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52,
0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x25, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x8e, 0x01, 0x0a, 0x16, 0x6f, 0x72,
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x72, 0x61, 0x50, 0x72, 0x6f,
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70,
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e,
0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31,
0xaa, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69,
0x6e, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
0x5c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
@@ -148,20 +240,24 @@ func file_proto_engine_v1_fulu_proto_rawDescGZIP() []byte {
return file_proto_engine_v1_fulu_proto_rawDescData
}
var file_proto_engine_v1_fulu_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_proto_engine_v1_fulu_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_engine_v1_fulu_proto_goTypes = []any{
(*ExecutionBundleFulu)(nil), // 0: ethereum.engine.v1.ExecutionBundleFulu
(*ExecutionPayloadDeneb)(nil), // 1: ethereum.engine.v1.ExecutionPayloadDeneb
(*BlobsBundleV2)(nil), // 2: ethereum.engine.v1.BlobsBundleV2
(*NewPayloadRequest)(nil), // 1: ethereum.engine.v1.NewPayloadRequest
(*ExecutionPayloadDeneb)(nil), // 2: ethereum.engine.v1.ExecutionPayloadDeneb
(*BlobsBundleV2)(nil), // 3: ethereum.engine.v1.BlobsBundleV2
(*ExecutionRequests)(nil), // 4: ethereum.engine.v1.ExecutionRequests
}
var file_proto_engine_v1_fulu_proto_depIdxs = []int32{
1, // 0: ethereum.engine.v1.ExecutionBundleFulu.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
2, // 1: ethereum.engine.v1.ExecutionBundleFulu.blobs_bundle:type_name -> ethereum.engine.v1.BlobsBundleV2
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
2, // 0: ethereum.engine.v1.ExecutionBundleFulu.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
3, // 1: ethereum.engine.v1.ExecutionBundleFulu.blobs_bundle:type_name -> ethereum.engine.v1.BlobsBundleV2
2, // 2: ethereum.engine.v1.NewPayloadRequest.execution_payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
4, // 3: ethereum.engine.v1.NewPayloadRequest.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_proto_engine_v1_fulu_proto_init() }
@@ -170,13 +266,14 @@ func file_proto_engine_v1_fulu_proto_init() {
return
}
file_proto_engine_v1_execution_engine_proto_init()
file_proto_engine_v1_electra_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_engine_v1_fulu_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},

View File

@@ -2,7 +2,9 @@ syntax = "proto3";
package ethereum.engine.v1;
import "proto/eth/ext/options.proto";
import "proto/engine/v1/execution_engine.proto";
import "proto/engine/v1/electra.proto";
option csharp_namespace = "Ethereum.Engine.V1";
option go_package = "github.com/prysmaticlabs/prysm/v5/proto/engine/v1;enginev1";
@@ -18,3 +20,15 @@ message ExecutionBundleFulu {
bool should_override_builder = 4;
repeated bytes execution_requests = 5;
}
message NewPayloadRequest {
ExecutionPayloadDeneb execution_payload = 1;
repeated bytes versioned_hashes = 2 [
(ethereum.eth.ext.ssz_size) = "?,32",
(ethereum.eth.ext.ssz_max) = "4096"
];
bytes parent_block_root = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
ExecutionRequests execution_requests = 4;
}

View File

@@ -192,6 +192,7 @@ ssz_fulu_objs = [
"SignedBeaconBlockContentsFulu",
"SignedBeaconBlockFulu",
"SignedBlindedBeaconBlockFulu",
"SignedExecutionProof",
]
ssz_gloas_objs = [
@@ -372,10 +373,6 @@ go_library(
"cloners.go",
"eip_7521.go",
"execution_proof.go",
# NOTE: ExecutionProof includes an alias type of uint8,
# which is not supported by fastssz sszgen.
# Temporarily managed manually.
"execution_proof.ssz.go",
"gloas.go",
"log.go",
"sync_committee_mainnet.go",

View File

@@ -9,10 +9,10 @@ func (e *ExecutionProof) Copy() *ExecutionProof {
}
return &ExecutionProof{
ProofId: e.ProofId,
Slot: e.Slot,
BlockHash: bytesutil.SafeCopyBytes(e.BlockHash),
BlockRoot: bytesutil.SafeCopyBytes(e.BlockRoot),
ProofData: bytesutil.SafeCopyBytes(e.ProofData),
ProofType: e.ProofType,
PublicInput: &PublicInput{
NewPayloadRequestRoot: bytesutil.SafeCopyBytes(e.PublicInput.NewPayloadRequestRoot),
},
}
}

View File

@@ -10,7 +10,6 @@ import (
reflect "reflect"
sync "sync"
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -23,20 +22,78 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SignedExecutionProof struct {
state protoimpl.MessageState `protogen:"open.v1"`
Message *ExecutionProof `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
ProverPubkey []byte `protobuf:"bytes,2,opt,name=prover_pubkey,json=proverPubkey,proto3" json:"prover_pubkey,omitempty" ssz-size:"48"`
Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SignedExecutionProof) Reset() {
*x = SignedExecutionProof{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SignedExecutionProof) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SignedExecutionProof) ProtoMessage() {}
func (x *SignedExecutionProof) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SignedExecutionProof.ProtoReflect.Descriptor instead.
func (*SignedExecutionProof) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{0}
}
func (x *SignedExecutionProof) GetMessage() *ExecutionProof {
if x != nil {
return x.Message
}
return nil
}
func (x *SignedExecutionProof) GetProverPubkey() []byte {
if x != nil {
return x.ProverPubkey
}
return nil
}
func (x *SignedExecutionProof) GetSignature() []byte {
if x != nil {
return x.Signature
}
return nil
}
type ExecutionProof struct {
state protoimpl.MessageState `protogen:"open.v1"`
ProofId github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,1,opt,name=proof_id,json=proofId,proto3" json:"proof_id,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"`
Slot github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
BlockHash []byte `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty" ssz-size:"32"`
BlockRoot []byte `protobuf:"bytes,4,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
ProofData []byte `protobuf:"bytes,5,opt,name=proof_data,json=proofData,proto3" json:"proof_data,omitempty" ssz-max:"1048576"`
state protoimpl.MessageState `protogen:"open.v1"`
ProofData []byte `protobuf:"bytes,1,opt,name=proof_data,json=proofData,proto3" json:"proof_data,omitempty" ssz-max:"307200"`
ProofType []byte `protobuf:"bytes,2,opt,name=proof_type,json=proofType,proto3" json:"proof_type,omitempty" ssz-max:"1"`
PublicInput *PublicInput `protobuf:"bytes,3,opt,name=public_input,json=publicInput,proto3" json:"public_input,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionProof) Reset() {
*x = ExecutionProof{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -48,7 +105,7 @@ func (x *ExecutionProof) String() string {
func (*ExecutionProof) ProtoMessage() {}
func (x *ExecutionProof) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -61,35 +118,7 @@ func (x *ExecutionProof) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExecutionProof.ProtoReflect.Descriptor instead.
func (*ExecutionProof) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{0}
}
func (x *ExecutionProof) GetProofId() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
if x != nil {
return x.ProofId
}
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(0)
}
func (x *ExecutionProof) GetSlot() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot {
if x != nil {
return x.Slot
}
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(0)
}
func (x *ExecutionProof) GetBlockHash() []byte {
if x != nil {
return x.BlockHash
}
return nil
}
func (x *ExecutionProof) GetBlockRoot() []byte {
if x != nil {
return x.BlockRoot
}
return nil
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{1}
}
func (x *ExecutionProof) GetProofData() []byte {
@@ -99,18 +128,74 @@ func (x *ExecutionProof) GetProofData() []byte {
return nil
}
func (x *ExecutionProof) GetProofType() []byte {
if x != nil {
return x.ProofType
}
return nil
}
func (x *ExecutionProof) GetPublicInput() *PublicInput {
if x != nil {
return x.PublicInput
}
return nil
}
type PublicInput struct {
state protoimpl.MessageState `protogen:"open.v1"`
NewPayloadRequestRoot []byte `protobuf:"bytes,1,opt,name=new_payload_request_root,json=newPayloadRequestRoot,proto3" json:"new_payload_request_root,omitempty" ssz-size:"32"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PublicInput) Reset() {
*x = PublicInput{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PublicInput) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PublicInput) ProtoMessage() {}
func (x *PublicInput) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PublicInput.ProtoReflect.Descriptor instead.
func (*PublicInput) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{2}
}
func (x *PublicInput) GetNewPayloadRequestRoot() []byte {
if x != nil {
return x.NewPayloadRequestRoot
}
return nil
}
type ExecutionProofsByRootRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
CountNeeded uint64 `protobuf:"varint,2,opt,name=count_needed,json=countNeeded,proto3" json:"count_needed,omitempty"`
AlreadyHave []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,3,rep,packed,name=already_have,json=alreadyHave,proto3" json:"already_have,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId" ssz-max:"8"`
state protoimpl.MessageState `protogen:"open.v1"`
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionProofsByRootRequest) Reset() {
*x = ExecutionProofsByRootRequest{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -122,7 +207,7 @@ func (x *ExecutionProofsByRootRequest) String() string {
func (*ExecutionProofsByRootRequest) ProtoMessage() {}
func (x *ExecutionProofsByRootRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -135,7 +220,7 @@ func (x *ExecutionProofsByRootRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExecutionProofsByRootRequest.ProtoReflect.Descriptor instead.
func (*ExecutionProofsByRootRequest) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{1}
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{3}
}
func (x *ExecutionProofsByRootRequest) GetBlockRoot() []byte {
@@ -145,20 +230,6 @@ func (x *ExecutionProofsByRootRequest) GetBlockRoot() []byte {
return nil
}
func (x *ExecutionProofsByRootRequest) GetCountNeeded() uint64 {
if x != nil {
return x.CountNeeded
}
return 0
}
func (x *ExecutionProofsByRootRequest) GetAlreadyHave() []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
if x != nil {
return x.AlreadyHave
}
return []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(nil)
}
var File_proto_prysm_v1alpha1_execution_proof_proto protoreflect.FileDescriptor
var file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = []byte{
@@ -168,53 +239,48 @@ var file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = []byte{
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65,
0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
0x6f, 0x6f, 0x66, 0x12, 0x6b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x50, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64,
0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44,
0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c,
0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73,
0x68, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62,
0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6f,
0x66, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0b, 0x92, 0xb5,
0x18, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66,
0x44, 0x61, 0x74, 0x61, 0x22, 0xe2, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72,
0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75,
0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x3f, 0x0a, 0x07, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x74, 0x68,
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f,
0x66, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2b, 0x0a, 0x0d, 0x70, 0x72,
0x6f, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x65,
0x72, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02,
0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xa8, 0x01,
0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66,
0x12, 0x29, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x92, 0xb5, 0x18, 0x06, 0x33, 0x30, 0x37, 0x32, 0x30, 0x30,
0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x44, 0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x0a, 0x70,
0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42,
0x05, 0x92, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x54, 0x79, 0x70,
0x65, 0x12, 0x45, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x70, 0x75,
0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x0b, 0x70, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x4e, 0x0a, 0x0b, 0x50, 0x75, 0x62, 0x6c,
0x69, 0x63, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x18, 0x6e, 0x65, 0x77, 0x5f, 0x70,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72,
0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, 0x0a, 0x0c,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12,
0x78, 0x0a, 0x0c, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x68, 0x61, 0x76, 0x65, 0x18,
0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x55, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x92, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x0b, 0x61, 0x6c,
0x72, 0x65, 0x61, 0x64, 0x79, 0x48, 0x61, 0x76, 0x65, 0x42, 0x9d, 0x01, 0x0a, 0x19, 0x6f, 0x72,
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65,
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68,
0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
0x32, 0x52, 0x15, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x45, 0x0a, 0x1c, 0x45, 0x78, 0x65, 0x63,
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f,
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63,
0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5,
0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x42,
0x9d, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x45,
0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x50, 0x72, 0x6f,
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79,
0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa,
0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -229,17 +295,21 @@ func file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP() []byte {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData
}
var file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = []any{
(*ExecutionProof)(nil), // 0: ethereum.eth.v1alpha1.ExecutionProof
(*ExecutionProofsByRootRequest)(nil), // 1: ethereum.eth.v1alpha1.ExecutionProofsByRootRequest
(*SignedExecutionProof)(nil), // 0: ethereum.eth.v1alpha1.SignedExecutionProof
(*ExecutionProof)(nil), // 1: ethereum.eth.v1alpha1.ExecutionProof
(*PublicInput)(nil), // 2: ethereum.eth.v1alpha1.PublicInput
(*ExecutionProofsByRootRequest)(nil), // 3: ethereum.eth.v1alpha1.ExecutionProofsByRootRequest
}
var file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
1, // 0: ethereum.eth.v1alpha1.SignedExecutionProof.message:type_name -> ethereum.eth.v1alpha1.ExecutionProof
2, // 1: ethereum.eth.v1alpha1.ExecutionProof.public_input:type_name -> ethereum.eth.v1alpha1.PublicInput
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_execution_proof_proto_init() }
@@ -253,7 +323,7 @@ func file_proto_prysm_v1alpha1_execution_proof_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},

View File

@@ -11,42 +11,26 @@ option java_outer_classname = "ExecutionProofProto";
option java_package = "org.ethereum.eth.v1alpha1";
option php_namespace = "Ethereum\\Eth\\v1alpha1";
// https://github.com/ethereum/consensus-specs/blob/master/specs/_features/eip8025/beacon-chain.md#new-signedexecutionproof
message SignedExecutionProof {
ExecutionProof message = 1;
bytes prover_pubkey = 2 [ (ethereum.eth.ext.ssz_size) = "48" ];
bytes signature = 3 [ (ethereum.eth.ext.ssz_size) = "96" ];
}
// https://github.com/ethereum/consensus-specs/blob/master/specs/_features/eip8025/beacon-chain.md#new-executionproof
message ExecutionProof {
// Which proof type (zkVM+EL combination) this proof belongs to
// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc.
uint64 proof_id = 1 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
];
bytes proof_data = 1 [ (ethereum.eth.ext.ssz_max) = "proof.size" ];
// proof_type should be a uint8
bytes proof_type = 2 [ (ethereum.eth.ext.ssz_max) = "1" ];
PublicInput public_input = 3;
}
// The slot of the beacon block this proof validates
uint64 slot = 2 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
];
// The block hash of the execution payload this proof validates
bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
// The beacon block root corresponding to the beacon block
// with the execution payload, that this proof attests to.
bytes block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
// The actual proof data
bytes proof_data = 5 [ (ethereum.eth.ext.ssz_max) = "1048576" ];
// https://github.com/ethereum/consensus-specs/blob/master/specs/_features/eip8025/beacon-chain.md#new-publicinput
message PublicInput {
bytes new_payload_request_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
message ExecutionProofsByRootRequest {
// The block root we need proofs for
bytes block_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
// The number of proofs needed
uint64 count_needed = 2;
// We already have these proof IDs, so don't send them again
repeated uint64 already_have = 3 [
(ethereum.eth.ext.ssz_max) = "8",
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
];
}
}
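
As a usage illustration (not part of this diff), here is a minimal Go sketch of building and SSZ-encoding a SignedExecutionProof with the generated types added later in this change; the placeholder byte slices stand in for real prover output, and the 48/96/32-byte lengths mirror the ssz_size annotations above.

package main

import (
	"fmt"

	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	// Sketch only: proof_data, the prover pubkey and the signature would come from a real prover.
	signed := &eth.SignedExecutionProof{
		Message: &eth.ExecutionProof{
			ProofData: make([]byte, 1024), // bounded by MAX_PROOF_SIZE (307200 bytes)
			ProofType: []byte{0},          // single-byte proof type identifier
			PublicInput: &eth.PublicInput{
				NewPayloadRequestRoot: make([]byte, 32), // hash tree root of the NewPayloadRequest
			},
		},
		ProverPubkey: make([]byte, 48), // BLS public key
		Signature:    make([]byte, 96), // BLS signature over the message
	}

	encoded, err := signed.MarshalSSZ()
	if err != nil {
		panic(err)
	}
	fmt.Printf("signed execution proof encodes to %d bytes\n", len(encoded))
}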

View File

@@ -1,300 +0,0 @@
// NOTE: This file is auto-generated by sszgen, but modified manually
// to handle the alias type ExecutionProofId which is based on uint8.
package eth
import (
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ssz "github.com/prysmaticlabs/fastssz"
)
// MarshalSSZ ssz marshals the ExecutionProof object
func (e *ExecutionProof) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ExecutionProof object to a target array
func (e *ExecutionProof) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(77)
// Field (0) 'ProofId'
dst = ssz.MarshalUint8(dst, uint8(e.ProofId))
// Field (1) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(e.Slot))
// Field (2) 'BlockHash'
if size := len(e.BlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
return
}
dst = append(dst, e.BlockHash...)
// Field (3) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
dst = append(dst, e.BlockRoot...)
// Offset (4) 'ProofData'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.ProofData)
// Field (4) 'ProofData'
if size := len(e.ProofData); size > 1048576 {
err = ssz.ErrBytesLengthFn("--.ProofData", size, 1048576)
return
}
dst = append(dst, e.ProofData...)
return
}
// UnmarshalSSZ ssz unmarshals the ExecutionProof object
func (e *ExecutionProof) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 77 {
return ssz.ErrSize
}
tail := buf
var o4 uint64
// Field (0) 'ProofId'
e.ProofId = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[0:1]))
// Field (1) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[1:9]))
// Field (2) 'BlockHash'
if cap(e.BlockHash) == 0 {
e.BlockHash = make([]byte, 0, len(buf[9:41]))
}
e.BlockHash = append(e.BlockHash, buf[9:41]...)
// Field (3) 'BlockRoot'
if cap(e.BlockRoot) == 0 {
e.BlockRoot = make([]byte, 0, len(buf[41:73]))
}
e.BlockRoot = append(e.BlockRoot, buf[41:73]...)
// Offset (4) 'ProofData'
if o4 = ssz.ReadOffset(buf[73:77]); o4 > size {
return ssz.ErrOffset
}
if o4 != 77 {
return ssz.ErrInvalidVariableOffset
}
// Field (4) 'ProofData'
{
buf = tail[o4:]
if len(buf) > 1048576 {
return ssz.ErrBytesLength
}
if cap(e.ProofData) == 0 {
e.ProofData = make([]byte, 0, len(buf))
}
e.ProofData = append(e.ProofData, buf...)
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProof object
func (e *ExecutionProof) SizeSSZ() (size int) {
size = 77
// Field (4) 'ProofData'
size += len(e.ProofData)
return
}
// HashTreeRoot ssz hashes the ExecutionProof object
func (e *ExecutionProof) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ExecutionProof object with a hasher
func (e *ExecutionProof) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'ProofId'
hh.PutUint8(uint8(e.ProofId))
// Field (1) 'Slot'
hh.PutUint64(uint64(e.Slot))
// Field (2) 'BlockHash'
if size := len(e.BlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
return
}
hh.PutBytes(e.BlockHash)
// Field (3) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
hh.PutBytes(e.BlockRoot)
// Field (4) 'ProofData'
{
elemIndx := hh.Index()
byteLen := uint64(len(e.ProofData))
if byteLen > 1048576 {
err = ssz.ErrIncorrectListSize
return
}
hh.PutBytes(e.ProofData)
hh.MerkleizeWithMixin(elemIndx, byteLen, (1048576+31)/32)
}
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ExecutionProofsByRootRequest object to a target array
func (e *ExecutionProofsByRootRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(44)
// Field (0) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
dst = append(dst, e.BlockRoot...)
// Field (1) 'CountNeeded'
dst = ssz.MarshalUint64(dst, e.CountNeeded)
// Offset (2) 'AlreadyHave'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.AlreadyHave) * 1
// Field (2) 'AlreadyHave'
if size := len(e.AlreadyHave); size > 8 {
err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
return
}
for ii := 0; ii < len(e.AlreadyHave); ii++ {
dst = ssz.MarshalUint8(dst, uint8(e.AlreadyHave[ii]))
}
return
}
// UnmarshalSSZ ssz unmarshals the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 44 {
return ssz.ErrSize
}
tail := buf
var o2 uint64
// Field (0) 'BlockRoot'
if cap(e.BlockRoot) == 0 {
e.BlockRoot = make([]byte, 0, len(buf[0:32]))
}
e.BlockRoot = append(e.BlockRoot, buf[0:32]...)
// Field (1) 'CountNeeded'
e.CountNeeded = ssz.UnmarshallUint64(buf[32:40])
// Offset (2) 'AlreadyHave'
if o2 = ssz.ReadOffset(buf[40:44]); o2 > size {
return ssz.ErrOffset
}
if o2 != 44 {
return ssz.ErrInvalidVariableOffset
}
// Field (2) 'AlreadyHave'
{
buf = tail[o2:]
num, err := ssz.DivideInt2(len(buf), 1, 8)
if err != nil {
return err
}
// `primitives.ExecutionProofId` is an alias of `uint8`,
// but we need to handle the conversion manually here
// to call `ssz.ExtendUint8`.
alreadyHave := make([]uint8, len(e.AlreadyHave))
for i, v := range e.AlreadyHave {
alreadyHave[i] = uint8(v)
}
alreadyHave = ssz.ExtendUint8(alreadyHave, num)
alreadyHave2 := make([]github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId, len(alreadyHave))
for i, v := range alreadyHave {
alreadyHave2[i] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(v)
}
e.AlreadyHave = alreadyHave2
for ii := range num {
e.AlreadyHave[ii] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[ii*1 : (ii+1)*1]))
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) SizeSSZ() (size int) {
size = 44
// Field (2) 'AlreadyHave'
size += len(e.AlreadyHave) * 1
return
}
// HashTreeRoot ssz hashes the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ExecutionProofsByRootRequest object with a hasher
func (e *ExecutionProofsByRootRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
hh.PutBytes(e.BlockRoot)
// Field (1) 'CountNeeded'
hh.PutUint64(e.CountNeeded)
// Field (2) 'AlreadyHave'
{
if size := len(e.AlreadyHave); size > 8 {
err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
return
}
subIndx := hh.Index()
for _, i := range e.AlreadyHave {
hh.AppendUint8(uint8(i))
}
hh.FillUpTo32()
numItems := uint64(len(e.AlreadyHave))
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(8, numItems, 1))
}
hh.Merkleize(indx)
return
}

View File

@@ -2366,6 +2366,354 @@ func (d *DataColumnsByRootIdentifier) HashTreeRootWith(hh *ssz.Hasher) (err erro
return
}
// MarshalSSZ ssz marshals the SignedExecutionProof object
func (s *SignedExecutionProof) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)
}
// MarshalSSZTo ssz marshals the SignedExecutionProof object to a target array
func (s *SignedExecutionProof) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(148)
// Offset (0) 'Message'
dst = ssz.WriteOffset(dst, offset)
if s.Message == nil {
s.Message = new(ExecutionProof)
}
offset += s.Message.SizeSSZ()
// Field (1) 'ProverPubkey'
if size := len(s.ProverPubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.ProverPubkey", size, 48)
return
}
dst = append(dst, s.ProverPubkey...)
// Field (2) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
dst = append(dst, s.Signature...)
// Field (0) 'Message'
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}
return
}
// UnmarshalSSZ ssz unmarshals the SignedExecutionProof object
func (s *SignedExecutionProof) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 148 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'Message'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 148 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'ProverPubkey'
if cap(s.ProverPubkey) == 0 {
s.ProverPubkey = make([]byte, 0, len(buf[4:52]))
}
s.ProverPubkey = append(s.ProverPubkey, buf[4:52]...)
// Field (2) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[52:148]))
}
s.Signature = append(s.Signature, buf[52:148]...)
// Field (0) 'Message'
{
buf = tail[o0:]
if s.Message == nil {
s.Message = new(ExecutionProof)
}
if err = s.Message.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionProof object
func (s *SignedExecutionProof) SizeSSZ() (size int) {
size = 148
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ExecutionProof)
}
size += s.Message.SizeSSZ()
return
}
// HashTreeRoot ssz hashes the SignedExecutionProof object
func (s *SignedExecutionProof) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(s)
}
// HashTreeRootWith ssz hashes the SignedExecutionProof object with a hasher
func (s *SignedExecutionProof) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Message'
if err = s.Message.HashTreeRootWith(hh); err != nil {
return
}
// Field (1) 'ProverPubkey'
if size := len(s.ProverPubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.ProverPubkey", size, 48)
return
}
hh.PutBytes(s.ProverPubkey)
// Field (2) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
hh.PutBytes(s.Signature)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the ExecutionProof object
func (e *ExecutionProof) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ExecutionProof object to a target array
func (e *ExecutionProof) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(40)
// Offset (0) 'ProofData'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.ProofData)
// Offset (1) 'ProofType'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.ProofType)
// Field (2) 'PublicInput'
if e.PublicInput == nil {
e.PublicInput = new(PublicInput)
}
if dst, err = e.PublicInput.MarshalSSZTo(dst); err != nil {
return
}
// Field (0) 'ProofData'
if size := len(e.ProofData); size > 307200 {
err = ssz.ErrBytesLengthFn("--.ProofData", size, 307200)
return
}
dst = append(dst, e.ProofData...)
// Field (1) 'ProofType'
if size := len(e.ProofType); size > 1 {
err = ssz.ErrBytesLengthFn("--.ProofType", size, 1)
return
}
dst = append(dst, e.ProofType...)
return
}
// UnmarshalSSZ ssz unmarshals the ExecutionProof object
func (e *ExecutionProof) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 40 {
return ssz.ErrSize
}
tail := buf
var o0, o1 uint64
// Offset (0) 'ProofData'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 40 {
return ssz.ErrInvalidVariableOffset
}
// Offset (1) 'ProofType'
if o1 = ssz.ReadOffset(buf[4:8]); o1 > size || o0 > o1 {
return ssz.ErrOffset
}
// Field (2) 'PublicInput'
if e.PublicInput == nil {
e.PublicInput = new(PublicInput)
}
if err = e.PublicInput.UnmarshalSSZ(buf[8:40]); err != nil {
return err
}
// Field (0) 'ProofData'
{
buf = tail[o0:o1]
if len(buf) > 307200 {
return ssz.ErrBytesLength
}
if cap(e.ProofData) == 0 {
e.ProofData = make([]byte, 0, len(buf))
}
e.ProofData = append(e.ProofData, buf...)
}
// Field (1) 'ProofType'
{
buf = tail[o1:]
if len(buf) > 1 {
return ssz.ErrBytesLength
}
if cap(e.ProofType) == 0 {
e.ProofType = make([]byte, 0, len(buf))
}
e.ProofType = append(e.ProofType, buf...)
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProof object
func (e *ExecutionProof) SizeSSZ() (size int) {
size = 40
// Field (0) 'ProofData'
size += len(e.ProofData)
// Field (1) 'ProofType'
size += len(e.ProofType)
return
}
// HashTreeRoot ssz hashes the ExecutionProof object
func (e *ExecutionProof) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ExecutionProof object with a hasher
func (e *ExecutionProof) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'ProofData'
{
elemIndx := hh.Index()
byteLen := uint64(len(e.ProofData))
if byteLen > 307200 {
err = ssz.ErrIncorrectListSize
return
}
hh.PutBytes(e.ProofData)
hh.MerkleizeWithMixin(elemIndx, byteLen, (307200+31)/32)
}
// Field (1) 'ProofType'
{
elemIndx := hh.Index()
byteLen := uint64(len(e.ProofType))
if byteLen > 1 {
err = ssz.ErrIncorrectListSize
return
}
hh.PutBytes(e.ProofType)
hh.MerkleizeWithMixin(elemIndx, byteLen, (1+31)/32)
}
// Field (2) 'PublicInput'
if err = e.PublicInput.HashTreeRootWith(hh); err != nil {
return
}
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the PublicInput object
func (p *PublicInput) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(p)
}
// MarshalSSZTo ssz marshals the PublicInput object to a target array
func (p *PublicInput) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'NewPayloadRequestRoot'
if size := len(p.NewPayloadRequestRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.NewPayloadRequestRoot", size, 32)
return
}
dst = append(dst, p.NewPayloadRequestRoot...)
return
}
// UnmarshalSSZ ssz unmarshals the PublicInput object
func (p *PublicInput) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 32 {
return ssz.ErrSize
}
// Field (0) 'NewPayloadRequestRoot'
if cap(p.NewPayloadRequestRoot) == 0 {
p.NewPayloadRequestRoot = make([]byte, 0, len(buf[0:32]))
}
p.NewPayloadRequestRoot = append(p.NewPayloadRequestRoot, buf[0:32]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the PublicInput object
func (p *PublicInput) SizeSSZ() (size int) {
size = 32
return
}
// HashTreeRoot ssz hashes the PublicInput object
func (p *PublicInput) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(p)
}
// HashTreeRootWith ssz hashes the PublicInput object with a hasher
func (p *PublicInput) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'NewPayloadRequestRoot'
if size := len(p.NewPayloadRequestRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.NewPayloadRequestRoot", size, 32)
return
}
hh.PutBytes(p.NewPayloadRequestRoot)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the StatusV2 object
func (s *StatusV2) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)

proto/prysm/v1alpha1/partial_data_columns.pb.go generated Executable file
View File

@@ -0,0 +1,262 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc v3.21.7
// source: proto/prysm/v1alpha1/partial_data_columns.proto
package eth
import (
reflect "reflect"
sync "sync"
github_com_OffchainLabs_go_bitfield "github.com/OffchainLabs/go-bitfield"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PartialDataColumnSidecar struct {
state protoimpl.MessageState `protogen:"open.v1"`
CellsPresentBitmap github_com_OffchainLabs_go_bitfield.Bitlist `protobuf:"bytes,1,opt,name=cells_present_bitmap,json=cellsPresentBitmap,proto3" json:"cells_present_bitmap,omitempty" cast-type:"github.com/OffchainLabs/go-bitfield.Bitlist" ssz-max:"512"`
PartialColumn [][]byte `protobuf:"bytes,2,rep,name=partial_column,json=partialColumn,proto3" json:"partial_column,omitempty" ssz-max:"4096" ssz-size:"?,2048"`
KzgProofs [][]byte `protobuf:"bytes,3,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"4096" ssz-size:"?,48"`
Header []*PartialDataColumnHeader `protobuf:"bytes,4,rep,name=header,proto3" json:"header,omitempty" ssz-max:"1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PartialDataColumnSidecar) Reset() {
*x = PartialDataColumnSidecar{}
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PartialDataColumnSidecar) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PartialDataColumnSidecar) ProtoMessage() {}
func (x *PartialDataColumnSidecar) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PartialDataColumnSidecar.ProtoReflect.Descriptor instead.
func (*PartialDataColumnSidecar) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescGZIP(), []int{0}
}
func (x *PartialDataColumnSidecar) GetCellsPresentBitmap() github_com_OffchainLabs_go_bitfield.Bitlist {
if x != nil {
return x.CellsPresentBitmap
}
return github_com_OffchainLabs_go_bitfield.Bitlist(nil)
}
func (x *PartialDataColumnSidecar) GetPartialColumn() [][]byte {
if x != nil {
return x.PartialColumn
}
return nil
}
func (x *PartialDataColumnSidecar) GetKzgProofs() [][]byte {
if x != nil {
return x.KzgProofs
}
return nil
}
func (x *PartialDataColumnSidecar) GetHeader() []*PartialDataColumnHeader {
if x != nil {
return x.Header
}
return nil
}
type PartialDataColumnHeader struct {
state protoimpl.MessageState `protogen:"open.v1"`
KzgCommitments [][]byte `protobuf:"bytes,1,rep,name=kzg_commitments,json=kzgCommitments,proto3" json:"kzg_commitments,omitempty" ssz-max:"4096" ssz-size:"?,48"`
SignedBlockHeader *SignedBeaconBlockHeader `protobuf:"bytes,2,opt,name=signed_block_header,json=signedBlockHeader,proto3" json:"signed_block_header,omitempty"`
KzgCommitmentsInclusionProof [][]byte `protobuf:"bytes,3,rep,name=kzg_commitments_inclusion_proof,json=kzgCommitmentsInclusionProof,proto3" json:"kzg_commitments_inclusion_proof,omitempty" ssz-size:"4,32"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PartialDataColumnHeader) Reset() {
*x = PartialDataColumnHeader{}
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PartialDataColumnHeader) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PartialDataColumnHeader) ProtoMessage() {}
func (x *PartialDataColumnHeader) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PartialDataColumnHeader.ProtoReflect.Descriptor instead.
func (*PartialDataColumnHeader) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescGZIP(), []int{1}
}
func (x *PartialDataColumnHeader) GetKzgCommitments() [][]byte {
if x != nil {
return x.KzgCommitments
}
return nil
}
func (x *PartialDataColumnHeader) GetSignedBlockHeader() *SignedBeaconBlockHeader {
if x != nil {
return x.SignedBlockHeader
}
return nil
}
func (x *PartialDataColumnHeader) GetKzgCommitmentsInclusionProof() [][]byte {
if x != nil {
return x.KzgCommitmentsInclusionProof
}
return nil
}
var File_proto_prysm_v1alpha1_partial_data_columns_proto protoreflect.FileDescriptor
var file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc = []byte{
0x0a, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x64,
0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x15, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79,
0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x62, 0x65, 0x61, 0x63,
0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0xbf, 0x02, 0x0a, 0x18, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44,
0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72,
0x12, 0x68, 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e,
0x74, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x36,
0x82, 0xb5, 0x18, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62,
0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x6c, 0x69, 0x73, 0x74, 0x92,
0xb5, 0x18, 0x03, 0x35, 0x31, 0x32, 0x52, 0x12, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x50, 0x72, 0x65,
0x73, 0x65, 0x6e, 0x74, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x39, 0x0a, 0x0e, 0x70, 0x61,
0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x02, 0x20, 0x03,
0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x06, 0x3f, 0x2c, 0x32, 0x30, 0x34, 0x38, 0x92, 0xb5,
0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x43,
0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x2f, 0x0a, 0x0a, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f,
0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f,
0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x09, 0x6b, 0x7a, 0x67,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x4d, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50,
0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x05, 0x92, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x06, 0x68,
0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x85, 0x02, 0x0a, 0x17, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61,
0x6c, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65,
0x72, 0x12, 0x39, 0x0a, 0x0f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d,
0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04,
0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0e, 0x6b, 0x7a,
0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x5e, 0x0a, 0x13,
0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61,
0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x74, 0x68, 0x65,
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65,
0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4f, 0x0a, 0x1f,
0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x5f,
0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18,
0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x34, 0x2c, 0x33, 0x32, 0x52,
0x1c, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x49,
0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x3b, 0x5a,
0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63,
0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76,
0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescOnce sync.Once
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData = file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc
)
func file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescGZIP() []byte {
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescOnce.Do(func() {
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData)
})
return file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDescData
}
var file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_prysm_v1alpha1_partial_data_columns_proto_goTypes = []any{
(*PartialDataColumnSidecar)(nil), // 0: ethereum.eth.v1alpha1.PartialDataColumnSidecar
(*PartialDataColumnHeader)(nil), // 1: ethereum.eth.v1alpha1.PartialDataColumnHeader
(*SignedBeaconBlockHeader)(nil), // 2: ethereum.eth.v1alpha1.SignedBeaconBlockHeader
}
var file_proto_prysm_v1alpha1_partial_data_columns_proto_depIdxs = []int32{
1, // 0: ethereum.eth.v1alpha1.PartialDataColumnSidecar.header:type_name -> ethereum.eth.v1alpha1.PartialDataColumnHeader
2, // 1: ethereum.eth.v1alpha1.PartialDataColumnHeader.signed_block_header:type_name -> ethereum.eth.v1alpha1.SignedBeaconBlockHeader
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_partial_data_columns_proto_init() }
func file_proto_prysm_v1alpha1_partial_data_columns_proto_init() {
if File_proto_prysm_v1alpha1_partial_data_columns_proto != nil {
return
}
file_proto_prysm_v1alpha1_beacon_core_types_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_prysm_v1alpha1_partial_data_columns_proto_goTypes,
DependencyIndexes: file_proto_prysm_v1alpha1_partial_data_columns_proto_depIdxs,
MessageInfos: file_proto_prysm_v1alpha1_partial_data_columns_proto_msgTypes,
}.Build()
File_proto_prysm_v1alpha1_partial_data_columns_proto = out.File
file_proto_prysm_v1alpha1_partial_data_columns_proto_rawDesc = nil
file_proto_prysm_v1alpha1_partial_data_columns_proto_goTypes = nil
file_proto_prysm_v1alpha1_partial_data_columns_proto_depIdxs = nil
}
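
For orientation, a hedged Go sketch constructing a PartialDataColumnSidecar from the generated types above; the cell and proof sizes follow the ssz-size annotations (2048-byte cells, 48-byte KZG proofs, a 4x32-byte inclusion proof), while the bitlist capacity and dummy values are assumptions for illustration only.

package main

import (
	"fmt"

	"github.com/OffchainLabs/go-bitfield"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	// Mark cells 0 and 7 as present; the bitlist capacity here is arbitrary for the sketch.
	present := bitfield.NewBitlist(128)
	present.SetBitAt(0, true)
	present.SetBitAt(7, true)

	sidecar := &eth.PartialDataColumnSidecar{
		CellsPresentBitmap: present,
		PartialColumn:      [][]byte{make([]byte, 2048), make([]byte, 2048)}, // one 2048-byte cell per set bit
		KzgProofs:          [][]byte{make([]byte, 48), make([]byte, 48)},     // one 48-byte KZG proof per cell
		Header: []*eth.PartialDataColumnHeader{{
			KzgCommitments:    [][]byte{make([]byte, 48)},
			SignedBlockHeader: &eth.SignedBeaconBlockHeader{},
			KzgCommitmentsInclusionProof: [][]byte{
				make([]byte, 32), make([]byte, 32), make([]byte, 32), make([]byte, 32),
			},
		}},
	}
	fmt.Printf("partial sidecar carries %d cells\n", len(sidecar.PartialColumn))
}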

View File

@@ -49,6 +49,7 @@ mainnet = {
"execution_payload_availability.size": "1024", # Gloas: SLOTS_PER_HISTORICAL_ROOT
"builder_pending_payments.size": "64", # Gloas: vector length (2 * SLOTS_PER_EPOCH)
"builder_registry_limit": "1099511627776", # Gloas: BUILDER_REGISTRY_LIMIT (same for mainnet/minimal)
"proof.size": "307200", # EIP-8025: MAX_PROOF_SIZE
}
minimal = {
@@ -94,6 +95,7 @@ minimal = {
"execution_payload_availability.size": "8", # Gloas: SLOTS_PER_HISTORICAL_ROOT
"builder_pending_payments.size": "16", # Gloas: vector length (2 * SLOTS_PER_EPOCH)
"builder_registry_limit": "1099511627776", # Gloas: BUILDER_REGISTRY_LIMIT (same for mainnet/minimal)
"proof.size": "307200", # EIP-8025: MAX_PROOF_SIZE
}
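
These two entries resolve the `proof.size` placeholder used by the ssz_max annotation on proof_data in execution_proof.proto, so the 307200-byte bound seen in the generated SSZ code is simply this constant; a trivial sketch of the resulting limit (the names below are illustrative, not from the diff):

package eth

// EIP-8025 MAX_PROOF_SIZE; identical for the mainnet and minimal presets above.
const maxProofSize = 307200

// withinProofSizeLimit mirrors the length bound emitted in the generated SSZ code for proof_data.
func withinProofSizeLimit(proofData []byte) bool {
	return len(proofData) <= maxProofSize
}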
###### Rules definitions #######