Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: 11 commits (inconsiste... vs fix-bid-ve...)

| SHA1 |
|---|
| 0e0b6d49ae |
| b63cd49834 |
| 447a3d8add |
| b00aaef202 |
| 0f6070a866 |
| 2a09c9f681 |
| 9c6ccd67c1 |
| 36e5d4926b |
| 17b7d3ff12 |
| fb2bceece8 |
| d012ab653c |
@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
-    go_version = "1.24.5",
+    go_version = "1.24.6",
    nogo = "@//:nogo",
)
@@ -7,10 +7,15 @@ type currentlySyncingBlock struct {
    roots map[[32]byte]struct{}
}

-func (b *currentlySyncingBlock) set(root [32]byte) {
+func (b *currentlySyncingBlock) set(root [32]byte) error {
    b.Lock()
    defer b.Unlock()
+   _, ok := b.roots[root]
+   if ok {
+       return errBlockBeingSynced
+   }
    b.roots[root] = struct{}{}
+   return nil
}

func (b *currentlySyncingBlock) unset(root [32]byte) {
@@ -44,6 +44,8 @@ var (
    errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
    // errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
    errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
+   // errBlockBeingSynced is returned when a block is being synced.
+   errBlockBeingSynced = errors.New("block is being synced")
)

// An invalid block is the block that fails state transition based on the core protocol rules.
@@ -84,7 +84,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
    }

    receivedTime := time.Now()
-   s.blockBeingSynced.set(blockRoot)
+   err := s.blockBeingSynced.set(blockRoot)
+   if errors.Is(err, errBlockBeingSynced) {
+       log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring block currently being synced")
+       return nil
+   }
    defer s.blockBeingSynced.unset(blockRoot)

    blockCopy, err := block.Copy()
@@ -311,7 +311,10 @@ func TestService_HasBlock(t *testing.T) {
    r, err = b.Block.HashTreeRoot()
    require.NoError(t, err)
    require.Equal(t, true, s.HasBlock(t.Context(), r))
-   s.blockBeingSynced.set(r)
+   err = s.blockBeingSynced.set(r)
+   require.NoError(t, err)
+   err = s.blockBeingSynced.set(r)
+   require.ErrorIs(t, err, errBlockBeingSynced)
    require.Equal(t, false, s.HasBlock(t.Context(), r))
}
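The set/unset pair above is a small in-flight guard: the first caller to register a root proceeds, while a concurrent caller for the same root receives errBlockBeingSynced and backs off. A minimal standalone sketch of the same pattern (illustrative only, independent of Prysm's types):

package main

import (
    "errors"
    "fmt"
    "sync"
)

var errAlreadySyncing = errors.New("block is being synced")

// inflight tracks roots that are currently being processed so a second,
// concurrent attempt to sync the same root can be skipped.
type inflight struct {
    sync.Mutex
    roots map[[32]byte]struct{}
}

func (f *inflight) set(root [32]byte) error {
    f.Lock()
    defer f.Unlock()
    if _, ok := f.roots[root]; ok {
        return errAlreadySyncing
    }
    f.roots[root] = struct{}{}
    return nil
}

func (f *inflight) unset(root [32]byte) {
    f.Lock()
    defer f.Unlock()
    delete(f.roots, root)
}

func main() {
    f := &inflight{roots: make(map[[32]byte]struct{})}
    var root [32]byte
    fmt.Println(f.set(root) == nil)                        // true: first caller proceeds
    fmt.Println(errors.Is(f.set(root), errAlreadySyncing)) // true: duplicate is rejected
    f.unset(root)
    fmt.Println(f.set(root) == nil)                        // true again after unset
}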
@@ -10,6 +10,11 @@ type ReadOnlyDatabase = iface.ReadOnlyDatabase
// about head info. For head info, use github.com/prysmaticlabs/prysm/blockchain.HeadFetcher.
type NoHeadAccessDatabase = iface.NoHeadAccessDatabase

+// ReadOnlyDatabaseWithSeqNum exposes Prysm's Ethereum data backend for read access only, no information about
+// head info, but with read/write access to the p2p metadata sequence number.
+// This is used for the p2p service.
+type ReadOnlyDatabaseWithSeqNum = iface.ReadOnlyDatabaseWithSeqNum
+
// HeadAccessDatabase exposes Prysm's Ethereum backend for read/write access with information about
// chain head information. This interface should be used sparingly as the HeadFetcher is the source
// of truth around chain head information while this interface serves as persistent storage for the
@@ -64,6 +64,18 @@ type ReadOnlyDatabase interface {
    // Origin checkpoint sync support
    OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
    BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
+
+   // P2P Metadata operations.
+   MetadataSeqNum(ctx context.Context) (uint64, error)
}

+// ReadOnlyDatabaseWithSeqNum defines a struct which has read access to database methods
+// and also has read/write access to the p2p metadata sequence number.
+// Only used for the p2p service.
+type ReadOnlyDatabaseWithSeqNum interface {
+   ReadOnlyDatabase
+
+   SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
+}
+
// NoHeadAccessDatabase defines a struct without access to chain head data.

@@ -106,6 +118,9 @@ type NoHeadAccessDatabase interface {
    // Custody operations.
    UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
    UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
+
+   // P2P Metadata operations.
+   SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
}

// HeadAccessDatabase defines a struct with access to reading chain head data.
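Because the kv store gains both MetadataSeqNum and SaveMetadataSeqNum (see the new beacon-chain/db/kv/p2p.go further down), its *Store type should satisfy the new narrow interface. A hedged, illustrative compile-time assertion one could add; the exact iface/kv import paths and the absence of an import cycle are assumptions, not part of this diff:

package db_test

import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
)

// Compilation fails if *kv.Store ever stops implementing the read-only plus
// sequence-number interface that the p2p service now depends on.
var _ iface.ReadOnlyDatabaseWithSeqNum = (*kv.Store)(nil)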
@@ -24,6 +24,7 @@ go_library(
        "migration_block_slot_index.go",
        "migration_finalized_parent.go",
        "migration_state_validators.go",
+       "p2p.go",
        "schema.go",
        "state.go",
        "state_summary.go",

@@ -96,6 +97,7 @@ go_test(
        "migration_archived_index_test.go",
        "migration_block_slot_index_test.go",
        "migration_state_validators_test.go",
+       "p2p_test.go",
        "state_summary_test.go",
        "state_test.go",
        "utils_test.go",
@@ -19,6 +19,9 @@ var ErrNotFoundGenesisBlockRoot = errors.Wrap(ErrNotFound, "OriginGenesisRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")

+// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
+var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")
+
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
beacon-chain/db/kv/p2p.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package kv

import (
    "context"

    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    bolt "go.etcd.io/bbolt"
)

// MetadataSeqNum retrieves the p2p metadata sequence number from the database.
// It returns 0 and ErrNotFoundMetadataSeqNum if the key does not exist.
func (s *Store) MetadataSeqNum(ctx context.Context) (uint64, error) {
    _, span := trace.StartSpan(ctx, "BeaconDB.MetadataSeqNum")
    defer span.End()

    var seqNum uint64
    err := s.db.View(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(chainMetadataBucket)
        val := bkt.Get(metadataSequenceNumberKey)
        if val == nil {
            return ErrNotFoundMetadataSeqNum
        }

        seqNum = bytesutil.BytesToUint64BigEndian(val)
        return nil
    })

    return seqNum, err
}

// SaveMetadataSeqNum saves the p2p metadata sequence number to the database.
func (s *Store) SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error {
    _, span := trace.StartSpan(ctx, "BeaconDB.SaveMetadataSeqNum")
    defer span.End()

    return s.db.Update(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(chainMetadataBucket)
        val := bytesutil.Uint64ToBytesBigEndian(seqNum)
        return bkt.Put(metadataSequenceNumberKey, val)
    })
}
beacon-chain/db/kv/p2p_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package kv

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestStore_MetadataSeqNum(t *testing.T) {
    ctx := t.Context()
    db := setupDB(t)

    seqNum, err := db.MetadataSeqNum(ctx)
    require.ErrorIs(t, err, ErrNotFoundMetadataSeqNum)
    assert.Equal(t, uint64(0), seqNum)

    initialSeqNum := uint64(42)
    err = db.SaveMetadataSeqNum(ctx, initialSeqNum)
    require.NoError(t, err)

    retrievedSeqNum, err := db.MetadataSeqNum(ctx)
    require.NoError(t, err)
    assert.Equal(t, initialSeqNum, retrievedSeqNum)

    updatedSeqNum := uint64(43)
    err = db.SaveMetadataSeqNum(ctx, updatedSeqNum)
    require.NoError(t, err)

    retrievedSeqNum, err = db.MetadataSeqNum(ctx)
    require.NoError(t, err)
    assert.Equal(t, updatedSeqNum, retrievedSeqNum)
}
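Since a missing key surfaces as the ErrNotFoundMetadataSeqNum sentinel rather than a silent zero, callers can treat "never saved" as a default of 0 while still failing on real read errors; metaDataFromDB in beacon-chain/p2p/utils.go further down in this diff does exactly that. A minimal sketch of the pattern (readSeqNumOrZero is a hypothetical helper, not part of the change; it assumes the standard library errors package):

// readSeqNumOrZero falls back to 0 when the sequence number was never
// persisted, but propagates any other database error.
func readSeqNumOrZero(ctx context.Context, s *Store) (uint64, error) {
    seqNum, err := s.MetadataSeqNum(ctx)
    if err != nil && !errors.Is(err, ErrNotFoundMetadataSeqNum) {
        return 0, err
    }
    return seqNum, nil // seqNum is 0 when the key was absent
}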
@@ -42,6 +42,7 @@ var (
    finalizedCheckpointKey = []byte("finalized-checkpoint")
    powchainDataKey = []byte("powchain-data")
    lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
+   metadataSequenceNumberKey = []byte("metadata-seq-number")

    // Below keys are used to identify objects are to be fork compatible.
    // Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -253,6 +253,10 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
        if err := s.processElectra(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
            return err
        }
+   case *ethpb.BeaconStateFulu:
+       if err := s.processFulu(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
+           return err
+       }
    default:
        return errors.New("invalid state type")
    }

@@ -368,6 +372,24 @@ func (s *Store) processElectra(ctx context.Context, pbState *ethpb.BeaconStateEl
    return nil
}

+func (s *Store) processFulu(ctx context.Context, pbState *ethpb.BeaconStateFulu, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
+   valEntries := pbState.Validators
+   pbState.Validators = make([]*ethpb.Validator, 0)
+   rawObj, err := pbState.MarshalSSZ()
+   if err != nil {
+       return err
+   }
+   encodedState := snappy.Encode(nil, append(fuluKey, rawObj...))
+   if err := bucket.Put(rootHash, encodedState); err != nil {
+       return err
+   }
+   pbState.Validators = valEntries
+   if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
+       return err
+   }
+   return nil
+}
+
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
    valBkt := tx.Bucket(stateValidatorsBucket)
    for hashStr, validatorEntry := range validatorsEntries {
@@ -702,7 +702,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
        HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
        PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
        StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
-       MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
        QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
        TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
        UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
@@ -49,6 +49,7 @@ go_library(
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/db:go_default_library",
+       "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
        "//beacon-chain/p2p/peers:go_default_library",
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
@@ -265,7 +265,8 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
        s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
        bitV := bitfield.NewBitvector64()
        bitV.SetBitAt(subnet, true)
-       s.updateSubnetRecordWithMetadata(bitV)
+       err := s.updateSubnetRecordWithMetadata(bitV)
+       require.NoError(t, err)
    }
    assert.NoError(t, err, "Could not start discovery for node")
    listeners = append(listeners, listener)
@@ -27,7 +27,6 @@ type Config struct {
    PrivateKey string
    DataDir string
    DiscoveryDir string
-   MetaDataDir string
    QUICPort uint
    TCPPort uint
    UDPPort uint

@@ -37,7 +36,7 @@ type Config struct {
    AllowListCIDR string
    DenyListCIDR []string
    StateNotifier statefeed.Notifier
-   DB db.ReadOnlyDatabase
+   DB db.ReadOnlyDatabaseWithSeqNum
    ClockWaiter startup.ClockWaiter
}
@@ -211,7 +211,10 @@ func (s *Service) RefreshPersistentSubnets() {
        }

        // Some data changed. Update the record and the metadata.
-       s.updateSubnetRecordWithMetadata(bitV)
+       // Not returning early here because the error comes from saving the metadata sequence number.
+       if err := s.updateSubnetRecordWithMetadata(bitV); err != nil {
+           log.WithError(err).Error("Failed to update subnet record with metadata")
+       }

        // Ping all peers.
        s.pingPeersAndLogEnr()

@@ -269,7 +272,10 @@ func (s *Service) RefreshPersistentSubnets() {
        }

        // Some data have changed, update our record and metadata.
-       s.updateSubnetRecordWithMetadataV2(bitV, bitS, custodyGroupCount)
+       // Not returning early here because the error comes from saving the metadata sequence number.
+       if err := s.updateSubnetRecordWithMetadataV2(bitV, bitS, custodyGroupCount); err != nil {
+           log.WithError(err).Error("Failed to update subnet record with metadata")
+       }

        // Ping all peers to inform them of new metadata
        s.pingPeersAndLogEnr()

@@ -289,7 +295,10 @@ func (s *Service) RefreshPersistentSubnets() {
        }

        // Some data changed. Update the record and the metadata.
-       s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodyGroupCount)
+       // Not returning early here because the error comes from saving the metadata sequence number.
+       if err := s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodyGroupCount); err != nil {
+           log.WithError(err).Error("Failed to update subnet record with metadata")
+       }

        // Ping all peers.
        s.pingPeersAndLogEnr()
@@ -16,6 +16,7 @@ import (

    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
+   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"

@@ -361,6 +362,8 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
    cfg.StaticPeers = staticPeers
    cfg.StateNotifier = &mock.MockStateNotifier{}
    cfg.NoDiscovery = true
+   cfg.DB = testDB.SetupDB(t)
+
    s, err := NewService(t.Context(), cfg)
    require.NoError(t, err)

@@ -828,7 +831,7 @@ func TestRefreshPersistentSubnets(t *testing.T) {
                actualPingCount++
                return nil
            },
-           cfg: &Config{UDPPort: 2000},
+           cfg: &Config{UDPPort: 2000, DB: testDB.SetupDB(t)},
            peers: p2p.Peers(),
            genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
            genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
@@ -10,6 +10,7 @@ import (

    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
+   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"

@@ -31,12 +32,15 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
    ipAddr, pkey := createAddrAndPrivKey(t)
    genesisTime := time.Now()
    genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
+   db := testDB.SetupDB(t)
+
    s := &Service{
        cfg: &Config{
            UDPPort: uint(port),
            StateNotifier: &mock.MockStateNotifier{},
            PingInterval: testPingInterval,
            DisableLivenessCheck: true,
+           DB: db,
        },
        genesisTime: genesisTime,
        genesisValidatorsRoot: genesisValidatorsRoot,

@@ -57,6 +61,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
        StateNotifier: &mock.MockStateNotifier{},
        PingInterval: testPingInterval,
        DisableLivenessCheck: true,
+       DB: db,
    }

    var listeners []*listenerWrapper

@@ -132,8 +137,10 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
    ipAddr, pkey := createAddrAndPrivKey(t)
    genesisTime := time.Now()
    genesisValidatorsRoot := make([]byte, 32)
+   db := testDB.SetupDB(t)
+
    s := &Service{
-       cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
+       cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true, DB: db},
        genesisTime: genesisTime,
        genesisValidatorsRoot: genesisValidatorsRoot,
        custodyInfo: &custodyInfo{},

@@ -152,6 +159,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
        UDPPort: uint(port),
        PingInterval: testPingInterval,
        DisableLivenessCheck: true,
+       DB: db,
    }

    var listeners []*listenerWrapper
@@ -7,6 +7,7 @@ import (
    "testing"
    "time"

+   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/config/params"

@@ -343,7 +344,7 @@ func TestService_MonitorsStateForkUpdates(t *testing.T) {
    ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
    defer cancel()
    cs := startup.NewClockSynchronizer()
-   s, err := NewService(ctx, &Config{ClockWaiter: cs})
+   s, err := NewService(ctx, &Config{ClockWaiter: cs, DB: testDB.SetupDB(t)})
    require.NoError(t, err)

    require.Equal(t, false, s.isInitialized())
@@ -8,6 +8,7 @@ import (
    "time"

    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
+   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
    testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"

@@ -21,6 +22,7 @@ func TestService_PublishToTopicConcurrentMapWrite(t *testing.T) {
    s, err := NewService(t.Context(), &Config{
        StateNotifier: &mock.MockStateNotifier{},
        ClockWaiter: cs,
+       DB: testDB.SetupDB(t),
    })
    require.NoError(t, err)
    ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
@@ -112,7 +112,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
        return nil, errors.Wrapf(err, "failed to generate p2p private key")
    }

-   metaData, err := metaDataFromConfig(cfg)
+   metaData, err := metaDataFromDB(ctx, cfg.DB)
    if err != nil {
        log.WithError(err).Error("Failed to create peer metadata")
        return nil, err
@@ -9,6 +9,7 @@ import (
    "time"

    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
+   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"

@@ -85,7 +86,7 @@ func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP)

func TestService_Stop_SetsStartedToFalse(t *testing.T) {
    params.SetupTestConfigCleanup(t)
-   s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}})
+   s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}, DB: testDB.SetupDB(t)})
    require.NoError(t, err)
    s.started = true
    s.dv5Listener = &mockListener{}

@@ -95,7 +96,7 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {

func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) {
    params.SetupTestConfigCleanup(t)
-   s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}})
+   s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}, DB: testDB.SetupDB(t)})
    require.NoError(t, err)
    assert.NoError(t, s.Stop())
}

@@ -110,6 +111,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
        TCPPort: 3000,
        QUICPort: 3000,
        ClockWaiter: cs,
+       DB: testDB.SetupDB(t),
    }
    s, err := NewService(t.Context(), cfg)
    require.NoError(t, err)

@@ -159,6 +161,7 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
        StateNotifier: &mock.MockStateNotifier{},
        NoDiscovery: true, // <-- no s.dv5Listener is created
        ClockWaiter: cs,
+       DB: testDB.SetupDB(t),
    }
    s, err := NewService(t.Context(), cfg)
    require.NoError(t, err)

@@ -194,6 +197,7 @@ func TestListenForNewNodes(t *testing.T) {
    )

    params.SetupTestConfigCleanup(t)
+   db := testDB.SetupDB(t)

    // Setup bootnode.
    cfg := &Config{

@@ -201,6 +205,7 @@ func TestListenForNewNodes(t *testing.T) {
        PingInterval: testPingInterval,
        DisableLivenessCheck: true,
        UDPPort: port,
+       DB: db,
    }

    _, pkey := createAddrAndPrivKey(t)

@@ -246,6 +251,7 @@ func TestListenForNewNodes(t *testing.T) {
        ClockWaiter: cs,
        UDPPort: port + i,
        TCPPort: port + i,
+       DB: db,
    }

    h, pkey, ipAddr := createHost(t, port+i)

@@ -343,7 +349,7 @@ func TestService_JoinLeaveTopic(t *testing.T) {
    ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
    defer cancel()
    gs := startup.NewClockSynchronizer()
-   s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs})
+   s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs, DB: testDB.SetupDB(t)})
    require.NoError(t, err)

    go s.awaitStateInitialized()
@@ -57,6 +57,8 @@ const blobSubnetLockerVal = 110
// chosen more than sync, attestation and blob subnet (6) combined.
const dataColumnSubnetVal = 150

+const errSavingSequenceNumber = "saving sequence number after updating subnets: %w"
+
// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
func (s *Service) nodeFilter(topic string, indices map[uint64]int) (func(node *enode.Node) (map[uint64]bool, error), error) {
    switch {

@@ -377,13 +379,18 @@ func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
// with a new value for a bitfield of subnets tracked. It also updates
// the node's metadata by increasing the sequence number and the
// subnets tracked by the node.
-func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) {
+func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) error {
    entry := enr.WithEntry(attSubnetEnrKey, &bitV)
    s.dv5Listener.LocalNode().Set(entry)
    s.metaData = wrapper.WrappedMetadataV0(&pb.MetaDataV0{
        SeqNumber: s.metaData.SequenceNumber() + 1,
        Attnets: bitV,
    })
+
+   if err := s.saveSequenceNumberIfNeeded(); err != nil {
+       return fmt.Errorf(errSavingSequenceNumber, err)
+   }
+   return nil
}

// Updates the service's discv5 listener record's attestation subnet

@@ -394,7 +401,7 @@ func (s *Service) updateSubnetRecordWithMetadataV2(
    bitVAtt bitfield.Bitvector64,
    bitVSync bitfield.Bitvector4,
    custodyGroupCount uint64,
-) {
+) error {
    entry := enr.WithEntry(attSubnetEnrKey, &bitVAtt)
    subEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync)

@@ -412,6 +419,11 @@ func (s *Service) updateSubnetRecordWithMetadataV2(
        Attnets: bitVAtt,
        Syncnets: bitVSync,
    })
+
+   if err := s.saveSequenceNumberIfNeeded(); err != nil {
+       return fmt.Errorf(errSavingSequenceNumber, err)
+   }
+   return nil
}

// updateSubnetRecordWithMetadataV3 updates:

@@ -423,7 +435,7 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
    bitVAtt bitfield.Bitvector64,
    bitVSync bitfield.Bitvector4,
    custodyGroupCount uint64,
-) {
+) error {
    attSubnetsEntry := enr.WithEntry(attSubnetEnrKey, &bitVAtt)
    syncSubnetsEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync)
    custodyGroupCountEntry := enr.WithEntry(custodyGroupCountEnrKey, custodyGroupCount)

@@ -439,6 +451,23 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
        Syncnets: bitVSync,
        CustodyGroupCount: custodyGroupCount,
    })
+
+   if err := s.saveSequenceNumberIfNeeded(); err != nil {
+       return fmt.Errorf(errSavingSequenceNumber, err)
+   }
+   return nil
}

+// saveSequenceNumberIfNeeded saves the sequence number in DB if either of the following conditions is met:
+// - the static peer ID flag is set
+// - the fulu epoch is set
+func (s *Service) saveSequenceNumberIfNeeded() error {
+   // Short-circuit if we don't need to save the sequence number.
+   if !(s.cfg.StaticPeerID || params.FuluEnabled()) {
+       return nil
+   }
+
+   return s.cfg.DB.SaveMetadataSeqNum(s.ctx, s.metaData.SequenceNumber())
+}
+
func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
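Failures from saveSequenceNumberIfNeeded are wrapped with fmt.Errorf and the %w verb via errSavingSequenceNumber, so even though RefreshPersistentSubnets only logs the error, a caller that does care can still match the underlying database failure. A small illustrative check (errDBWrite is a hypothetical stand-in error, not part of this diff):

var errDBWrite = errors.New("db write failed") // stand-in for whatever SaveMetadataSeqNum returns

wrapped := fmt.Errorf(errSavingSequenceNumber, errDBWrite)
fmt.Println(errors.Is(wrapped, errDBWrite)) // true: %w keeps the cause inspectable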
@@ -10,6 +10,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
+   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v6/config/params"
    ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"

@@ -93,6 +94,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
    // Create 3 nodes, each subscribed to a different subnet.
    // Each node is connected to the bootstrap node.
    services := make([]*Service, 0, subnetCount)
+   db := testDB.SetupDB(t)

    for i := uint64(1); i <= subnetCount; i++ {
        service, err := NewService(ctx, &Config{

@@ -103,6 +105,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
            QUICPort: uint(3000 + i),
            PingInterval: testPingInterval,
            DisableLivenessCheck: true,
+           DB: db,
        })

        require.NoError(t, err)

@@ -152,6 +155,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
        UDPPort: 2010,
        TCPPort: 3010,
        QUICPort: 3010,
+       DB: db,
    }

    service, err := NewService(ctx, cfg)
@@ -2,6 +2,7 @@ package p2p

import (
    "bytes"
+   "context"
    "crypto/ecdsa"
    "crypto/rand"
    "encoding/base64"

@@ -12,6 +13,8 @@ import (
    "path"
    "time"

+   "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
+   "github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
    ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"

@@ -27,11 +30,9 @@ import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/sirupsen/logrus"
-   "google.golang.org/protobuf/proto"
)

const keyPath = "network-keys"
-const metaDataPath = "metaData"

const dialTimeout = 1 * time.Second

@@ -121,45 +122,24 @@ func privKeyFromFile(path string) (*ecdsa.PrivateKey, error) {
    return ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledKey)
}

-// Retrieves node p2p metadata from a set of configuration values
-// from the p2p service.
-// TODO: Figure out how to do a v1/v2 check.
-func metaDataFromConfig(cfg *Config) (metadata.Metadata, error) {
-   defaultKeyPath := path.Join(cfg.DataDir, metaDataPath)
-   metaDataPath := cfg.MetaDataDir
+// Retrieves metadata sequence number from DB and returns a Metadata(V0) object
+func metaDataFromDB(ctx context.Context, db db.ReadOnlyDatabaseWithSeqNum) (metadata.Metadata, error) {
+   seqNum, err := db.MetadataSeqNum(ctx)
+   // We can proceed if error is `kv.ErrNotFoundMetadataSeqNum` by using default value of 0 for sequence number.
+   if err != nil && !errors.Is(err, kv.ErrNotFoundMetadataSeqNum) {
        return nil, err
    }

-   _, err := os.Stat(defaultKeyPath)
-   defaultMetadataExist := !os.IsNotExist(err)
-   if err != nil && defaultMetadataExist {
-       return nil, err
-   }
-   if metaDataPath == "" && !defaultMetadataExist {
-       metaData := &pb.MetaDataV0{
-           SeqNumber: 0,
-           Attnets: bitfield.NewBitvector64(),
-       }
-       dst, err := proto.Marshal(metaData)
-       if err != nil {
-           return nil, err
-       }
-       if err := file.WriteFile(defaultKeyPath, dst); err != nil {
-           return nil, err
-       }
-       return wrapper.WrappedMetadataV0(metaData), nil
-   }
-   if defaultMetadataExist && metaDataPath == "" {
-       metaDataPath = defaultKeyPath
-   }
-   src, err := os.ReadFile(metaDataPath) // #nosec G304
-   if err != nil {
-       log.WithError(err).Error("Error reading metadata from file")
-       return nil, err
-   }
-   metaData := &pb.MetaDataV0{}
-   if err := proto.Unmarshal(src, metaData); err != nil {
-       return nil, err
-   }
-   return wrapper.WrappedMetadataV0(metaData), nil
+   // NOTE: Load V0 metadata because:
+   // - As the p2p service accesses metadata as an interface, and all versions implement the interface,
+   //   there is no error in calling the fields of higher versions. It just returns the default value.
+   // - This approach allows us to avoid unnecessary code changes when the metadata version bumps.
+   // - `RefreshPersistentSubnets` runs twice every slot and it manages updating and saving metadata.
+   metadata := wrapper.WrappedMetadataV0(&pb.MetaDataV0{
+       SeqNumber: seqNum,
+       Attnets: bitfield.NewBitvector64(),
+   })
+   return metadata, nil
}

// Attempt to dial an address to verify its connectivity
@@ -1,8 +1,10 @@
package p2p

import (
+   "context"
    "testing"

+   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"

@@ -80,3 +82,27 @@ func TestConvertPeerIDToNodeID(t *testing.T) {
    actualNodeIDStr := actualNodeID.String()
    require.Equal(t, expectedNodeIDStr, actualNodeIDStr)
}

func TestMetadataFromDB(t *testing.T) {
    params.SetupTestConfigCleanup(t)

    t.Run("Metadata from DB", func(t *testing.T) {
        beaconDB := testDB.SetupDB(t)
        err := beaconDB.SaveMetadataSeqNum(t.Context(), 42)
        require.NoError(t, err)

        metaData, err := metaDataFromDB(context.Background(), beaconDB)
        require.NoError(t, err)

        assert.Equal(t, uint64(42), metaData.SequenceNumber())
    })

    t.Run("Use default sequence number (=0) as Metadata not found on DB", func(t *testing.T) {
        beaconDB := testDB.SetupDB(t)

        metaData, err := metaDataFromDB(context.Background(), beaconDB)
        require.NoError(t, err)

        assert.Equal(t, uint64(0), metaData.SequenceNumber())
    })
}
@@ -1029,8 +1029,8 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
        httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
        return
    }
-   // Advance state with empty transitions up to the requested epoch start slot.
-   if st.Slot() < epochStartSlot {
+   // Advance state with empty transitions up to the requested epoch start slot for pre fulu state only. Fulu state utilizes proposer look ahead field.
+   if st.Slot() < epochStartSlot && st.Version() != version.Fulu {
        headRoot, err := s.HeadFetcher.HeadRoot(ctx)
        if err != nil {
            httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)
@@ -2645,6 +2645,78 @@ func TestGetProposerDuties(t *testing.T) {
    })
}

func TestGetProposerDuties_FuluState(t *testing.T) {
    helpers.ClearCache()

    // Create a Fulu state with slot 0 (before epoch 1 start slot which is 32)
    fuluState, err := util.NewBeaconStateFulu()
    require.NoError(t, err)
    require.NoError(t, fuluState.SetSlot(0)) // Set to slot 0

    // Create some validators for the test
    depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
    deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
    require.NoError(t, err)

    validators := make([]*ethpbalpha.Validator, len(deposits))
    for i, deposit := range deposits {
        validators[i] = &ethpbalpha.Validator{
            PublicKey: deposit.Data.PublicKey,
            ActivationEpoch: 0,
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
            WithdrawalCredentials: make([]byte, 32),
        }
    }
    require.NoError(t, fuluState.SetValidators(validators))

    // Set up block roots
    genesis := util.NewBeaconBlock()
    genesisRoot, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err)
    roots := make([][]byte, fieldparams.BlockRootsLength)
    roots[0] = genesisRoot[:]
    require.NoError(t, fuluState.SetBlockRoots(roots))

    chainSlot := primitives.Slot(0)
    chain := &mockChain.ChainService{
        State: fuluState, Root: genesisRoot[:], Slot: &chainSlot,
    }

    db := dbutil.SetupDB(t)
    require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))

    s := &Server{
        Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: fuluState}},
        HeadFetcher: chain,
        TimeFetcher: chain,
        OptimisticModeFetcher: chain,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        PayloadIDCache: cache.NewPayloadIDCache(),
        TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
        BeaconDB: db,
    }

    // Request epoch 1 duties, which should require advancing from slot 0 to slot 32
    // But for Fulu state, this advancement should be skipped
    request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
    request.SetPathValue("epoch", "1")
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}

    s.GetProposerDuties(writer, request)
    assert.Equal(t, http.StatusOK, writer.Code)

    // Verify the state was not advanced - it should still be at slot 0
    // This is the key assertion for the regression test
    assert.Equal(t, primitives.Slot(0), fuluState.Slot(), "Fulu state should not have been advanced")

    resp := &structs.GetProposerDutiesResponse{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))

    // Should still return proposer duties despite not advancing the state
    assert.Equal(t, true, len(resp.Data) > 0, "Should return proposer duties even without state advancement")
}

func TestGetSyncCommitteeDuties(t *testing.T) {
    helpers.ClearCache()
    params.SetupTestConfigCleanup(t)
@@ -81,6 +81,7 @@ go_library(
        "//crypto/rand:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz:go_default_library",
+       "//io/file:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
@@ -2,21 +2,37 @@ package validator

import (
    "context"
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "runtime/pprof"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/features"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/io/file"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

const (
    // validatorLookupThreshold determines when to use full assignment map vs cached linear search.
    // For requests with fewer validators, we use cached linear search to avoid the overhead
    // of building a complete assignment map for all validators in the epoch.
    validatorLookupThreshold = 3000
)

// GetDutiesV2 returns the duties assigned to a list of validators specified
// in the request object.
//

@@ -25,7 +41,19 @@ func (vs *Server) GetDutiesV2(ctx context.Context, req *ethpb.DutiesRequest) (*e
    if vs.SyncChecker.Syncing() {
        return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
    }
-   return vs.dutiesv2(ctx, req)
+
+   start := time.Now()
+
+   // Start background profiling that will capture if this takes too long
+   var profileCancel func()
+   if features.Get().SlowDutiesProfile {
+       profileCancel = vs.startSlowDutiesProfiler(start, len(req.PublicKeys), req.Epoch)
+       defer profileCancel()
+   }
+
+   resp, err := vs.dutiesv2(ctx, req)
+
+   return resp, err
}

// Compute the validator duties from the head state's corresponding epoch
@@ -53,8 +81,7 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
    span.SetAttributes(trace.Int64Attribute("num_pubkeys", int64(len(req.PublicKeys))))
    defer span.End()

-   // Load committee and proposer metadata
-   meta, err := loadDutiesMetadata(ctx, s, req.Epoch)
+   meta, err := loadDutiesMetadata(ctx, s, req.Epoch, len(req.PublicKeys))
    if err != nil {
        return nil, err
    }

@@ -68,24 +95,22 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
        return nil, status.Errorf(codes.Aborted, "Could not continue fetching assignments: %v", ctx.Err())
    }

-   idx, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
+   validatorIndex, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
    if !ok {
        // Unknown validator: still append placeholder duty with UNKNOWN_STATUS
-       validatorAssignments = append(validatorAssignments, &ethpb.DutiesV2Response_Duty{
+       unknownDuty := &ethpb.DutiesV2Response_Duty{
            PublicKey: pubKey,
            Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
-       })
-       nextValidatorAssignments = append(nextValidatorAssignments, &ethpb.DutiesV2Response_Duty{
-           PublicKey: pubKey,
-           Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
-       })
+       }
+       validatorAssignments = append(validatorAssignments, unknownDuty)
+       nextValidatorAssignments = append(nextValidatorAssignments, unknownDuty)
        continue
    }

-   meta.current.liteAssignment = helpers.AssignmentForValidator(meta.current.committeesBySlot, meta.current.startSlot, idx)
-   meta.next.liteAssignment = helpers.AssignmentForValidator(meta.next.committeesBySlot, meta.next.startSlot, idx)
+   meta.current.liteAssignment = vs.getValidatorAssignment(meta.current, validatorIndex)
+   meta.next.liteAssignment = vs.getValidatorAssignment(meta.next, validatorIndex)

-   assignment, nextAssignment, err := vs.buildValidatorDuty(pubKey, idx, s, req.Epoch, meta)
+   assignment, nextAssignment, err := vs.buildValidatorDuty(pubKey, validatorIndex, s, req.Epoch, meta)
    if err != nil {
        return nil, err
    }

@@ -143,17 +168,18 @@ type dutiesMetadata struct {
}

type metadata struct {
    committeesAtSlot uint64
    proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
    startSlot primitives.Slot
    committeesBySlot [][][]primitives.ValidatorIndex
+   validatorAssignmentMap map[primitives.ValidatorIndex]*helpers.LiteAssignment
    liteAssignment *helpers.LiteAssignment
}

-func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch) (*dutiesMetadata, error) {
+func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*dutiesMetadata, error) {
    meta := &dutiesMetadata{}
    var err error
-   meta.current, err = loadMetadata(ctx, s, reqEpoch)
+   meta.current, err = loadMetadata(ctx, s, reqEpoch, numValidators)
    if err != nil {
        return nil, err
    }

@@ -163,14 +189,14 @@ func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primi
        return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
    }

-   meta.next, err = loadMetadata(ctx, s, reqEpoch+1)
+   meta.next, err = loadMetadata(ctx, s, reqEpoch+1, numValidators)
    if err != nil {
        return nil, err
    }
    return meta, nil
}

-func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch) (*metadata, error) {
+func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*metadata, error) {
    meta := &metadata{}

    if err := helpers.VerifyAssignmentEpoch(reqEpoch, s); err != nil {

@@ -193,9 +219,48 @@ func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.
        return nil, err
    }

+   if numValidators >= validatorLookupThreshold {
+       meta.validatorAssignmentMap = buildValidatorAssignmentMap(meta.committeesBySlot, meta.startSlot)
+   }
+
    return meta, nil
}

// buildValidatorAssignmentMap creates a map from validator index to assignment for O(1) lookup.
func buildValidatorAssignmentMap(
    bySlot [][][]primitives.ValidatorIndex,
    startSlot primitives.Slot,
) map[primitives.ValidatorIndex]*helpers.LiteAssignment {
    validatorToAssignment := make(map[primitives.ValidatorIndex]*helpers.LiteAssignment)

    for relativeSlot, committees := range bySlot {
        for cIdx, committee := range committees {
            for pos, vIdx := range committee {
                validatorToAssignment[vIdx] = &helpers.LiteAssignment{
                    AttesterSlot: startSlot + primitives.Slot(relativeSlot),
                    CommitteeIndex: primitives.CommitteeIndex(cIdx),
                    CommitteeLength: uint64(len(committee)),
                    ValidatorCommitteeIndex: uint64(pos),
                }
            }
        }
    }
    return validatorToAssignment
}

// getValidatorAssignment retrieves the assignment for a validator using either
// the pre-built assignment map (for large requests) or linear search (for small requests).
func (vs *Server) getValidatorAssignment(meta *metadata, validatorIndex primitives.ValidatorIndex) *helpers.LiteAssignment {
    if meta.validatorAssignmentMap != nil {
        if assignment, exists := meta.validatorAssignmentMap[validatorIndex]; exists {
            return assignment
        }
        return &helpers.LiteAssignment{}
    }

    return helpers.AssignmentForValidator(meta.committeesBySlot, meta.startSlot, validatorIndex)
}

// buildValidatorDuty builds both current‑epoch and next‑epoch V2 duty objects
// for a single validator index.
func (vs *Server) buildValidatorDuty(
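In effect the threshold trades a single pass over every committee in the epoch (to build validatorAssignmentMap) against one linear scan per requested public key: below roughly validatorLookupThreshold (3000) keys the per-key scans are the cheaper side, and above it the one-time map build wins and each subsequent lookup is O(1).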
@@ -270,3 +335,138 @@ func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.Lite
    duty.ValidatorCommitteeIndex = la.ValidatorCommitteeIndex
    duty.AttesterSlot = la.AttesterSlot
}

// startSlowDutiesProfiler starts background profiling that triggers after 2s
// Returns a cancel function that should be called when the operation completes
func (vs *Server) startSlowDutiesProfiler(startTime time.Time, numValidators int, epoch primitives.Epoch) func() {
    ctx, cancel := context.WithCancel(context.Background())

    go func() {
        // Wait for 2 seconds
        select {
        case <-time.After(2 * time.Second):
            // Operation is taking too long, start profiling
            vs.captureSlowDutiesProfile(startTime, numValidators, epoch, ctx)
        case <-ctx.Done():
            // Operation completed before 2s, no profiling needed
            return
        }
    }()

    return cancel
}

// captureSlowDutiesProfile captures CPU and mutex profiles when GetDutiesV2 is slow
func (vs *Server) captureSlowDutiesProfile(startTime time.Time, numValidators int, epoch primitives.Epoch, ctx context.Context) {
    timestamp := time.Now().Format("20060102-150405")

    // Get the datadir from the database path and create debug subdirectory
    // Cast to Database interface to access DatabasePath method
    dbWithPath, ok := vs.BeaconDB.(interface{ DatabasePath() string })
    if !ok {
        log.Error("Cannot access database path for profiling - database does not implement DatabasePath method")
        return
    }
    dbPath := dbWithPath.DatabasePath()
    profileDir := filepath.Join(filepath.Dir(dbPath), "debug")

    // Create profile directory if it doesn't exist
    if err := file.MkdirAll(profileDir); err != nil {
        log.WithError(err).Warn("Failed to create profile directory")
        return
    }

    currentDuration := time.Since(startTime)
    log.WithFields(logrus.Fields{
        "currentDuration": currentDuration,
        "numValidators": numValidators,
        "epoch": epoch,
        "profileDir": profileDir,
    }).Warn("GetDutiesV2 taking longer than 2s, capturing profiles")

    // Start CPU profiling immediately
    cpuFile, err := os.Create(fmt.Sprintf("%s/cpu-duties-%s.prof", profileDir, timestamp))
    if err != nil {
        log.WithError(err).Warn("Failed to create CPU profile file")
    } else {
        if err := pprof.StartCPUProfile(cpuFile); err != nil {
            log.WithError(err).Warn("Failed to start CPU profile")
            if closeErr := cpuFile.Close(); closeErr != nil {
                log.WithError(closeErr).Warn("Failed to close CPU profile file")
            }
        } else {
            // Profile for up to 10 seconds or until context is cancelled
            go func() {
                defer func() {
                    pprof.StopCPUProfile()
                    if closeErr := cpuFile.Close(); closeErr != nil {
                        log.WithError(closeErr).Warn("Failed to close CPU profile file")
                    }
                    log.WithField("file", cpuFile.Name()).Info("CPU profile captured")
                }()

                select {
                case <-time.After(10 * time.Second):
                    // Stop profiling after 10s max
                case <-ctx.Done():
                    // Stop profiling when operation completes
                }
            }()
        }
    }

    // Enable mutex profiling
    runtime.SetMutexProfileFraction(1)

    // Capture snapshot profiles immediately
    vs.captureSnapshotProfiles(profileDir, timestamp)
}

// captureSnapshotProfiles captures point-in-time profiles
func (vs *Server) captureSnapshotProfiles(profileDir, timestamp string) {
    // Capture mutex profile
    mutexFile, err := os.Create(fmt.Sprintf("%s/mutex-duties-%s.prof", profileDir, timestamp))
    if err != nil {
        log.WithError(err).Warn("Failed to create mutex profile file")
    } else {
        if err := pprof.Lookup("mutex").WriteTo(mutexFile, 0); err != nil {
            log.WithError(err).Warn("Failed to write mutex profile")
        } else {
            log.WithField("file", mutexFile.Name()).Info("Mutex profile captured")
        }
        if closeErr := mutexFile.Close(); closeErr != nil {
            log.WithError(closeErr).Warn("Failed to close mutex profile file")
        }
    }

    // Capture goroutine profile
    goroutineFile, err := os.Create(fmt.Sprintf("%s/goroutine-duties-%s.prof", profileDir, timestamp))
    if err != nil {
        log.WithError(err).Warn("Failed to create goroutine profile file")
    } else {
        if err := pprof.Lookup("goroutine").WriteTo(goroutineFile, 0); err != nil {
            log.WithError(err).Warn("Failed to write goroutine profile")
        } else {
            log.WithField("file", goroutineFile.Name()).Info("Goroutine profile captured")
        }
        if closeErr := goroutineFile.Close(); closeErr != nil {
            log.WithError(closeErr).Warn("Failed to close goroutine profile file")
        }
    }

    // Capture heap profile
    heapFile, err := os.Create(fmt.Sprintf("%s/heap-duties-%s.prof", profileDir, timestamp))
    if err != nil {
        log.WithError(err).Warn("Failed to create heap profile file")
    } else {
        runtime.GC() // Force GC before heap profile
        if err := pprof.Lookup("heap").WriteTo(heapFile, 0); err != nil {
            log.WithError(err).Warn("Failed to write heap profile")
        } else {
            log.WithField("file", heapFile.Name()).Info("Heap profile captured")
        }
        if closeErr := heapFile.Close(); closeErr != nil {
            log.WithError(closeErr).Warn("Failed to close heap profile file")
        }
    }
}
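The .prof files written above are standard Go pprof profiles, so, assuming default tooling, they can be inspected with go tool pprof pointed at the cpu-duties-*, mutex-duties-*, goroutine-duties-* or heap-duties-* file in the debug directory created next to the beacon database.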
@@ -559,3 +559,170 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) {
|
||||
_, err := vs.GetDutiesV2(t.Context(), ðpb.DutiesRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
}
|
||||
|
||||
func TestBuildValidatorAssignmentMap(t *testing.T) {
|
||||
start := primitives.Slot(200)
|
||||
bySlot := [][][]primitives.ValidatorIndex{
|
||||
{{1, 2, 3}}, // slot 200, committee 0
|
||||
{{7, 8, 9}}, // slot 201, committee 0
|
||||
{{4, 5}, {10, 11}}, // slot 202, committee 0 & 1
|
||||
}
|
||||
|
||||
assignmentMap := buildValidatorAssignmentMap(bySlot, start)
|
||||
|
||||
// Test validator 8 assignment (slot 201, committee 0, position 1)
|
||||
vIdx := primitives.ValidatorIndex(8)
|
||||
got, exists := assignmentMap[vIdx]
|
||||
assert.Equal(t, true, exists)
|
||||
require.NotNil(t, got)
|
||||
assert.Equal(t, start+1, got.AttesterSlot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(0), got.CommitteeIndex)
|
||||
assert.Equal(t, uint64(3), got.CommitteeLength)
|
||||
assert.Equal(t, uint64(1), got.ValidatorCommitteeIndex)
|
||||
|
||||
// Test validator 1 assignment (slot 200, committee 0, position 0)
|
||||
vIdx1 := primitives.ValidatorIndex(1)
got1, exists1 := assignmentMap[vIdx1]
assert.Equal(t, true, exists1)
require.NotNil(t, got1)
assert.Equal(t, start, got1.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), got1.CommitteeIndex)
assert.Equal(t, uint64(3), got1.CommitteeLength)
assert.Equal(t, uint64(0), got1.ValidatorCommitteeIndex)

// Test validator 10 assignment (slot 202, committee 1, position 0)
vIdx10 := primitives.ValidatorIndex(10)
got10, exists10 := assignmentMap[vIdx10]
assert.Equal(t, true, exists10)
require.NotNil(t, got10)
assert.Equal(t, start+2, got10.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(1), got10.CommitteeIndex)
assert.Equal(t, uint64(2), got10.CommitteeLength)
assert.Equal(t, uint64(0), got10.ValidatorCommitteeIndex)

// Test non-existent validator
_, exists99 := assignmentMap[primitives.ValidatorIndex(99)]
assert.Equal(t, false, exists99)

// Verify that we get the same results as the linear search
for _, committees := range bySlot {
for _, committee := range committees {
for _, validatorIdx := range committee {
linearResult := helpers.AssignmentForValidator(bySlot, start, validatorIdx)
mapResult, mapExists := assignmentMap[validatorIdx]
assert.Equal(t, true, mapExists)
require.DeepEqual(t, linearResult, mapResult)
}
}
}
}

func TestGetValidatorAssignment_WithAssignmentMap(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},
}

// Test with pre-built assignment map (large request scenario)
meta := &metadata{
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: buildValidatorAssignmentMap(bySlot, start),
}

vs := &Server{}

// Test existing validator (validator 2 is at position 1 in the committee, not position 2)
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(2))
require.NotNil(t, assignment)
assert.Equal(t, start, assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)

// Test non-existent validator should return empty assignment
assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
require.NotNil(t, assignment)
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}

func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},
}

// Test without assignment map (small request scenario)
meta := &metadata{
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: nil, // No map - should use linear search
}

vs := &Server{}

// Test existing validator
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(5))
require.NotNil(t, assignment)
assert.Equal(t, start+1, assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)

// Test non-existent validator should return empty assignment
assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
require.NotNil(t, assignment)
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}

func TestLoadMetadata_ThresholdBehavior(t *testing.T) {
state, _ := util.DeterministicGenesisState(t, 128)
epoch := primitives.Epoch(0)

tests := []struct {
name string
numValidators int
expectAssignmentMap bool
}{
{
name: "Small request - below threshold",
numValidators: 100,
expectAssignmentMap: false,
},
{
name: "Large request - at threshold",
numValidators: validatorLookupThreshold,
expectAssignmentMap: true,
},
{
name: "Large request - above threshold",
numValidators: validatorLookupThreshold + 1000,
expectAssignmentMap: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
meta, err := loadMetadata(t.Context(), state, epoch, tt.numValidators)
require.NoError(t, err)
require.NotNil(t, meta)

if tt.expectAssignmentMap {
require.NotNil(t, meta.validatorAssignmentMap, "Expected assignment map to be built for large requests")
assert.Equal(t, true, len(meta.validatorAssignmentMap) > 0, "Assignment map should not be empty")
} else {
// For small requests, the map should be nil (not initialized)
if meta.validatorAssignmentMap != nil {
t.Errorf("Expected no assignment map for small requests, got: %v", meta.validatorAssignmentMap)
}
}

// Common fields should always be set
assert.Equal(t, true, meta.committeesAtSlot > 0)
require.NotNil(t, meta.committeesBySlot)
assert.Equal(t, true, len(meta.committeesBySlot) > 0)
})
}
}

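Editor's note: a minimal sketch, not part of the change set, of the one-pass validator-to-assignment lookup that the tests above exercise for large requests; the struct and function names here are assumptions, only the field meanings mirror the assertions above.

type assignmentSketch struct {
	AttesterSlot            primitives.Slot
	CommitteeIndex          primitives.CommitteeIndex
	CommitteeLength         uint64
	ValidatorCommitteeIndex uint64
}

// buildAssignmentLookupSketch walks every committee once and records each
// validator's slot, committee, and position, trading memory for O(1) lookups.
func buildAssignmentLookupSketch(bySlot [][][]primitives.ValidatorIndex, start primitives.Slot) map[primitives.ValidatorIndex]*assignmentSketch {
	m := make(map[primitives.ValidatorIndex]*assignmentSketch)
	for slotOffset, committees := range bySlot {
		for committeeIdx, committee := range committees {
			for position, vIdx := range committee {
				m[vIdx] = &assignmentSketch{
					AttesterSlot:            start + primitives.Slot(slotOffset),
					CommitteeIndex:          primitives.CommitteeIndex(committeeIdx),
					CommitteeLength:         uint64(len(committee)),
					ValidatorCommitteeIndex: uint64(position),
				}
			}
		}
	}
	return m
}

Below the threshold, the linear search avoids paying the map-construction cost for a handful of validators; above it, the single pass amortizes across many lookups.
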
@@ -471,6 +471,11 @@ func isVersionCompatible(bidVersion, headBlockVersion int) bool {
return true
}

// Allow Capella bids for Bellatrix blocks - they have compatible payload formats
if bidVersion == version.Capella && headBlockVersion == version.Bellatrix {
return true
}

// For all other cases, require exact version match
return false
}

@@ -1378,6 +1378,12 @@ func TestIsVersionCompatible(t *testing.T) {
headBlockVersion: version.Capella,
want: false,
},
{
name: "Capella bid with Bellatrix head block - Compatible",
bidVersion: version.Capella,
headBlockVersion: version.Bellatrix,
want: true,
},
{
name: "Phase0 bid with Altair head block - Not compatible",
bidVersion: version.Phase0,

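Editor's sketch only: the kind of guard a caller might place around the compatibility check in the hunk above. The surrounding helper name is assumed and fmt is assumed to be imported; only isVersionCompatible comes from the change set.

func rejectIncompatibleBid(bidVersion, headBlockVersion int) error {
	// Accept exact matches and the explicit Capella-bid-on-Bellatrix-head case;
	// reject everything else before the bid is considered further.
	if !isVersionCompatible(bidVersion, headBlockVersion) {
		return fmt.Errorf("bid version %d is not compatible with head block version %d", bidVersion, headBlockVersion)
	}
	return nil
}
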
@@ -6,7 +6,6 @@ import (
"context"
"time"

ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
gcache "github.com/patrickmn/go-cache"
@@ -22,7 +21,7 @@ func NewRegularSyncFuzz(opts ...Option) *Service {
cancel: cancel,
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
}
r.rateLimiter = newRateLimiter(r.cfg.p2p)


@@ -28,14 +28,14 @@ import (
var processPendingAttsPeriod = slots.DivideSlotBy(2 /* twice per slot */)
var pendingAttsLimit = 10000

// This processes pending attestation queues on every `processPendingAttsPeriod`.
func (s *Service) processPendingAttsQueue() {
// This processes pending attestation queues on every processPendingAttsPeriod.
func (s *Service) runPendingAttsQueue() {
// Prevents multiple queue processing goroutines (invoked by RunEvery) from contending for data.
mutex := new(sync.Mutex)
async.RunEvery(s.ctx, processPendingAttsPeriod, func() {
mutex.Lock()
if err := s.processPendingAtts(s.ctx); err != nil {
log.WithError(err).Debugf("Could not process pending attestation: %v", err)
log.WithError(err).Debug("Could not process pending attestation")
}
mutex.Unlock()
})
@@ -51,7 +51,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error {

// Before a node processes pending attestations queue, it verifies
// the attestations in the queue are still valid. Attestations will
// be deleted from the queue if invalid (ie. getting staled from falling too many slots behind).
// be deleted from the queue if invalid (i.e. getting stalled from falling too many slots behind).
s.validatePendingAtts(ctx, s.cfg.clock.CurrentSlot())

s.pendingAttsLock.RLock()
@@ -68,7 +68,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
attestations := s.blkRootToPendingAtts[bRoot]
s.pendingAttsLock.RUnlock()
// has the pending attestation's missing block arrived and the node processed block yet?
if s.cfg.beaconDB.HasBlock(ctx, bRoot) && (s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) {
if s.cfg.beaconDB.HasBlock(ctx, bRoot) && (s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) && s.cfg.chain.InForkchoice(bRoot) {
s.processAttestations(ctx, attestations)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
@@ -91,52 +91,59 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}

func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.SignedAggregateAttAndProof) {
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
for _, signedAtt := range attestations {
att := signedAtt.AggregateAttestationAndProof().AggregateVal()
// The pending attestations can arrive in both aggregated and unaggregated forms,
// each from has distinct validation steps.
if att.IsAggregated() {
s.processAggregated(ctx, signedAtt)
} else {
s.processUnaggregated(ctx, att)
// The pending attestations can arrive as both aggregates and attestations,
// and each form has to be processed differently.
switch t := signedAtt.(type) {
case ethpb.Att:
s.processAtt(ctx, t)
case ethpb.SignedAggregateAttAndProof:
s.processAggregate(ctx, t)
default:
log.Warnf("Unexpected item of type %T in pending attestation queue. Item will not be processed", t)
}
}
}

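Editor's sketch of the dispatch pattern the hunk above switches to: the queue is []any so it can hold both ethpb.Att and ethpb.SignedAggregateAttAndProof, and a type switch recovers the concrete interface before processing. Names here are illustrative, not part of the change set.

func dispatchPendingItems(items []any) {
	for _, item := range items {
		switch v := item.(type) {
		case ethpb.Att:
			// Unaggregated (or single) attestation path.
			_ = v
		case ethpb.SignedAggregateAttAndProof:
			// Aggregate-and-proof path.
			_ = v
		default:
			// Unknown item type; skip it rather than panic.
		}
	}
}
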
func (s *Service) processAggregated(ctx context.Context, att ethpb.SignedAggregateAttAndProof) {
aggregate := att.AggregateAttestationAndProof().AggregateVal()
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
att := aggregate.AggregateAttestationAndProof().AggregateVal()

// Save the pending aggregated attestation to the pool if it passes the aggregated
// validation steps.
valRes, err := s.validateAggregatedAtt(ctx, att)
valRes, err := s.validateAggregatedAtt(ctx, aggregate)
if err != nil {
log.WithError(err).Debug("Pending aggregated attestation failed validation")
}
aggValid := pubsub.ValidationAccept == valRes
if s.validateBlockInAttestation(ctx, att) && aggValid {
if s.validateBlockInAttestation(ctx, aggregate) && aggValid {
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.attestationCache.Add(aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
if err = s.cfg.attestationCache.Add(att); err != nil {
log.WithError(err).Debug("Could not save aggregated attestation")
return
}
} else {
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
if att.IsAggregated() {
if err = s.cfg.attPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Debug("Could not save aggregated attestation")
return
}
} else if err = s.cfg.attPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
return
}
}

s.setAggregatorIndexEpochSeen(aggregate.GetData().Target.Epoch, att.AggregateAttestationAndProof().GetAggregatorIndex())
s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())

// Broadcasting the signed attestation again once a node is able to process it.
if err := s.cfg.p2p.Broadcast(ctx, att); err != nil {
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
log.WithError(err).Debug("Could not broadcast")
}
}
}

func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
func (s *Service) processAtt(ctx context.Context, att ethpb.Att) {
data := att.GetData()

// This is an important validation before retrieving attestation pre state to defend against
@@ -239,13 +246,41 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
}
}

// This defines how pending attestations is saved in the map. The key is the
// root of the missing block. The value is the list of pending attestations
// This defines how pending aggregates are saved in the map. The key is the
// root of the missing block. The value is the list of pending attestations/aggregates
// that voted for that block root. The caller of this function is responsible
// for not sending repeated aggregates to the pending queue.
func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {
root := bytesutil.ToBytes32(agg.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)

s.savePending(root, agg, func(other any) bool {
a, ok := other.(ethpb.SignedAggregateAttAndProof)
return ok && pendingAggregatesAreEqual(agg, a)
})
}

// This defines how pending attestations are saved in the map. The key is the
// root of the missing block. The value is the list of pending attestations/aggregates
// that voted for that block root. The caller of this function is responsible
// for not sending repeated attestations to the pending queue.
func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
root := bytesutil.ToBytes32(att.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
func (s *Service) savePendingAtt(att ethpb.Att) {
if att.Version() >= version.Electra && !att.IsSingle() {
log.Debug("Non-single attestation sent to pending attestation pool. Attestation will be ignored")
return
}

root := bytesutil.ToBytes32(att.GetData().BeaconBlockRoot)

s.savePending(root, att, func(other any) bool {
a, ok := other.(ethpb.Att)
return ok && pendingAttsAreEqual(att, a)
})
}

// We want to avoid saving duplicate items, which is the purpose of the passed-in closure.
// It is the responsibility of the caller to provide a function that correctly determines equality
// in the context of the pending queue.
func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any) bool) {
s.pendingAttsLock.Lock()
defer s.pendingAttsLock.Unlock()

@@ -261,62 +296,60 @@ func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
_, ok := s.blkRootToPendingAtts[root]
if !ok {
pendingAttCount.Inc()
s.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{att}
s.blkRootToPendingAtts[root] = []any{pending}
return
}
// Skip if the attestation from the same aggregator already exists in

// Skip if the attestation/aggregate from the same validator already exists in
// the pending queue.
for _, a := range s.blkRootToPendingAtts[root] {
if attsAreEqual(att, a) {
if isEqual(a) {
return
}
}

pendingAttCount.Inc()
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
}

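Editor's sketch of the closure-based deduplication that savePending relies on above, reduced to a standalone helper; the name appendUnique is illustrative and not from the change set. The queue stays []any, and only the caller knows how to compare two items, which is why equality is passed in as a closure.

func appendUnique(queue []any, item any, equal func(a, b any) bool) []any {
	// Skip the append if an equivalent item is already queued.
	for _, existing := range queue {
		if equal(existing, item) {
			return queue
		}
	}
	return append(queue, item)
}
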
func attsAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
if a.Version() != b.Version() {
return false
}

if a.GetSignature() != nil {
return b.GetSignature() != nil && a.AggregateAttestationAndProof().GetAggregatorIndex() == b.AggregateAttestationAndProof().GetAggregatorIndex()
}
if b.GetSignature() != nil {
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
return false
}

aAggregate := a.AggregateAttestationAndProof().AggregateVal()
bAggregate := b.AggregateAttestationAndProof().AggregateVal()
aData := aAggregate.GetData()
bData := bAggregate.GetData()

if aData.Slot != bData.Slot {
aAtt := a.AggregateAttestationAndProof().AggregateVal()
bAtt := b.AggregateAttestationAndProof().AggregateVal()
if aAtt.GetData().Slot != bAtt.GetData().Slot {
return false
}
if aAtt.GetCommitteeIndex() != bAtt.GetCommitteeIndex() {
return false
}
return bytes.Equal(aAtt.GetAggregationBits(), bAtt.GetAggregationBits())
}

func pendingAttsAreEqual(a, b ethpb.Att) bool {
if a.Version() != b.Version() {
return false
}
if a.GetData().Slot != b.GetData().Slot {
return false
}
if a.Version() >= version.Electra {
if aAggregate.IsSingle() != bAggregate.IsSingle() {
return false
}
if aAggregate.IsSingle() && aAggregate.GetAttestingIndex() != bAggregate.GetAttestingIndex() {
return false
}
if !bytes.Equal(aAggregate.CommitteeBitsVal().Bytes(), bAggregate.CommitteeBitsVal().Bytes()) {
return false
}
} else if aData.CommitteeIndex != bData.CommitteeIndex {
return a.GetAttestingIndex() == b.GetAttestingIndex()
}
if a.GetCommitteeIndex() != b.GetCommitteeIndex() {
return false
}

return bytes.Equal(aAggregate.GetAggregationBits(), bAggregate.GetAggregationBits())
return bytes.Equal(a.GetAggregationBits(), b.GetAggregationBits())
}

// This validates the pending attestations in the queue are still valid.
// If not valid, a node will remove it in the queue in place. The validity
// check specifies the pending attestation could not fall one epoch behind
// of the current slot.
// If not valid, a node will remove it from the queue in place. The validity
// check specifies the pending attestation cannot fall one epoch behind
// the current slot.
func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot) {
_, span := trace.StartSpan(ctx, "validatePendingAtts")
defer span.End()
@@ -326,9 +359,23 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)

for bRoot, atts := range s.blkRootToPendingAtts {
for i := len(atts) - 1; i >= 0; i-- {
if slot >= atts[i].AggregateAttestationAndProof().AggregateVal().GetData().Slot+params.BeaconConfig().SlotsPerEpoch {
// Remove the pending attestation from the list in place.
atts = append(atts[:i], atts[i+1:]...)
var attSlot primitives.Slot
switch t := atts[i].(type) {
case ethpb.Att:
attSlot = t.GetData().Slot
case ethpb.SignedAggregateAttAndProof:
attSlot = t.AggregateAttestationAndProof().AggregateVal().GetData().Slot
default:
log.Debugf("Unexpected item of type %T in pending attestation queue. Item will be removed", t)
// Remove the pending attestation from the map in place.
atts[i] = atts[len(atts)-1]
atts = atts[:len(atts)-1]
continue
}
if slot >= attSlot+params.BeaconConfig().SlotsPerEpoch {
// Remove the pending attestation from the map in place.
atts[i] = atts[len(atts)-1]
atts = atts[:len(atts)-1]
}
}
s.blkRootToPendingAtts[bRoot] = atts

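Editor's sketch of the swap-remove deletion the pruning loop above switches to: copying the last element over index i and truncating avoids shifting the tail, at the cost of element order, which the pending queue does not rely on. The helper name is illustrative.

func removeAt(items []any, i int) []any {
	// Overwrite the removed slot with the last element, then shrink by one.
	items[i] = items[len(items)-1]
	return items[:len(items)-1]
}
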
@@ -56,17 +56,17 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{}}
r := &Service{
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
chainStarted: abool.New(),
}

a := &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: a}}
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []any{a}
require.NoError(t, r.processPendingAtts(t.Context()))
require.LogsContain(t, hook, "Requesting block by root")
}

func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
hook := logTest.NewGlobal()
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
@@ -102,16 +102,12 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
att.Signature = privKeys[i].Sign(hashTreeRoot[:]).Marshal()
}

aggregateAndProof := &ethpb.AggregateAttestationAndProof{
Aggregate: att,
}

require.NoError(t, beaconState.SetGenesisTime(time.Now()))

chain := &mock.ChainService{Genesis: time.Now(),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Root: att.Data.BeaconBlockRoot,
Epoch: 0,
},
}
@@ -132,7 +128,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
attPool: attestations.NewPool(),
attestationNotifier: &mock.SimpleNotifier{Feed: opn},
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -142,7 +138,9 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}}
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

r.blkRootToPendingAtts[root] = []any{att}
require.NoError(t, r.processPendingAtts(t.Context()))

var wg sync.WaitGroup
@@ -169,7 +167,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
cancel()
}

func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAttElectra(t *testing.T) {
hook := logTest.NewGlobal()
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
@@ -189,9 +187,6 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {
Target: &ethpb.Checkpoint{Epoch: 0, Root: root[:]},
},
}
aggregateAndProof := &ethpb.AggregateAttestationAndProofSingle{
Aggregate: att,
}

committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
assert.NoError(t, err)
@@ -207,7 +202,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {
chain := &mock.ChainService{Genesis: time.Now(),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Root: att.Data.BeaconBlockRoot,
Epoch: 0,
},
}
@@ -227,7 +222,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {
attPool: attestations.NewPool(),
attestationNotifier: &mock.SimpleNotifier{Feed: opn},
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -237,7 +232,9 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProofSingle{Message: aggregateAndProof}}
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

r.blkRootToPendingAtts[root] = []any{att}
require.NoError(t, r.processPendingAtts(t.Context()))
var wg sync.WaitGroup
wg.Add(1)
@@ -308,9 +305,6 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
CommitteeIndex: 0,
},
}
aggregateAndProof := &ethpb.AggregateAttestationAndProofSingle{
Aggregate: att,
}

// Retrieve the beacon committee and set the attester index.
committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.CommitteeId)
@@ -332,7 +326,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
Genesis: time.Now(),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Root: att.Data.BeaconBlockRoot,
Epoch: 0,
},
}
@@ -358,7 +352,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
attPool: attestations.NewPool(),
attestationNotifier: &mock.SimpleNotifier{Feed: opn},
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -370,8 +364,8 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

// Add the pending attestation.
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{
&ethpb.SignedAggregateAttestationAndProofSingle{Message: aggregateAndProof},
r.blkRootToPendingAtts[root] = []any{
att,
}
require.NoError(t, r.processPendingAtts(t.Context()))

@@ -426,54 +420,72 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
}

func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
ctx, cancel := context.WithCancel(t.Context())

db := dbtest.SetupDB(t)
p2p := p2ptest.NewTestP2P(t)
st, privKeys := util.DeterministicGenesisState(t, 256)
require.NoError(t, st.SetGenesisTime(time.Now()))
b := util.NewBeaconBlock()
r32, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, t.Context(), db, b)
require.NoError(t, db.SaveState(t.Context(), st, r32))

s, _ := util.DeterministicGenesisState(t, 256)
chain := &mock.ChainService{
State: s,
Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
r := &Service{
State: st,
Genesis: prysmTime.Now(),
DB: db,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: r32[:],
Epoch: 0,
},
}

s := &Service{
ctx: ctx,
cfg: &config{
p2p: p1,
p2p: p2p,
beaconDB: db,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
signatureChan: make(chan *signatureVerifier, verifierLimit),
seenAggregatedAttestationCache: lruwrpr.New(10),
}
go s.verifierRoutine()

committee, err := helpers.BeaconCommitteeFromState(t.Context(), st, 0, 0)
assert.NoError(t, err)
// Arbitrary aggregator index for testing purposes.
aggregatorIndex := committee[0]

priv, err := bls.RandKey()
require.NoError(t, err)
aggBits := bitfield.NewBitlist(8)
aggBits.SetBitAt(1, true)

a := &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Signature: priv.Sign([]byte("foo")).Marshal(),
AggregationBits: bitfield.Bitlist{0x02},
AggregationBits: aggBits,
Data: util.HydrateAttestationData(&ethpb.AttestationData{}),
},
SelectionProof: make([]byte, fieldparams.BLSSignatureLength),
AggregatorIndex: aggregatorIndex,
SelectionProof: make([]byte, fieldparams.BLSSignatureLength),
}

b := util.NewBeaconBlock()
r32, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, r32))
s.blkRootToPendingAtts[r32] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
require.NoError(t, s.processPendingAtts(t.Context()))

r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
require.NoError(t, r.processPendingAtts(t.Context()))
assert.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcasted bad aggregate")

assert.Equal(t, false, p1.BroadcastCalled.Load(), "Broadcasted bad aggregate")
// Clear pool.
err = r.cfg.attPool.DeleteUnaggregatedAttestation(a.Aggregate)
err = s.cfg.attPool.DeleteUnaggregatedAttestation(a.Aggregate)
require.NoError(t, err)

validators := uint64(256)

_, privKeys := util.DeterministicGenesisState(t, validators)
aggBits := bitfield.NewBitlist(8)
aggBits.SetBitAt(1, true)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: r32[:],
@@ -482,11 +494,10 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
},
AggregationBits: aggBits,
}
committee, err := helpers.BeaconCommitteeFromState(t.Context(), s, att.Data.Slot, att.Data.CommitteeIndex)
assert.NoError(t, err)

attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
attesterDomain, err := signing.Domain(s.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, s.GenesisValidatorsRoot())
attesterDomain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
hashTreeRoot, err := signing.ComputeSigningRoot(att.Data, attesterDomain)
assert.NoError(t, err)
@@ -494,47 +505,22 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
att.Signature = privKeys[i].Sign(hashTreeRoot[:]).Marshal()
}

// Arbitrary aggregator index for testing purposes.
aggregatorIndex := committee[0]
sszSlot := primitives.SSZUint64(att.Data.Slot)
sig, err := signing.ComputeDomainAndSign(s, 0, &sszSlot, params.BeaconConfig().DomainSelectionProof, privKeys[aggregatorIndex])
sig, err := signing.ComputeDomainAndSign(st, 0, &sszSlot, params.BeaconConfig().DomainSelectionProof, privKeys[aggregatorIndex])
require.NoError(t, err)
aggregateAndProof := &ethpb.AggregateAttestationAndProof{
SelectionProof: sig,
Aggregate: att,
AggregatorIndex: aggregatorIndex,
}
aggreSig, err := signing.ComputeDomainAndSign(s, 0, aggregateAndProof, params.BeaconConfig().DomainAggregateAndProof, privKeys[aggregatorIndex])
aggreSig, err := signing.ComputeDomainAndSign(st, 0, aggregateAndProof, params.BeaconConfig().DomainAggregateAndProof, privKeys[aggregatorIndex])
require.NoError(t, err)

require.NoError(t, s.SetGenesisTime(time.Now()))
ctx, cancel := context.WithCancel(t.Context())
chain2 := &mock.ChainService{Genesis: time.Now(),
State: s,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
}}
r = &Service{
ctx: ctx,
cfg: &config{
p2p: p1,
beaconDB: db,
chain: chain2,
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
attPool: attestations.NewPool(),
attestationNotifier: &mock.MockOperationNotifier{},
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
go r.verifierRoutine()
s.blkRootToPendingAtts[r32] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, s.processPendingAtts(t.Context()))

r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(t.Context()))
assert.Equal(t, true, p2p.BroadcastCalled.Load(), "The good aggregate was not broadcasted")

assert.Equal(t, true, p1.BroadcastCalled.Load(), "Could not broadcast the good aggregate")
cancel()
}

@@ -610,7 +596,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
seenAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -619,7 +605,9 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(t.Context()))

assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
@@ -630,9 +618,188 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
cancel()
}

func TestProcessPendingAtts_HasBlockSaveAggregatedAttElectra(t *testing.T) {
hook := logTest.NewGlobal()
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
validators := uint64(256)

beaconState, privKeys := util.DeterministicGenesisState(t, validators)

sb := util.NewBeaconBlock()
util.SaveBlock(t, t.Context(), db, sb)
root, err := sb.Block.HashTreeRoot()
require.NoError(t, err)

committeeBits := primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(0, true)
aggBits := bitfield.NewBitlist(validators / uint64(params.BeaconConfig().SlotsPerEpoch))
aggBits.SetBitAt(0, true)
aggBits.SetBitAt(1, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: root[:]},
},
CommitteeBits: committeeBits,
AggregationBits: aggBits,
}

committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.GetCommitteeIndex())
assert.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
attesterDomain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
hashTreeRoot, err := signing.ComputeSigningRoot(att.Data, attesterDomain)
assert.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sig := privKeys[indice].Sign(hashTreeRoot[:])
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()

// Arbitrary aggregator index for testing purposes.
aggregatorIndex := committee[0]
sszUint := primitives.SSZUint64(att.Data.Slot)
sig, err := signing.ComputeDomainAndSign(beaconState, 0, &sszUint, params.BeaconConfig().DomainSelectionProof, privKeys[aggregatorIndex])
require.NoError(t, err)
aggregateAndProof := &ethpb.AggregateAttestationAndProofElectra{
SelectionProof: sig,
Aggregate: att,
AggregatorIndex: aggregatorIndex,
}
aggreSig, err := signing.ComputeDomainAndSign(beaconState, 0, aggregateAndProof, params.BeaconConfig().DomainAggregateAndProof, privKeys[aggregatorIndex])
require.NoError(t, err)

require.NoError(t, beaconState.SetGenesisTime(time.Now()))

chain := &mock.ChainService{Genesis: time.Now(),
DB: db,
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
}}
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cfg: &config{
p2p: p1,
beaconDB: db,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]any),
seenAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
go r.verifierRoutine()
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProofElectra{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(t.Context()))

assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att")
atts := r.cfg.attPool.UnaggregatedAttestations()
assert.Equal(t, 0, len(atts), "Did save aggregated att")
require.LogsContain(t, hook, "Verified and saved pending attestations to pool")
cancel()
}

func TestProcessPendingAtts_BlockNotInForkChoice(t *testing.T) {
hook := logTest.NewGlobal()
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
validators := uint64(256)

beaconState, privKeys := util.DeterministicGenesisState(t, validators)

sb := util.NewBeaconBlock()
util.SaveBlock(t, t.Context(), db, sb)
root, err := sb.Block.HashTreeRoot()
require.NoError(t, err)

aggBits := bitfield.NewBitlist(8)
aggBits.SetBitAt(1, true)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: root[:]},
},
AggregationBits: aggBits,
}

committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
assert.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
attesterDomain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
hashTreeRoot, err := signing.ComputeSigningRoot(att.Data, attesterDomain)
assert.NoError(t, err)
for _, i := range attestingIndices {
att.Signature = privKeys[i].Sign(hashTreeRoot[:]).Marshal()
}

aggregateAndProof := &ethpb.AggregateAttestationAndProof{
Aggregate: att,
}

require.NoError(t, beaconState.SetGenesisTime(time.Now()))

// Mock chain service that returns false for InForkchoice
chain := &mock.ChainService{Genesis: time.Now(),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
},
// Set NotFinalized to true so InForkchoice returns false
NotFinalized: true,
}

ctx, cancel := context.WithCancel(t.Context())
defer cancel()
r := &Service{
ctx: ctx,
cfg: &config{
p2p: p1,
beaconDB: db,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]any),
}

s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

// Add pending attestation
r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}}

// Process pending attestations - should not process because block is not in fork choice
require.NoError(t, r.processPendingAtts(t.Context()))

// Verify attestations were not processed (should still be pending)
assert.Equal(t, 1, len(r.blkRootToPendingAtts[root]), "Attestations should still be pending")
assert.Equal(t, 0, len(r.cfg.attPool.UnaggregatedAttestations()), "Should not save attestation when block not in fork choice")
assert.Equal(t, 0, len(r.cfg.attPool.AggregatedAttestations()), "Should not save attestation when block not in fork choice")
require.LogsDoNotContain(t, hook, "Verified and saved pending attestations to pool")
}

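Editor's sketch of the gate the test above exercises, mirroring the condition added to processPendingAtts: pending attestations for a root are only processed once the block, its state, and its fork choice membership are all available; otherwise they stay queued for a later pass. The helper name is illustrative.

func canProcessPending(ctx context.Context, s *Service, root [32]byte) bool {
	hasBlock := s.cfg.beaconDB.HasBlock(ctx, root)
	hasState := s.cfg.beaconDB.HasState(ctx, root) || s.cfg.beaconDB.HasStateSummary(ctx, root)
	// InForkchoice keeps attestations for known-but-unviable blocks out of the pools.
	return hasBlock && hasState && s.cfg.chain.InForkchoice(root)
}
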
func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
}

// 100 Attestations per block root.
@@ -641,21 +808,9 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
r3 := [32]byte{'C'}

for i := primitives.Slot(0); i < 100; i++ {
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(i),
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: i, BeaconBlockRoot: r1[:]}}}})
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(i*2 + i),
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: i, BeaconBlockRoot: r2[:]}}}})
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(i*3 + i),
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: i, BeaconBlockRoot: r3[:]}}}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: i, BeaconBlockRoot: r1[:]}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: i, BeaconBlockRoot: r2[:]}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: i, BeaconBlockRoot: r3[:]}})
}

assert.Equal(t, 100, len(s.blkRootToPendingAtts[r1]), "Did not save pending atts")
@@ -680,26 +835,14 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {

func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
}

r1 := [32]byte{'A'}
r2 := [32]byte{'B'}
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 1,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r1[:]}}}})
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r2[:]}}}})
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r2[:]}}}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r1[:]}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r2[:]}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r2[:]}})

assert.Equal(t, 1, len(s.blkRootToPendingAtts[r1]), "Did not save pending atts")
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r2]), "Did not save pending atts")
@@ -707,15 +850,11 @@ func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {

func TestSavePendingAtts_BeyondLimit(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
}

for i := 0; i < pendingAttsLimit; i++ {
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(i),
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.Bytes32(uint64(i))}}}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.Bytes32(uint64(i))}})
}
r1 := [32]byte(bytesutil.Bytes32(0))
r2 := [32]byte(bytesutil.Bytes32(uint64(pendingAttsLimit) - 1))
@@ -724,11 +863,7 @@ func TestSavePendingAtts_BeyondLimit(t *testing.T) {
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r2]), "Did not save pending atts")

for i := pendingAttsLimit; i < pendingAttsLimit+20; i++ {
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(i),
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.Bytes32(uint64(i))}}}})
s.savePendingAtt(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.Bytes32(uint64(i))}})
}

r1 = [32]byte(bytesutil.Bytes32(uint64(pendingAttsLimit)))
@@ -738,137 +873,137 @@ func TestSavePendingAtts_BeyondLimit(t *testing.T) {
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Saved pending atts")
}

func Test_attsAreEqual_Committee(t *testing.T) {
t.Run("Phase 0 equal", func(t *testing.T) {
att1 := &ethpb.SignedAggregateAttestationAndProof{
func Test_pendingAggregatesAreEqual(t *testing.T) {
t.Run("equal", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 0}}}}
att2 := &ethpb.SignedAggregateAttestationAndProof{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
b := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 0}}}}
assert.Equal(t, true, attsAreEqual(att1, att2))
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
})
t.Run("Phase 0 not equal", func(t *testing.T) {
att1 := &ethpb.SignedAggregateAttestationAndProof{
t.Run("different version", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProofElectra{Message: &ethpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("different aggregator index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("different slot", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 0}}}}
att2 := &ethpb.SignedAggregateAttestationAndProof{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
b := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 1}}}}
assert.Equal(t, false, attsAreEqual(att1, att2))
})
t.Run("Electra equal", func(t *testing.T) {
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(0, true)
att1 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb1,
Slot: 2,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
cb2 := primitives.NewAttestationCommitteeBits()
cb2.SetBitAt(0, true)
att2 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb2,
}}}
assert.Equal(t, true, attsAreEqual(att1, att2))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("Electra not equal", func(t *testing.T) {
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(0, true)
att1 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb1,
t.Run("different committee index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
cb2 := primitives.NewAttestationCommitteeBits()
cb2.SetBitAt(1, true)
att2 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb2,
b := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 2,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, false, attsAreEqual(att1, att2))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("Single and Electra not equal", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
att1 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb,
t.Run("different aggregation bits", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
att2 := &ethpb.SignedAggregateAttestationAndProofSingle{
Message: &ethpb.AggregateAttestationAndProofSingle{
Aggregate: &ethpb.SingleAttestation{
CommitteeId: 0,
AttesterIndex: 0,
Data: &ethpb.AttestationData{},
},
},
}
assert.Equal(t, false, attsAreEqual(att1, att2))
})
t.Run("Single equal", func(t *testing.T) {
att1 := &ethpb.SignedAggregateAttestationAndProofSingle{
Message: &ethpb.AggregateAttestationAndProofSingle{
Aggregate: &ethpb.SingleAttestation{
CommitteeId: 0,
AttesterIndex: 0,
Data: &ethpb.AttestationData{},
},
},
}
att2 := &ethpb.SignedAggregateAttestationAndProofSingle{
Message: &ethpb.AggregateAttestationAndProofSingle{
Aggregate: &ethpb.SingleAttestation{
CommitteeId: 0,
AttesterIndex: 0,
Data: &ethpb.AttestationData{},
},
},
}
assert.Equal(t, true, attsAreEqual(att1, att2))
})
t.Run("Single not equal", func(t *testing.T) {
// Same AttesterIndex but different CommitteeId
att1 := &ethpb.SignedAggregateAttestationAndProofSingle{
Message: &ethpb.AggregateAttestationAndProofSingle{
Aggregate: &ethpb.SingleAttestation{
CommitteeId: 0,
AttesterIndex: 0,
Data: &ethpb.AttestationData{},
},
},
}
att2 := &ethpb.SignedAggregateAttestationAndProofSingle{
Message: &ethpb.AggregateAttestationAndProofSingle{
Aggregate: &ethpb.SingleAttestation{
CommitteeId: 1,
AttesterIndex: 0,
Data: &ethpb.AttestationData{},
},
},
}
assert.Equal(t, false, attsAreEqual(att1, att2))

// Same CommitteeId but different AttesterIndex
att2.Message.Aggregate.CommitteeId = 0
att2.Message.Aggregate.AttesterIndex = 1
assert.Equal(t, false, attsAreEqual(att1, att2))
b := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1000},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
}

func Test_pendingAttsAreEqual(t *testing.T) {
t.Run("equal Phase0", func(t *testing.T) {
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1111}}
b := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1111}}
assert.Equal(t, true, pendingAttsAreEqual(a, b))
})
t.Run("equal Electra", func(t *testing.T) {
a := &ethpb.SingleAttestation{Data: &ethpb.AttestationData{Slot: 1}, AttesterIndex: 1}
b := &ethpb.SingleAttestation{Data: &ethpb.AttestationData{Slot: 1}, AttesterIndex: 1}
assert.Equal(t, true, pendingAttsAreEqual(a, b))
})
t.Run("different version", func(t *testing.T) {
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1111}}
b := &ethpb.SingleAttestation{Data: &ethpb.AttestationData{Slot: 1}, AttesterIndex: 1}
assert.Equal(t, false, pendingAttsAreEqual(a, b))
})
t.Run("different slot", func(t *testing.T) {
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1111}}
b := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1111}}
assert.Equal(t, false, pendingAttsAreEqual(a, b))
})
t.Run("different committee index", func(t *testing.T) {
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1111}}
b := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 2}, AggregationBits: bitfield.Bitlist{0b1111}}
assert.Equal(t, false, pendingAttsAreEqual(a, b))
})
t.Run("different aggregation bits", func(t *testing.T) {
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1111}}
b := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b1000}}
assert.Equal(t, false, pendingAttsAreEqual(a, b))
})
t.Run("different attester index", func(t *testing.T) {
a := &ethpb.SingleAttestation{Data: &ethpb.AttestationData{Slot: 1}, AttesterIndex: 1}
b := &ethpb.SingleAttestation{Data: &ethpb.AttestationData{Slot: 1}, AttesterIndex: 2}
assert.Equal(t, false, pendingAttsAreEqual(a, b))
})
}

@@ -38,7 +38,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -134,7 +133,7 @@ type Service struct {
cancel context.CancelFunc
slotToPendingBlocks *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]ethpb.SignedAggregateAttAndProof
blkRootToPendingAtts map[[32]byte][]any
subHandler *subTopicHandler
pendingAttsLock sync.RWMutex
pendingQueueLock sync.RWMutex
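Note: because blkRootToPendingAtts now stores []any instead of []ethpb.SignedAggregateAttAndProof, aggregates and plain attestations share one pending queue and consumers have to type-switch when draining it. The processing side is not part of this diff; a hypothetical helper (pendingAttsForRoot is not a real function here) illustrating the pattern, using only names from the struct above, might be:

func (s *Service) pendingAttsForRoot(root [32]byte) (aggregates []ethpb.SignedAggregateAttAndProof, atts []ethpb.Att) {
	s.pendingAttsLock.RLock()
	defer s.pendingAttsLock.RUnlock()
	for _, item := range s.blkRootToPendingAtts[root] {
		switch v := item.(type) {
		case ethpb.SignedAggregateAttAndProof:
			// A pending signed aggregate waiting for its block.
			aggregates = append(aggregates, v)
		case ethpb.Att:
			// A pending unaggregated attestation waiting for its block.
			atts = append(atts, v)
		}
	}
	return aggregates, atts
}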
@@ -190,7 +189,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
dataColumnLogCh: make(chan dataColumnLogEntry, 1000),
reconstructionRandGen: rand.NewGenerator(),
}
@@ -265,7 +264,7 @@ func (s *Service) Start() {
s.cfg.p2p.AddPingMethod(s.sendPingRequest)

s.processPendingBlocksQueue()
s.processPendingAttsQueue()
s.runPendingAttsQueue()
s.maintainPeerStatuses()

if params.FuluEnabled() {
@@ -246,7 +246,7 @@ func (s *Service) validateBlockInAttestation(ctx context.Context, satt ethpb.Sig
blockRoot := bytesutil.ToBytes32(satt.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
s.savePendingAtt(satt)
s.savePendingAggregate(satt)
return false
}
return true
@@ -219,7 +219,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
attPool: attestations.NewPool(),
chain: &mock.ChainService{},
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
seenAggregatedAttestationCache: c,
}
r.initCaches()
@@ -372,7 +372,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
attestationNotifier: (&mock.ChainService{}).OperationNotifier(),
},
seenAggregatedAttestationCache: lruwrpr.New(10),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
}
r.initCaches()
@@ -105,7 +105,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
// Verify the block being voted and the processed state is in beaconDB and the block has passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(data.BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
return s.saveToPendingAttPool(att)
s.savePendingAtt(att)
}
if !s.cfg.chain.InForkchoice(blockRoot) {
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
@@ -403,31 +403,3 @@ func (s *Service) hasBlockAndState(ctx context.Context, blockRoot [32]byte) bool
hasState := hasStateSummary || s.cfg.beaconDB.HasState(ctx, blockRoot)
return hasState && s.cfg.chain.HasBlock(ctx, blockRoot)
}

func (s *Service) saveToPendingAttPool(att eth.Att) (pubsub.ValidationResult, error) {
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
if att.Version() >= version.Electra {
a, ok := att.(*eth.SingleAttestation)
// This will never fail in practice because we asserted the version
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.SingleAttestation{}, att)
}
// Even though there is no AggregateAndProof type to hold a single attestation, our design of pending atts pool
// requires to have an AggregateAndProof object, even for unaggregated attestations.
// Because of this we need to have a single attestation version of it to be able to save single attestations into the pool.
// It's not possible to convert the single attestation into an electra attestation before saving to the pool
// because crucial verification steps can't be performed without the block, and converting prior to these checks
// opens up DoS attacks.
// The AggregateAndProof object is discarded once we process the pending attestation and code paths dealing
// with "real" AggregateAndProof objects (ones that hold actual aggregates) don't use the single attestation version anywhere.
s.savePendingAtt(&eth.SignedAggregateAttestationAndProofSingle{Message: &eth.AggregateAttestationAndProofSingle{Aggregate: a}})
} else {
a, ok := att.(*eth.Attestation)
// This will never fail in practice because we asserted the version
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.Attestation{}, att)
}
s.savePendingAtt(&eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: a}})
}
return pubsub.ValidationIgnore, nil
}
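Note: the whole saveToPendingAttPool helper above is removed by the queue redesign; the caller in validateCommitteeIndexBeaconAttestation now calls s.savePendingAtt(att) directly, so unaggregated attestations no longer need to be wrapped in an AggregateAndProof container before entering the pending pool. The new savePendingAtt is not shown in this diff; a minimal sketch, assuming the Service fields from the struct change earlier and the pendingAttsAreEqual helper exercised by the tests (not the actual implementation), could be:

func (s *Service) savePendingAtt(att ethpb.Att) {
	root := bytesutil.ToBytes32(att.GetData().BeaconBlockRoot)
	s.pendingAttsLock.Lock()
	defer s.pendingAttsLock.Unlock()
	for _, item := range s.blkRootToPendingAtts[root] {
		// Skip attestations that are already queued for this block root.
		if existing, ok := item.(ethpb.Att); ok && pendingAttsAreEqual(existing, att) {
			return
		}
	}
	s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
}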
@@ -51,7 +51,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -343,7 +343,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]any),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
3
changelog/jtraglia_fix-blobs-bundle-v2-max-proofs.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fixed the max proofs in `BlobsBundleV2`.
3
changelog/potuz_double_receive_block.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Prevent a race on double `ReceiveBlock`.
3
changelog/pvl-go-1.24.6.md
Normal file
@@ -0,0 +1,3 @@
### Security

- Updated Go to version 1.24.6.
3
changelog/radek_redesign-pending-att-queue.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Redesign the pending attestation queue.
7
changelog/syjn99_persist-p2p-seqnum.md
Normal file
@@ -0,0 +1,7 @@
### Fixed

- Fixed [#15544](https://github.com/OffchainLabs/prysm/issues/15544): Persist the metadata sequence number when it is needed (e.g., when the static peer ID option is used or Fulu is enabled).

### Deprecated

- Deprecated the `p2p-metadata` flag.
3
changelog/syjn99_save-state-efficient-fulu.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Add a Fulu case for `saveStatesEfficientInternal`.
3
changelog/tt_check_pending_att.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Check that the pending block is in forkchoice before importing a pending attestation.
3
changelog/tt_duty.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fixed the Beacon API proposer duty computation for Fulu.
3
changelog/tt_opt-val-lookup.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Optimized the Beacon API validator lookup for large batch request sizes.
3
changelog/ttsao_add-capella-bellatrix-compatibility.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Add Capella-Bellatrix bid compatibility to the proposer version check.
@@ -100,7 +100,6 @@ var appFlags = []cli.Flag{
cmd.P2PMaxPeers,
cmd.P2PPrivKey,
cmd.P2PStaticID,
cmd.P2PMetadata,
cmd.P2PAllowList,
cmd.P2PDenyList,
cmd.PubsubQueueSize,

@@ -88,7 +88,6 @@ var appHelpFlagGroups = []flagGroup{
cmd.P2PHostDNS,
cmd.P2PIP,
cmd.P2PMaxPeers,
cmd.P2PMetadata,
cmd.P2PPrivKey,
cmd.P2PQUICPort,
cmd.P2PStaticID,
@@ -158,12 +158,6 @@ var (
Usage: "Enables the peer id of the node to be fixed by saving the generated network key to the default key path.",
Value: false,
}
// P2PMetadata defines a flag to specify the location of the peer metadata file.
P2PMetadata = &cli.StringFlag{
Name: "p2p-metadata",
Usage: "The file containing the metadata to communicate with other peers.",
Value: "",
}
// P2PMaxPeers defines a flag to specify the max number of peers in libp2p.
P2PMaxPeers = &cli.IntFlag{
Name: "p2p-max-peers",
@@ -79,6 +79,8 @@ type Flags struct {
SaveInvalidBlock bool // SaveInvalidBlock saves invalid block to temp.
SaveInvalidBlob bool // SaveInvalidBlob saves invalid blob to temp.

SlowDutiesProfile bool // SlowDutiesProfile enables performance profiling when GetDutiesV2 is slow.

EnableDiscoveryReboot bool // EnableDiscoveryReboot allows the node to have its local listener to be rebooted in the event of discovery issues.

// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
@@ -202,6 +204,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
cfg.SaveInvalidBlob = true
}

if ctx.IsSet(slowDutiesProfileFlag.Name) {
logEnabled(slowDutiesProfileFlag)
cfg.SlowDutiesProfile = true
}

if ctx.IsSet(disableGRPCConnectionLogging.Name) {
logDisabled(disableGRPCConnectionLogging)
cfg.DisableGRPCConnectionLogs = true
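Note: the GetDutiesV2 instrumentation that consumes this flag is not part of this diff. A minimal sketch of how the 2s threshold described in the flag usage could be applied at the call site, assuming the usual features.Get() accessor for the Flags struct above (the helper name, debugDir parameter, and profile type are hypothetical; imports: os, path/filepath, runtime/pprof, time):

func maybeProfileSlowDuties(start time.Time, debugDir string) {
	// Only act when the feature flag is enabled and the call was actually slow.
	if !features.Get().SlowDutiesProfile || time.Since(start) <= 2*time.Second {
		return
	}
	f, err := os.Create(filepath.Join(debugDir, "duties-heap.pprof"))
	if err != nil {
		return
	}
	defer f.Close()
	// Write a heap profile; CPU profiling would instead need StartCPUProfile/StopCPUProfile around the call.
	_ = pprof.Lookup("heap").WriteTo(f, 0)
}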
@@ -108,6 +108,11 @@ var (
Usage: deprecatedUsage,
Hidden: true,
}
deprecatedP2PMetadata = &cli.StringFlag{
Name: "p2p-metadata",
Usage: deprecatedUsage,
Hidden: true,
}
)

// Deprecated flags for both the beacon node and validator client.
@@ -130,6 +135,7 @@ var deprecatedFlags = []cli.Flag{
deprecatedEnableQuic,
deprecatedAttestTimely,
deprecatedDisableExperimentalState,
deprecatedP2PMetadata,
}

var upcomingDeprecation = []cli.Flag{
@@ -45,6 +45,10 @@ var (
Name: "save-invalid-blob-temp",
Usage: "Writes invalid blobs to temp directory.",
}
slowDutiesProfileFlag = &cli.BoolFlag{
Name: "slow-duties-profile",
Usage: "Enable performance profiling when GetDutiesV2 takes longer than 2s. Saves profiles to <datadir>/debug.",
}
disableGRPCConnectionLogging = &cli.BoolFlag{
Name: "disable-grpc-connection-logging",
Usage: `WARNING: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API..
@@ -232,6 +236,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
writeSSZStateTransitionsFlag,
saveInvalidBlockTempFlag,
saveInvalidBlobTempFlag,
slowDutiesProfileFlag,
disableGRPCConnectionLogging,
HoleskyTestnet,
SepoliaTestnet,
@@ -3387,8 +3387,8 @@ func (b *BlobsBundleV2) MarshalSSZTo(buf []byte) (dst []byte, err error) {
}

// Field (1) 'Proofs'
if size := len(b.Proofs); size > 524288 {
err = ssz.ErrListTooBigFn("--.Proofs", size, 524288)
if size := len(b.Proofs); size > 33554432 {
err = ssz.ErrListTooBigFn("--.Proofs", size, 33554432)
return
}
for ii := 0; ii < len(b.Proofs); ii++ {
@@ -3464,7 +3464,7 @@ func (b *BlobsBundleV2) UnmarshalSSZ(buf []byte) error {
// Field (1) 'Proofs'
{
buf = tail[o1:o2]
num, err := ssz.DivideInt2(len(buf), 48, 524288)
num, err := ssz.DivideInt2(len(buf), 48, 33554432)
if err != nil {
return err
}
@@ -3541,8 +3541,8 @@ func (b *BlobsBundleV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {

// Field (1) 'Proofs'
{
if size := len(b.Proofs); size > 524288 {
err = ssz.ErrListTooBigFn("--.Proofs", size, 524288)
if size := len(b.Proofs); size > 33554432 {
err = ssz.ErrListTooBigFn("--.Proofs", size, 33554432)
return
}
subIndx := hh.Index()
@@ -3555,7 +3555,7 @@ func (b *BlobsBundleV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}

numItems := uint64(len(b.Proofs))
hh.MerkleizeWithMixin(subIndx, numItems, 524288)
hh.MerkleizeWithMixin(subIndx, numItems, 33554432)
}

// Field (2) 'Blobs'
67
proto/engine/v1/execution_engine.pb.go
generated
@@ -1766,7 +1766,7 @@ type BlobsBundleV2 struct {
unknownFields protoimpl.UnknownFields

KzgCommitments [][]byte `protobuf:"bytes,1,rep,name=kzg_commitments,json=kzgCommitments,proto3" json:"kzg_commitments,omitempty" ssz-max:"4096" ssz-size:"?,48"`
Proofs [][]byte `protobuf:"bytes,2,rep,name=proofs,proto3" json:"proofs,omitempty" ssz-max:"524288" ssz-size:"?,48"`
Proofs [][]byte `protobuf:"bytes,2,rep,name=proofs,proto3" json:"proofs,omitempty" ssz-max:"33554432" ssz-size:"?,48"`
Blobs [][]byte `protobuf:"bytes,3,rep,name=blobs,proto3" json:"blobs,omitempty" ssz-max:"4096" ssz-size:"?,131072"`
}

@@ -1931,7 +1931,7 @@ type BlobAndProofV2 struct {
unknownFields protoimpl.UnknownFields

Blob []byte `protobuf:"bytes,1,opt,name=blob,proto3" json:"blob,omitempty" ssz-size:"131072"`
KzgProofs [][]byte `protobuf:"bytes,2,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"524288" ssz-size:"48"`
KzgProofs [][]byte `protobuf:"bytes,2,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"33554432" ssz-size:"48"`
}

func (x *BlobAndProofV2) Reset() {
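Note: the 524288 to 33554432 changes in the generated SSZ methods above follow directly from the ssz-max struct tag on these Proofs/KzgProofs fields; the SSZ code generator derives the ErrListTooBigFn guards and the MerkleizeWithMixin limit from that tag. Illustrative only, for a hypothetical proofs list, not the actual generated code:

const maxProofs = 33554432 // mirrors ssz-max:"33554432"

func validateProofs(proofs [][]byte) error {
	if len(proofs) > maxProofs {
		return fmt.Errorf("proofs list too big: %d > %d", len(proofs), maxProofs)
	}
	for i, p := range proofs {
		// ssz-size:"?,48": every element is a fixed 48-byte KZG proof.
		if len(p) != 48 {
			return fmt.Errorf("proof %d has length %d, want 48", i, len(p))
		}
	}
	return nil
}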
@@ -2375,41 +2375,42 @@ var file_proto_engine_v1_execution_engine_proto_rawDesc = []byte{
|
||||
0x34, 0x30, 0x39, 0x36, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x12, 0x2a, 0x0a, 0x05,
|
||||
0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x14, 0x8a, 0xb5, 0x18,
|
||||
0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39,
|
||||
0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0d, 0x42, 0x6c, 0x6f,
|
||||
0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x0d, 0x42, 0x6c, 0x6f,
|
||||
0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x32, 0x12, 0x39, 0x0a, 0x0f, 0x6b, 0x7a,
|
||||
0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18,
|
||||
0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0e, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
|
||||
0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18,
|
||||
0x02, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92,
|
||||
0xb5, 0x18, 0x06, 0x35, 0x32, 0x34, 0x32, 0x38, 0x38, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66,
|
||||
0x73, 0x12, 0x2a, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
|
||||
0x42, 0x14, 0x8a, 0xb5, 0x18, 0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x92, 0xb5,
|
||||
0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22, 0x26, 0x0a,
|
||||
0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x1e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52,
|
||||
0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x53, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x6e, 0x64,
|
||||
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52,
|
||||
0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x23, 0x0a, 0x09, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f,
|
||||
0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38,
|
||||
0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x61, 0x0a, 0x0e, 0x42, 0x6c,
|
||||
0x6f, 0x62, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x56, 0x32, 0x12, 0x1e, 0x0a, 0x04,
|
||||
0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06,
|
||||
0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x2f, 0x0a, 0x0a,
|
||||
0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c,
|
||||
0x42, 0x10, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x06, 0x35, 0x32, 0x34, 0x32,
|
||||
0x38, 0x38, 0x52, 0x09, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x95, 0x01,
|
||||
0x0a, 0x16, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66,
|
||||
0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f,
|
||||
0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f,
|
||||
0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa, 0x02, 0x12, 0x45, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x56, 0x31,
|
||||
0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x6e, 0x67, 0x69,
|
||||
0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18,
|
||||
0x02, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x14, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92,
|
||||
0xb5, 0x18, 0x08, 0x33, 0x33, 0x35, 0x35, 0x34, 0x34, 0x33, 0x32, 0x52, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x6f, 0x66, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20, 0x03,
|
||||
0x28, 0x0c, 0x42, 0x14, 0x8a, 0xb5, 0x18, 0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32,
|
||||
0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x22,
|
||||
0x26, 0x0a, 0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x1e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37,
|
||||
0x32, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x53, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62, 0x41,
|
||||
0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37,
|
||||
0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x23, 0x0a, 0x09, 0x6b, 0x7a, 0x67, 0x5f, 0x70,
|
||||
0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02,
|
||||
0x34, 0x38, 0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x63, 0x0a, 0x0e,
|
||||
0x42, 0x6c, 0x6f, 0x62, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x56, 0x32, 0x12, 0x1e,
|
||||
0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5,
|
||||
0x18, 0x06, 0x31, 0x33, 0x31, 0x30, 0x37, 0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x31,
|
||||
0x0a, 0x0a, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x02, 0x20, 0x03,
|
||||
0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x08, 0x33, 0x33,
|
||||
0x35, 0x35, 0x34, 0x34, 0x33, 0x32, 0x52, 0x09, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66,
|
||||
0x73, 0x42, 0x95, 0x01, 0x0a, 0x16, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
|
||||
0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x14, 0x45, 0x78,
|
||||
0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67,
|
||||
0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa,
|
||||
0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e,
|
||||
0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c,
|
||||
0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -631,7 +631,7 @@ func TestJsonMarshalUnmarshal(t *testing.T) {
BlobGasUsed: 1024,
ExcessBlobGas: 2048,
}

bundleV2 := &enginev1.BlobsBundleV2{
KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
Proofs: [][]byte{make([]byte, 48), make([]byte, 48)},
@@ -530,21 +530,6 @@ func (a *AggregateAttestationAndProofElectra) AggregateVal() Att {
return a.Aggregate
}

// Version --
func (a *AggregateAttestationAndProofSingle) Version() int {
return version.Electra
}

// IsNil --
func (a *AggregateAttestationAndProofSingle) IsNil() bool {
return a == nil || a.Aggregate == nil || a.Aggregate.IsNil()
}

// AggregateVal --
func (a *AggregateAttestationAndProofSingle) AggregateVal() Att {
return a.Aggregate
}

// Version --
func (a *SignedAggregateAttestationAndProof) Version() int {
return version.Phase0
@@ -574,18 +559,3 @@ func (a *SignedAggregateAttestationAndProofElectra) IsNil() bool {
func (a *SignedAggregateAttestationAndProofElectra) AggregateAttestationAndProof() AggregateAttAndProof {
return a.Message
}

// Version --
func (a *SignedAggregateAttestationAndProofSingle) Version() int {
return version.Electra
}

// IsNil --
func (a *SignedAggregateAttestationAndProofSingle) IsNil() bool {
return a == nil || a.Message == nil || a.Message.IsNil()
}

// AggregateAttestationAndProof --
func (a *SignedAggregateAttestationAndProofSingle) AggregateAttestationAndProof() AggregateAttAndProof {
return a.Message
}
278
proto/prysm/v1alpha1/attestation.pb.go
generated
@@ -528,124 +528,6 @@ func (x *AttestationElectra) GetCommitteeBits() github_com_prysmaticlabs_go_bitf
|
||||
return github_com_prysmaticlabs_go_bitfield.Bitvector64(nil)
|
||||
}
|
||||
|
||||
type SignedAggregateAttestationAndProofSingle struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Message *AggregateAttestationAndProofSingle `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
|
||||
}
|
||||
|
||||
func (x *SignedAggregateAttestationAndProofSingle) Reset() {
|
||||
*x = SignedAggregateAttestationAndProofSingle{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SignedAggregateAttestationAndProofSingle) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SignedAggregateAttestationAndProofSingle) ProtoMessage() {}
|
||||
|
||||
func (x *SignedAggregateAttestationAndProofSingle) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SignedAggregateAttestationAndProofSingle.ProtoReflect.Descriptor instead.
|
||||
func (*SignedAggregateAttestationAndProofSingle) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_attestation_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *SignedAggregateAttestationAndProofSingle) GetMessage() *AggregateAttestationAndProofSingle {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SignedAggregateAttestationAndProofSingle) GetSignature() []byte {
|
||||
if x != nil {
|
||||
return x.Signature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type AggregateAttestationAndProofSingle struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
AggregatorIndex github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex `protobuf:"varint,1,opt,name=aggregator_index,json=aggregatorIndex,proto3" json:"aggregator_index,omitempty" cast-type:"github.com/OffchainLabs/prysm/v6/consensus-types/primitives.ValidatorIndex"`
|
||||
Aggregate *SingleAttestation `protobuf:"bytes,3,opt,name=aggregate,proto3" json:"aggregate,omitempty"`
|
||||
SelectionProof []byte `protobuf:"bytes,2,opt,name=selection_proof,json=selectionProof,proto3" json:"selection_proof,omitempty" ssz-size:"96"`
|
||||
}
|
||||
|
||||
func (x *AggregateAttestationAndProofSingle) Reset() {
|
||||
*x = AggregateAttestationAndProofSingle{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *AggregateAttestationAndProofSingle) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AggregateAttestationAndProofSingle) ProtoMessage() {}
|
||||
|
||||
func (x *AggregateAttestationAndProofSingle) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[9]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AggregateAttestationAndProofSingle.ProtoReflect.Descriptor instead.
|
||||
func (*AggregateAttestationAndProofSingle) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_attestation_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *AggregateAttestationAndProofSingle) GetAggregatorIndex() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex {
|
||||
if x != nil {
|
||||
return x.AggregatorIndex
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex(0)
|
||||
}
|
||||
|
||||
func (x *AggregateAttestationAndProofSingle) GetAggregate() *SingleAttestation {
|
||||
if x != nil {
|
||||
return x.Aggregate
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *AggregateAttestationAndProofSingle) GetSelectionProof() []byte {
|
||||
if x != nil {
|
||||
return x.SelectionProof
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SingleAttestation struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -660,7 +542,7 @@ type SingleAttestation struct {
|
||||
func (x *SingleAttestation) Reset() {
|
||||
*x = SingleAttestation{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[10]
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -673,7 +555,7 @@ func (x *SingleAttestation) String() string {
|
||||
func (*SingleAttestation) ProtoMessage() {}
|
||||
|
||||
func (x *SingleAttestation) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[10]
|
||||
mi := &file_proto_prysm_v1alpha1_attestation_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -686,7 +568,7 @@ func (x *SingleAttestation) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use SingleAttestation.ProtoReflect.Descriptor instead.
|
||||
func (*SingleAttestation) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_attestation_proto_rawDescGZIP(), []int{10}
|
||||
return file_proto_prysm_v1alpha1_attestation_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *SingleAttestation) GetCommitteeId() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.CommitteeIndex {
|
||||
@@ -848,67 +730,39 @@ var file_proto_prysm_v1alpha1_attestation_proto_rawDesc = []byte{
|
||||
0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64,
|
||||
0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x36, 0x34, 0x8a, 0xb5, 0x18, 0x01,
|
||||
0x38, 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x42, 0x69, 0x74, 0x73,
|
||||
0x22, 0xa5, 0x01, 0x0a, 0x28, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x67, 0x67, 0x72, 0x65,
|
||||
0x67, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41,
|
||||
0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x12, 0x53, 0x0a,
|
||||
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39,
|
||||
0x22, 0xdf, 0x02, 0x0a, 0x11, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x41, 0x74, 0x74, 0x65, 0x73,
|
||||
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
|
||||
0x74, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82, 0xb5,
|
||||
0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66,
|
||||
0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f,
|
||||
0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70,
|
||||
0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x43, 0x6f,
|
||||
0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0b, 0x63, 0x6f,
|
||||
0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x49, 0x64, 0x12, 0x75, 0x0a, 0x0e, 0x61, 0x74, 0x74,
|
||||
0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x04, 0x42, 0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
|
||||
0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70,
|
||||
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75,
|
||||
0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
|
||||
0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65,
|
||||
0x78, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78,
|
||||
0x12, 0x3a, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65,
|
||||
0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x50, 0x72,
|
||||
0x6f, 0x6f, 0x66, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x98, 0x02, 0x0a, 0x22, 0x41, 0x67, 0x67,
|
||||
0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x12,
|
||||
0x79, 0x0a, 0x10, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e,
|
||||
0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61,
|
||||
0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f,
|
||||
0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f,
|
||||
0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
|
||||
0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65,
|
||||
0x67, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x46, 0x0a, 0x09, 0x61, 0x67,
|
||||
0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x41, 0x74, 0x74, 0x65,
|
||||
0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
|
||||
0x74, 0x65, 0x12, 0x2f, 0x0a, 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
|
||||
0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18,
|
||||
0x02, 0x39, 0x36, 0x52, 0x0e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
|
||||
0x6f, 0x6f, 0x66, 0x22, 0xdf, 0x02, 0x0a, 0x11, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x41, 0x74,
|
||||
0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x0c, 0x63, 0x6f, 0x6d,
|
||||
0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42,
|
||||
0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79,
|
||||
0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d,
|
||||
0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73,
|
||||
0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52,
|
||||
0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x49, 0x64, 0x12, 0x75, 0x0a, 0x0e,
|
||||
0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62,
|
||||
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
|
||||
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49,
|
||||
0x6e, 0x64, 0x65, 0x78, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e,
|
||||
0x64, 0x65, 0x78, 0x12, 0x3a, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x26, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68,
|
||||
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12,
|
||||
0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01,
|
||||
0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
|
||||
0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x9a, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
|
||||
0x68, 0x61, 0x31, 0x42, 0x10, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73,
|
||||
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65,
|
||||
0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74,
|
||||
0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68,
|
||||
0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x09,
|
||||
0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42,
|
||||
0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
|
||||
0x72, 0x65, 0x42, 0x9a, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72,
|
||||
0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
|
||||
0x42, 0x10, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79,
|
||||
0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa,
|
||||
0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76,
|
||||
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
|
||||
0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -923,7 +777,7 @@ func file_proto_prysm_v1alpha1_attestation_proto_rawDescGZIP() []byte {
|
||||
return file_proto_prysm_v1alpha1_attestation_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_prysm_v1alpha1_attestation_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
|
||||
var file_proto_prysm_v1alpha1_attestation_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_proto_prysm_v1alpha1_attestation_proto_goTypes = []interface{}{
|
||||
(*SignedAggregateAttestationAndProof)(nil), // 0: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProof
|
||||
(*AggregateAttestationAndProof)(nil), // 1: ethereum.eth.v1alpha1.AggregateAttestationAndProof
|
||||
@@ -933,27 +787,23 @@ var file_proto_prysm_v1alpha1_attestation_proto_goTypes = []interface{}{
|
||||
(*SignedAggregateAttestationAndProofElectra)(nil), // 5: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProofElectra
|
||||
(*AggregateAttestationAndProofElectra)(nil), // 6: ethereum.eth.v1alpha1.AggregateAttestationAndProofElectra
|
||||
(*AttestationElectra)(nil), // 7: ethereum.eth.v1alpha1.AttestationElectra
|
||||
(*SignedAggregateAttestationAndProofSingle)(nil), // 8: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProofSingle
|
||||
(*AggregateAttestationAndProofSingle)(nil), // 9: ethereum.eth.v1alpha1.AggregateAttestationAndProofSingle
|
||||
(*SingleAttestation)(nil), // 10: ethereum.eth.v1alpha1.SingleAttestation
|
||||
(*SingleAttestation)(nil), // 8: ethereum.eth.v1alpha1.SingleAttestation
|
||||
}
|
||||
var file_proto_prysm_v1alpha1_attestation_proto_depIdxs = []int32{
|
||||
1, // 0: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProof.message:type_name -> ethereum.eth.v1alpha1.AggregateAttestationAndProof
|
||||
2, // 1: ethereum.eth.v1alpha1.AggregateAttestationAndProof.aggregate:type_name -> ethereum.eth.v1alpha1.Attestation
|
||||
3, // 2: ethereum.eth.v1alpha1.Attestation.data:type_name -> ethereum.eth.v1alpha1.AttestationData
|
||||
4, // 3: ethereum.eth.v1alpha1.AttestationData.source:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
4, // 4: ethereum.eth.v1alpha1.AttestationData.target:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
6, // 5: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProofElectra.message:type_name -> ethereum.eth.v1alpha1.AggregateAttestationAndProofElectra
|
||||
7, // 6: ethereum.eth.v1alpha1.AggregateAttestationAndProofElectra.aggregate:type_name -> ethereum.eth.v1alpha1.AttestationElectra
|
||||
3, // 7: ethereum.eth.v1alpha1.AttestationElectra.data:type_name -> ethereum.eth.v1alpha1.AttestationData
|
||||
9, // 8: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProofSingle.message:type_name -> ethereum.eth.v1alpha1.AggregateAttestationAndProofSingle
|
||||
10, // 9: ethereum.eth.v1alpha1.AggregateAttestationAndProofSingle.aggregate:type_name -> ethereum.eth.v1alpha1.SingleAttestation
|
||||
3, // 10: ethereum.eth.v1alpha1.SingleAttestation.data:type_name -> ethereum.eth.v1alpha1.AttestationData
|
||||
11, // [11:11] is the sub-list for method output_type
|
||||
11, // [11:11] is the sub-list for method input_type
|
||||
11, // [11:11] is the sub-list for extension type_name
|
||||
11, // [11:11] is the sub-list for extension extendee
|
||||
0, // [0:11] is the sub-list for field type_name
|
||||
1, // 0: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProof.message:type_name -> ethereum.eth.v1alpha1.AggregateAttestationAndProof
|
||||
2, // 1: ethereum.eth.v1alpha1.AggregateAttestationAndProof.aggregate:type_name -> ethereum.eth.v1alpha1.Attestation
|
||||
3, // 2: ethereum.eth.v1alpha1.Attestation.data:type_name -> ethereum.eth.v1alpha1.AttestationData
|
||||
4, // 3: ethereum.eth.v1alpha1.AttestationData.source:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
4, // 4: ethereum.eth.v1alpha1.AttestationData.target:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
6, // 5: ethereum.eth.v1alpha1.SignedAggregateAttestationAndProofElectra.message:type_name -> ethereum.eth.v1alpha1.AggregateAttestationAndProofElectra
|
||||
7, // 6: ethereum.eth.v1alpha1.AggregateAttestationAndProofElectra.aggregate:type_name -> ethereum.eth.v1alpha1.AttestationElectra
|
||||
3, // 7: ethereum.eth.v1alpha1.AttestationElectra.data:type_name -> ethereum.eth.v1alpha1.AttestationData
|
||||
3, // 8: ethereum.eth.v1alpha1.SingleAttestation.data:type_name -> ethereum.eth.v1alpha1.AttestationData
|
||||
9, // [9:9] is the sub-list for method output_type
|
||||
9, // [9:9] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_proto_prysm_v1alpha1_attestation_proto_init() }
|
||||
@@ -1059,30 +909,6 @@ func file_proto_prysm_v1alpha1_attestation_proto_init() {
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_attestation_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SignedAggregateAttestationAndProofSingle); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_attestation_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*AggregateAttestationAndProofSingle); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_attestation_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SingleAttestation); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -1101,7 +927,7 @@ func file_proto_prysm_v1alpha1_attestation_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_prysm_v1alpha1_attestation_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 11,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -154,29 +154,6 @@ message AttestationElectra {
];
}

message SignedAggregateAttestationAndProofSingle {
// The aggregated attestation and selection proof itself.
AggregateAttestationAndProofSingle message = 1;

// 96 byte BLS aggregate signature signed by the aggregator over the message.
bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
}

message AggregateAttestationAndProofSingle {
// The aggregator index that submitted this aggregated attestation and proof.
uint64 aggregator_index = 1
[ (ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v6/consensus-types/"
"primitives.ValidatorIndex" ];

// The aggregated attestation that was submitted.
SingleAttestation aggregate = 3;

// 96 byte selection proof signed by the aggregator, which is the signature of
// the slot to aggregate.
bytes selection_proof = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
}

message SingleAttestation {
uint64 committee_id = 1 [ (ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v6/"
134
proto/prysm/v1alpha1/beacon_block.pb.go
generated
@@ -4865,7 +4865,7 @@ type SignedBeaconBlockContentsFulu struct {
unknownFields protoimpl.UnknownFields

Block *SignedBeaconBlockFulu `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"`
KzgProofs [][]byte `protobuf:"bytes,2,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"524288" ssz-size:"?,48"`
KzgProofs [][]byte `protobuf:"bytes,2,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"33554432" ssz-size:"?,48"`
Blobs [][]byte `protobuf:"bytes,3,rep,name=blobs,proto3" json:"blobs,omitempty" ssz-max:"4096" ssz-size:"?,131072"`
}

@@ -4983,7 +4983,7 @@ type BeaconBlockContentsFulu struct {
unknownFields protoimpl.UnknownFields

Block *BeaconBlockElectra `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"`
KzgProofs [][]byte `protobuf:"bytes,2,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"524288" ssz-size:"?,48"`
KzgProofs [][]byte `protobuf:"bytes,2,rep,name=kzg_proofs,json=kzgProofs,proto3" json:"kzg_proofs,omitempty" ssz-max:"33554432" ssz-size:"?,48"`
Blobs [][]byte `protobuf:"bytes,3,rep,name=blobs,proto3" json:"blobs,omitempty" ssz-max:"4096" ssz-size:"?,131072"`
}
@@ -6598,83 +6598,83 @@ var file_proto_prysm_v1alpha1_beacon_block_proto_rawDesc = []byte{
|
||||
0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64,
|
||||
0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09,
|
||||
0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x1d, 0x53, 0x69,
|
||||
0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xc4, 0x01, 0x0a, 0x1d, 0x53, 0x69,
|
||||
0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43,
|
||||
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x46, 0x75, 0x6c, 0x75, 0x12, 0x42, 0x0a, 0x05, 0x62,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x74, 0x68,
|
||||
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x75, 0x6c, 0x75, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12,
|
||||
0x31, 0x0a, 0x0a, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x02, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18,
|
||||
0x06, 0x35, 0x32, 0x34, 0x32, 0x38, 0x38, 0x52, 0x09, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f,
|
||||
0x33, 0x0a, 0x0a, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x02, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x14, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18,
|
||||
0x08, 0x33, 0x33, 0x35, 0x35, 0x34, 0x34, 0x33, 0x32, 0x52, 0x09, 0x6b, 0x7a, 0x67, 0x50, 0x72,
|
||||
0x6f, 0x6f, 0x66, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x14, 0x8a, 0xb5, 0x18, 0x08, 0x3f, 0x2c, 0x31, 0x33, 0x31, 0x30, 0x37,
|
||||
0x32, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73,
|
||||
0x22, 0x7e, 0x0a, 0x15, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e,
|
||||
0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x46, 0x75, 0x6c, 0x75, 0x12, 0x3f, 0x0a, 0x05, 0x62, 0x6c, 0x6f,
|
||||
0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72,
|
||||
0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
|
||||
0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6c, 0x65, 0x63,
|
||||
0x74, 0x72, 0x61, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69,
|
||||
0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a,
|
||||
0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
|
||||
0x22, 0xbb, 0x01, 0x0a, 0x17, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
|
||||
0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x46, 0x75, 0x6c, 0x75, 0x12, 0x3f, 0x0a, 0x05,
|
||||
0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
|
||||
0x68, 0x61, 0x31, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45,
|
||||
	// [raw file descriptor bytes for the regenerated Fulu beacon block messages; in this
	// descriptor the kzg_proofs ssz-max tag changes from 524288 to 33554432, matching the hunks below]
}

var (

@@ -383,181 +383,6 @@ func (a *AttestationElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	return
}

// MarshalSSZ ssz marshals the SignedAggregateAttestationAndProofSingle object
func (s *SignedAggregateAttestationAndProofSingle) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(s)
}

// MarshalSSZTo ssz marshals the SignedAggregateAttestationAndProofSingle object to a target array
func (s *SignedAggregateAttestationAndProofSingle) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'Message'
	if s.Message == nil {
		s.Message = new(AggregateAttestationAndProofSingle)
	}
	if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
		return
	}

	// Field (1) 'Signature'
	if size := len(s.Signature); size != 96 {
		err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
		return
	}
	dst = append(dst, s.Signature...)

	return
}

// UnmarshalSSZ ssz unmarshals the SignedAggregateAttestationAndProofSingle object
func (s *SignedAggregateAttestationAndProofSingle) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 440 {
		return ssz.ErrSize
	}

	// Field (0) 'Message'
	if s.Message == nil {
		s.Message = new(AggregateAttestationAndProofSingle)
	}
	if err = s.Message.UnmarshalSSZ(buf[0:344]); err != nil {
		return err
	}

	// Field (1) 'Signature'
	if cap(s.Signature) == 0 {
		s.Signature = make([]byte, 0, len(buf[344:440]))
	}
	s.Signature = append(s.Signature, buf[344:440]...)

	return err
}

// SizeSSZ returns the ssz encoded size in bytes for the SignedAggregateAttestationAndProofSingle object
func (s *SignedAggregateAttestationAndProofSingle) SizeSSZ() (size int) {
	size = 440
	return
}

// HashTreeRoot ssz hashes the SignedAggregateAttestationAndProofSingle object
func (s *SignedAggregateAttestationAndProofSingle) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(s)
}

// HashTreeRootWith ssz hashes the SignedAggregateAttestationAndProofSingle object with a hasher
func (s *SignedAggregateAttestationAndProofSingle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'Message'
	if err = s.Message.HashTreeRootWith(hh); err != nil {
		return
	}

	// Field (1) 'Signature'
	if size := len(s.Signature); size != 96 {
		err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
		return
	}
	hh.PutBytes(s.Signature)

	hh.Merkleize(indx)
	return
}

// MarshalSSZ ssz marshals the AggregateAttestationAndProofSingle object
func (a *AggregateAttestationAndProofSingle) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(a)
}

// MarshalSSZTo ssz marshals the AggregateAttestationAndProofSingle object to a target array
func (a *AggregateAttestationAndProofSingle) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'AggregatorIndex'
	dst = ssz.MarshalUint64(dst, uint64(a.AggregatorIndex))

	// Field (1) 'Aggregate'
	if a.Aggregate == nil {
		a.Aggregate = new(SingleAttestation)
	}
	if dst, err = a.Aggregate.MarshalSSZTo(dst); err != nil {
		return
	}

	// Field (2) 'SelectionProof'
	if size := len(a.SelectionProof); size != 96 {
		err = ssz.ErrBytesLengthFn("--.SelectionProof", size, 96)
		return
	}
	dst = append(dst, a.SelectionProof...)

	return
}

// UnmarshalSSZ ssz unmarshals the AggregateAttestationAndProofSingle object
func (a *AggregateAttestationAndProofSingle) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 344 {
		return ssz.ErrSize
	}

	// Field (0) 'AggregatorIndex'
	a.AggregatorIndex = github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[0:8]))

	// Field (1) 'Aggregate'
	if a.Aggregate == nil {
		a.Aggregate = new(SingleAttestation)
	}
	if err = a.Aggregate.UnmarshalSSZ(buf[8:248]); err != nil {
		return err
	}

	// Field (2) 'SelectionProof'
	if cap(a.SelectionProof) == 0 {
		a.SelectionProof = make([]byte, 0, len(buf[248:344]))
	}
	a.SelectionProof = append(a.SelectionProof, buf[248:344]...)

	return err
}

// SizeSSZ returns the ssz encoded size in bytes for the AggregateAttestationAndProofSingle object
func (a *AggregateAttestationAndProofSingle) SizeSSZ() (size int) {
	size = 344
	return
}

// HashTreeRoot ssz hashes the AggregateAttestationAndProofSingle object
func (a *AggregateAttestationAndProofSingle) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(a)
}

// HashTreeRootWith ssz hashes the AggregateAttestationAndProofSingle object with a hasher
func (a *AggregateAttestationAndProofSingle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'AggregatorIndex'
	hh.PutUint64(uint64(a.AggregatorIndex))

	// Field (1) 'Aggregate'
	if err = a.Aggregate.HashTreeRootWith(hh); err != nil {
		return
	}

	// Field (2) 'SelectionProof'
	if size := len(a.SelectionProof); size != 96 {
		err = ssz.ErrBytesLengthFn("--.SelectionProof", size, 96)
		return
	}
	hh.PutBytes(a.SelectionProof)

	hh.Merkleize(indx)
	return
}

// MarshalSSZ ssz marshals the SingleAttestation object
func (s *SingleAttestation) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(s)

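The removed methods above serialize fixed-size containers, and their hard-coded offsets decompose as 344 = 8 + 240 + 96 and 440 = 344 + 96. A minimal sketch of that arithmetic in Go (the constant names are illustrative and do not appear in the generated code):

package main

import "fmt"

func main() {
	// Sizes visible in the removed generated code; the names are illustrative only.
	const (
		aggregatorIndexSize   = 8   // uint64 validator index, buf[0:8]
		singleAttestationSize = 240 // Aggregate field, buf[8:248]
		signatureSize         = 96  // BLS signature / selection proof
	)
	aggregateAndProofSize := aggregatorIndexSize + singleAttestationSize + signatureSize // 344
	signedSize := aggregateAndProofSize + signatureSize                                  // 440
	fmt.Println(aggregateAndProofSize, signedSize)
}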
@@ -38,8 +38,8 @@ func (s *SignedBeaconBlockContentsFulu) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	}

	// Field (1) 'KzgProofs'
	if size := len(s.KzgProofs); size > 524288 {
		err = ssz.ErrListTooBigFn("--.KzgProofs", size, 524288)
	if size := len(s.KzgProofs); size > 33554432 {
		err = ssz.ErrListTooBigFn("--.KzgProofs", size, 33554432)
		return
	}
	for ii := 0; ii < len(s.KzgProofs); ii++ {
@@ -110,7 +110,7 @@ func (s *SignedBeaconBlockContentsFulu) UnmarshalSSZ(buf []byte) error {
	// Field (1) 'KzgProofs'
	{
		buf = tail[o1:o2]
		num, err := ssz.DivideInt2(len(buf), 48, 524288)
		num, err := ssz.DivideInt2(len(buf), 48, 33554432)
		if err != nil {
			return err
		}
@@ -176,8 +176,8 @@ func (s *SignedBeaconBlockContentsFulu) HashTreeRootWith(hh *ssz.Hasher) (err error) {

	// Field (1) 'KzgProofs'
	{
		if size := len(s.KzgProofs); size > 524288 {
			err = ssz.ErrListTooBigFn("--.KzgProofs", size, 524288)
		if size := len(s.KzgProofs); size > 33554432 {
			err = ssz.ErrListTooBigFn("--.KzgProofs", size, 33554432)
			return
		}
		subIndx := hh.Index()
@@ -190,7 +190,7 @@ func (s *SignedBeaconBlockContentsFulu) HashTreeRootWith(hh *ssz.Hasher) (err er
		}

		numItems := uint64(len(s.KzgProofs))
		hh.MerkleizeWithMixin(subIndx, numItems, 524288)
		hh.MerkleizeWithMixin(subIndx, numItems, 33554432)
	}

	// Field (2) 'Blobs'
@@ -356,8 +356,8 @@ func (b *BeaconBlockContentsFulu) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	}

	// Field (1) 'KzgProofs'
	if size := len(b.KzgProofs); size > 524288 {
		err = ssz.ErrListTooBigFn("--.KzgProofs", size, 524288)
	if size := len(b.KzgProofs); size > 33554432 {
		err = ssz.ErrListTooBigFn("--.KzgProofs", size, 33554432)
		return
	}
	for ii := 0; ii < len(b.KzgProofs); ii++ {
@@ -428,7 +428,7 @@ func (b *BeaconBlockContentsFulu) UnmarshalSSZ(buf []byte) error {
	// Field (1) 'KzgProofs'
	{
		buf = tail[o1:o2]
		num, err := ssz.DivideInt2(len(buf), 48, 524288)
		num, err := ssz.DivideInt2(len(buf), 48, 33554432)
		if err != nil {
			return err
		}
@@ -494,8 +494,8 @@ func (b *BeaconBlockContentsFulu) HashTreeRootWith(hh *ssz.Hasher) (err error) {

	// Field (1) 'KzgProofs'
	{
		if size := len(b.KzgProofs); size > 524288 {
			err = ssz.ErrListTooBigFn("--.KzgProofs", size, 524288)
		if size := len(b.KzgProofs); size > 33554432 {
			err = ssz.ErrListTooBigFn("--.KzgProofs", size, 33554432)
			return
		}
		subIndx := hh.Index()
@@ -508,7 +508,7 @@ func (b *BeaconBlockContentsFulu) HashTreeRootWith(hh *ssz.Hasher) (err error) {
		}

		numItems := uint64(len(b.KzgProofs))
		hh.MerkleizeWithMixin(subIndx, numItems, 524288)
		hh.MerkleizeWithMixin(subIndx, numItems, 33554432)
	}

	// Field (2) 'Blobs'

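The hunks above only widen the generated list bound for the KzgProofs field from 524288 to 33554432 elements. As a rough, self-contained sketch of what such a bound enforces while decoding a flat byte buffer (this mirrors the shape of the generated checks; the function is hypothetical and not the fastssz API):

package main

import "fmt"

// decodeProofs splits a concatenated buffer into fixed-size 48-byte KZG proofs,
// rejecting input that is misaligned or that exceeds the configured list limit.
// The name and shape are illustrative only.
func decodeProofs(buf []byte, limit int) ([][]byte, error) {
	const proofSize = 48 // same element size the generated code passes to ssz.DivideInt2
	if len(buf)%proofSize != 0 {
		return nil, fmt.Errorf("buffer length %d is not a multiple of %d", len(buf), proofSize)
	}
	n := len(buf) / proofSize
	if n > limit {
		return nil, fmt.Errorf("%d proofs exceed the list limit %d", n, limit)
	}
	out := make([][]byte, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, buf[i*proofSize:(i+1)*proofSize])
	}
	return out, nil
}

func main() {
	proofs, err := decodeProofs(make([]byte, 96), 33554432)
	fmt.Println(len(proofs), err) // 2 <nil>
}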
@@ -25,7 +25,7 @@ mainnet = {
    "extra_data.size": "32",
    "max_blobs_per_block.size": "6",
    "max_blob_commitments.size": "4096",
    "max_cell_proofs_length.size": "524288", # CELLS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK
    "max_cell_proofs_length.size": "33554432", # FIELD_ELEMENTS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK
    "kzg_commitment_inclusion_proof_depth.size": "17",
    "max_withdrawal_requests_per_payload.size": "16",
    "max_deposit_requests_per_payload.size": "8192",
@@ -64,7 +64,7 @@ minimal = {
    "extra_data.size": "32",
    "max_blobs_per_block.size": "6",
    "max_blob_commitments.size": "32",
    "max_cell_proofs_length.size": "524288", # CELLS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK
    "max_cell_proofs_length.size": "33554432", # FIELD_ELEMENTS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK
    "kzg_commitment_inclusion_proof_depth.size": "10",
    "max_withdrawal_requests_per_payload.size": "16",
    "max_deposit_requests_per_payload.size": "8192",
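For reference, the two preset values follow from the formulas in the trailing comments. A minimal sketch of the arithmetic, assuming the usual mainnet constants MAX_BLOB_COMMITMENTS_PER_BLOCK = 4096, CELLS_PER_EXT_BLOB = 128, and FIELD_ELEMENTS_PER_EXT_BLOB = 8192 (these constants are assumptions; they are not defined in this file):

package main

import "fmt"

func main() {
	const (
		maxBlobCommitmentsPerBlock = 4096 // "max_blob_commitments.size" above (mainnet)
		cellsPerExtBlob            = 128  // assumed preset value
		fieldElementsPerExtBlob    = 8192 // assumed preset value
	)
	fmt.Println(cellsPerExtBlob * maxBlobCommitmentsPerBlock)          // 524288
	fmt.Println(fieldElementsPerExtBlob * maxBlobCommitmentsPerBlock)  // 33554432
}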