Compare commits

...

6 Commits

Author SHA1 Message Date
nisdas
764aab3822 add it 2024-11-28 14:01:34 +08:00
Potuz
f27092fa91 Check if validator exists when applying pending deposit (#14666)
* Check if validator exists when applying pending deposit

* Add test TestProcessPendingDepositsMultiplesSameDeposits

* keep a map of added pubkeys

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2024-11-25 20:31:02 +00:00
Radosław Kapka
67cef41cbf Better attestation packing for Electra (#14534)
* Better attestation packing for Electra

* changelog <3

* bzl

* sort before constructing on-chain aggregates

* move ctx to top

* extract Electra logic and add comments

* benchmark
2024-11-25 18:41:51 +00:00
Manu NALEPA
258908d50e Diverse log improvements, comment additions and small refactors. (#14658)
* `logProposedBlock`: Fix log.

Before, the value of the pointer to the function was printed for `blockNumber`
instead of the block number itself.
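
An illustrative sketch of this bug pattern in Go (assuming a logrus-style logger and a hypothetical `block.Number()` accessor; not the actual Prysm code):

// Buggy: the method value (a function pointer) gets formatted,
// producing output like "blockNumber=0x4a6f08".
log.WithField("blockNumber", block.Number).Info("Proposed block")
// Fixed: call the accessor so the actual block number is logged.
log.WithField("blockNumber", block.Number()).Info("Proposed block")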

* Add blob prefix before sidecars.

In order to prepare for data columns sidecars.

* Verification: Add log prefix.

* `validate_aggregate_proof.go`: Add comments.

* `blobSubscriber`: Fix error message.

* `registerHandlers`: Rename, add comments, and a little refactor.

* Remove duplicate `pb` vs. `ethpb` import.

* `rpc_ping.go`: Factorize / Add comments.

* `blobSidecarsByRangeRPCHandler`: Do not write error response if rate limited.

* `sendRecentBeaconBlocksRequest` ==> `sendBeaconBlocksRequest`.

The function itself does not know anything about the age of the beacon block.

* `beaconBlocksByRangeRPCHandler`: Refactor and add logs.

* `retentionSeconds` ==> `retentionDuration`.

* `oneEpoch`: Add documentation.

* `TestProposer_ProposeBlock_OK`: Improve error message.

* `getLocalPayloadFromEngine`: Tiny refactor.

* `eth1DataMajorityVote`: Improve log message.

* Implement `ConvertPeerIDToNodeID` and do not generate a random private key if peerDAS is enabled.

* Remove useless `_`.

* `parsePeersEnr`: Fix error messages.

* `ShouldOverrideFCU`: Fix error message.

* `blocks.go`: Minor comments improvements.

* CI: Upgrade golangci and enable spancheck.

* `ConvertPeerIDToNodeID`: Add godoc comment.

* Update CHANGELOG.md

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/initial-sync/service_test.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/rpc_beacon_blocks_by_range.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/rpc_blob_sidecars_by_range.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/rpc_ping.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Remove trailing whitespace in godoc.

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-11-25 09:22:33 +00:00
Manu NALEPA
415a42a4aa Add proto for DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarsByRangeRequest and MetadataV2. (#14649)
* Add data column sidecars proto.

* Fix Terence's comment.

* Re-add everything.
2024-11-22 09:50:06 +00:00
kasey
25eae3acda Fix eventstream electra atts (#14655)
* fix handler for electra atts

* same fix for attestation_slashing

* changelog

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-11-22 03:04:00 +00:00
64 changed files with 2067 additions and 345 deletions

View File

@@ -54,7 +54,7 @@ jobs:
- name: Golangci-lint
uses: golangci/golangci-lint-action@v5
with:
version: v1.55.2
version: v1.56.1
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:

View File

@@ -73,6 +73,7 @@ linters:
- promlinter
- protogetter
- revive
- spancheck
- staticcheck
- stylecheck
- tagalign

View File

@@ -26,6 +26,8 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Validator REST mode Electra block support.
- Added validator index label to `validator_statuses` metric.
- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
### Changed
@@ -60,6 +62,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
### Deprecated
@@ -88,6 +91,8 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Fix panic in validator REST mode when checking status after removing all keys
- Fix panic on attestation interface since we call data before validation
- corrects nil check on some interface attestation types
- Temporary solution to handling Electra attestation and attester_slashing events. [PR](https://github.com/prysmaticlabs/prysm/pull/14655)
- Diverse log improvements and comment additions.
### Security

View File

@@ -386,8 +386,14 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
return errors.Wrap(err, "batch signature verification failed")
}
pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
// Process each deposit individually
for _, pendingDeposit := range pendingDeposits {
_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
if !found {
pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
}
validSignature := allSignaturesVerified
// If batch verification failed, check the individual deposit signature
@@ -405,9 +411,16 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
// Add validator to the registry if the signature is valid
if validSignature {
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
if err != nil {
return errors.Wrap(err, "failed to add validator to registry")
if found {
index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
return errors.Wrap(err, "could not increase balance")
}
} else {
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
if err != nil {
return errors.Wrap(err, "failed to add validator to registry")
}
}
}
}
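
Condensed, the resulting control flow of the fix is roughly the following (a sketch, not the verbatim patch; the computation of `validSignature`, i.e. the batch result or an individual re-check when batch verification fails, is elided here):

pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
for _, pendingDeposit := range pendingDeposits {
	key := bytesutil.ToBytes48(pendingDeposit.PublicKey)
	// found reports whether this pubkey already appeared earlier in the batch.
	_, found := pubKeyMap[key]
	if !found {
		pubKeyMap[key] = struct{}{}
	}
	if !validSignature {
		continue
	}
	if found {
		// Duplicate deposit within the batch: top up the existing validator's balance.
		index, _ := state.ValidatorIndexByPubkey(key)
		if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
			return errors.Wrap(err, "could not increase balance")
		}
	} else {
		// First deposit for this pubkey: add a new validator to the registry.
		if err := AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount); err != nil {
			return errors.Wrap(err, "failed to add validator to registry")
		}
	}
}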

View File

@@ -22,6 +22,40 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
st := stateWithActiveBalanceETH(t, 1000)
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
validators := st.Validators()
sk, err := bls.RandKey()
require.NoError(t, err)
for i := 0; i < len(deps); i += 1 {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
validators[i].PublicKey = sk.PublicKey().Marshal()
validators[i].WithdrawalCredentials = wc
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
}
require.NoError(t, st.SetPendingDeposits(deps))
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
require.NoError(t, err)
val := st.Validators()
seenPubkeys := make(map[string]struct{})
for i := 0; i < len(val); i += 1 {
if len(val[i].PublicKey) == 0 {
continue
}
_, ok := seenPubkeys[string(val[i].PublicKey)]
if ok {
t.Fatalf("duplicated pubkeys")
} else {
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
}
}
}
func TestProcessPendingDeposits(t *testing.T) {
tests := []struct {
name string
@@ -285,7 +319,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
invalidDep := &eth.PendingDeposit{}
invalidDep := &eth.PendingDeposit{PublicKey: make([]byte, 48)}
// have a combination of valid and invalid deposits
deps := []*eth.PendingDeposit{validDep, invalidDep}
require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))

View File

@@ -23,10 +23,10 @@ import (
bolt "go.etcd.io/bbolt"
)
// used to represent errors for inconsistent slot ranges.
// Used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
// Block retrieval by root.
// Block retrieval by root. Return nil if block is not found.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
defer span.End()

View File

@@ -53,7 +53,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// Only reorg blocks that arrive late
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
log.WithError(err).Error("could not check if block arrived early")
log.WithError(err).Error("Could not check if block arrived early")
return
}
if early {

View File

@@ -7,6 +7,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/operations/attestations:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],

View File

@@ -3,13 +3,17 @@ package mock
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
var _ attestations.Pool = &PoolMock{}
// PoolMock --
type PoolMock struct {
AggregatedAtts []*ethpb.Attestation
AggregatedAtts []ethpb.Att
UnaggregatedAtts []ethpb.Att
}
// AggregateUnaggregatedAttestations --
@@ -23,18 +27,18 @@ func (*PoolMock) AggregateUnaggregatedAttestationsBySlotIndex(_ context.Context,
}
// SaveAggregatedAttestation --
func (*PoolMock) SaveAggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveAggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveAggregatedAttestations --
func (m *PoolMock) SaveAggregatedAttestations(atts []*ethpb.Attestation) error {
func (m *PoolMock) SaveAggregatedAttestations(atts []ethpb.Att) error {
m.AggregatedAtts = append(m.AggregatedAtts, atts...)
return nil
}
// AggregatedAttestations --
func (m *PoolMock) AggregatedAttestations() []*ethpb.Attestation {
func (m *PoolMock) AggregatedAttestations() []ethpb.Att {
return m.AggregatedAtts
}
@@ -43,13 +47,18 @@ func (*PoolMock) AggregatedAttestationsBySlotIndex(_ context.Context, _ primitiv
panic("implement me")
}
// AggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) AggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}
// DeleteAggregatedAttestation --
func (*PoolMock) DeleteAggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteAggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
// HasAggregatedAttestation --
func (*PoolMock) HasAggregatedAttestation(_ *ethpb.Attestation) (bool, error) {
func (*PoolMock) HasAggregatedAttestation(_ ethpb.Att) (bool, error) {
panic("implement me")
}
@@ -59,18 +68,19 @@ func (*PoolMock) AggregatedAttestationCount() int {
}
// SaveUnaggregatedAttestation --
func (*PoolMock) SaveUnaggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveUnaggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveUnaggregatedAttestations --
func (*PoolMock) SaveUnaggregatedAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
func (m *PoolMock) SaveUnaggregatedAttestations(atts []ethpb.Att) error {
m.UnaggregatedAtts = append(m.UnaggregatedAtts, atts...)
return nil
}
// UnaggregatedAttestations --
func (*PoolMock) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
panic("implement me")
func (m *PoolMock) UnaggregatedAttestations() ([]ethpb.Att, error) {
return m.UnaggregatedAtts, nil
}
// UnaggregatedAttestationsBySlotIndex --
@@ -78,8 +88,13 @@ func (*PoolMock) UnaggregatedAttestationsBySlotIndex(_ context.Context, _ primit
panic("implement me")
}
// UnaggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) UnaggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}
// DeleteUnaggregatedAttestation --
func (*PoolMock) DeleteUnaggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteUnaggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
@@ -94,42 +109,42 @@ func (*PoolMock) UnaggregatedAttestationCount() int {
}
// SaveBlockAttestation --
func (*PoolMock) SaveBlockAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveBlockAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveBlockAttestations --
func (*PoolMock) SaveBlockAttestations(_ []*ethpb.Attestation) error {
func (*PoolMock) SaveBlockAttestations(_ []ethpb.Att) error {
panic("implement me")
}
// BlockAttestations --
func (*PoolMock) BlockAttestations() []*ethpb.Attestation {
func (*PoolMock) BlockAttestations() []ethpb.Att {
panic("implement me")
}
// DeleteBlockAttestation --
func (*PoolMock) DeleteBlockAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteBlockAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveForkchoiceAttestation --
func (*PoolMock) SaveForkchoiceAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveForkchoiceAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveForkchoiceAttestations --
func (*PoolMock) SaveForkchoiceAttestations(_ []*ethpb.Attestation) error {
func (*PoolMock) SaveForkchoiceAttestations(_ []ethpb.Att) error {
panic("implement me")
}
// ForkchoiceAttestations --
func (*PoolMock) ForkchoiceAttestations() []*ethpb.Attestation {
func (*PoolMock) ForkchoiceAttestations() []ethpb.Att {
panic("implement me")
}
// DeleteForkchoiceAttestation --
func (*PoolMock) DeleteForkchoiceAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteForkchoiceAttestation(_ ethpb.Att) error {
panic("implement me")
}

View File

@@ -75,6 +75,8 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",

View File

@@ -165,14 +165,14 @@ func (s *Service) pubsubOptions() []pubsub.Option {
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
addrs, err := PeersFromStringAddrs(peers)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %w", err)
return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %w", err)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
return nil, fmt.Errorf("converting peers raw ENRs into multiaddresses resulted in an empty list")
}
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %w", err)
return nil, fmt.Errorf("cannot convert peers multiaddresses into AddrInfos: %w", err)
}
return directAddrInfos, nil
}

View File

@@ -27,148 +27,148 @@ func NewFuzzTestP2P() *FakeP2P {
}
// Encoding -- fake.
func (_ *FakeP2P) Encoding() encoder.NetworkEncoding {
func (*FakeP2P) Encoding() encoder.NetworkEncoding {
return &encoder.SszNetworkEncoder{}
}
// AddConnectionHandler -- fake.
func (_ *FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) {
func (*FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) {
}
// AddDisconnectionHandler -- fake.
func (_ *FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
func (*FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
}
// AddPingMethod -- fake.
func (_ *FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
func (*FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
}
// PeerID -- fake.
func (_ *FakeP2P) PeerID() peer.ID {
func (*FakeP2P) PeerID() peer.ID {
return "fake"
}
// ENR returns the enr of the local peer.
func (_ *FakeP2P) ENR() *enr.Record {
func (*FakeP2P) ENR() *enr.Record {
return new(enr.Record)
}
// DiscoveryAddresses -- fake
func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
}
// FindPeersWithSubnet mocks the p2p func.
func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
func (*FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
return false, nil
}
// RefreshENR mocks the p2p func.
func (_ *FakeP2P) RefreshENR() {}
func (*FakeP2P) RefreshENR() {}
// LeaveTopic -- fake.
func (_ *FakeP2P) LeaveTopic(_ string) error {
func (*FakeP2P) LeaveTopic(_ string) error {
return nil
}
// Metadata -- fake.
func (_ *FakeP2P) Metadata() metadata.Metadata {
func (*FakeP2P) Metadata() metadata.Metadata {
return nil
}
// Peers -- fake.
func (_ *FakeP2P) Peers() *peers.Status {
func (*FakeP2P) Peers() *peers.Status {
return nil
}
// PublishToTopic -- fake.
func (_ *FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
func (*FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
return nil
}
// Send -- fake.
func (_ *FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) {
func (*FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) {
return nil, nil
}
// PubSub -- fake.
func (_ *FakeP2P) PubSub() *pubsub.PubSub {
func (*FakeP2P) PubSub() *pubsub.PubSub {
return nil
}
// MetadataSeq -- fake.
func (_ *FakeP2P) MetadataSeq() uint64 {
func (*FakeP2P) MetadataSeq() uint64 {
return 0
}
// SetStreamHandler -- fake.
func (_ *FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) {
func (*FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) {
}
// SubscribeToTopic -- fake.
func (_ *FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) {
func (*FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) {
return nil, nil
}
// JoinTopic -- fake.
func (_ *FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) {
func (*FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) {
return nil, nil
}
// Host -- fake.
func (_ *FakeP2P) Host() host.Host {
func (*FakeP2P) Host() host.Host {
return nil
}
// Disconnect -- fake.
func (_ *FakeP2P) Disconnect(_ peer.ID) error {
func (*FakeP2P) Disconnect(_ peer.ID) error {
return nil
}
// Broadcast -- fake.
func (_ *FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
func (*FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
return nil
}
// BroadcastAttestation -- fake.
func (_ *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
func (*FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
return nil
}
// BroadcastSyncCommitteeMessage -- fake.
func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
func (*FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
return nil
}
// BroadcastBlob -- fake.
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
func (*FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
return nil
}
// InterceptPeerDial -- fake.
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
func (*FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
return true
}
// InterceptAddrDial -- fake.
func (_ *FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
func (*FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
return true
}
// InterceptAccept -- fake.
func (_ *FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
func (*FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
return true
}
// InterceptSecured -- fake.
func (_ *FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
func (*FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
return true
}
// InterceptUpgraded -- fake.
func (_ *FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
return true, 0
}

View File

@@ -18,12 +18,12 @@ type MockHost struct {
}
// ID --
func (_ *MockHost) ID() peer.ID {
func (*MockHost) ID() peer.ID {
return ""
}
// Peerstore --
func (_ *MockHost) Peerstore() peerstore.Peerstore {
func (*MockHost) Peerstore() peerstore.Peerstore {
return nil
}
@@ -33,46 +33,46 @@ func (m *MockHost) Addrs() []ma.Multiaddr {
}
// Network --
func (_ *MockHost) Network() network.Network {
func (*MockHost) Network() network.Network {
return nil
}
// Mux --
func (_ *MockHost) Mux() protocol.Switch {
func (*MockHost) Mux() protocol.Switch {
return nil
}
// Connect --
func (_ *MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
func (*MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
return nil
}
// SetStreamHandler --
func (_ *MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
func (*MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
// SetStreamHandlerMatch --
func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
func (*MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
}
// RemoveStreamHandler --
func (_ *MockHost) RemoveStreamHandler(_ protocol.ID) {}
func (*MockHost) RemoveStreamHandler(_ protocol.ID) {}
// NewStream --
func (_ *MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) {
func (*MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) {
return nil, nil
}
// Close --
func (_ *MockHost) Close() error {
func (*MockHost) Close() error {
return nil
}
// ConnManager --
func (_ *MockHost) ConnManager() connmgr.ConnManager {
func (*MockHost) ConnManager() connmgr.ConnManager {
return nil
}
// EventBus --
func (_ *MockHost) EventBus() event.Bus {
func (*MockHost) EventBus() event.Bus {
return nil
}

View File

@@ -12,10 +12,15 @@ import (
"path"
"time"
"github.com/btcsuite/btcd/btcec/v2"
gCrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/io/file"
@@ -62,6 +67,7 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
}
if defaultKeysExist {
log.WithField("filePath", defaultKeyPath).Info("Reading static P2P private key from a file. To generate a new random private key at every start, please remove this file.")
return privKeyFromFile(defaultKeyPath)
}
@@ -71,8 +77,8 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
return nil, err
}
// If the StaticPeerID flag is not set, return the private key.
if !cfg.StaticPeerID {
// If the StaticPeerID flag is not set and if peerDAS is not enabled, return the private key.
if !(cfg.StaticPeerID || params.PeerDASEnabled()) {
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
}
@@ -89,7 +95,7 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
return nil, err
}
log.Info("Wrote network key to file")
log.WithField("path", defaultKeyPath).Info("Wrote network key to file")
// Read the key from the defaultKeyPath file just written
// for the strongest guarantee that the next start will be the same as this one.
return privKeyFromFile(defaultKeyPath)
@@ -173,3 +179,27 @@ func verifyConnectivity(addr string, port uint, protocol string) {
}
}
}
// ConvertPeerIDToNodeID converts a peer ID (libp2p) to a node ID (devp2p).
func ConvertPeerIDToNodeID(pid peer.ID) (enode.ID, error) {
// Retrieve the public key object of the peer under "crypto" form.
pubkeyObjCrypto, err := pid.ExtractPublicKey()
if err != nil {
return [32]byte{}, errors.Wrapf(err, "extract public key from peer ID `%s`", pid)
}
// Extract the bytes representation of the public key.
compressedPubKeyBytes, err := pubkeyObjCrypto.Raw()
if err != nil {
return [32]byte{}, errors.Wrap(err, "public key raw")
}
// Retrieve the public key object of the peer under "SECP256K1" form.
pubKeyObjSecp256k1, err := btcec.ParsePubKey(compressedPubKeyBytes)
if err != nil {
return [32]byte{}, errors.Wrap(err, "parse public key")
}
newPubkey := &ecdsa.PublicKey{Curve: gCrypto.S256(), X: pubKeyObjSecp256k1.X(), Y: pubKeyObjSecp256k1.Y()}
return enode.PubkeyToIDV4(newPubkey), nil
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -64,3 +65,19 @@ func TestSerializeENR(t *testing.T) {
assert.ErrorContains(t, "could not serialize nil record", err)
})
}
func TestConvertPeerIDToNodeID(t *testing.T) {
const (
peerIDStr = "16Uiu2HAmRrhnqEfybLYimCiAYer2AtZKDGamQrL1VwRCyeh2YiFc"
expectedNodeIDStr = "eed26c5d2425ab95f57246a5dca87317c41cacee4bcafe8bbe57e5965527c290"
)
peerID, err := peer.Decode(peerIDStr)
require.NoError(t, err)
actualNodeID, err := ConvertPeerIDToNodeID(peerID)
require.NoError(t, err)
actualNodeIDStr := actualNodeID.String()
require.Equal(t, expectedNodeIDStr, actualNodeIDStr)
}

View File

@@ -79,6 +79,7 @@ func TestGetSpec(t *testing.T) {
config.DenebForkEpoch = 105
config.ElectraForkVersion = []byte("ElectraForkVersion")
config.ElectraForkEpoch = 107
config.Eip7594ForkEpoch = 109
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -189,7 +190,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 155, len(data))
assert.Equal(t, 156, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -267,6 +268,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x"+hex.EncodeToString([]byte("ElectraForkVersion")), v)
case "ELECTRA_FORK_EPOCH":
assert.Equal(t, "107", v)
case "EIP7594_FORK_EPOCH":
assert.Equal(t, "109", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":

View File

@@ -451,14 +451,20 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
return jsonMarshalReader(eventName, att)
}, nil
case *operation.UnAggregatedAttReceivedData:
att, ok := v.Attestation.(*eth.Attestation)
if !ok {
switch att := v.Attestation.(type) {
case *eth.Attestation:
return func() io.Reader {
att := structs.AttFromConsensus(att)
return jsonMarshalReader(eventName, att)
}, nil
case *eth.AttestationElectra:
return func() io.Reader {
att := structs.AttElectraFromConsensus(att)
return jsonMarshalReader(eventName, att)
}, nil
default:
return nil, errors.Wrapf(errUnhandledEventData, "Unexpected type %T for the .Attestation field of UnAggregatedAttReceivedData", v.Attestation)
}
return func() io.Reader {
att := structs.AttFromConsensus(att)
return jsonMarshalReader(eventName, att)
}, nil
case *operation.ExitReceivedData:
return func() io.Reader {
return jsonMarshalReader(eventName, structs.SignedExitFromConsensus(v.Exit))
@@ -483,13 +489,18 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
})
}, nil
case *operation.AttesterSlashingReceivedData:
slashing, ok := v.AttesterSlashing.(*eth.AttesterSlashing)
if !ok {
switch slashing := v.AttesterSlashing.(type) {
case *eth.AttesterSlashing:
return func() io.Reader {
return jsonMarshalReader(eventName, structs.AttesterSlashingFromConsensus(slashing))
}, nil
case *eth.AttesterSlashingElectra:
return func() io.Reader {
return jsonMarshalReader(eventName, structs.AttesterSlashingElectraFromConsensus(slashing))
}, nil
default:
return nil, errors.Wrapf(errUnhandledEventData, "Unexpected type %T for the .AttesterSlashing field of AttesterSlashingReceivedData", v.AttesterSlashing)
}
return func() io.Reader {
return jsonMarshalReader(eventName, structs.AttesterSlashingFromConsensus(slashing))
}, nil
case *operation.ProposerSlashingReceivedData:
return func() io.Reader {
return jsonMarshalReader(eventName, structs.ProposerSlashingFromConsensus(v.ProposerSlashing))

View File

@@ -212,7 +212,9 @@ go_test(
embed = [":go_default_library"],
eth_network = "minimal",
tags = ["minimal"],
deps = common_deps,
deps = common_deps + [
"//beacon-chain/operations/attestations/mock:go_default_library",
],
)
go_test(

View File

@@ -339,7 +339,7 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
if err != nil {
return nil, nil, errors.Wrap(err, "unblind sidecars failed")
return nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
}
return copiedBlock, sidecars, nil

View File

@@ -91,14 +91,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
var attsForInclusion proposerAtts
if postElectra {
// TODO: hack for Electra devnet-1, take only one aggregate per ID
// (which essentially means one aggregate for an attestation_data+committee combination
topAggregates := make([]ethpb.Att, 0)
for _, v := range attsById {
topAggregates = append(topAggregates, v[0])
}
attsForInclusion, err = computeOnChainAggregate(topAggregates)
attsForInclusion, err = onChainAggregates(attsById)
if err != nil {
return nil, err
}
@@ -113,14 +106,68 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
if err != nil {
return nil, err
}
sorted, err := deduped.sort()
if err != nil {
return nil, err
var sorted proposerAtts
if postElectra {
sorted, err = deduped.sortOnChainAggregates()
if err != nil {
return nil, err
}
} else {
sorted, err = deduped.sort()
if err != nil {
return nil, err
}
}
atts = sorted.limitToMaxAttestations()
return vs.filterAttestationBySignature(ctx, atts, latestState)
}
func onChainAggregates(attsById map[attestation.Id][]ethpb.Att) (proposerAtts, error) {
var result proposerAtts
var err error
// When constructing on-chain aggregates, we want to combine the most profitable
// aggregate for each ID, then the second most profitable, and so on and so forth.
// Because of this we sort attestations at the beginning.
for id, as := range attsById {
attsById[id], err = proposerAtts(as).sort()
if err != nil {
return nil, err
}
}
// We construct the first on-chain aggregate by taking the first aggregate for each ID.
// We construct the second on-chain aggregate by taking the second aggregate for each ID.
// We continue doing this until we run out of aggregates.
idx := 0
for {
topAggregates := make([]ethpb.Att, 0, len(attsById))
for _, as := range attsById {
// In case there are no more aggregates for an ID, we skip that ID.
if len(as) > idx {
topAggregates = append(topAggregates, as[idx])
}
}
// Once there are no more aggregates for any ID, we are done.
if len(topAggregates) == 0 {
break
}
onChainAggs, err := computeOnChainAggregate(topAggregates)
if err != nil {
return nil, err
}
result = append(result, onChainAggs...)
idx++
}
return result, nil
}
// filter separates attestation list into two groups: valid and invalid attestations.
// The first group passes all the required checks for attestation to be considered for proposing.
// And attestations from the second group should be deleted.
@@ -223,6 +270,14 @@ func (a proposerAtts) sort() (proposerAtts, error) {
return a.sortBySlotAndCommittee()
}
func (a proposerAtts) sortOnChainAggregates() (proposerAtts, error) {
if len(a) < 2 {
return a, nil
}
return a.sortByProfitabilityUsingMaxCover()
}
// Separate attestations by slot, as slot number takes higher precedence when sorting.
// Also separate by committee index because maxcover will prefer attestations for the same
// committee with disjoint bits over attestations for different committees with overlapping
@@ -231,7 +286,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
type slotAtts struct {
candidates map[primitives.CommitteeIndex]proposerAtts
selected map[primitives.CommitteeIndex]proposerAtts
leftover map[primitives.CommitteeIndex]proposerAtts
}
var slots []primitives.Slot
@@ -250,7 +304,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
var err error
for _, sa := range attsBySlot {
sa.selected = make(map[primitives.CommitteeIndex]proposerAtts)
sa.leftover = make(map[primitives.CommitteeIndex]proposerAtts)
for ci, committeeAtts := range sa.candidates {
sa.selected[ci], err = committeeAtts.sortByProfitabilityUsingMaxCover_committeeAwarePacking()
if err != nil {
@@ -266,9 +319,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
for _, slot := range slots {
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].selected)...)
}
for _, slot := range slots {
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].leftover)...)
}
return sortedAtts, nil
}
@@ -287,15 +337,11 @@ func (a proposerAtts) sortByProfitabilityUsingMaxCover_committeeAwarePacking() (
return nil, err
}
}
// Add selected candidates on top, those that are not selected - append at bottom.
selectedKeys, _, err := aggregation.MaxCover(candidates, len(candidates), true /* allowOverlaps */)
if err != nil {
log.WithError(err).Debug("MaxCover aggregation failed")
return a, nil
}
// Pick selected attestations first, leftover attestations will be appended at the end.
// Both lists will be sorted by number of bits set.
selected := make(proposerAtts, selectedKeys.Count())
for i, key := range selectedKeys.BitIndices() {
selected[i] = a[key]

View File

@@ -13,6 +13,9 @@ import (
// computeOnChainAggregate constructs a final aggregate from a list of network aggregates with equal attestation data.
// It assumes that each network aggregate has exactly one committee bit set.
//
// Our implementation allows passing aggregates for different attestation data, in which case the function will return
// one final aggregate per attestation data.
//
// Spec definition:
//
// def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation:

View File

@@ -3,16 +3,21 @@ package validator
import (
"bytes"
"context"
"math/rand"
"sort"
"strconv"
"testing"
"github.com/prysmaticlabs/go-bitfield"
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -680,6 +685,212 @@ func Test_packAttestations(t *testing.T) {
})
}
func Test_packAttestations_ElectraOnChainAggregates(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
key, err := blst.RandKey()
require.NoError(t, err)
sig := key.Sign([]byte{'X'})
cb0 := primitives.NewAttestationCommitteeBits()
cb0.SetBitAt(0, true)
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(1, true)
data0 := util.HydrateAttestationData(&ethpb.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte{'0'}, 32)})
data1 := util.HydrateAttestationData(&ethpb.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte{'1'}, 32)})
// Glossary:
// - Single Aggregate: aggregate with exactly one committee bit set, from which an On-Chain Aggregate is constructed
// - On-Chain Aggregate: final aggregate packed into a block
//
// We construct the following number of single aggregates:
// - data_root_0 and committee_index_0: 3 single aggregates
// - data_root_0 and committee_index_1: 2 single aggregates
// - data_root_1 and committee_index_0: 1 single aggregate
// - data_root_1 and committee_index_1: 3 single aggregates
//
// Because the function tries to aggregate attestations, we have to create attestations which are not aggregatable
// and are not redundant when using MaxCover.
// The function should also sort attestations by ID before computing the On-Chain Aggregate, so we want unsorted aggregation bits
// to test the sorting part.
//
// The result should be the following six on-chain aggregates:
// - for data_root_0 combining the most profitable aggregate for each committee
// - for data_root_0 combining the second most profitable aggregate for each committee
// - for data_root_0 constructed from the single aggregate at index 2 for committee_index_0
// - for data_root_1 combining the most profitable aggregate for each committee
// - for data_root_1 constructed from the single aggregate at index 1 for committee_index_1
// - for data_root_1 constructed from the single aggregate at index 2 for committee_index_1
d0_c0_a1 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1000011},
CommitteeBits: cb0,
Data: data0,
Signature: sig.Marshal(),
}
d0_c0_a2 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1100101},
CommitteeBits: cb0,
Data: data0,
Signature: sig.Marshal(),
}
d0_c0_a3 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1111000},
CommitteeBits: cb0,
Data: data0,
Signature: sig.Marshal(),
}
d0_c1_a1 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1111100},
CommitteeBits: cb1,
Data: data0,
Signature: sig.Marshal(),
}
d0_c1_a2 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1001111},
CommitteeBits: cb1,
Data: data0,
Signature: sig.Marshal(),
}
d1_c0_a1 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1111111},
CommitteeBits: cb0,
Data: data1,
Signature: sig.Marshal(),
}
d1_c1_a1 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1000011},
CommitteeBits: cb1,
Data: data1,
Signature: sig.Marshal(),
}
d1_c1_a2 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1100101},
CommitteeBits: cb1,
Data: data1,
Signature: sig.Marshal(),
}
d1_c1_a3 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1111000},
CommitteeBits: cb1,
Data: data1,
Signature: sig.Marshal(),
}
pool := &mock.PoolMock{}
require.NoError(t, pool.SaveAggregatedAttestations([]ethpb.Att{d0_c0_a1, d0_c0_a2, d0_c0_a3, d0_c1_a1, d0_c1_a2, d1_c0_a1, d1_c1_a1, d1_c1_a2, d1_c1_a3}))
slot := primitives.Slot(1)
s := &Server{AttPool: pool, HeadFetcher: &chainMock.ChainService{}, TimeFetcher: &chainMock.ChainService{Slot: &slot}}
// We need the correct number of validators so that there are at least 2 committees per slot
// and each committee has exactly 6 validators (this is because we have 6 aggregation bits).
st, _ := util.DeterministicGenesisStateElectra(t, 192)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
atts, err := s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, 6, len(atts))
assert.Equal(t, true,
atts[0].GetAggregationBits().Count() >= atts[1].GetAggregationBits().Count() &&
atts[1].GetAggregationBits().Count() >= atts[2].GetAggregationBits().Count() &&
atts[2].GetAggregationBits().Count() >= atts[3].GetAggregationBits().Count() &&
atts[3].GetAggregationBits().Count() >= atts[4].GetAggregationBits().Count() &&
atts[4].GetAggregationBits().Count() >= atts[5].GetAggregationBits().Count(),
"on-chain aggregates are not sorted by aggregation bit count",
)
t.Run("slot takes precedence", func(t *testing.T) {
moreRecentAtt := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b1100000}, // we set only one bit for committee_index_0
CommitteeBits: cb1,
Data: util.HydrateAttestationData(&ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.PadTo([]byte{'0'}, 32)}),
Signature: sig.Marshal(),
}
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpb.Att{moreRecentAtt}))
atts, err = s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, 7, len(atts))
assert.Equal(t, true, atts[0].GetData().Slot == 1)
})
}
func Benchmark_packAttestations_Electra(b *testing.B) {
ctx := context.Background()
params.SetupTestConfigCleanup(b)
cfg := params.MainnetConfig().Copy()
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
valCount := uint64(1048576)
committeeCount := helpers.SlotCommitteeCount(valCount)
valsPerCommittee := valCount / committeeCount / uint64(params.BeaconConfig().SlotsPerEpoch)
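// Editorial worked example under the mainnet parameters configured above (not part of the original benchmark):
// committeeCount = clamp(1,048,576 / 32 slots / TARGET_COMMITTEE_SIZE 128, 1, MAX_COMMITTEES_PER_SLOT 64) = 64,
// so valsPerCommittee = 1,048,576 / 64 / 32 = 512 bits per aggregation bitlist.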
st, _ := util.DeterministicGenesisStateElectra(b, valCount)
key, err := blst.RandKey()
require.NoError(b, err)
sig := key.Sign([]byte{'X'})
r := rand.New(rand.NewSource(123))
var atts []ethpb.Att
for c := uint64(0); c < committeeCount; c++ {
for a := uint64(0); a < params.BeaconConfig().TargetAggregatorsPerCommittee; a++ {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(c, true)
var att *ethpb.AttestationElectra
// Last two aggregators send aggregates for some random block root with only a few bits set.
if a >= params.BeaconConfig().TargetAggregatorsPerCommittee-2 {
root := bytesutil.PadTo([]byte("root_"+strconv.Itoa(r.Intn(100))), 32)
att = &ethpb.AttestationElectra{
Data: util.HydrateAttestationData(&ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch - 1, BeaconBlockRoot: root}),
AggregationBits: bitfield.NewBitlist(valsPerCommittee),
CommitteeBits: cb,
Signature: sig.Marshal(),
}
for bit := uint64(0); bit < valsPerCommittee; bit++ {
att.AggregationBits.SetBitAt(bit, r.Intn(100) < 2) // 2% that the bit is set
}
} else {
att = &ethpb.AttestationElectra{
Data: util.HydrateAttestationData(&ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch - 1, BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32)}),
AggregationBits: bitfield.NewBitlist(valsPerCommittee),
CommitteeBits: cb,
Signature: sig.Marshal(),
}
for bit := uint64(0); bit < valsPerCommittee; bit++ {
att.AggregationBits.SetBitAt(bit, r.Intn(100) < 98) // 98% that the bit is set
}
}
atts = append(atts, att)
}
}
pool := &mock.PoolMock{}
require.NoError(b, pool.SaveAggregatedAttestations(atts))
slot := primitives.Slot(1)
s := &Server{AttPool: pool, HeadFetcher: &chainMock.ChainService{}, TimeFetcher: &chainMock.ChainService{Slot: &slot}}
require.NoError(b, st.SetSlot(params.BeaconConfig().SlotsPerEpoch))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch+1)
require.NoError(b, err)
}
}
func Test_limitToMaxAttestations(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
atts := make([]ethpb.Att, params.BeaconConfig().MaxAttestations+1)

View File

@@ -54,7 +54,7 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState state.Be
// by ETH1_FOLLOW_DISTANCE. The head state should maintain the same ETH1Data until this condition has passed, so
// trust the existing head for the right eth1 vote until we can get a meaningful value from the deposit contract.
if latestValidTime < genesisTime+followDistanceSeconds {
log.WithField("genesisTime", genesisTime).WithField("latestValidTime", latestValidTime).Warn("voting period before genesis + follow distance, using eth1data from head")
log.WithField("genesisTime", genesisTime).WithField("latestValidTime", latestValidTime).Warn("Voting period before genesis + follow distance, using eth1data from head")
return vs.HeadFetcher.HeadETH1Data(), nil
}

View File

@@ -84,7 +84,6 @@ func (vs *Server) getLocalPayloadFromEngine(
}
setFeeRecipientIfBurnAddress(&val)
var err error
if ok && payloadId != [8]byte{} {
// Payload ID is cache hit. Return the cached payload ID.
var pid primitives.PayloadID
@@ -102,7 +101,7 @@ func (vs *Server) getLocalPayloadFromEngine(
return nil, errors.Wrap(err, "could not get cached payload from execution client")
}
}
log.WithFields(logFields).Debug("payload ID cache miss")
log.WithFields(logFields).Debug("Payload ID cache miss")
parentHash, err := vs.getParentBlockHash(ctx, st, slot)
switch {
case errors.Is(err, errActivationNotReached) || errors.Is(err, errNoTerminalBlockHash):
@@ -191,7 +190,7 @@ func (vs *Server) getLocalPayloadFromEngine(
}
warnIfFeeRecipientDiffers(val.FeeRecipient[:], res.ExecutionData.FeeRecipient())
log.WithField("value", res.Bid).Debug("received execution payload from local engine")
log.WithField("value", res.Bid).Debug("Received execution payload from local engine")
return res, nil
}

View File

@@ -912,7 +912,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
useBuilder: true,
err: "unblind sidecars failed: commitment value doesn't match block",
err: "unblind blobs sidecars: commitment value doesn't match block",
},
{
name: "electra block no blob",

View File

@@ -107,7 +107,7 @@ type blobBatchVerifier struct {
func (bbv *blobBatchVerifier) newVerifier(rb blocks.ROBlob) verification.BlobVerifier {
m := bbv.verifiers[rb.BlockRoot()]
m[rb.Index] = bbv.newBlobVerifier(rb, verification.BackfillSidecarRequirements)
m[rb.Index] = bbv.newBlobVerifier(rb, verification.BackfillBlobSidecarRequirements)
bbv.verifiers[rb.BlockRoot()] = m
return m[rb.Index]
}

View File

@@ -388,6 +388,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
}
}
// oneEpoch returns the duration of one epoch.
func oneEpoch() time.Duration {
return time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
}
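
For reference, with mainnet parameters this is 32 slots × 12 seconds = 384 seconds, i.e. 6m24s.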

View File

@@ -172,7 +172,7 @@ func (s *Service) processFetchedDataRegSync(
if len(bwb) == 0 {
return
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
@@ -331,7 +331,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(genesis, first, len(bwb))
for _, bb := range bwb {

View File

@@ -340,7 +340,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
if len(sidecars) != len(req) {
continue
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
current := s.clock.CurrentSlot()
if err := avs.Persist(current, sidecars...); err != nil {

View File

@@ -495,8 +495,8 @@ func TestOriginOutsideRetention(t *testing.T) {
bdb := dbtest.SetupDB(t)
genesis := time.Unix(0, 0)
secsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
retentionSeconds := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch)
outsideRetention := genesis.Add(retentionSeconds)
retentionPeriod := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch)
outsideRetention := genesis.Add(retentionPeriod)
now := func() time.Time {
return outsideRetention
}

View File

@@ -315,7 +315,7 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
if uint64(len(roots)) > maxReqBlock {
req = roots[:maxReqBlock]
}
if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil {
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Debug("Could not send recent block request")
}

View File

@@ -16,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)
// beaconBlocksByRangeRPCHandler looks up the request blocks from the database from a given start block.
@@ -26,15 +27,23 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
defer cancel()
SetRPCStreamDeadlines(stream)
remotePeer := stream.Conn().RemotePeer()
m, ok := msg.(*pb.BeaconBlocksByRangeRequest)
if !ok {
return errors.New("message is not type *pb.BeaconBlockByRangeRequest")
}
log.WithField("startSlot", m.StartSlot).WithField("count", m.Count).Debug("Serving block by range request")
log.WithFields(logrus.Fields{
"startSlot": m.StartSlot,
"count": m.Count,
"peer": remotePeer,
}).Debug("Serving block by range request")
rp, err := validateRangeRequest(m, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
tracing.AnnotateError(span, err)
return err
}
@@ -50,12 +59,12 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
if err != nil {
return err
}
remainingBucketCapacity := blockLimiter.Remaining(stream.Conn().RemotePeer().String())
remainingBucketCapacity := blockLimiter.Remaining(remotePeer.String())
span.SetAttributes(
trace.Int64Attribute("start", int64(rp.start)), // lint:ignore uintcast -- This conversion is OK for tracing.
trace.Int64Attribute("end", int64(rp.end)), // lint:ignore uintcast -- This conversion is OK for tracing.
trace.Int64Attribute("count", int64(m.Count)),
trace.StringAttribute("peer", stream.Conn().RemotePeer().String()),
trace.StringAttribute("peer", remotePeer.String()),
trace.Int64Attribute("remaining_capacity", remainingBucketCapacity),
)
@@ -82,12 +91,19 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
}
rpcBlocksByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
}
if err := batch.error(); err != nil {
log.WithError(err).Debug("error in BlocksByRange batch")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
log.WithError(err).Debug("Serving block by range request - BlocksByRange batch")
// If a rate limit is hit, it means an error response has already been sent and the stream has been closed.
if !errors.Is(err, p2ptypes.ErrRateLimited) {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
}
tracing.AnnotateError(span, err)
return err
}
closeStream(stream, log)
return nil
}

View File

@@ -20,9 +20,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// sendRecentBeaconBlocksRequest sends a recent beacon blocks request to a peer to get
// sendBeaconBlocksRequest sends a beacon blocks by root request to a peer to get
// those corresponding blocks from that peer.
func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()
@@ -151,7 +151,7 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
if len(sidecars) != len(request) {
return fmt.Errorf("received %d blob sidecars, expected %d for RPC", len(sidecars), len(request))
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.PendingQueueSidecarRequirements)
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.PendingQueueBlobSidecarRequirements)
for _, sidecar := range sidecars {
if err := verify.BlobAlignsWithBlock(sidecar, RoBlock); err != nil {
return err

View File

@@ -253,7 +253,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
})
p1.Connect(p2)
require.NoError(t, r.sendRecentBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
require.NoError(t, r.sendBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
@@ -328,7 +328,7 @@ func TestRecentBeaconBlocks_RPCRequestSent_IncorrectRoot(t *testing.T) {
})
p1.Connect(p2)
require.ErrorContains(t, "received unexpected block with root", r.sendRecentBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
require.ErrorContains(t, "received unexpected block with root", r.sendBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
}
func TestRecentBeaconBlocksRPCHandler_HandleZeroBlocks(t *testing.T) {

View File

@@ -99,6 +99,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
}
var batch blockBatch
wQuota := params.BeaconConfig().MaxRequestBlobSidecars
for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {
batchStart := time.Now()
@@ -114,7 +115,12 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
}
if err := batch.error(); err != nil {
log.WithError(err).Debug("error in BlobSidecarsByRange batch")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
// If a rate limit is hit, it means an error response has already been sent and the stream has been closed.
if !errors.Is(err, p2ptypes.ErrRateLimited) {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
}
tracing.AnnotateError(span, err)
return err
}

View File

@@ -2,12 +2,12 @@ package sync
import (
"context"
"errors"
"fmt"
"strings"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -16,127 +16,191 @@ import (
)
// pingHandler reads the incoming ping rpc message from the peer.
// If the peer's sequence number is higher than the one stored locally,
// a METADATA request is sent to the peer to retrieve and update the latest metadata.
// Note: This function is misnamed, as it performs more than just reading a ping message.
func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pcore.Stream) error {
SetRPCStreamDeadlines(stream)
// Convert the message to SSZ Uint64 type.
m, ok := msg.(*primitives.SSZUint64)
if !ok {
return fmt.Errorf("wrong message type for ping, got %T, wanted *uint64", msg)
}
// Validate the incoming request regarding rate limiting.
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
return errors.Wrap(err, "validate request")
}
s.rateLimiter.add(stream, 1)
valid, err := s.validateSequenceNum(*m, stream.Conn().RemotePeer())
// Retrieve the peer ID.
peerID := stream.Conn().RemotePeer()
// Check if the peer sequence number is higher than the one we have in our store.
valid, err := s.validateSequenceNum(*m, peerID)
if err != nil {
// Descore peer for giving us a bad sequence number.
if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidSequenceNum.Error(), stream)
}
return err
return errors.Wrap(err, "validate sequence number")
}
// We can already prepare a success response to the peer.
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
return errors.Wrap(err, "write response")
}
sq := primitives.SSZUint64(s.cfg.p2p.MetadataSeq())
if _, err := s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, &sq); err != nil {
// Retrieve our own sequence number.
seqNumber := s.cfg.p2p.MetadataSeq()
// SSZ encode our sequence number.
seqNumberSSZ := primitives.SSZUint64(seqNumber)
// Send our sequence number back to the peer.
if _, err := s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, &seqNumberSSZ); err != nil {
return err
}
closeStream(stream, log)
if valid {
// If the sequence number was valid we're done.
// If the peer's sequence number was valid, we're done.
return nil
}
// The sequence number was not valid. Start our own ping back to the peer.
// The peer's sequence number was not valid. We ask the peer for its metadata.
go func() {
// New context so the calling function doesn't cancel on us.
// Define a new context so the calling function doesn't cancel on us.
ctx, cancel := context.WithTimeout(context.Background(), ttfbTimeout)
defer cancel()
md, err := s.sendMetaDataRequest(ctx, stream.Conn().RemotePeer())
// Send a METADATA request to the peer.
peerMetadata, err := s.sendMetaDataRequest(ctx, peerID)
if err != nil {
// We cannot compare errors directly as the stream muxer error
// type isn't compatible with the error we have, so a direct
// equality check fails.
if !strings.Contains(err.Error(), p2ptypes.ErrIODeadline.Error()) {
log.WithField("peer", stream.Conn().RemotePeer()).WithError(err).Debug("Could not send metadata request")
log.WithField("peer", peerID).WithError(err).Debug("Could not send metadata request")
}
return
}
// update metadata if there is no error
s.cfg.p2p.Peers().SetMetadata(stream.Conn().RemotePeer(), md)
// Update peer's metadata.
s.cfg.p2p.Peers().SetMetadata(peerID, peerMetadata)
}()
return nil
}
func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error {
// sendPingRequest first sends a PING request to the peer.
// If the peer responds with a sequence number higher than the latest one we have for it in our store,
// then this function sends a METADATA request to the peer and stores the metadata received.
// This function is actually poorly named, since it does more than just send a ping request.
func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()
metadataSeq := primitives.SSZUint64(s.cfg.p2p.MetadataSeq())
topic, err := p2p.TopicFromMessage(p2p.PingMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
// Get the current epoch.
currentSlot := s.cfg.clock.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
// SSZ encode our metadata sequence number.
metadataSeq := s.cfg.p2p.MetadataSeq()
encodedMetadataSeq := primitives.SSZUint64(metadataSeq)
// Get the PING topic for the current epoch.
topic, err := p2p.TopicFromMessage(p2p.PingMessageName, currentEpoch)
if err != nil {
return err
return errors.Wrap(err, "topic from message")
}
stream, err := s.cfg.p2p.Send(ctx, &metadataSeq, topic, id)
// Send the PING request to the peer.
stream, err := s.cfg.p2p.Send(ctx, &encodedMetadataSeq, topic, peerID)
if err != nil {
return err
return errors.Wrap(err, "send ping request")
}
currentTime := time.Now()
defer closeStream(stream, log)
startTime := time.Now()
// Read the response from the peer.
code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
if err != nil {
return err
return errors.Wrap(err, "read status code")
}
// Records the latency of the ping request for that peer.
s.cfg.p2p.Host().Peerstore().RecordLatency(id, time.Now().Sub(currentTime))
// Record the latency of the ping request for that peer.
s.cfg.p2p.Host().Peerstore().RecordLatency(peerID, time.Now().Sub(startTime))
// If the peer responded with an error, increment the bad responses scorer.
if code != 0 {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return errors.New(errMsg)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return errors.Errorf("code: %d - %s", code, errMsg)
}
// Decode the sequence number from the peer.
msg := new(primitives.SSZUint64)
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
return err
return errors.Wrap(err, "decode sequence number")
}
valid, err := s.validateSequenceNum(*msg, stream.Conn().RemotePeer())
// Determine whether the sequence number returned by the peer is higher than the one we have in our store.
valid, err := s.validateSequenceNum(*msg, peerID)
if err != nil {
// Descore peer for giving us a bad sequence number.
if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
}
return err
return errors.Wrap(err, "validate sequence number")
}
// The sequence number we have in our store for this peer is the same as the one returned by the peer, all good.
if valid {
return nil
}
md, err := s.sendMetaDataRequest(ctx, stream.Conn().RemotePeer())
// We need to send a METADATA request to the peer to get its latest metadata.
md, err := s.sendMetaDataRequest(ctx, peerID)
if err != nil {
// do not increment bad responses, as its
// already done in the request method.
return err
// do not increment bad responses, as it's already done in the request method.
return errors.Wrap(err, "send metadata request")
}
s.cfg.p2p.Peers().SetMetadata(stream.Conn().RemotePeer(), md)
// Update the metadata for the peer.
s.cfg.p2p.Peers().SetMetadata(peerID, md)
return nil
}
// validates the peer's sequence number.
// validateSequenceNum validates the peer's sequence number.
// - If the peer's sequence number is greater than the sequence number we have in our store for the peer, return false.
// - If the peer's sequence number is equal to the sequence number we have in our store for the peer, return true.
// - If the peer's sequence number is less than the sequence number we have in our store for the peer, return an error.
func (s *Service) validateSequenceNum(seq primitives.SSZUint64, id peer.ID) (bool, error) {
// Retrieve the metadata we have in our store for this peer.
md, err := s.cfg.p2p.Peers().Metadata(id)
if err != nil {
return false, err
return false, errors.Wrap(err, "get metadata")
}
// If we have no metadata for the peer, return false.
if md == nil || md.IsNil() {
return false, nil
}
// Return error on invalid sequence number.
// The peer's sequence number must be greater than or equal to the sequence number we have in our store.
if md.SequenceNumber() > uint64(seq) {
return false, p2ptypes.ErrInvalidSequenceNum
}
// Return true if the peer's sequence number is equal to the sequence number we have in our store.
return md.SequenceNumber() == uint64(seq), nil
}
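A minimal standalone sketch of the three-way contract documented above, with stored being the sequence number we hold for the peer and received the one the peer sent:

package main

import (
	"errors"
	"fmt"
)

// errInvalidSequenceNum stands in for p2ptypes.ErrInvalidSequenceNum.
var errInvalidSequenceNum = errors.New("invalid sequence number")

// validateSeq mirrors the contract of validateSequenceNum:
//   received > stored  -> (false, nil): the peer has newer metadata, fetch it.
//   received == stored -> (true, nil):  our stored metadata is current.
//   received < stored  -> error:        the peer regressed, descore it.
func validateSeq(stored, received uint64) (bool, error) {
	if stored > received {
		return false, errInvalidSequenceNum
	}
	return stored == received, nil
}

func main() {
	for _, received := range []uint64{4, 5, 6} {
		valid, err := validateSeq(5, received)
		fmt.Printf("received=%d valid=%t err=%v\n", received, valid, err)
	}
}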

View File

@@ -19,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
@@ -49,7 +48,7 @@ type BeaconBlockProcessor func(block interfaces.ReadOnlySignedBeaconBlock) error
// SendBeaconBlocksByRangeRequest sends BeaconBlocksByRange and returns fetched blocks, if any.
func SendBeaconBlocksByRangeRequest(
ctx context.Context, tor blockchain.TemporalOracle, p2pProvider p2p.SenderEncoder, pid peer.ID,
req *pb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor,
req *ethpb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor,
) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, slots.ToEpoch(tor.CurrentSlot()))
if err != nil {
@@ -155,7 +154,7 @@ func SendBeaconBlocksByRootRequest(
return blocks, nil
}
func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *pb.BlobSidecarsByRangeRequest, bvs ...BlobResponseValidation) ([]blocks.ROBlob, error) {
func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *ethpb.BlobSidecarsByRangeRequest, bvs ...BlobResponseValidation) ([]blocks.ROBlob, error) {
topic, err := p2p.TopicFromMessage(p2p.BlobSidecarsByRangeName, slots.ToEpoch(tor.CurrentSlot()))
if err != nil {
return nil, err
@@ -298,7 +297,7 @@ func blobValidatorFromRootReq(req *p2ptypes.BlobSidecarsByRootReq) BlobResponseV
}
}
func blobValidatorFromRangeReq(req *pb.BlobSidecarsByRangeRequest) BlobResponseValidation {
func blobValidatorFromRangeReq(req *ethpb.BlobSidecarsByRangeRequest) BlobResponseValidation {
end := req.StartSlot + primitives.Slot(req.Count)
return func(sc blocks.ROBlob) error {
if sc.Slot() < req.StartSlot || sc.Slot() >= end {

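The closure above enforces a half-open slot window [StartSlot, StartSlot+Count). A minimal sketch with plain uint64 slots standing in for primitives.Slot:

package main

import "fmt"

// inRange reports whether slot falls in the half-open window [start, start+count),
// mirroring the check in blobValidatorFromRangeReq.
func inRange(start, count, slot uint64) bool {
	end := start + count
	return slot >= start && slot < end
}

func main() {
	fmt.Println(inRange(10, 4, 13)) // true: 13 is in [10, 14)
	fmt.Println(inRange(10, 4, 14)) // false: the end slot is exclusive
}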
View File

@@ -15,6 +15,8 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
gcache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/trailofbits/go-mutexasserts"
"github.com/prysmaticlabs/prysm/v5/async"
"github.com/prysmaticlabs/prysm/v5/async/abool"
"github.com/prysmaticlabs/prysm/v5/async/event"
@@ -44,22 +46,24 @@ import (
"github.com/prysmaticlabs/prysm/v5/runtime"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/trailofbits/go-mutexasserts"
)
var _ runtime.Service = (*Service)(nil)
const rangeLimit uint64 = 1024
const seenBlockSize = 1000
const seenBlobSize = seenBlockSize * 4 // Each block can have max 4 blobs. Worst case 164kB for cache.
const seenUnaggregatedAttSize = 20000
const seenAggregatedAttSize = 16384
const seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
const seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
const seenExitSize = 100
const seenProposerSlashingSize = 100
const badBlockSize = 1000
const syncMetricsInterval = 10 * time.Second
const (
rangeLimit uint64 = 1024
seenBlockSize = 1000
seenBlobSize = seenBlockSize * 6 // Each block can have max 6 blobs.
seenDataColumnSize = seenBlockSize * 128 // Each block can have max 128 data columns.
seenUnaggregatedAttSize = 20000
seenAggregatedAttSize = 16384
seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
seenExitSize = 100
seenProposerSlashingSize = 100
badBlockSize = 1000
syncMetricsInterval = 10 * time.Second
)
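The cache sizes above are simple worst-case products. A minimal standalone sketch of the sizing arithmetic, using the per-block maxima quoted in the comments above:

package main

import "fmt"

const (
	seenBlockSize      = 1000 // number of blocks the "seen" caches are sized for
	maxBlobsPerBlock   = 6    // max blobs per block, per the comment above
	maxColumnsPerBlock = 128  // NUMBER_OF_COLUMNS in the extended data matrix
)

func main() {
	// Each cache holds enough entries for seenBlockSize blocks' worth of items.
	fmt.Println("seenBlobSize =", seenBlockSize*maxBlobsPerBlock)         // 6000
	fmt.Println("seenDataColumnSize =", seenBlockSize*maxColumnsPerBlock) // 128000
}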
var (
// Seconds in one epoch.
@@ -162,18 +166,18 @@ type Service struct {
// NewService initializes new regular sync service.
func NewService(ctx context.Context, opts ...Option) *Service {
c := gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */)
ctx, cancel := context.WithCancel(ctx)
r := &Service{
ctx: ctx,
cancel: cancel,
chainStarted: abool.New(),
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: c,
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
for _, opt := range opts {
if err := opt(r); err != nil {
return nil
@@ -224,7 +228,7 @@ func (s *Service) Start() {
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
go s.verifierRoutine()
go s.registerHandlers()
go s.startTasksPostInitialSync()
s.cfg.p2p.AddConnectionHandler(s.reValidatePeer, s.sendGoodbye)
s.cfg.p2p.AddDisconnectionHandler(func(_ context.Context, _ peer.ID) error {
@@ -315,23 +319,31 @@ func (s *Service) waitForChainStart() {
s.markForChainStart()
}
func (s *Service) registerHandlers() {
func (s *Service) startTasksPostInitialSync() {
// Wait for the chain to start.
s.waitForChainStart()
select {
case <-s.initialSyncComplete:
// Register respective pubsub handlers at state synced event.
digest, err := s.currentForkDigest()
// Compute the current epoch.
currentSlot := slots.CurrentSlot(uint64(s.cfg.clock.GenesisTime().Unix()))
currentEpoch := slots.ToEpoch(currentSlot)
// Compute the current fork digest.
forkDigest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not retrieve current fork digest")
return
}
currentEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.cfg.clock.GenesisTime().Unix())))
s.registerSubscribers(currentEpoch, digest)
// Register respective pubsub handlers at state synced event.
s.registerSubscribers(currentEpoch, forkDigest)
// Start the fork watcher.
go s.forkWatcher()
return
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
}
}
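A minimal sketch of the wait-then-start pattern above, with a stub channel standing in for initialSyncComplete and a context standing in for the service context:

package main

import (
	"context"
	"fmt"
	"time"
)

// startPostSyncTasks blocks until either initial sync completes or the
// service context is cancelled, mirroring startTasksPostInitialSync.
func startPostSyncTasks(ctx context.Context, syncComplete <-chan struct{}) {
	select {
	case <-syncComplete:
		fmt.Println("initial sync complete: register subscribers, start fork watcher")
	case <-ctx.Done():
		fmt.Println("context closed, exiting goroutine")
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	done := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(done) }()
	startPostSyncTasks(ctx, done)
}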

View File

@@ -62,7 +62,7 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
}
topic := "/eth2/%x/beacon_block"
go r.registerHandlers()
go r.startTasksPostInitialSync()
time.Sleep(100 * time.Millisecond)
var vr [32]byte
@@ -143,7 +143,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
syncCompleteCh := make(chan bool)
go func() {
r.registerHandlers()
r.startTasksPostInitialSync()
syncCompleteCh <- true
}()
@@ -200,7 +200,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
initialSyncComplete: make(chan struct{}),
}
go r.registerHandlers()
go r.startTasksPostInitialSync()
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
r.waitForChainStart()

View File

@@ -13,7 +13,7 @@ import (
func (s *Service) blobSubscriber(ctx context.Context, msg proto.Message) error {
b, ok := msg.(blocks.VerifiedROBlob)
if !ok {
return fmt.Errorf("message was not type blocks.ROBlob, type=%T", msg)
return fmt.Errorf("message was not type blocks.VerifiedROBlob, type=%T", msg)
}
return s.subscribeBlob(ctx, b)

View File

@@ -117,6 +117,9 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
if seen {
return pubsub.ValidationIgnore, nil
}
// Verify the block being voted on is in the beacon chain.
// If not, store this attestation in the map of pending attestations.
if !s.validateBlockInAttestation(ctx, m) {
return pubsub.ValidationIgnore, nil
}
@@ -222,6 +225,8 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.Signed
return s.validateWithBatchVerifier(ctx, "aggregate", set)
}
// validateBlockInAttestation checks if the block being voted on is in the beaconDB.
// If not, it stores this attestation in the map of pending attestations.
func (s *Service) validateBlockInAttestation(ctx context.Context, satt ethpb.SignedAggregateAttAndProof) bool {
// Verify that the block being voted on and the processed state are in the beaconDB. The block should have passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(satt.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
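A minimal sketch of the pending-attestation parking described above, assuming a simplified attestation type; the real service keys blkRootToPendingAtts by block root and guards it with a mutex:

package main

import "fmt"

type pendingAtt struct{ slot uint64 }

// pendingAtts parks attestations whose target block has not been seen yet,
// keyed by the 32-byte root of the block they vote on.
var pendingAtts = make(map[[32]byte][]pendingAtt)

func savePending(root [32]byte, att pendingAtt) {
	pendingAtts[root] = append(pendingAtts[root], att)
}

// onBlockArrived drains and returns the attestations waiting on root.
func onBlockArrived(root [32]byte) []pendingAtt {
	atts := pendingAtts[root]
	delete(pendingAtts, root)
	return atts
}

func main() {
	root := [32]byte{1}
	savePending(root, pendingAtt{slot: 42})
	fmt.Println(len(onBlockArrived(root))) // 1
}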

View File

@@ -211,11 +211,16 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
// Log the arrival time of the accepted block
graffiti := blk.Block().Body().Graffiti()
exec, err := blk.Block().Body().Execution()
if err != nil {
log.WithError(err)
}
startTime, err := slots.ToTime(genesisTime, blk.Block().Slot())
logFields := logrus.Fields{
"blockSlot": blk.Block().Slot(),
"proposerIndex": blk.Block().ProposerIndex(),
"graffiti": string(graffiti[:]),
"extraData": string(exec.ExtraData()),
}
if err != nil {
log.WithError(err).WithFields(logFields).Warn("Received block, could not report timing information.")

View File

@@ -51,7 +51,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
if err != nil {
return pubsub.ValidationReject, errors.Wrap(err, "roblob conversion failure")
}
vf := s.newBlobVerifier(blob, verification.GossipSidecarRequirements)
vf := s.newBlobVerifier(blob, verification.GossipBlobSidecarRequirements)
if err := vf.BlobIndexInBounds(); err != nil {
return pubsub.ValidationReject, err

View File

@@ -10,6 +10,7 @@ go_library(
"fake.go",
"initializer.go",
"interface.go",
"log.go",
"metrics.go",
"mock.go",
"result.go",

View File

@@ -169,7 +169,7 @@ func TestBatchVerifier(t *testing.T) {
blk, blbs := c.bandb(t, c.nblobs)
reqs := c.reqs
if reqs == nil {
reqs = InitsyncSidecarRequirements
reqs = InitsyncBlobSidecarRequirements
}
bbv := NewBlobBatchVerifier(c.nv(), reqs)
if c.cv == nil {

View File

@@ -2,6 +2,7 @@ package verification
import (
"context"
goError "errors"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
@@ -12,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)
const (
@@ -29,7 +29,7 @@ const (
RequireSidecarProposerExpected
)
var allSidecarRequirements = []Requirement{
var allBlobSidecarRequirements = []Requirement{
RequireBlobIndexInBounds,
RequireNotFromFutureSlot,
RequireSlotAboveFinalized,
@@ -43,21 +43,21 @@ var allSidecarRequirements = []Requirement{
RequireSidecarProposerExpected,
}
// GossipSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
// GossipBlobSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
// must satisfy in order to upgrade an ROBlob to a VerifiedROBlob.
var GossipSidecarRequirements = requirementList(allSidecarRequirements).excluding()
var GossipBlobSidecarRequirements = requirementList(allBlobSidecarRequirements).excluding()
// SpectestSidecarRequirements is used by the forkchoice spectests when verifying blobs used in the on_block tests.
// SpectestBlobSidecarRequirements is used by the forkchoice spectests when verifying blobs used in the on_block tests.
// The only requirements we exclude for these tests are the parent validity and seen tests, as these are specific to
// gossip processing and require the bad block cache that we only use there.
var SpectestSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
var SpectestBlobSidecarRequirements = requirementList(GossipBlobSidecarRequirements).excluding(
RequireSidecarParentSeen, RequireSidecarParentValid)
// InitsyncSidecarRequirements is the list of verification requirements to be used by the init-sync service
// InitsyncBlobSidecarRequirements is the list of verification requirements to be used by the init-sync service
// for batch-mode syncing. Because we only perform batch verification as part of the IsDataAvailable method
// for blobs after the block has been verified, and the blobs to be verified are keyed in the cache by the
// block root, the list of required verifications is much shorter than gossip.
var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
var InitsyncBlobSidecarRequirements = requirementList(GossipBlobSidecarRequirements).excluding(
RequireNotFromFutureSlot,
RequireSlotAboveFinalized,
RequireSidecarParentSeen,
@@ -71,36 +71,16 @@ var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).exc
// execution layer mempool. Only the KZG proof verification is required.
var ELMemPoolRequirements = []Requirement{RequireSidecarKzgProofVerified}
// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
// BackfillBlobSidecarRequirements is the same as InitsyncBlobSidecarRequirements.
var BackfillBlobSidecarRequirements = requirementList(InitsyncBlobSidecarRequirements).excluding()
// PendingQueueSidecarRequirements is the same as InitsyncSidecarRequirements, used by the pending blocks queue.
var PendingQueueSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
// PendingQueueBlobSidecarRequirements is the same as InitsyncBlobSidecarRequirements, used by the pending blocks queue.
var PendingQueueBlobSidecarRequirements = requirementList(InitsyncBlobSidecarRequirements).excluding()
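All of the requirement sets above are derived from one base list via excluding. A minimal standalone sketch of that derivation pattern (the Requirement names here are illustrative stand-ins):

package main

import "fmt"

type Requirement int

const (
	RequireIndexInBounds Requirement = iota
	RequireNotFromFutureSlot
	RequireSlotAboveFinalized
)

type requirementList []Requirement

// excluding returns a copy of the list with the given requirements removed,
// so derived sets stay in sync with the base list they come from.
func (rl requirementList) excluding(excluded ...Requirement) []Requirement {
	skip := make(map[Requirement]bool, len(excluded))
	for _, e := range excluded {
		skip[e] = true
	}
	out := make([]Requirement, 0, len(rl))
	for _, r := range rl {
		if !skip[r] {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	all := requirementList{RequireIndexInBounds, RequireNotFromFutureSlot, RequireSlotAboveFinalized}
	initsync := all.excluding(RequireNotFromFutureSlot, RequireSlotAboveFinalized)
	fmt.Println(initsync) // [0]
}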
var (
ErrBlobInvalid = errors.New("blob failed verification")
// ErrBlobIndexInvalid means RequireBlobIndexInBounds failed.
ErrBlobIndexInvalid = errors.Wrap(ErrBlobInvalid, "incorrect blob sidecar index")
// ErrFromFutureSlot means RequireSlotNotTooEarly failed.
ErrFromFutureSlot = errors.Wrap(ErrBlobInvalid, "slot is too far in the future")
// ErrSlotNotAfterFinalized means RequireSlotAboveFinalized failed.
ErrSlotNotAfterFinalized = errors.Wrap(ErrBlobInvalid, "slot <= finalized checkpoint")
// ErrInvalidProposerSignature means RequireValidProposerSignature failed.
ErrInvalidProposerSignature = errors.Wrap(ErrBlobInvalid, "proposer signature could not be verified")
// ErrSidecarParentNotSeen means RequireSidecarParentSeen failed.
ErrSidecarParentNotSeen = errors.Wrap(ErrBlobInvalid, "parent root has not been seen")
// ErrSidecarParentInvalid means RequireSidecarParentValid failed.
ErrSidecarParentInvalid = errors.Wrap(ErrBlobInvalid, "parent block is not valid")
// ErrSlotNotAfterParent means RequireSidecarParentSlotLower failed.
ErrSlotNotAfterParent = errors.Wrap(ErrBlobInvalid, "slot <= slot")
// ErrSidecarNotFinalizedDescendent means RequireSidecarDescendsFromFinalized failed.
ErrSidecarNotFinalizedDescendent = errors.Wrap(ErrBlobInvalid, "blob parent is not descended from the finalized block")
// ErrSidecarInclusionProofInvalid means RequireSidecarInclusionProven failed.
ErrSidecarInclusionProofInvalid = errors.Wrap(ErrBlobInvalid, "sidecar inclusion proof verification failed")
// ErrSidecarKzgProofInvalid means RequireSidecarKzgProofVerified failed.
ErrSidecarKzgProofInvalid = errors.Wrap(ErrBlobInvalid, "sidecar kzg commitment proof verification failed")
// ErrSidecarUnexpectedProposer means RequireSidecarProposerExpected failed.
ErrSidecarUnexpectedProposer = errors.Wrap(ErrBlobInvalid, "sidecar was not proposed by the expected proposer_index")
ErrBlobIndexInvalid = errors.New("incorrect blob sidecar index")
)
type ROBlobVerifier struct {
@@ -149,7 +129,7 @@ func (bv *ROBlobVerifier) BlobIndexInBounds() (err error) {
defer bv.recordResult(RequireBlobIndexInBounds, &err)
if bv.blob.Index >= fieldparams.MaxBlobsPerBlock {
log.WithFields(logging.BlobFields(bv.blob)).Debug("Sidecar index >= MAX_BLOBS_PER_BLOCK")
return ErrBlobIndexInvalid
return blobErrBuilder(ErrBlobIndexInvalid)
}
return nil
}
@@ -168,7 +148,7 @@ func (bv *ROBlobVerifier) NotFromFutureSlot() (err error) {
// If the system time is still before earliestStart, we consider the blob from a future slot and return an error.
if bv.clock.Now().Before(earliestStart) {
log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is too far in the future")
return ErrFromFutureSlot
return blobErrBuilder(ErrFromFutureSlot)
}
return nil
}
@@ -181,11 +161,11 @@ func (bv *ROBlobVerifier) SlotAboveFinalized() (err error) {
fcp := bv.fc.FinalizedCheckpoint()
fSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
return errors.Wrapf(ErrSlotNotAfterFinalized, "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
return errors.Wrapf(blobErrBuilder(ErrSlotNotAfterFinalized), "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
}
if bv.blob.Slot() <= fSlot {
log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is not after finalized checkpoint")
return ErrSlotNotAfterFinalized
return blobErrBuilder(ErrSlotNotAfterFinalized)
}
return nil
}
@@ -203,7 +183,7 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
if err != nil {
log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("reusing failed proposer signature validation from cache")
blobVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
return ErrInvalidProposerSignature
return blobErrBuilder(ErrInvalidProposerSignature)
}
return nil
}
@@ -213,12 +193,12 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
parent, err := bv.parentState(ctx)
if err != nil {
log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("could not replay parent state for blob signature verification")
return ErrInvalidProposerSignature
return blobErrBuilder(ErrInvalidProposerSignature)
}
// Full verification, which will subsequently be cached for anything sharing the signature cache.
if err = bv.sc.VerifySignature(sd, parent); err != nil {
log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("signature verification failed")
return ErrInvalidProposerSignature
return blobErrBuilder(ErrInvalidProposerSignature)
}
return nil
}
@@ -235,7 +215,7 @@ func (bv *ROBlobVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err
return nil
}
log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root has not been seen")
return ErrSidecarParentNotSeen
return blobErrBuilder(ErrSidecarParentNotSeen)
}
// SidecarParentValid represents the spec verification:
@@ -244,7 +224,7 @@ func (bv *ROBlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err
defer bv.recordResult(RequireSidecarParentValid, &err)
if badParent != nil && badParent(bv.blob.ParentRoot()) {
log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root is invalid")
return ErrSidecarParentInvalid
return blobErrBuilder(ErrSidecarParentInvalid)
}
return nil
}
@@ -255,10 +235,10 @@ func (bv *ROBlobVerifier) SidecarParentSlotLower() (err error) {
defer bv.recordResult(RequireSidecarParentSlotLower, &err)
parentSlot, err := bv.fc.Slot(bv.blob.ParentRoot())
if err != nil {
return errors.Wrap(ErrSlotNotAfterParent, "parent root not in forkchoice")
return errors.Wrap(blobErrBuilder(ErrSlotNotAfterParent), "parent root not in forkchoice")
}
if parentSlot >= bv.blob.Slot() {
return ErrSlotNotAfterParent
return blobErrBuilder(ErrSlotNotAfterParent)
}
return nil
}
@@ -270,7 +250,7 @@ func (bv *ROBlobVerifier) SidecarDescendsFromFinalized() (err error) {
defer bv.recordResult(RequireSidecarDescendsFromFinalized, &err)
if !bv.fc.HasNode(bv.blob.ParentRoot()) {
log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root not in forkchoice")
return ErrSidecarNotFinalizedDescendent
return blobErrBuilder(ErrSidecarNotFinalizedDescendent)
}
return nil
}
@@ -281,7 +261,7 @@ func (bv *ROBlobVerifier) SidecarInclusionProven() (err error) {
defer bv.recordResult(RequireSidecarInclusionProven, &err)
if err = blocks.VerifyKZGInclusionProof(bv.blob); err != nil {
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("sidecar inclusion proof verification failed")
return ErrSidecarInclusionProofInvalid
return blobErrBuilder(ErrSidecarInclusionProofInvalid)
}
return nil
}
@@ -293,7 +273,7 @@ func (bv *ROBlobVerifier) SidecarKzgProofVerified() (err error) {
defer bv.recordResult(RequireSidecarKzgProofVerified, &err)
if err = bv.verifyBlobCommitment(bv.blob); err != nil {
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("kzg commitment proof verification failed")
return ErrSidecarKzgProofInvalid
return blobErrBuilder(ErrSidecarKzgProofInvalid)
}
return nil
}
@@ -311,7 +291,7 @@ func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err erro
}
r, err := bv.fc.TargetRootForEpoch(bv.blob.ParentRoot(), e)
if err != nil {
return ErrSidecarUnexpectedProposer
return blobErrBuilder(ErrSidecarUnexpectedProposer)
}
c := &forkchoicetypes.Checkpoint{Root: r, Epoch: e}
idx, cached := bv.pc.Proposer(c, bv.blob.Slot())
@@ -319,19 +299,19 @@ func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err erro
pst, err := bv.parentState(ctx)
if err != nil {
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("state replay to parent_root failed")
return ErrSidecarUnexpectedProposer
return blobErrBuilder(ErrSidecarUnexpectedProposer)
}
idx, err = bv.pc.ComputeProposer(ctx, bv.blob.ParentRoot(), bv.blob.Slot(), pst)
if err != nil {
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("error computing proposer index from parent state")
return ErrSidecarUnexpectedProposer
return blobErrBuilder(ErrSidecarUnexpectedProposer)
}
}
if idx != bv.blob.ProposerIndex() {
log.WithError(ErrSidecarUnexpectedProposer).
log.WithError(blobErrBuilder(ErrSidecarUnexpectedProposer)).
WithFields(logging.BlobFields(bv.blob)).WithField("expectedProposer", idx).
Debug("unexpected blob proposer")
return ErrSidecarUnexpectedProposer
return blobErrBuilder(ErrSidecarUnexpectedProposer)
}
return nil
}
@@ -357,3 +337,7 @@ func blobToSignatureData(b blocks.ROBlob) SignatureData {
Slot: b.Slot(),
}
}
func blobErrBuilder(baseErr error) error {
return goError.Join(ErrBlobInvalid, baseErr)
}
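blobErrBuilder relies on errors.Join (Go 1.20+), so a single returned error satisfies errors.Is for both the umbrella ErrBlobInvalid and the specific sentinel. A minimal standalone sketch of that behavior:

package main

import (
	"errors"
	"fmt"
)

var (
	errBlobInvalid    = errors.New("blob failed verification")      // umbrella error
	errFromFutureSlot = errors.New("slot is too far in the future") // specific sentinel
)

// blobErr mirrors blobErrBuilder: errors.Join keeps both errors in the tree.
func blobErr(base error) error {
	return errors.Join(errBlobInvalid, base)
}

func main() {
	err := blobErr(errFromFutureSlot)
	fmt.Println(errors.Is(err, errBlobInvalid))    // true
	fmt.Println(errors.Is(err, errFromFutureSlot)) // true
}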

View File

@@ -27,13 +27,13 @@ func TestBlobIndexInBounds(t *testing.T) {
_, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1)
b := blobs[0]
// set Index to a value that is out of bounds
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.BlobIndexInBounds())
require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds))
require.NoError(t, v.results.result(RequireBlobIndexInBounds))
b.Index = fieldparams.MaxBlobsPerBlock
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.BlobIndexInBounds(), ErrBlobIndexInvalid)
require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds))
require.NotNil(t, v.results.result(RequireBlobIndexInBounds))
@@ -52,7 +52,7 @@ func TestSlotNotTooEarly(t *testing.T) {
// This clock will give a current slot of 1 on the nose
happyClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now }))
ini := Initializer{shared: &sharedResources{clock: happyClock}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.NotFromFutureSlot())
require.Equal(t, true, v.results.executed(RequireNotFromFutureSlot))
require.NoError(t, v.results.result(RequireNotFromFutureSlot))
@@ -61,7 +61,7 @@ func TestSlotNotTooEarly(t *testing.T) {
// but still in the previous slot.
closeClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration() / 2) }))
ini = Initializer{shared: &sharedResources{clock: closeClock}}
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.NotFromFutureSlot())
// This clock will give a current slot of 0, with now coming more than max clock disparity before slot 1
@@ -69,7 +69,7 @@ func TestSlotNotTooEarly(t *testing.T) {
dispClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return disparate }))
// Set up initializer to use the clock that will set now to a little to far before slot 1
ini = Initializer{shared: &sharedResources{clock: dispClock}}
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.NotFromFutureSlot(), ErrFromFutureSlot)
require.Equal(t, true, v.results.executed(RequireNotFromFutureSlot))
require.NotNil(t, v.results.result(RequireNotFromFutureSlot))
@@ -114,7 +114,7 @@ func TestSlotAboveFinalized(t *testing.T) {
_, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1)
b := blobs[0]
b.SignedBlockHeader.Header.Slot = c.slot
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
err := v.SlotAboveFinalized()
require.Equal(t, true, v.results.executed(RequireSlotAboveFinalized))
if c.err == nil {
@@ -146,7 +146,7 @@ func TestValidProposerSignature_Cached(t *testing.T) {
},
}
ini := Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.ValidProposerSignature(ctx))
require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
require.NoError(t, v.results.result(RequireValidProposerSignature))
@@ -159,7 +159,7 @@ func TestValidProposerSignature_Cached(t *testing.T) {
return true, errors.New("derp")
}
ini = Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}}
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature)
require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
require.NotNil(t, v.results.result(RequireValidProposerSignature))
@@ -182,14 +182,14 @@ func TestValidProposerSignature_CacheMiss(t *testing.T) {
},
}
ini := Initializer{shared: &sharedResources{sc: sc, sr: sbrForValOverride(b.ProposerIndex(), &ethpb.Validator{})}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.ValidProposerSignature(ctx))
require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
require.NoError(t, v.results.result(RequireValidProposerSignature))
// simulate state not found
ini = Initializer{shared: &sharedResources{sc: sc, sr: sbrNotFound(t, expectedSd.Parent)}}
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature)
require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
require.NotNil(t, v.results.result(RequireValidProposerSignature))
@@ -206,7 +206,7 @@ func TestValidProposerSignature_CacheMiss(t *testing.T) {
},
}
ini = Initializer{shared: &sharedResources{sc: sc, sr: sbr}}
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
// make sure all the histories are clean before calling the method
// so we don't get polluted by previous usages
@@ -255,14 +255,14 @@ func TestSidecarParentSeen(t *testing.T) {
t.Run("happy path", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{fc: fcHas}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarParentSeen(nil))
require.Equal(t, true, v.results.executed(RequireSidecarParentSeen))
require.NoError(t, v.results.result(RequireSidecarParentSeen))
})
t.Run("HasNode false, no badParent cb, expected error", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{fc: fcLacks}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarParentSeen(nil), ErrSidecarParentNotSeen)
require.Equal(t, true, v.results.executed(RequireSidecarParentSeen))
require.NotNil(t, v.results.result(RequireSidecarParentSeen))
@@ -270,14 +270,14 @@ func TestSidecarParentSeen(t *testing.T) {
t.Run("HasNode false, badParent true", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{fc: fcLacks}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarParentSeen(badParentCb(t, b.ParentRoot(), true)))
require.Equal(t, true, v.results.executed(RequireSidecarParentSeen))
require.NoError(t, v.results.result(RequireSidecarParentSeen))
})
t.Run("HasNode false, badParent false", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{fc: fcLacks}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarParentSeen(badParentCb(t, b.ParentRoot(), false)), ErrSidecarParentNotSeen)
require.Equal(t, true, v.results.executed(RequireSidecarParentSeen))
require.NotNil(t, v.results.result(RequireSidecarParentSeen))
@@ -289,14 +289,14 @@ func TestSidecarParentValid(t *testing.T) {
b := blobs[0]
t.Run("parent valid", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarParentValid(badParentCb(t, b.ParentRoot(), false)))
require.Equal(t, true, v.results.executed(RequireSidecarParentValid))
require.NoError(t, v.results.result(RequireSidecarParentValid))
})
t.Run("parent not valid", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarParentValid(badParentCb(t, b.ParentRoot(), true)), ErrSidecarParentInvalid)
require.Equal(t, true, v.results.executed(RequireSidecarParentValid))
require.NotNil(t, v.results.result(RequireSidecarParentValid))
@@ -340,7 +340,7 @@ func TestSidecarParentSlotLower(t *testing.T) {
}
return c.fcSlot, c.fcErr
}}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
err := v.SidecarParentSlotLower()
require.Equal(t, true, v.results.executed(RequireSidecarParentSlotLower))
if c.err == nil {
@@ -364,7 +364,7 @@ func TestSidecarDescendsFromFinalized(t *testing.T) {
}
return false
}}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarDescendsFromFinalized(), ErrSidecarNotFinalizedDescendent)
require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized))
require.NotNil(t, v.results.result(RequireSidecarDescendsFromFinalized))
@@ -376,7 +376,7 @@ func TestSidecarDescendsFromFinalized(t *testing.T) {
}
return true
}}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarDescendsFromFinalized())
require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized))
require.NoError(t, v.results.result(RequireSidecarDescendsFromFinalized))
@@ -389,7 +389,7 @@ func TestSidecarInclusionProven(t *testing.T) {
b := blobs[0]
ini := Initializer{}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarInclusionProven())
require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
require.NoError(t, v.results.result(RequireSidecarInclusionProven))
@@ -397,7 +397,7 @@ func TestSidecarInclusionProven(t *testing.T) {
// Invert bits of the first byte of the body root to mess up the proof
byte0 := b.SignedBlockHeader.Header.BodyRoot[0]
b.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarInclusionProven(), ErrSidecarInclusionProofInvalid)
require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
require.NotNil(t, v.results.result(RequireSidecarInclusionProven))
@@ -409,7 +409,7 @@ func TestSidecarInclusionProvenElectra(t *testing.T) {
b := blobs[0]
ini := Initializer{}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarInclusionProven())
require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
require.NoError(t, v.results.result(RequireSidecarInclusionProven))
@@ -417,7 +417,7 @@ func TestSidecarInclusionProvenElectra(t *testing.T) {
// Invert bits of the first byte of the body root to mess up the proof
byte0 := b.SignedBlockHeader.Header.BodyRoot[0]
b.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255
v = ini.NewBlobVerifier(b, GossipSidecarRequirements)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarInclusionProven(), ErrSidecarInclusionProofInvalid)
require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven))
require.NotNil(t, v.results.result(RequireSidecarInclusionProven))
@@ -452,21 +452,21 @@ func TestSidecarProposerExpected(t *testing.T) {
b := blobs[0]
t.Run("cached, matches", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex())}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarProposerExpected(ctx))
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
require.NoError(t, v.results.result(RequireSidecarProposerExpected))
})
t.Run("cached, does not match", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex() + 1)}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer)
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, v.results.result(RequireSidecarProposerExpected))
})
t.Run("not cached, state lookup failure", func(t *testing.T) {
ini := Initializer{shared: &sharedResources{sr: sbrNotFound(t, b.ParentRoot()), pc: &mockProposerCache{ProposerCB: pcReturnsNotFound()}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer)
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, v.results.result(RequireSidecarProposerExpected))
@@ -475,14 +475,14 @@ func TestSidecarProposerExpected(t *testing.T) {
t.Run("not cached, proposer matches", func(t *testing.T) {
pc := &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
require.Equal(t, b.ParentRoot(), root)
require.Equal(t, b.Slot(), slot)
return b.ProposerIndex(), nil
},
}
ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), &ethpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.NoError(t, v.SidecarProposerExpected(ctx))
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
require.NoError(t, v.results.result(RequireSidecarProposerExpected))
@@ -490,14 +490,14 @@ func TestSidecarProposerExpected(t *testing.T) {
t.Run("not cached, proposer does not match", func(t *testing.T) {
pc := &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
require.Equal(t, b.ParentRoot(), root)
require.Equal(t, b.Slot(), slot)
return b.ProposerIndex() + 1, nil
},
}
ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), &ethpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer)
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, v.results.result(RequireSidecarProposerExpected))
@@ -505,14 +505,14 @@ func TestSidecarProposerExpected(t *testing.T) {
t.Run("not cached, ComputeProposer fails", func(t *testing.T) {
pc := &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
ComputeProposerCB: func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
require.Equal(t, b.ParentRoot(), root)
require.Equal(t, b.Slot(), slot)
return 0, errors.New("ComputeProposer failed")
},
}
ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), &ethpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer)
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, v.results.result(RequireSidecarProposerExpected))
@@ -523,7 +523,7 @@ func TestRequirementSatisfaction(t *testing.T) {
_, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1)
b := blobs[0]
ini := Initializer{}
v := ini.NewBlobVerifier(b, GossipSidecarRequirements)
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
_, err := v.VerifiedROBlob()
require.ErrorIs(t, err, ErrBlobInvalid)
@@ -537,7 +537,7 @@ func TestRequirementSatisfaction(t *testing.T) {
}
// satisfy everything through the backdoor and ensure we get the verified ro blob at the end
for _, r := range GossipSidecarRequirements {
for _, r := range GossipBlobSidecarRequirements {
v.results.record(r, nil)
}
require.Equal(t, true, v.results.allSatisfied())

View File

@@ -17,7 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
)
const (
@@ -50,8 +50,8 @@ type SignatureData struct {
Slot primitives.Slot
}
func (d SignatureData) logFields() log.Fields {
return log.Fields{
func (d SignatureData) logFields() logrus.Fields {
return logrus.Fields{
"root": fmt.Sprintf("%#x", d.Root),
"parentRoot": fmt.Sprintf("%#x", d.Parent),
"signature": fmt.Sprintf("%#x", d.Signature),

View File

@@ -2,8 +2,40 @@ package verification
import "github.com/pkg/errors"
// ErrMissingVerification indicates that the given verification function was never performed on the value.
var ErrMissingVerification = errors.New("verification was not performed for requirement")
var (
// ErrFromFutureSlot means RequireSlotNotTooEarly failed.
ErrFromFutureSlot = errors.New("slot is too far in the future")
// ErrSlotNotAfterFinalized means RequireSlotAboveFinalized failed.
ErrSlotNotAfterFinalized = errors.New("slot <= finalized checkpoint")
// ErrInvalidProposerSignature means RequireValidProposerSignature failed.
ErrInvalidProposerSignature = errors.New("proposer signature could not be verified")
// ErrSidecarParentNotSeen means RequireSidecarParentSeen failed.
ErrSidecarParentNotSeen = errors.New("parent root has not been seen")
// ErrSidecarParentInvalid means RequireSidecarParentValid failed.
ErrSidecarParentInvalid = errors.New("parent block is not valid")
// ErrSlotNotAfterParent means RequireSidecarParentSlotLower failed.
ErrSlotNotAfterParent = errors.New("slot <= slot")
// ErrSidecarNotFinalizedDescendent means RequireSidecarDescendsFromFinalized failed.
ErrSidecarNotFinalizedDescendent = errors.New("parent is not descended from the finalized block")
// ErrSidecarInclusionProofInvalid means RequireSidecarInclusionProven failed.
ErrSidecarInclusionProofInvalid = errors.New("sidecar inclusion proof verification failed")
// ErrSidecarKzgProofInvalid means RequireSidecarKzgProofVerified failed.
ErrSidecarKzgProofInvalid = errors.New("sidecar kzg commitment proof verification failed")
// ErrSidecarUnexpectedProposer means RequireSidecarProposerExpected failed.
ErrSidecarUnexpectedProposer = errors.New("sidecar was not proposed by the expected proposer_index")
// ErrMissingVerification indicates that the given verification function was never performed on the value.
ErrMissingVerification = errors.New("verification was not performed for requirement")
)
// VerificationMultiError is a custom error that can be used to access individual verification failures.
type VerificationMultiError struct {

View File

@@ -0,0 +1,5 @@
package verification
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "verification")
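A minimal sketch of how a package-scoped prefixed entry like this is used; every call site then emits prefix=verification without repeating it:

package main

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "verification")

func main() {
	// Both lines carry prefix=verification automatically.
	log.WithField("root", "0xabcd").Info("sidecar verified")
	log.Warn("example warning")
}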

View File

@@ -39,7 +39,7 @@ func TestResultList(t *testing.T) {
func TestExportedBlobSanityCheck(t *testing.T) {
// make sure all requirement lists contain the bare minimum checks
sanity := []Requirement{RequireValidProposerSignature, RequireSidecarKzgProofVerified, RequireBlobIndexInBounds, RequireSidecarInclusionProven}
reqs := [][]Requirement{GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements, PendingQueueSidecarRequirements}
reqs := [][]Requirement{GossipBlobSidecarRequirements, SpectestBlobSidecarRequirements, InitsyncBlobSidecarRequirements, BackfillBlobSidecarRequirements, PendingQueueBlobSidecarRequirements}
for i := range reqs {
r := reqs[i]
reqMap := make(map[Requirement]struct{})
@@ -51,13 +51,13 @@ func TestExportedBlobSanityCheck(t *testing.T) {
require.Equal(t, true, ok)
}
}
require.DeepEqual(t, allSidecarRequirements, GossipSidecarRequirements)
require.DeepEqual(t, allBlobSidecarRequirements, GossipBlobSidecarRequirements)
}
func TestAllBlobRequirementsHaveStrings(t *testing.T) {
var derp Requirement = math.MaxInt
require.Equal(t, unknownRequirementName, derp.String())
for i := range allSidecarRequirements {
require.NotEqual(t, unknownRequirementName, allSidecarRequirements[i].String())
for i := range allBlobSidecarRequirements {
require.NotEqual(t, unknownRequirementName, allBlobSidecarRequirements[i].String())
}
}

View File

@@ -166,6 +166,7 @@ type BeaconChainConfig struct {
DenebForkEpoch primitives.Epoch `yaml:"DENEB_FORK_EPOCH" spec:"true"` // DenebForkEpoch is used to represent the assigned fork epoch for deneb.
ElectraForkVersion []byte `yaml:"ELECTRA_FORK_VERSION" spec:"true"` // ElectraForkVersion is used to represent the fork version for electra.
ElectraForkEpoch primitives.Epoch `yaml:"ELECTRA_FORK_EPOCH" spec:"true"` // ElectraForkEpoch is used to represent the assigned fork epoch for electra.
Eip7594ForkEpoch primitives.Epoch `yaml:"EIP7594_FORK_EPOCH" spec:"true"` // EIP7594ForkEpoch is used to represent the assigned fork epoch for PeerDAS.
ForkVersionSchedule map[[fieldparams.VersionLength]byte]primitives.Epoch // Schedule of fork epochs by version.
ForkVersionNames map[[fieldparams.VersionLength]byte]string // Human-readable names of fork versions.
@@ -255,6 +256,13 @@ type BeaconChainConfig struct {
MaxDepositRequestsPerPayload uint64 `yaml:"MAX_DEPOSIT_REQUESTS_PER_PAYLOAD" spec:"true"` // MaxDepositRequestsPerPayload is the maximum number of execution layer deposits in each payload
UnsetDepositRequestsStartIndex uint64 `yaml:"UNSET_DEPOSIT_REQUESTS_START_INDEX" spec:"true"` // UnsetDepositRequestsStartIndex is used to check the start index for eip6110
// PeerDAS Values
SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT"` // SamplesPerSlot refers to the number of random samples a node queries per slot.
CustodyRequirement uint64 `yaml:"CUSTODY_REQUIREMENT"` // CustodyRequirement refers to the minimum amount of subnets a peer must custody and serve samples from.
MinEpochsForDataColumnSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS"` // MinEpochsForDataColumnSidecarsRequest is the minimum number of epochs the node will keep the data columns for.
MaxCellsInExtendedMatrix uint64 `yaml:"MAX_CELLS_IN_EXTENDED_MATRIX" spec:"true"` // MaxCellsInExtendedMatrix is the full data of one-dimensional erasure coding extended blobs (in row major format).
NumberOfColumns uint64 `yaml:"NUMBER_OF_COLUMNS" spec:"true"` // NumberOfColumns in the extended data matrix.
// Networking Specific Parameters
GossipMaxSize uint64 `yaml:"GOSSIP_MAX_SIZE" spec:"true"` // GossipMaxSize is the maximum allowed size of uncompressed gossip messages.
MaxChunkSize uint64 `yaml:"MAX_CHUNK_SIZE" spec:"true"` // MaxChunkSize is the maximum allowed size of uncompressed req/resp chunked responses.
@@ -272,10 +280,6 @@ type BeaconChainConfig struct {
AttestationSubnetPrefixBits uint64 `yaml:"ATTESTATION_SUBNET_PREFIX_BITS" spec:"true"` // AttestationSubnetPrefixBits is defined as (ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS).
SubnetsPerNode uint64 `yaml:"SUBNETS_PER_NODE" spec:"true"` // SubnetsPerNode is the number of long-lived subnets a beacon node should be subscribed to.
NodeIdBits uint64 `yaml:"NODE_ID_BITS" spec:"true"` // NodeIdBits defines the bit length of a node id.
// PeerDAS
NumberOfColumns uint64 `yaml:"NUMBER_OF_COLUMNS" spec:"true"` // NumberOfColumns in the extended data matrix.
MaxCellsInExtendedMatrix uint64 `yaml:"MAX_CELLS_IN_EXTENDED_MATRIX" spec:"true"` // MaxCellsInExtendedMatrix is the full data of one-dimensional erasure coding extended blobs (in row major format).
}
// InitializeForkSchedule initializes the schedules forks baked into the config.
@@ -360,6 +364,12 @@ func DenebEnabled() bool {
return BeaconConfig().DenebForkEpoch < math.MaxUint64
}
// PeerDASEnabled centralizes the check to determine if code paths
// that are specific to peerdas should be allowed to execute.
func PeerDASEnabled() bool {
return BeaconConfig().Eip7594ForkEpoch < math.MaxUint64
}
// WithinDAPeriod checks if the block epoch is within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS of the given current epoch.
func WithinDAPeriod(block, current primitives.Epoch) bool {
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
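Both helpers reduce to plain integer comparisons: an unset fork epoch is MaxUint64, and a block stays within the DA period while block + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= current. A minimal sketch with uint64 epochs, assuming the mainnet window of 4096 epochs:

package main

import (
	"fmt"
	"math"
)

const minEpochsForBlobsRequest = 4096 // assumed mainnet MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS

// forkEnabled mirrors PeerDASEnabled: a fork epoch of MaxUint64 means the
// fork is not scheduled; any smaller value enables the code path.
func forkEnabled(forkEpoch uint64) bool {
	return forkEpoch < math.MaxUint64
}

// withinDAPeriod mirrors WithinDAPeriod with plain uint64 epochs.
func withinDAPeriod(block, current uint64) bool {
	return block+minEpochsForBlobsRequest >= current
}

func main() {
	fmt.Println(forkEnabled(math.MaxUint64))    // false: peerDAS not scheduled
	fmt.Println(withinDAPeriod(100000, 104096)) // true: exactly at the window edge
	fmt.Println(withinDAPeriod(100000, 104097)) // false: one epoch too old
}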

View File

@@ -25,12 +25,10 @@ import (
// IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
var placeholderFields = []string{
"BYTES_PER_LOGS_BLOOM", // Compile time constant on ExecutionPayload.logs_bloom.
"CUSTODY_REQUIREMENT",
"EIP6110_FORK_EPOCH",
"EIP6110_FORK_VERSION",
"EIP7002_FORK_EPOCH",
"EIP7002_FORK_VERSION",
"EIP7594_FORK_EPOCH",
"EIP7594_FORK_VERSION",
"EIP7732_FORK_EPOCH",
"EIP7732_FORK_VERSION",
@@ -43,7 +41,6 @@ var placeholderFields = []string{
"MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests
"MAX_TRANSACTIONS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.transactions.
"REORG_HEAD_WEIGHT_THRESHOLD",
"SAMPLES_PER_SLOT",
"TARGET_NUMBER_OF_PEERS",
"UPDATE_TIMEOUT",
"WHISK_EPOCHS_PER_SHUFFLING_PHASE",

View File

@@ -216,6 +216,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
DenebForkEpoch: mainnetDenebForkEpoch,
ElectraForkVersion: []byte{5, 0, 0, 0},
ElectraForkEpoch: mainnetElectraForkEpoch,
Eip7594ForkEpoch: math.MaxUint64,
// New values introduced in Altair hard fork 1.
// Participation flag indices.
@@ -295,8 +296,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
UnsetDepositRequestsStartIndex: math.MaxUint64,
// PeerDAS
NumberOfColumns: 128,
MaxCellsInExtendedMatrix: 768,
NumberOfColumns: 128,
MaxCellsInExtendedMatrix: 768,
SamplesPerSlot: 8,
CustodyRequirement: 4,
MinEpochsForDataColumnSidecarsRequest: 4096,
// Values related to networking parameters.
GossipMaxSize: 10 * 1 << 20, // 10 MiB

View File

@@ -174,6 +174,11 @@ ssz_electra_objs = [
"SignedConsolidation",
]
ssz_fulu_objs = [
"DataColumnIdentifier",
"DataColumnSidecar",
]
ssz_gen_marshal(
name = "ssz_generated_phase0",
out = "phase0.ssz.go",
@@ -251,6 +256,19 @@ ssz_gen_marshal(
objs = ssz_electra_objs,
)
ssz_gen_marshal(
name = "ssz_generated_fulu",
out = "fulu.ssz.go",
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs + ssz_deneb_objs + ssz_electra_objs,
go_proto = ":go_proto",
includes = [
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
],
objs = ssz_fulu_objs,
)
ssz_gen_marshal(
name = "ssz_generated_non_core",
out = "non-core.ssz.go",
@@ -263,8 +281,10 @@ ssz_gen_marshal(
objs = [
"BeaconBlocksByRangeRequest",
"BlobSidecarsByRangeRequest",
"DataColumnSidecarsByRangeRequest",
"MetaDataV0",
"MetaDataV1",
"MetaDataV2",
"SignedValidatorRegistrationV1",
"ValidatorRegistrationV1",
"BuilderBid",
@@ -313,6 +333,7 @@ go_library(
":ssz_generated_capella", # keep
":ssz_generated_deneb", # keep
":ssz_generated_electra", # keep
":ssz_generated_fulu", # keep
":ssz_generated_non_core", # keep
":ssz_generated_phase0", # keep
],
@@ -354,6 +375,7 @@ ssz_proto_files(
"beacon_state.proto",
"blobs.proto",
"light_client.proto",
"data_columns.proto",
"sync_committee.proto",
"withdrawals.proto",
],

proto/prysm/v1alpha1/data_columns.pb.go

@@ -0,0 +1,299 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v4.25.1
// source: proto/prysm/v1alpha1/data_columns.proto
package eth
import (
reflect "reflect"
sync "sync"
_ "github.com/prysmaticlabs/prysm/v5/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type DataColumnSidecar struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ColumnIndex uint64 `protobuf:"varint,1,opt,name=column_index,json=columnIndex,proto3" json:"column_index,omitempty"`
DataColumn [][]byte `protobuf:"bytes,2,rep,name=data_column,json=dataColumn,proto3" json:"data_column,omitempty" ssz-max:"4096" ssz-size:"?,2048"`
KzgCommitments [][]byte `protobuf:"bytes,3,rep,name=kzg_commitments,json=kzgCommitments,proto3" json:"kzg_commitments,omitempty" ssz-max:"4096" ssz-size:"?,48"`
KzgProof [][]byte `protobuf:"bytes,4,rep,name=kzg_proof,json=kzgProof,proto3" json:"kzg_proof,omitempty" ssz-max:"4096" ssz-size:"?,48"`
SignedBlockHeader *SignedBeaconBlockHeader `protobuf:"bytes,5,opt,name=signed_block_header,json=signedBlockHeader,proto3" json:"signed_block_header,omitempty"`
KzgCommitmentsInclusionProof [][]byte `protobuf:"bytes,6,rep,name=kzg_commitments_inclusion_proof,json=kzgCommitmentsInclusionProof,proto3" json:"kzg_commitments_inclusion_proof,omitempty" ssz-size:"4,32"`
}
func (x *DataColumnSidecar) Reset() {
*x = DataColumnSidecar{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DataColumnSidecar) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DataColumnSidecar) ProtoMessage() {}
func (x *DataColumnSidecar) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DataColumnSidecar.ProtoReflect.Descriptor instead.
func (*DataColumnSidecar) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{0}
}
func (x *DataColumnSidecar) GetColumnIndex() uint64 {
if x != nil {
return x.ColumnIndex
}
return 0
}
func (x *DataColumnSidecar) GetDataColumn() [][]byte {
if x != nil {
return x.DataColumn
}
return nil
}
func (x *DataColumnSidecar) GetKzgCommitments() [][]byte {
if x != nil {
return x.KzgCommitments
}
return nil
}
func (x *DataColumnSidecar) GetKzgProof() [][]byte {
if x != nil {
return x.KzgProof
}
return nil
}
func (x *DataColumnSidecar) GetSignedBlockHeader() *SignedBeaconBlockHeader {
if x != nil {
return x.SignedBlockHeader
}
return nil
}
func (x *DataColumnSidecar) GetKzgCommitmentsInclusionProof() [][]byte {
if x != nil {
return x.KzgCommitmentsInclusionProof
}
return nil
}
type DataColumnIdentifier struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
ColumnIndex uint64 `protobuf:"varint,2,opt,name=column_index,json=columnIndex,proto3" json:"column_index,omitempty"`
}
func (x *DataColumnIdentifier) Reset() {
*x = DataColumnIdentifier{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DataColumnIdentifier) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DataColumnIdentifier) ProtoMessage() {}
func (x *DataColumnIdentifier) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DataColumnIdentifier.ProtoReflect.Descriptor instead.
func (*DataColumnIdentifier) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP(), []int{1}
}
func (x *DataColumnIdentifier) GetBlockRoot() []byte {
if x != nil {
return x.BlockRoot
}
return nil
}
func (x *DataColumnIdentifier) GetColumnIndex() uint64 {
if x != nil {
return x.ColumnIndex
}
return 0
}
var File_proto_prysm_v1alpha1_data_columns_proto protoreflect.FileDescriptor
var file_proto_prysm_v1alpha1_data_columns_proto_rawDesc = []byte{
0x0a, 0x27, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6c, 0x75,
0x6d, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74, 0x68, 0x65, 0x72,
0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f,
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x86, 0x03, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x43,
0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x21, 0x0a, 0x0c,
0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
0x33, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x02,
0x20, 0x03, 0x28, 0x0c, 0x42, 0x12, 0x8a, 0xb5, 0x18, 0x06, 0x3f, 0x2c, 0x32, 0x30, 0x34, 0x38,
0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f,
0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x39, 0x0a, 0x0f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d,
0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10, 0x8a,
0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52,
0x0e, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12,
0x2d, 0x0a, 0x09, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x04, 0x20, 0x03,
0x28, 0x0c, 0x42, 0x10, 0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04,
0x34, 0x30, 0x39, 0x36, 0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x5e,
0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68,
0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x74,
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e,
0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x11, 0x73, 0x69, 0x67,
0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4f,
0x0a, 0x1f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74,
0x73, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f,
0x66, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x34, 0x2c, 0x33,
0x32, 0x52, 0x1c, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74,
0x73, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22,
0x60, 0x0a, 0x14, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18,
0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21,
0x0a, 0x0c, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02,
0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65,
0x78, 0x42, 0x9b, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42,
0x10, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74,
0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79,
0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa,
0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_proto_prysm_v1alpha1_data_columns_proto_rawDescOnce sync.Once
file_proto_prysm_v1alpha1_data_columns_proto_rawDescData = file_proto_prysm_v1alpha1_data_columns_proto_rawDesc
)
func file_proto_prysm_v1alpha1_data_columns_proto_rawDescGZIP() []byte {
file_proto_prysm_v1alpha1_data_columns_proto_rawDescOnce.Do(func() {
file_proto_prysm_v1alpha1_data_columns_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_data_columns_proto_rawDescData)
})
return file_proto_prysm_v1alpha1_data_columns_proto_rawDescData
}
var file_proto_prysm_v1alpha1_data_columns_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_prysm_v1alpha1_data_columns_proto_goTypes = []interface{}{
(*DataColumnSidecar)(nil), // 0: ethereum.eth.v1alpha1.DataColumnSidecar
(*DataColumnIdentifier)(nil), // 1: ethereum.eth.v1alpha1.DataColumnIdentifier
(*SignedBeaconBlockHeader)(nil), // 2: ethereum.eth.v1alpha1.SignedBeaconBlockHeader
}
var file_proto_prysm_v1alpha1_data_columns_proto_depIdxs = []int32{
2, // 0: ethereum.eth.v1alpha1.DataColumnSidecar.signed_block_header:type_name -> ethereum.eth.v1alpha1.SignedBeaconBlockHeader
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_data_columns_proto_init() }
func file_proto_prysm_v1alpha1_data_columns_proto_init() {
if File_proto_prysm_v1alpha1_data_columns_proto != nil {
return
}
file_proto_prysm_v1alpha1_beacon_block_proto_init()
if !protoimpl.UnsafeEnabled {
file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DataColumnSidecar); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_proto_prysm_v1alpha1_data_columns_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DataColumnIdentifier); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_prysm_v1alpha1_data_columns_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_prysm_v1alpha1_data_columns_proto_goTypes,
DependencyIndexes: file_proto_prysm_v1alpha1_data_columns_proto_depIdxs,
MessageInfos: file_proto_prysm_v1alpha1_data_columns_proto_msgTypes,
}.Build()
File_proto_prysm_v1alpha1_data_columns_proto = out.File
file_proto_prysm_v1alpha1_data_columns_proto_rawDesc = nil
file_proto_prysm_v1alpha1_data_columns_proto_goTypes = nil
file_proto_prysm_v1alpha1_data_columns_proto_depIdxs = nil
}


@@ -0,0 +1,41 @@
// Copyright 2024 Offchain Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package ethereum.eth.v1alpha1;
import "proto/eth/ext/options.proto";
import "proto/prysm/v1alpha1/beacon_block.proto";
option csharp_namespace = "Ethereum.Eth.v1alpha1";
option go_package = "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1;eth";
option java_multiple_files = true;
option java_outer_classname = "DataColumnsProto";
option java_package = "org.ethereum.eth.v1alpha1";
option php_namespace = "Ethereum\\Eth\\v1alpha1";
message DataColumnSidecar {
uint64 column_index = 1;
repeated bytes data_column = 2 [(ethereum.eth.ext.ssz_size) = "?,bytes_per_cell.size", (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"];
repeated bytes kzg_commitments = 3 [(ethereum.eth.ext.ssz_size) = "?,48", (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"];
repeated bytes kzg_proof = 4 [(ethereum.eth.ext.ssz_size) = "?,48", (ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"];
SignedBeaconBlockHeader signed_block_header = 5;
repeated bytes kzg_commitments_inclusion_proof = 6 [(ethereum.eth.ext.ssz_size) = "kzg_commitments_inclusion_proof_depth.size,32"];
}
message DataColumnIdentifier {
bytes block_root = 1 [(ethereum.eth.ext.ssz_size) = "32"];
uint64 column_index = 2;
}
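A minimal consumer-side sketch, assuming the generated package is imported as eth and errors comes from the standard library; the invariant (one cell, one commitment, one proof per blob) follows the peerDAS design and is not stated in this diff:

	// checkSidecarShape is a hypothetical helper: the three variable-length
	// lists must stay in lockstep, one entry per blob in the block.
	func checkSidecarShape(sc *eth.DataColumnSidecar) error {
		if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
			return errors.New("cell/commitment/proof count mismatch")
		}
		return nil
	}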


@@ -0,0 +1,377 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 4a3e60c60f0d0729fe9feb7ebc13fa7a8dda757273542e39821ecbd42fe1422d
package eth
import (
ssz "github.com/prysmaticlabs/fastssz"
)
// MarshalSSZ ssz marshals the DataColumnSidecar object
func (d *DataColumnSidecar) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(d)
}
// MarshalSSZTo ssz marshals the DataColumnSidecar object to a target array
func (d *DataColumnSidecar) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(356)
// Field (0) 'ColumnIndex'
dst = ssz.MarshalUint64(dst, d.ColumnIndex)
// Offset (1) 'DataColumn'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.DataColumn) * 2048
// Offset (2) 'KzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgCommitments) * 48
// Offset (3) 'KzgProof'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgProof) * 48
// Field (4) 'SignedBlockHeader'
if d.SignedBlockHeader == nil {
d.SignedBlockHeader = new(SignedBeaconBlockHeader)
}
if dst, err = d.SignedBlockHeader.MarshalSSZTo(dst); err != nil {
return
}
// Field (5) 'KzgCommitmentsInclusionProof'
if size := len(d.KzgCommitmentsInclusionProof); size != 4 {
err = ssz.ErrVectorLengthFn("--.KzgCommitmentsInclusionProof", size, 4)
return
}
for ii := 0; ii < 4; ii++ {
if size := len(d.KzgCommitmentsInclusionProof[ii]); size != 32 {
err = ssz.ErrBytesLengthFn("--.KzgCommitmentsInclusionProof[ii]", size, 32)
return
}
dst = append(dst, d.KzgCommitmentsInclusionProof[ii]...)
}
// Field (1) 'DataColumn'
if size := len(d.DataColumn); size > 4096 {
err = ssz.ErrListTooBigFn("--.DataColumn", size, 4096)
return
}
for ii := 0; ii < len(d.DataColumn); ii++ {
if size := len(d.DataColumn[ii]); size != 2048 {
err = ssz.ErrBytesLengthFn("--.DataColumn[ii]", size, 2048)
return
}
dst = append(dst, d.DataColumn[ii]...)
}
// Field (2) 'KzgCommitments'
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(d.KzgCommitments); ii++ {
if size := len(d.KzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgCommitments[ii]", size, 48)
return
}
dst = append(dst, d.KzgCommitments[ii]...)
}
// Field (3) 'KzgProof'
if size := len(d.KzgProof); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProof", size, 4096)
return
}
for ii := 0; ii < len(d.KzgProof); ii++ {
if size := len(d.KzgProof[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgProof[ii]", size, 48)
return
}
dst = append(dst, d.KzgProof[ii]...)
}
return
}
// UnmarshalSSZ ssz unmarshals the DataColumnSidecar object
func (d *DataColumnSidecar) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 356 {
return ssz.ErrSize
}
tail := buf
var o1, o2, o3 uint64
// Field (0) 'ColumnIndex'
d.ColumnIndex = ssz.UnmarshallUint64(buf[0:8])
// Offset (1) 'DataColumn'
if o1 = ssz.ReadOffset(buf[8:12]); o1 > size {
return ssz.ErrOffset
}
if o1 != 356 {
return ssz.ErrInvalidVariableOffset
}
// Offset (2) 'KzgCommitments'
if o2 = ssz.ReadOffset(buf[12:16]); o2 > size || o1 > o2 {
return ssz.ErrOffset
}
// Offset (3) 'KzgProof'
if o3 = ssz.ReadOffset(buf[16:20]); o3 > size || o2 > o3 {
return ssz.ErrOffset
}
// Field (4) 'SignedBlockHeader'
if d.SignedBlockHeader == nil {
d.SignedBlockHeader = new(SignedBeaconBlockHeader)
}
if err = d.SignedBlockHeader.UnmarshalSSZ(buf[20:228]); err != nil {
return err
}
// Field (5) 'KzgCommitmentsInclusionProof'
d.KzgCommitmentsInclusionProof = make([][]byte, 4)
for ii := 0; ii < 4; ii++ {
if cap(d.KzgCommitmentsInclusionProof[ii]) == 0 {
d.KzgCommitmentsInclusionProof[ii] = make([]byte, 0, len(buf[228:356][ii*32:(ii+1)*32]))
}
d.KzgCommitmentsInclusionProof[ii] = append(d.KzgCommitmentsInclusionProof[ii], buf[228:356][ii*32:(ii+1)*32]...)
}
// Field (1) 'DataColumn'
{
buf = tail[o1:o2]
num, err := ssz.DivideInt2(len(buf), 2048, 4096)
if err != nil {
return err
}
d.DataColumn = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.DataColumn[ii]) == 0 {
d.DataColumn[ii] = make([]byte, 0, len(buf[ii*2048:(ii+1)*2048]))
}
d.DataColumn[ii] = append(d.DataColumn[ii], buf[ii*2048:(ii+1)*2048]...)
}
}
// Field (2) 'KzgCommitments'
{
buf = tail[o2:o3]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
d.KzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.KzgCommitments[ii]) == 0 {
d.KzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
d.KzgCommitments[ii] = append(d.KzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
// Field (3) 'KzgProof'
{
buf = tail[o3:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
d.KzgProof = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.KzgProof[ii]) == 0 {
d.KzgProof[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
d.KzgProof[ii] = append(d.KzgProof[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the DataColumnSidecar object
func (d *DataColumnSidecar) SizeSSZ() (size int) {
size = 356
// Field (1) 'DataColumn'
size += len(d.DataColumn) * 2048
// Field (2) 'KzgCommitments'
size += len(d.KzgCommitments) * 48
// Field (3) 'KzgProof'
size += len(d.KzgProof) * 48
return
}
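As a worked check, the fixed size of 356 bytes is consistent with the schema: 8 (ColumnIndex) + 3 × 4 (offsets for the three variable-length lists) + 208 (SignedBeaconBlockHeader: 112-byte header + 96-byte signature) + 4 × 32 (inclusion-proof vector) = 356.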
// HashTreeRoot ssz hashes the DataColumnSidecar object
func (d *DataColumnSidecar) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(d)
}
// HashTreeRootWith ssz hashes the DataColumnSidecar object with a hasher
func (d *DataColumnSidecar) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'ColumnIndex'
hh.PutUint64(d.ColumnIndex)
// Field (1) 'DataColumn'
{
if size := len(d.DataColumn); size > 4096 {
err = ssz.ErrListTooBigFn("--.DataColumn", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range d.DataColumn {
if len(i) != 2048 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(d.DataColumn))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (2) 'KzgCommitments'
{
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range d.KzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(d.KzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (3) 'KzgProof'
{
if size := len(d.KzgProof); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProof", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range d.KzgProof {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(d.KzgProof))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (4) 'SignedBlockHeader'
if err = d.SignedBlockHeader.HashTreeRootWith(hh); err != nil {
return
}
// Field (5) 'KzgCommitmentsInclusionProof'
{
if size := len(d.KzgCommitmentsInclusionProof); size != 4 {
err = ssz.ErrVectorLengthFn("--.KzgCommitmentsInclusionProof", size, 4)
return
}
subIndx := hh.Index()
for _, i := range d.KzgCommitmentsInclusionProof {
if len(i) != 32 {
err = ssz.ErrBytesLength
return
}
hh.Append(i)
}
hh.Merkleize(subIndx)
}
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the DataColumnIdentifier object
func (d *DataColumnIdentifier) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(d)
}
// MarshalSSZTo ssz marshals the DataColumnIdentifier object to a target array
func (d *DataColumnIdentifier) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'BlockRoot'
if size := len(d.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
dst = append(dst, d.BlockRoot...)
// Field (1) 'ColumnIndex'
dst = ssz.MarshalUint64(dst, d.ColumnIndex)
return
}
// UnmarshalSSZ ssz unmarshals the DataColumnIdentifier object
func (d *DataColumnIdentifier) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 40 {
return ssz.ErrSize
}
// Field (0) 'BlockRoot'
if cap(d.BlockRoot) == 0 {
d.BlockRoot = make([]byte, 0, len(buf[0:32]))
}
d.BlockRoot = append(d.BlockRoot, buf[0:32]...)
// Field (1) 'ColumnIndex'
d.ColumnIndex = ssz.UnmarshallUint64(buf[32:40])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the DataColumnIdentifier object
func (d *DataColumnIdentifier) SizeSSZ() (size int) {
size = 40
return
}
// HashTreeRoot ssz hashes the DataColumnIdentifier object
func (d *DataColumnIdentifier) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(d)
}
// HashTreeRootWith ssz hashes the DataColumnIdentifier object with a hasher
func (d *DataColumnIdentifier) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'BlockRoot'
if size := len(d.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
hh.PutBytes(d.BlockRoot)
// Field (1) 'ColumnIndex'
hh.PutUint64(d.ColumnIndex)
hh.Merkleize(indx)
return
}


@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: bfd7d6b556134c3bd236b880245717aa01ae79573b33f2746a08c165ba5dcedb
// Hash: c9ac7a1f653faa9b9d8f1ffba7c326db17da4f0005bf98299770bc52c93c7e11
package eth
import (
@@ -551,6 +551,106 @@ func (m *MetaDataV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
// MarshalSSZ ssz marshals the MetaDataV2 object
func (m *MetaDataV2) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(m)
}
// MarshalSSZTo ssz marshals the MetaDataV2 object to a target array
func (m *MetaDataV2) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'SeqNumber'
dst = ssz.MarshalUint64(dst, m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
dst = append(dst, m.Attnets...)
// Field (2) 'Syncnets'
if size := len(m.Syncnets); size != 1 {
err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
return
}
dst = append(dst, m.Syncnets...)
// Field (3) 'CustodySubnetCount'
dst = ssz.MarshalUint64(dst, m.CustodySubnetCount)
return
}
// UnmarshalSSZ ssz unmarshals the MetaDataV2 object
func (m *MetaDataV2) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 25 {
return ssz.ErrSize
}
// Field (0) 'SeqNumber'
m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Attnets'
if cap(m.Attnets) == 0 {
m.Attnets = make([]byte, 0, len(buf[8:16]))
}
m.Attnets = append(m.Attnets, buf[8:16]...)
// Field (2) 'Syncnets'
if cap(m.Syncnets) == 0 {
m.Syncnets = make([]byte, 0, len(buf[16:17]))
}
m.Syncnets = append(m.Syncnets, buf[16:17]...)
// Field (3) 'CustodySubnetCount'
m.CustodySubnetCount = ssz.UnmarshallUint64(buf[17:25])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV2 object
func (m *MetaDataV2) SizeSSZ() (size int) {
size = 25
return
}
// HashTreeRoot ssz hashes the MetaDataV2 object
func (m *MetaDataV2) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(m)
}
// HashTreeRootWith ssz hashes the MetaDataV2 object with a hasher
func (m *MetaDataV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'SeqNumber'
hh.PutUint64(m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
hh.PutBytes(m.Attnets)
// Field (2) 'Syncnets'
if size := len(m.Syncnets); size != 1 {
err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
return
}
hh.PutBytes(m.Syncnets)
// Field (3) 'CustodySubnetCount'
hh.PutUint64(m.CustodySubnetCount)
hh.Merkleize(indx)
return
}
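As a worked check, the fixed size of 25 bytes decomposes as 8 (SeqNumber) + 8 (Attnets, a Bitvector64) + 1 (Syncnets, a Bitvector4 packed into one byte) + 8 (CustodySubnetCount) = 25.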
// MarshalSSZ ssz marshals the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
@@ -611,6 +711,124 @@ func (b *BlobSidecarsByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error
return
}
// MarshalSSZ ssz marshals the DataColumnSidecarsByRangeRequest object
func (d *DataColumnSidecarsByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(d)
}
// MarshalSSZTo ssz marshals the DataColumnSidecarsByRangeRequest object to a target array
func (d *DataColumnSidecarsByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(20)
// Field (0) 'StartSlot'
dst = ssz.MarshalUint64(dst, uint64(d.StartSlot))
// Field (1) 'Count'
dst = ssz.MarshalUint64(dst, d.Count)
// Offset (2) 'Columns'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.Columns) * 8
// Field (2) 'Columns'
if size := len(d.Columns); size > 128 {
err = ssz.ErrListTooBigFn("--.Columns", size, 128)
return
}
for ii := 0; ii < len(d.Columns); ii++ {
dst = ssz.MarshalUint64(dst, d.Columns[ii])
}
return
}
// UnmarshalSSZ ssz unmarshals the DataColumnSidecarsByRangeRequest object
func (d *DataColumnSidecarsByRangeRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 20 {
return ssz.ErrSize
}
tail := buf
var o2 uint64
// Field (0) 'StartSlot'
d.StartSlot = github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))
// Field (1) 'Count'
d.Count = ssz.UnmarshallUint64(buf[8:16])
// Offset (2) 'Columns'
if o2 = ssz.ReadOffset(buf[16:20]); o2 > size {
return ssz.ErrOffset
}
if o2 != 20 {
return ssz.ErrInvalidVariableOffset
}
// Field (2) 'Columns'
{
buf = tail[o2:]
num, err := ssz.DivideInt2(len(buf), 8, 128)
if err != nil {
return err
}
d.Columns = ssz.ExtendUint64(d.Columns, num)
for ii := 0; ii < num; ii++ {
d.Columns[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8])
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the DataColumnSidecarsByRangeRequest object
func (d *DataColumnSidecarsByRangeRequest) SizeSSZ() (size int) {
size = 20
// Field (2) 'Columns'
size += len(d.Columns) * 8
return
}
// HashTreeRoot ssz hashes the DataColumnSidecarsByRangeRequest object
func (d *DataColumnSidecarsByRangeRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(d)
}
// HashTreeRootWith ssz hashes the DataColumnSidecarsByRangeRequest object with a hasher
func (d *DataColumnSidecarsByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'StartSlot'
hh.PutUint64(uint64(d.StartSlot))
// Field (1) 'Count'
hh.PutUint64(d.Count)
// Field (2) 'Columns'
{
if size := len(d.Columns); size > 128 {
err = ssz.ErrListTooBigFn("--.Columns", size, 128)
return
}
subIndx := hh.Index()
for _, i := range d.Columns {
hh.AppendUint64(i)
}
hh.FillUpTo32()
numItems := uint64(len(d.Columns))
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(128, numItems, 8))
}
hh.Merkleize(indx)
return
}
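A construction sketch with illustrative values (only the type and MarshalSSZ are taken from this diff; the numbers are hypothetical):

	// Request columns 0-7 over a 32-slot span starting at slot 100.
	req := &eth.DataColumnSidecarsByRangeRequest{
		StartSlot: 100,
		Count:     32,
		Columns:   []uint64{0, 1, 2, 3, 4, 5, 6, 7},
	}
	buf, err := req.MarshalSSZ() // 20 fixed bytes + 8 per column = 84 bytes here
	if err != nil {
		// handle the error
	}
	_ = buf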
// MarshalSSZ ssz marshals the DepositSnapshot object
func (d *DepositSnapshot) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(d)


@@ -348,6 +348,77 @@ func (x *MetaDataV1) GetSyncnets() github_com_prysmaticlabs_go_bitfield.Bitvecto
return github_com_prysmaticlabs_go_bitfield.Bitvector4(nil)
}
type MetaDataV2 struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SeqNumber uint64 `protobuf:"varint,1,opt,name=seq_number,json=seqNumber,proto3" json:"seq_number,omitempty"`
Attnets github_com_prysmaticlabs_go_bitfield.Bitvector64 `protobuf:"bytes,2,opt,name=attnets,proto3" json:"attnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector64" ssz-size:"8"`
Syncnets github_com_prysmaticlabs_go_bitfield.Bitvector4 `protobuf:"bytes,3,opt,name=syncnets,proto3" json:"syncnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector4" ssz-size:"1"`
CustodySubnetCount uint64 `protobuf:"varint,4,opt,name=custody_subnet_count,json=custodySubnetCount,proto3" json:"custody_subnet_count,omitempty"`
}
func (x *MetaDataV2) Reset() {
*x = MetaDataV2{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MetaDataV2) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MetaDataV2) ProtoMessage() {}
func (x *MetaDataV2) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MetaDataV2.ProtoReflect.Descriptor instead.
func (*MetaDataV2) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{5}
}
func (x *MetaDataV2) GetSeqNumber() uint64 {
if x != nil {
return x.SeqNumber
}
return 0
}
func (x *MetaDataV2) GetAttnets() github_com_prysmaticlabs_go_bitfield.Bitvector64 {
if x != nil {
return x.Attnets
}
return github_com_prysmaticlabs_go_bitfield.Bitvector64(nil)
}
func (x *MetaDataV2) GetSyncnets() github_com_prysmaticlabs_go_bitfield.Bitvector4 {
if x != nil {
return x.Syncnets
}
return github_com_prysmaticlabs_go_bitfield.Bitvector4(nil)
}
func (x *MetaDataV2) GetCustodySubnetCount() uint64 {
if x != nil {
return x.CustodySubnetCount
}
return 0
}
type BlobSidecarsByRangeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -360,7 +431,7 @@ type BlobSidecarsByRangeRequest struct {
func (x *BlobSidecarsByRangeRequest) Reset() {
*x = BlobSidecarsByRangeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -373,7 +444,7 @@ func (x *BlobSidecarsByRangeRequest) String() string {
func (*BlobSidecarsByRangeRequest) ProtoMessage() {}
func (x *BlobSidecarsByRangeRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5]
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -386,7 +457,7 @@ func (x *BlobSidecarsByRangeRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use BlobSidecarsByRangeRequest.ProtoReflect.Descriptor instead.
func (*BlobSidecarsByRangeRequest) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{5}
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{6}
}
func (x *BlobSidecarsByRangeRequest) GetStartSlot() github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot {
@@ -403,6 +474,69 @@ func (x *BlobSidecarsByRangeRequest) GetCount() uint64 {
return 0
}
type DataColumnSidecarsByRangeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
StartSlot github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot `protobuf:"varint,1,opt,name=start_slot,json=startSlot,proto3" json:"start_slot,omitempty" cast-type:"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives.Slot"`
Count uint64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
Columns []uint64 `protobuf:"varint,3,rep,packed,name=columns,proto3" json:"columns,omitempty" ssz-max:"128"`
}
func (x *DataColumnSidecarsByRangeRequest) Reset() {
*x = DataColumnSidecarsByRangeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DataColumnSidecarsByRangeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DataColumnSidecarsByRangeRequest) ProtoMessage() {}
func (x *DataColumnSidecarsByRangeRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DataColumnSidecarsByRangeRequest.ProtoReflect.Descriptor instead.
func (*DataColumnSidecarsByRangeRequest) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP(), []int{7}
}
func (x *DataColumnSidecarsByRangeRequest) GetStartSlot() github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot {
if x != nil {
return x.StartSlot
}
return github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(0)
}
func (x *DataColumnSidecarsByRangeRequest) GetCount() uint64 {
if x != nil {
return x.Count
}
return 0
}
func (x *DataColumnSidecarsByRangeRequest) GetColumns() []uint64 {
if x != nil {
return x.Columns
}
return nil
}
var File_proto_prysm_v1alpha1_p2p_messages_proto protoreflect.FileDescriptor
var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{
@@ -482,27 +616,56 @@ var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66,
0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a,
0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x22, 0x98,
0x01, 0x0a, 0x1a, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42,
0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a,
0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f,
0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73,
0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69,
0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53,
0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01,
0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x9b, 0x01, 0x0a, 0x19, 0x6f, 0x72,
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69,
0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca,
0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x22, 0x88,
0x02, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x32, 0x12, 0x1d, 0x0a,
0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
0x04, 0x52, 0x09, 0x73, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x53, 0x0a, 0x07,
0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x39, 0x82,
0xb5, 0x18, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72,
0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62,
0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f,
0x72, 0x36, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74,
0x73, 0x12, 0x54, 0x0a, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0c, 0x42, 0x38, 0x82, 0xb5, 0x18, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62,
0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69,
0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73,
0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x75, 0x73, 0x74, 0x6f,
0x64, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x64, 0x79, 0x53, 0x75,
0x62, 0x6e, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x98, 0x01, 0x0a, 0x1a, 0x42, 0x6c,
0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72,
0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5,
0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79,
0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53,
0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14,
0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc1, 0x01, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c,
0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e,
0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61,
0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82,
0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72,
0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12,
0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73,
0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52,
0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9b, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67,
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63,
0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02,
0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -517,14 +680,16 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescGZIP() []byte {
return file_proto_prysm_v1alpha1_p2p_messages_proto_rawDescData
}
var file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_proto_prysm_v1alpha1_p2p_messages_proto_goTypes = []interface{}{
(*Status)(nil), // 0: ethereum.eth.v1alpha1.Status
(*BeaconBlocksByRangeRequest)(nil), // 1: ethereum.eth.v1alpha1.BeaconBlocksByRangeRequest
(*ENRForkID)(nil), // 2: ethereum.eth.v1alpha1.ENRForkID
(*MetaDataV0)(nil), // 3: ethereum.eth.v1alpha1.MetaDataV0
(*MetaDataV1)(nil), // 4: ethereum.eth.v1alpha1.MetaDataV1
(*BlobSidecarsByRangeRequest)(nil), // 5: ethereum.eth.v1alpha1.BlobSidecarsByRangeRequest
(*Status)(nil), // 0: ethereum.eth.v1alpha1.Status
(*BeaconBlocksByRangeRequest)(nil), // 1: ethereum.eth.v1alpha1.BeaconBlocksByRangeRequest
(*ENRForkID)(nil), // 2: ethereum.eth.v1alpha1.ENRForkID
(*MetaDataV0)(nil), // 3: ethereum.eth.v1alpha1.MetaDataV0
(*MetaDataV1)(nil), // 4: ethereum.eth.v1alpha1.MetaDataV1
(*MetaDataV2)(nil), // 5: ethereum.eth.v1alpha1.MetaDataV2
(*BlobSidecarsByRangeRequest)(nil), // 6: ethereum.eth.v1alpha1.BlobSidecarsByRangeRequest
(*DataColumnSidecarsByRangeRequest)(nil), // 7: ethereum.eth.v1alpha1.DataColumnSidecarsByRangeRequest
}
var file_proto_prysm_v1alpha1_p2p_messages_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
@@ -601,6 +766,18 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
}
}
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MetaDataV2); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BlobSidecarsByRangeRequest); i {
case 0:
return &v.state
@@ -612,6 +789,18 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
return nil
}
}
file_proto_prysm_v1alpha1_p2p_messages_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DataColumnSidecarsByRangeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -619,7 +808,7 @@ func file_proto_prysm_v1alpha1_p2p_messages_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc,
NumEnums: 0,
NumMessages: 6,
NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},

View File

@@ -51,6 +51,7 @@ message MetaDataV0 {
(
seq_number: uint64
attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]
)
*/
message MetaDataV1 {
@@ -59,6 +60,23 @@ message MetaDataV1 {
bytes syncnets = 3 [(ethereum.eth.ext.ssz_size) = "1", (ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/go-bitfield.Bitvector4"];
}
/*
Spec Definition:
MetaData
(
seq_number: uint64
attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]
custody_subnet_count: uint64
)
*/
message MetaDataV2 {
uint64 seq_number = 1;
bytes attnets = 2 [(ethereum.eth.ext.ssz_size) = "8", (ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/go-bitfield.Bitvector64"];
bytes syncnets = 3 [(ethereum.eth.ext.ssz_size) = "1", (ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/go-bitfield.Bitvector4"];
uint64 custody_subnet_count = 4;
}
/*
Spec Definition:
(
@@ -70,3 +88,17 @@ message BlobSidecarsByRangeRequest {
uint64 start_slot = 1 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives.Slot"];
uint64 count = 2;
}
/*
Spec Definition:
(
start_slot: Slot
count: uint64
columns: List[ColumnIndex]
)
*/
message DataColumnSidecarsByRangeRequest {
uint64 start_slot = 1 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives.Slot"];
uint64 count = 2;
repeated uint64 columns = 3 [(ethereum.eth.ext.ssz_max) = "128"];
}


@@ -36,6 +36,11 @@ mainnet = {
"pending_partial_withdrawals_limit": "134217728",
"pending_consolidations_limit": "262144",
"max_consolidation_requests_per_payload.size": "1",
"field_elements_per_cell.size": "64",
"field_elements_per_ext_blob.size": "8192",
"bytes_per_cell.size": "2048", # FIELD_ELEMENTS_PER_CELL * BYTES_PER_FIELD_ELEMENT
"cells_per_blob.size": "128",
"kzg_commitments_inclusion_proof_depth.size": "4",
}
minimal = {
@@ -68,6 +73,11 @@ minimal = {
"pending_partial_withdrawals_limit": "64",
"pending_consolidations_limit": "64",
"max_consolidation_requests_per_payload.size": "1",
"field_elements_per_cell.size": "64",
"field_elements_per_ext_blob.size": "8192",
"bytes_per_cell.size": "2048", # FIELD_ELEMENTS_PER_CELL * BYTES_PER_FIELD_ELEMENT
"cells_per_blob.size": "128",
"kzg_commitments_inclusion_proof_depth.size": "4",
}
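As a worked check, these sizes are mutually consistent: bytes_per_cell = FIELD_ELEMENTS_PER_CELL × BYTES_PER_FIELD_ELEMENT = 64 × 32 = 2048; cells_per_blob = FIELD_ELEMENTS_PER_EXT_BLOB / FIELD_ELEMENTS_PER_CELL = 8192 / 64 = 128, matching the NumberOfColumns value in the mainnet config above; and kzg_commitments_inclusion_proof_depth = 4 matches the 4 × 32-byte proof vector in DataColumnSidecar.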
###### Rules definitions #######


@@ -372,7 +372,7 @@ func runBlobStep(t *testing.T,
require.NoError(t, err)
ini, err := builder.vwait.WaitForInitializer(context.Background())
require.NoError(t, err)
bv := ini.NewBlobVerifier(ro, verification.SpectestSidecarRequirements)
bv := ini.NewBlobVerifier(ro, verification.SpectestBlobSidecarRequirements)
ctx := context.Background()
if err := bv.BlobIndexInBounds(); err != nil {
t.Logf("BlobIndexInBounds error: %s", err.Error())


@@ -195,7 +195,7 @@ func logProposedBlock(log *logrus.Entry, blk interfaces.SignedBeaconBlock, blkRo
log = log.WithFields(logrus.Fields{
"payloadHash": fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(p.ParentHash())),
"blockNumber": p.BlockNumber,
"blockNumber": p.BlockNumber(),
})
if !blk.IsBlinded() {
txs, err := p.Transactions()