Compare commits

..

7 Commits

Author SHA1 Message Date
Manu NALEPA
23d7f87cef Peer das core (#13877)
* Bump `c-kzg-4844` lib to the `das` branch.

* Implement `MerkleProofKZGCommitments`.

* Implement `das-core.md`.

* Use `peerdas.CustodyColumnSubnets` and `peerdas.CustodyColumns`.

* `CustodyColumnSubnets`: Include `i` in the for loop.

* Remove `computeSubscribedColumnSubnet`.

* Remove `peerdas.CustodyColumns` out of the for loop.
2024-07-29 13:19:42 +02:00
Nishant Das
a4db00d7ef Add Request And Response RPC Methods For Data Columns (#13909)
* Add RPC Handler

* Add Column Requests

* Update beacon-chain/db/filesystem/blob.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/p2p/rpc_topic_mappings.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's Review

* Manu's Review

* Interface Fixes

* mock manager

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-07-29 13:19:42 +02:00
Nishant Das
b6a4e1213c Add Data Column Gossip Handlers (#13894)
* Add Data Column Subscriber

* Add Data Column Validator

* Wire all Handlers In

* Fix Build

* Fix Test

* Fix IP in Test

* Fix IP in Test
2024-07-29 13:19:42 +02:00
Nishant Das
86088d9c85 Add Support For Discovery Of Column Subnets (#13883)
* Add Support For Discovery Of Column Subnets

* Lint for SubnetsPerNode

* Manu's Review

* Change to a better name
2024-07-29 13:19:42 +02:00
Nishant Das
e10e610589 add in networking params (#13866) 2024-07-29 13:19:42 +02:00
Nishant Das
e16a25b6cc add it (#13865) 2024-07-29 13:19:42 +02:00
Manu NALEPA
f129dfdee6 Add in column sidecars protos (#13862) 2024-07-29 13:18:55 +02:00
257 changed files with 7738 additions and 8754 deletions
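The `peerdas.CustodyColumnSubnets` and `peerdas.CustodyColumns` helpers referenced in the "Peer das core (#13877)" commit follow the das-core rule that each custodied subnet maps to a fixed, interleaved set of column indices. The following is a minimal standalone sketch of that mapping only, not Prysm's implementation; the constants and the helper name are illustrative assumptions.

package main

import "fmt"

// Assumed illustrative constants; the real values come from the beacon chain
// configuration, not from this sketch.
const (
	numberOfColumns       = 128
	dataColumnSubnetCount = 32
)

// custodyColumns expands the subnets a node custodies into the column indices
// it must keep: column = dataColumnSubnetCount*i + subnetID for every
// i in [0, columnsPerSubnet).
func custodyColumns(custodySubnets []uint64) []uint64 {
	columnsPerSubnet := uint64(numberOfColumns / dataColumnSubnetCount)
	columns := make([]uint64, 0, columnsPerSubnet*uint64(len(custodySubnets)))
	for i := uint64(0); i < columnsPerSubnet; i++ {
		for _, subnet := range custodySubnets {
			columns = append(columns, dataColumnSubnetCount*i+subnet)
		}
	}
	return columns
}

func main() {
	// A node custodying subnets 3 and 17 ends up with 8 columns (4 per subnet).
	fmt.Println(custodyColumns([]uint64{3, 17})) // [3 17 35 49 67 81 99 113]
}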

View File

@@ -317,94 +317,6 @@ type BlindedBeaconBlockBodyDeneb struct {
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type SignedBeaconBlockContentsElectra struct {
SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type BeaconBlockContentsElectra struct {
Block *BeaconBlockElectra `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type SignedBeaconBlockElectra struct {
Message *BeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockElectra{}
func (s *SignedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BeaconBlockBodyElectra `json:"body"`
}
type BeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadElectra `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type BlindedBeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyElectra `json:"body"`
}
type SignedBlindedBeaconBlockElectra struct {
Message *BlindedBeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockElectra{}
func (s *SignedBlindedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BlindedBeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type SignedBeaconBlockHeaderContainer struct {
Header *SignedBeaconBlockHeader `json:"header"`
Root string `json:"root"`
@@ -533,49 +445,3 @@ type ExecutionPayloadHeaderDeneb struct {
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
type ExecutionPayloadElectra struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
DepositRequests []*DepositRequest `json:"deposit_requests"`
WithdrawalRequests []*WithdrawalRequest `json:"withdrawal_requests"`
ConsolidationRequests []*ConsolidationRequest `json:"consolidation_requests"`
}
type ExecutionPayloadHeaderElectra struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
DepositRequestsRoot string `json:"deposit_requests_root"`
WithdrawalRequestsRoot string `json:"withdrawal_requests_root"`
ConsolidationRequestsRoot string `json:"consolidation_requests_root"`
}

View File

@@ -340,42 +340,6 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
}, nil
}
func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
sig, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SignedAggregateAttestationAndProofElectra{
Message: msg,
Signature: sig,
}, nil
}
func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttestationAndProofElectra, error) {
aggIndex, err := strconv.ParseUint(a.AggregatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AggregatorIndex")
}
agg, err := a.Aggregate.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Aggregate")
}
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
return &eth.AggregateAttestationAndProofElectra{
AggregatorIndex: primitives.ValidatorIndex(aggIndex),
Aggregate: agg,
SelectionProof: proof,
}, nil
}
func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
@@ -405,41 +369,6 @@ func AttFromConsensus(a *eth.Attestation) *Attestation {
}
}
func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
return nil, server.NewDecodeError(err, "AggregationBits")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
committeeBits, err := hexutil.Decode(a.CommitteeBits)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeBits")
}
return &eth.AttestationElectra{
AggregationBits: aggBits,
Data: data,
Signature: sig,
CommitteeBits: committeeBits,
}, nil
}
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
return &AttestationElectra{
AggregationBits: hexutil.Encode(a.AggregationBits),
Data: AttDataFromConsensus(a.Data),
Signature: hexutil.Encode(a.Signature),
CommitteeBits: hexutil.Encode(a.CommitteeBits),
}
}
func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
slot, err := strconv.ParseUint(a.Slot, 10, 64)
if err != nil {
@@ -694,18 +623,6 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
return &eth.AttesterSlashing{Attestation_1: att1, Attestation_2: att2}, nil
}
func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
att1, err := s.Attestation1.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation1")
}
att2, err := s.Attestation2.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation2")
}
return &eth.AttesterSlashingElectra{Attestation_1: att1, Attestation_2: att2}, nil
}
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
indices := make([]uint64, len(a.AttestingIndices))
var err error
@@ -731,31 +648,6 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
}, nil
}
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
indices := make([]uint64, len(a.AttestingIndices))
var err error
for i, ix := range a.AttestingIndices {
indices[i], err = strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
}
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.IndexedAttestationElectra{
AttestingIndices: indices,
Data: data,
Signature: sig,
}, nil
}
func WithdrawalsFromConsensus(ws []*enginev1.Withdrawal) []*Withdrawal {
result := make([]*Withdrawal, len(ws))
for i, w := range ws {
@@ -773,126 +665,6 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
}
}
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
result := make([]*WithdrawalRequest, len(ws))
for i, w := range ws {
result[i] = WithdrawalRequestFromConsensus(w)
}
return result
}
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
return &WithdrawalRequest{
SourceAddress: hexutil.Encode(w.SourceAddress),
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
Amount: fmt.Sprintf("%d", w.Amount),
}
}
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "ValidatorPubkey")
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
return &enginev1.WithdrawalRequest{
SourceAddress: src,
ValidatorPubkey: pubkey,
Amount: amount,
}, nil
}
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
result := make([]*ConsolidationRequest, len(cs))
for i, c := range cs {
result[i] = ConsolidationRequestFromConsensus(c)
}
return result
}
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
return &ConsolidationRequest{
SourceAddress: hexutil.Encode(c.SourceAddress),
SourcePubkey: hexutil.Encode(c.SourcePubkey),
TargetPubkey: hexutil.Encode(c.TargetPubkey),
}
}
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourcePubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "TargetPubkey")
}
return &enginev1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: srcPubkey,
TargetPubkey: targetPubkey,
}, nil
}
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
result := make([]*DepositRequest, len(ds))
for i, d := range ds {
result[i] = DepositRequestFromConsensus(d)
}
return result
}
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
return &DepositRequest{
Pubkey: hexutil.Encode(d.Pubkey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Index: fmt.Sprintf("%d", d.Index),
}
}
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "Pubkey")
}
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
}
amount, err := strconv.ParseUint(d.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
index, err := strconv.ParseUint(d.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}
return &enginev1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: sig,
Index: index,
}, nil
}
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
if src == nil {
return nil, errNilValue
@@ -1158,138 +930,6 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
}
}
func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
if src == nil {
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 2)
if err != nil {
return nil, err
}
attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
for i, s := range src {
if s == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
if s.Attestation1 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
}
if s.Attestation2 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
}
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
}
a1AttestingIndices := make([]uint64, len(s.Attestation1.AttestingIndices))
for j, ix := range s.Attestation1.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices[%d]", i, j))
}
a1AttestingIndices[j] = attestingIndex
}
a1Data, err := s.Attestation1.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
}
a2Sig, err := bytesutil.DecodeHexWithLength(s.Attestation2.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
}
a2AttestingIndices := make([]uint64, len(s.Attestation2.AttestingIndices))
for j, ix := range s.Attestation2.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices[%d]", i, j))
}
a2AttestingIndices[j] = attestingIndex
}
a2Data, err := s.Attestation2.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Data", i))
}
attesterSlashings[i] = &eth.AttesterSlashingElectra{
Attestation_1: &eth.IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: a1Data,
Signature: a1Sig,
},
Attestation_2: &eth.IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: a2Data,
Signature: a2Sig,
},
}
}
return attesterSlashings, nil
}
func AttesterSlashingsElectraFromConsensus(src []*eth.AttesterSlashingElectra) []*AttesterSlashingElectra {
attesterSlashings := make([]*AttesterSlashingElectra, len(src))
for i, s := range src {
attesterSlashings[i] = AttesterSlashingElectraFromConsensus(s)
}
return attesterSlashings
}
func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *AttesterSlashingElectra {
a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices))
for j, ix := range src.Attestation_1.AttestingIndices {
a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices))
for j, ix := range src.Attestation_2.AttestingIndices {
a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
return &AttesterSlashingElectra{
Attestation1: &IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_1.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_1.Signature),
},
Attestation2: &IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_2.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_2.Signature),
},
}
}
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
if src == nil {
return nil, errNilValue
@@ -1317,33 +957,6 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
return atts
}
func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
if src == nil {
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 8)
if err != nil {
return nil, err
}
atts := make([]*eth.AttestationElectra, len(src))
for i, a := range src {
atts[i], err = a.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
}
}
return atts, nil
}
func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElectra {
atts := make([]*AttestationElectra, len(src))
for i, a := range src {
atts[i] = AttElectraFromConsensus(a)
}
return atts
}
func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
if src == nil {
return nil, errNilValue
@@ -1474,37 +1087,3 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
ExecutionBlockHeight: fmt.Sprintf("%d", ds.ExecutionDepth),
}
}
func PendingBalanceDepositsFromConsensus(ds []*eth.PendingBalanceDeposit) []*PendingBalanceDeposit {
deposits := make([]*PendingBalanceDeposit, len(ds))
for i, d := range ds {
deposits[i] = &PendingBalanceDeposit{
Index: fmt.Sprintf("%d", d.Index),
Amount: fmt.Sprintf("%d", d.Amount),
}
}
return deposits
}
func PendingPartialWithdrawalsFromConsensus(ws []*eth.PendingPartialWithdrawal) []*PendingPartialWithdrawal {
withdrawals := make([]*PendingPartialWithdrawal, len(ws))
for i, w := range ws {
withdrawals[i] = &PendingPartialWithdrawal{
Index: fmt.Sprintf("%d", w.Index),
Amount: fmt.Sprintf("%d", w.Amount),
WithdrawableEpoch: fmt.Sprintf("%d", w.WithdrawableEpoch),
}
}
return withdrawals
}
func PendingConsolidationsFromConsensus(cs []*eth.PendingConsolidation) []*PendingConsolidation {
consolidations := make([]*PendingConsolidation, len(cs))
for i, c := range cs {
consolidations[i] = &PendingConsolidation{
SourceIndex: fmt.Sprintf("%d", c.SourceIndex),
TargetIndex: fmt.Sprintf("%d", c.TargetIndex),
}
}
return consolidations
}

File diff suppressed because it is too large.

View File

@@ -593,185 +593,3 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
HistoricalSummaries: hs,
}, nil
}
func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr, err := st.HistoricalRoots()
if err != nil {
return nil, err
}
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
execData, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
if !ok {
return nil, errPayloadHeaderNotFound
}
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingBalanceDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
return &BeaconStateElectra{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
LatestExecutionPayloadHeader: payload,
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingBalanceDeposits: PendingBalanceDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
}

View File

@@ -225,19 +225,3 @@ type IndividualVote struct {
InclusionDistance string `json:"inclusion_distance"`
InactivityScore string `json:"inactivity_score"`
}
type ChainHead struct {
HeadSlot string `json:"head_slot"`
HeadEpoch string `json:"head_epoch"`
HeadBlockRoot string `json:"head_block_root"`
FinalizedSlot string `json:"finalized_slot"`
FinalizedEpoch string `json:"finalized_epoch"`
FinalizedBlockRoot string `json:"finalized_block_root"`
JustifiedSlot string `json:"justified_slot"`
JustifiedEpoch string `json:"justified_epoch"`
JustifiedBlockRoot string `json:"justified_block_root"`
PreviousJustifiedSlot string `json:"previous_justified_slot"`
PreviousJustifiedEpoch string `json:"previous_justified_epoch"`
PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
OptimisticStatus bool `json:"optimistic_status"`
}

View File

@@ -118,34 +118,3 @@ type GetValidatorPerformanceResponse struct {
MissingValidators [][]byte `json:"missing_validators,omitempty"`
InactivityScores []uint64 `json:"inactivity_scores,omitempty"`
}
type GetValidatorParticipationResponse struct {
Epoch string `json:"epoch"`
Finalized bool `json:"finalized"`
Participation *ValidatorParticipation `json:"participation"`
}
type ValidatorParticipation struct {
GlobalParticipationRate string `json:"global_participation_rate" deprecated:"true"`
VotedEther string `json:"voted_ether" deprecated:"true"`
EligibleEther string `json:"eligible_ether" deprecated:"true"`
CurrentEpochActiveGwei string `json:"current_epoch_active_gwei"`
CurrentEpochAttestingGwei string `json:"current_epoch_attesting_gwei"`
CurrentEpochTargetAttestingGwei string `json:"current_epoch_target_attesting_gwei"`
PreviousEpochActiveGwei string `json:"previous_epoch_active_gwei"`
PreviousEpochAttestingGwei string `json:"previous_epoch_attesting_gwei"`
PreviousEpochTargetAttestingGwei string `json:"previous_epoch_target_attesting_gwei"`
PreviousEpochHeadAttestingGwei string `json:"previous_epoch_head_attesting_gwei"`
}
type ActiveSetChanges struct {
Epoch string `json:"epoch"`
ActivatedPublicKeys []string `json:"activated_public_keys"`
ActivatedIndices []string `json:"activated_indices"`
ExitedPublicKeys []string `json:"exited_public_keys"`
ExitedIndices []string `json:"exited_indices"`
SlashedPublicKeys []string `json:"slashed_public_keys"`
SlashedIndices []string `json:"slashed_indices"`
EjectedPublicKeys []string `json:"ejected_public_keys"`
EjectedIndices []string `json:"ejected_indices"`
}

View File

@@ -29,13 +29,6 @@ type Attestation struct {
Signature string `json:"signature"`
}
type AttestationElectra struct {
AggregationBits string `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
CommitteeBits string `json:"committee_bits"`
}
type AttestationData struct {
Slot string `json:"slot"`
CommitteeIndex string `json:"index"`
@@ -85,17 +78,6 @@ type AggregateAttestationAndProof struct {
SelectionProof string `json:"selection_proof"`
}
type SignedAggregateAttestationAndProofElectra struct {
Message *AggregateAttestationAndProofElectra `json:"message"`
Signature string `json:"signature"`
}
type AggregateAttestationAndProofElectra struct {
AggregatorIndex string `json:"aggregator_index"`
Aggregate *AttestationElectra `json:"aggregate"`
SelectionProof string `json:"selection_proof"`
}
type SyncCommitteeSubscription struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
@@ -196,11 +178,6 @@ type AttesterSlashing struct {
Attestation2 *IndexedAttestation `json:"attestation_2"`
}
type AttesterSlashingElectra struct {
Attestation1 *IndexedAttestationElectra `json:"attestation_1"`
Attestation2 *IndexedAttestationElectra `json:"attestation_2"`
}
type Deposit struct {
Proof []string `json:"proof"`
Data *DepositData `json:"data"`
@@ -219,12 +196,6 @@ type IndexedAttestation struct {
Signature string `json:"signature"`
}
type IndexedAttestationElectra struct {
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type SyncAggregate struct {
SyncCommitteeBits string `json:"sync_committee_bits"`
SyncCommitteeSignature string `json:"sync_committee_signature"`
@@ -236,39 +207,3 @@ type Withdrawal struct {
ExecutionAddress string `json:"address"`
Amount string `json:"amount"`
}
type DepositRequest struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Index string `json:"index"`
}
type WithdrawalRequest struct {
SourceAddress string `json:"source_address"`
ValidatorPubkey string `json:"validator_pubkey"`
Amount string `json:"amount"`
}
type ConsolidationRequest struct {
SourceAddress string `json:"source_address"`
SourcePubkey string `json:"source_pubkey"`
TargetPubkey string `json:"target_pubkey"`
}
type PendingBalanceDeposit struct {
Index string `json:"index"`
Amount string `json:"amount"`
}
type PendingPartialWithdrawal struct {
Index string `json:"index"`
Amount string `json:"amount"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}

View File

@@ -140,43 +140,3 @@ type BeaconStateDeneb struct {
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
}
type BeaconStateElectra struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingBalanceDeposits []*PendingBalanceDeposit `json:"pending_balance_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
}

View File

@@ -26,6 +26,7 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_sidecar.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",

View File

@@ -22,6 +22,13 @@ func (s *Service) GetProposerHead() [32]byte {
return s.cfg.ForkChoiceStore.GetProposerHead()
}
// ShouldOverrideFCU returns the corresponding value from forkchoice
func (s *Service) ShouldOverrideFCU() bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.Lock()
@@ -92,10 +99,3 @@ func (s *Service) FinalizedBlockHash() [32]byte {
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
// ParentRoot wraps a call to the corresponding method in forkchoice
func (s *Service) ParentRoot(root [32]byte) ([32]byte, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ParentRoot(root)
}

View File

@@ -142,7 +142,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
b := blks[0].Block()
// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
if err := s.verifyBlkPreState(ctx, b); err != nil {
return err
}
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())

View File

@@ -14,7 +14,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -286,7 +285,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
defer span.End()
// Verify incoming block has a valid pre state.
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, err
}
@@ -312,10 +311,11 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
}
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, parentRoot [field_params.RootLength]byte) error {
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
defer span.End()
parentRoot := b.ParentRoot()
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// subset of the block object.

View File

@@ -117,7 +117,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: root[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, root, st))
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block().ParentRoot()))
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
}
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
@@ -2044,11 +2044,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
st, err = service.HeadState(ctx)
require.NoError(t, err)
defaultConfig := util.DefaultBlockGenConfig()
defaultConfig.NumWithdrawalRequests = 1
defaultConfig.NumDepositRequests = 2
defaultConfig.NumConsolidationRequests = 1
b, err := util.GenerateFullBlockElectra(st, keys, defaultConfig, 1)
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -2063,7 +2059,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockElectra(st, keys, defaultConfig, 2)
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -2071,7 +2067,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlockElectra(st3, keys, defaultConfig, 3)
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -191,26 +190,13 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
}
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
var fields logrus.Fields
if a.Version() >= version.Electra {
fields = logrus.Fields{
"slot": a.GetData().Slot,
"committeeCount": a.CommitteeBitsVal().Count(),
"committeeIndices": a.CommitteeBitsVal().BitIndices(),
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
"aggregatedCount": a.GetAggregationBits().Count(),
}
} else {
fields = logrus.Fields{
"slot": a.GetData().Slot,
"committeeIndex": a.GetData().CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
"aggregatedCount": a.GetAggregationBits().Count(),
}
}
log.WithFields(fields).WithError(err).Warn("Could not process attestation for fork choice")
log.WithFields(logrus.Fields{
"slot": a.GetData().Slot,
"committeeIndex": a.GetData().CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
"aggregationCount": a.GetAggregationBits().Count(),
}).WithError(err).Warn("Could not process attestation for fork choice")
}
}
}

View File

@@ -51,6 +51,12 @@ type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}
// DataColumnReceiver interface defines the methods of chain service for receiving new
// data columns
type DataColumnReceiver interface {
ReceiveDataColumn(context.Context, *ethpb.DataColumnSidecar) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -77,20 +83,59 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err != nil {
return err
}
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
currentEpoch := coreTime.CurrentEpoch(preState)
currentCheckpoints := s.saveCurrentCheckpoints(preState)
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
if err != nil {
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
daStartTime := time.Now()
if avs != nil {
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
return errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
}
} else {
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
return errors.Wrap(err, "could not validate blob data availability")
}
}
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
// Defragment the state before continuing block processing.
s.defragmentState(postState)
@@ -112,9 +157,29 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
tracing.AnnotateError(span, err)
return err
}
if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
return err
if coreTime.CurrentEpoch(postState) > currentEpoch && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
// hook to process all post state finalization tasks
s.executePostFinalizationTasks(ctx, postState)
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
@@ -134,140 +199,31 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.handleCaches(); err != nil {
return err
}
s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
return nil
}
type ffgCheckpoints struct {
j, f, c primitives.Epoch
}
func (s *Service) saveCurrentCheckpoints(state state.BeaconState) (cp ffgCheckpoints) {
// Save current justified and finalized epochs for future use.
cp.j = s.CurrentJustifiedCheckpt().Epoch
cp.f = s.FinalizedCheckpt().Epoch
cp.c = coreTime.CurrentEpoch(state)
return
}
func (s *Service) updateCheckpoints(
ctx context.Context,
cp ffgCheckpoints,
preState, postState state.BeaconState,
blockRoot [32]byte,
) error {
if coreTime.CurrentEpoch(postState) > cp.c && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, cp.f)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
// hook to process all post state finalization tasks
s.executePostFinalizationTasks(ctx, postState)
}
return nil
}
func (s *Service) validateExecutionAndConsensus(
ctx context.Context,
preState state.BeaconState,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
) (state.BeaconState, bool, error) {
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, false, err
}
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, block)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return nil, false, err
}
return postState, isValidPayload, nil
}
func (s *Service) handleDA(
ctx context.Context,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
avs das.AvailabilityStore,
) (time.Duration, error) {
daStartTime := time.Now()
if avs != nil {
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return 0, err
}
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
}
} else {
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability")
}
}
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
return daWaitedTime, nil
}
func (s *Service) reportPostBlockProcessing(
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
receivedTime time.Time,
daWaitedTime time.Duration,
) {
// Reports on block and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
if err := logPayload(block.Block()); err != nil {
if err := logPayload(blockCopy.Block()); err != nil {
log.WithError(err).Error("Unable to log debug block payload data")
}
// Log state transition data.
if err := logStateTransitionData(block.Block()); err != nil {
if err := logStateTransitionData(blockCopy.Block()); err != nil {
log.WithError(err).Error("Unable to log state transition data")
}
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
return nil
}
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {

View File

@@ -0,0 +1,12 @@
package blockchain
import (
"context"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func (s *Service) ReceiveDataColumn(ctx context.Context, ds *ethpb.DataColumnSidecar) error {
// TODO
return nil
}
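The stub above pairs with the `DataColumnReceiver` interface introduced earlier in this diff. The sketch below is illustrative only (the subscriber and no-op chain are hypothetical), showing how downstream gossip/sync code can depend on the interface rather than on the concrete chain service:

package main

import (
	"context"
	"fmt"

	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// DataColumnReceiver mirrors the interface added to the blockchain package in
// this change set.
type DataColumnReceiver interface {
	ReceiveDataColumn(context.Context, *ethpb.DataColumnSidecar) error
}

// columnSubscriber is a hypothetical gossip consumer that depends only on the
// interface, not on the concrete chain service.
type columnSubscriber struct {
	chain DataColumnReceiver
}

func (c *columnSubscriber) onSidecar(ctx context.Context, ds *ethpb.DataColumnSidecar) {
	if err := c.chain.ReceiveDataColumn(ctx, ds); err != nil {
		fmt.Println("could not receive data column:", err)
	}
}

// noopChain stands in for the real blockchain.Service, whose ReceiveDataColumn
// is still a TODO stub at this commit.
type noopChain struct{}

func (noopChain) ReceiveDataColumn(context.Context, *ethpb.DataColumnSidecar) error {
	return nil
}

func main() {
	sub := &columnSubscriber{chain: noopChain{}}
	sub.onSidecar(context.Background(), &ethpb.DataColumnSidecar{})
	fmt.Println("sidecar forwarded to the chain service")
}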

View File

@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
return nil
}
// ReceiveDataColumn implements the same method in chain service
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ *ethpb.DataColumnSidecar) error {
return nil
}
// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -8,6 +8,7 @@ go_library(
"attestation_data.go",
"balance_cache_key.go",
"checkpoint_state.go",
"column_subnet_ids.go",
"committee.go",
"committee_disabled.go", # keep
"committees.go",

beacon-chain/cache/column_subnet_ids.go (new file, 65 lines added)
View File

@@ -0,0 +1,65 @@
package cache
import (
"sync"
"time"
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
type columnSubnetIDs struct {
colSubCache *cache.Cache
colSubLock sync.RWMutex
}
// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()
const columnKey = "columns"
func newColumnSubnetIDs() *columnSubnetIDs {
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
// Set the default duration of a column subnet subscription as the column expiry period.
subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &columnSubnetIDs{colSubCache: persistentCache}
}
// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
s.colSubLock.RLock()
defer s.colSubLock.RUnlock()
id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
if !ok {
return nil, false, time.Time{}
}
// Retrieve indices from the cache.
idxs, ok := id.([]uint64)
if !ok {
return nil, false, time.Time{}
}
return idxs, ok, duration
}
// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
s.colSubLock.Lock()
defer s.colSubLock.Unlock()
s.colSubCache.Set(columnKey, colIdx, 0)
}
// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing, in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
// Clear the cache.
s.colSubLock.Lock()
defer s.colSubLock.Unlock()
s.colSubCache.Flush()
}
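For reference, a minimal usage sketch of the new cache (assuming the import path matches the module layout used elsewhere in this diff); the subnet IDs below are placeholder values:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
)

func main() {
	// Record the column subnets this node participates in; the entry expires
	// after the retention period configured in newColumnSubnetIDs.
	cache.ColumnSubnetIDs.AddColumnSubnets([]uint64{0, 5, 9})

	subnets, ok, expiry := cache.ColumnSubnetIDs.GetColumnSubnets()
	if !ok {
		fmt.Println("no column subnets cached")
		return
	}
	fmt.Printf("subscribed to column subnets %v until %s\n", subnets, expiry)
}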

View File

@@ -110,7 +110,25 @@ func VerifyAttestationNoVerifySignature(
var indexedAtt ethpb.IndexedAtt
if att.Version() >= version.Electra {
if att.Version() < version.Electra {
if uint64(att.GetData().CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
}
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
return errors.Wrap(err, "could not verify attestation bitfields")
}
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
} else {
if att.GetData().CommitteeIndex != 0 {
return errors.New("committee index must be 0 post-Electra")
}
@@ -136,29 +154,6 @@ func VerifyAttestationNoVerifySignature(
if err != nil {
return err
}
} else {
if uint64(att.GetData().CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
}
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
if committee == nil {
return errors.New("no committee exist for this attestation")
}
if err := helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
return errors.Wrap(err, "failed to verify aggregation bitfield")
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
}
return attestation.IsValidAttestationIndices(ctx, indexedAtt)

View File

@@ -1,5 +1,3 @@
package blocks
var ProcessBLSToExecutionChange = processBLSToExecutionChange
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount

View File

@@ -2,13 +2,11 @@ package blocks
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -202,13 +200,13 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
payload, err := body.Execution()
if err != nil {
return nil, err
}
if err := verifyBlobCommitmentCount(body); err != nil {
return nil, err
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
var err error
if st.Version() >= version.Capella {
st, err = ProcessWithdrawals(st, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
return nil, err
@@ -222,20 +220,70 @@ func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBod
return st, nil
}
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
if body.Version() < version.Deneb {
return nil
}
kzgs, err := body.BlobKzgCommitments()
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
// Skip validation if the state is not merge compatible.
complete, err := IsMergeTransitionComplete(st)
if err != nil {
return err
}
if len(kzgs) > field_params.MaxBlobsPerBlock {
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
if !complete {
return nil
}
// Validate current header's parent hash matches state header's block hash.
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
return err
}
if !bytes.Equal(header.ParentHash(), h.BlockHash()) {
return ErrInvalidPayloadBlockHash
}
return nil
}
// ValidatePayloadHeader validates the payload header.
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
// Validate header's random mix matches with state in current epoch
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return err
}
if !bytes.Equal(header.PrevRandao(), random) {
return ErrInvalidPayloadPrevRandao
}
// Validate header's timestamp matches with state in current slot.
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
if err != nil {
return err
}
if header.Timestamp() != uint64(t.Unix()) {
return ErrInvalidPayloadTimeStamp
}
return nil
}
// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
var err error
if st.Version() >= version.Capella {
st, err = ProcessWithdrawals(st, header)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
return nil, err
}
if err := ValidatePayloadHeader(st, header); err != nil {
return nil, err
}
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
return nil, err
}
return st, nil
}
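With this split, a caller picks between the two entry points based on whether the block is blinded: full blocks carry the payload, blinded blocks only the header. A minimal sketch of that dispatch inside this package, assuming the caller already holds the block and its execution data (the helper name processExecution is illustrative, not part of this change):
// processExecution is an illustrative sketch, not part of this change.
func processExecution(st state.BeaconState, blk interfaces.ReadOnlyBeaconBlock, executionData interfaces.ExecutionData) (state.BeaconState, error) {
	if blk.IsBlinded() {
		// Blinded blocks only commit to the execution payload header.
		return ProcessPayloadHeader(st, executionData)
	}
	// Full blocks carry the complete execution payload.
	return ProcessPayload(st, executionData)
}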
// GetBlockPayloadHash returns the hash of the execution payload of the block
func GetBlockPayloadHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
var payloadHash [32]byte

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -14,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -583,18 +581,14 @@ func Test_ProcessPayload(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: tt.payload,
})
wrappedPayload, err := consensusblocks.WrappedExecutionPayload(tt.payload)
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, body)
st, err := blocks.ProcessPayload(st, wrappedPayload)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
payload, err := body.Execution()
require.NoError(t, err)
want, err := consensusblocks.PayloadToHeader(payload)
want, err := consensusblocks.PayloadToHeader(wrappedPayload)
require.Equal(t, tt.err, err)
h, err := st.LatestExecutionPayloadHeader()
require.NoError(t, err)
@@ -615,15 +609,13 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyCapella{
ExecutionPayload: payload,
})
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, body)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
}
func Test_ProcessPayload_Blinded(t *testing.T) {
func Test_ProcessPayloadHeader(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
@@ -671,13 +663,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p, ok := tt.header.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BlindedBeaconBlockBodyBellatrix{
ExecutionPayloadHeader: p,
})
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, body)
st, err := blocks.ProcessPayloadHeader(st, tt.header)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
@@ -742,7 +728,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err = blocks.ValidatePayload(st, tt.header)
err = blocks.ValidatePayloadHeader(st, tt.header)
require.Equal(t, tt.err, err)
})
}
@@ -799,7 +785,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err = blocks.ValidatePayloadWhenMergeCompletes(tt.state, tt.header)
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
require.Equal(t, tt.err, err)
})
}
@@ -920,15 +906,3 @@ func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella {
Withdrawals: make([]*enginev1.Withdrawal, 0),
}
}
func TestVerifyBlobCommitmentCount(t *testing.T) {
b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
rb, err := consensusblocks.NewBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock+1)}}
rb, err = consensusblocks.NewBeaconBlock(b)
require.NoError(t, err)
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", fieldparams.MaxBlobsPerBlock+1), blocks.VerifyBlobCommitmentCount(rb.Body()))
}

View File

@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
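Unlike the state-fork variant above, this helper derives the signing domain from the fork at the header's own slot, which matters when the header being checked (for example, one attached to slashing evidence) may belong to a different fork than the state. A small hedged sketch of such a caller; treating the proposer-slashing use case and the helper name as assumptions:
// verifySlashingHeaders is an illustrative sketch, not part of this change.
func verifySlashingHeaders(st state.BeaconState, slashing *ethpb.ProposerSlashing) error {
	for _, h := range []*ethpb.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2} {
		if err := VerifyBlockHeaderSignatureUsingCurrentFork(st, h); err != nil {
			return err
		}
	}
	return nil
}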
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.

View File

@@ -22,31 +22,29 @@ var (
//
// Spec definition:
//
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
// # [Modified in Electra:EIP6110]
// # Disable former deposit mechanism once all prior deposits are processed
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
// if state.eth1_deposit_index < eth1_deposit_index_limit:
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
// else:
// assert len(body.deposits) == 0
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
// # [Modified in Electra:EIP6110]
// # Disable former deposit mechanism once all prior deposits are processed
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
// if state.eth1_deposit_index < eth1_deposit_index_limit:
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
// else:
// assert len(body.deposits) == 0
//
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
// for operation in operations:
// fn(state, operation)
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
// for operation in operations:
// fn(state, operation)
//
// for_ops(body.proposer_slashings, process_proposer_slashing)
// for_ops(body.attester_slashings, process_attester_slashing)
// for_ops(body.attestations, process_attestation) # [Modified in Electra:EIP7549]
// for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
// for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
// for_ops(body.execution_payload.deposit_requests, process_deposit_request) # [New in Electra:EIP6110]
// # [New in Electra:EIP7002:EIP7251]
// for_ops(body.execution_payload.withdrawal_requests, process_withdrawal_request)
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
// for_ops(body.proposer_slashings, process_proposer_slashing)
// for_ops(body.attester_slashings, process_attester_slashing)
// for_ops(body.attestations, process_attestation) # [Modified in Electra:EIP7549]
// for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
// for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
// # [New in Electra:EIP7002:EIP7251]
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
// for_ops(body.execution_payload.deposit_requests, process_deposit_requests) # [New in Electra:EIP6110]
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
func ProcessOperations(
ctx context.Context,
st state.BeaconState,
@@ -86,14 +84,16 @@ func ProcessOperations(
if !ok {
return nil, errors.New("could not cast execution data to electra execution data")
}
st, err = ProcessDepositRequests(ctx, st, exe.DepositRequests())
if err != nil {
return nil, errors.Wrap(err, "could not process deposit receipts")
}
st, err = ProcessWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
if err != nil {
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
}
st, err = ProcessDepositRequests(ctx, st, exe.DepositRequests())
if err != nil {
return nil, errors.Wrap(err, "could not process deposit receipts")
}
if err := ProcessConsolidationRequests(ctx, st, exe.ConsolidationRequests()); err != nil {
return nil, fmt.Errorf("could not process consolidation requests: %w", err)
}

View File

@@ -1,15 +1,11 @@
package electra_test
import (
"context"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -51,68 +47,3 @@ func TestVerifyOperationLengths_Electra(t *testing.T) {
require.ErrorContains(t, "incorrect outstanding deposits in block body", err)
})
}
func TestProcessEpoch_CanProcessElectra(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, st.SetDepositBalanceToConsume(100))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*ethpb.PendingBalanceDeposit, 20)
for i := 0; i < len(deps); i += 1 {
deps[i] = &ethpb.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 10,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
require.NoError(t, st.SetPendingConsolidations([]*ethpb.PendingConsolidation{
{
SourceIndex: 2,
TargetIndex: 3,
},
{
SourceIndex: 0,
TargetIndex: 1,
},
}))
err := electra.ProcessEpoch(context.Background(), st)
require.NoError(t, err)
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
b := st.Balances()
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
require.Equal(t, uint64(44799839993), b[0])
s, err := st.InactivityScores()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(s)))
p, err := st.PreviousEpochParticipation()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(p)))
p, err = st.CurrentEpochParticipation()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(p)))
sc, err := st.CurrentSyncCommittee()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(sc.Pubkeys)))
sc, err = st.NextSyncCommittee()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(sc.Pubkeys)))
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(100), res)
// Half of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 10, len(remaining))
num, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(2), num)
}

View File

@@ -40,7 +40,6 @@ import (
// excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
// deposit_requests_root=Root(), # [New in Electra:EIP6110]
// withdrawal_requests_root=Root(), # [New in Electra:EIP7002],
// consolidation_requests_root=Root(), # [New in Electra:EIP7251]
// )
//
// exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]

View File

@@ -22,6 +22,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
@@ -52,6 +53,7 @@ go_test(
"//testing/util:go_default_library",
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -20,9 +20,32 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// AttestingBalance returns the total balance from all the attesting indices.
//
// WARNING: This method allocates a new copy of the attesting validator indices set and is
// considered to be very memory expensive. Avoid using this unless you really
// need to get attesting balance from attestations.
//
// Spec pseudocode definition:
//
// def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei:
// """
// Return the combined effective balance of the set of unslashed validators participating in ``attestations``.
// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// """
// return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) (uint64, error) {
indices, err := UnslashedAttestingIndices(ctx, state, atts)
if err != nil {
return 0, errors.Wrap(err, "could not get attesting indices")
}
return helpers.TotalBalance(state, indices), nil
}
// ProcessRegistryUpdates rotates validators in and out of active pool.
// The amount to rotate is determined by the churn limit.
//
@@ -432,3 +455,51 @@ func ProcessFinalUpdates(state state.BeaconState) (state.BeaconState, error) {
return state, nil
}
// UnslashedAttestingIndices returns all the attesting indices from a list of attestations,
// it sorts the indices and filters out the slashed ones.
//
// Spec pseudocode definition:
//
// def get_unslashed_attesting_indices(state: BeaconState,
// attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]:
// output = set() # type: Set[ValidatorIndex]
// for a in attestations:
// output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
// return set(filter(lambda index: not state.validators[index].slashed, output))
func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) ([]primitives.ValidatorIndex, error) {
var setIndices []primitives.ValidatorIndex
seen := make(map[uint64]bool)
for _, att := range atts {
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
attestingIndices, err := attestation.AttestingIndices(att, committee)
if err != nil {
return nil, err
}
// Create a set for attesting indices
for _, index := range attestingIndices {
if !seen[index] {
setIndices = append(setIndices, primitives.ValidatorIndex(index))
}
seen[index] = true
}
}
// Sort the attesting set indices by increasing order.
sort.Slice(setIndices, func(i, j int) bool { return setIndices[i] < setIndices[j] })
// Remove the slashed validator indices.
for i := 0; i < len(setIndices); i++ {
v, err := state.ValidatorAtIndexReadOnly(setIndices[i])
if err != nil {
return nil, errors.Wrap(err, "failed to look up validator")
}
if !v.IsNil() && v.Slashed() {
setIndices = append(setIndices[:i], setIndices[i+1:]...)
}
}
return setIndices, nil
}

View File

@@ -6,6 +6,7 @@ import (
"math"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
@@ -23,6 +24,131 @@ import (
"google.golang.org/protobuf/proto"
)
func TestUnslashedAttestingIndices_CanSortAndFilter(t *testing.T) {
// Generate 2 attestations.
atts := make([]*ethpb.PendingAttestation, 2)
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
},
AggregationBits: bitfield.Bitlist{0x00, 0xFF, 0xFF, 0xFF},
}
}
// Generate validators and state for the 2 attestations.
validatorCount := 1000
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
base := &ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
}
beaconState, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
indices, err := epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
require.NoError(t, err)
for i := 0; i < len(indices)-1; i++ {
if indices[i] >= indices[i+1] {
t.Error("sorted indices not sorted or duplicated")
}
}
// Verify the slashed validator is filtered.
slashedValidator := indices[0]
validators = beaconState.Validators()
validators[slashedValidator].Slashed = true
require.NoError(t, beaconState.SetValidators(validators))
indices, err = epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
require.NoError(t, err)
for i := 0; i < len(indices); i++ {
assert.NotEqual(t, slashedValidator, indices[i], "Slashed validator %d is not filtered", slashedValidator)
}
}
func TestUnslashedAttestingIndices_DuplicatedAttestations(t *testing.T) {
// Generate 5 of the same attestations.
atts := make([]*ethpb.PendingAttestation, 5)
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
Target: &ethpb.Checkpoint{Epoch: 0}},
AggregationBits: bitfield.Bitlist{0x00, 0xFF, 0xFF, 0xFF},
}
}
// Generate validators and state for the 5 attestations.
validatorCount := 1000
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
base := &ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
}
beaconState, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
indices, err := epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
require.NoError(t, err)
for i := 0; i < len(indices)-1; i++ {
if indices[i] >= indices[i+1] {
t.Error("sorted indices not sorted or duplicated")
}
}
}
func TestAttestingBalance_CorrectBalance(t *testing.T) {
helpers.ClearCache()
// Generate 2 attestations.
atts := make([]*ethpb.PendingAttestation, 2)
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
Slot: primitives.Slot(i),
},
AggregationBits: bitfield.Bitlist{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01},
}
}
// Generate validators with balances and state for the 2 attestations.
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconState{
Slot: 2,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
Balances: balances,
}
beaconState, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
balance, err := epoch.AttestingBalance(context.Background(), beaconState, atts)
require.NoError(t, err)
wanted := 256 * params.BeaconConfig().MaxEffectiveBalance
assert.Equal(t, wanted, balance)
}
func TestProcessSlashings_NotSlashed(t *testing.T) {
base := &ethpb.BeaconState{
Slot: 0,

View File

@@ -47,6 +47,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
@@ -58,6 +59,90 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
assert.Equal(t, wanted, beaconState.Balances()[0], "Unexpected balance")
}
func TestAttestationDeltaPrecompute(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(2048)
base := buildState(e+2, validatorCount)
atts := make([]*ethpb.PendingAttestation, 3)
var emptyRoot [32]byte
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
Source: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
BeaconBlockRoot: emptyRoot[:],
},
AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x01},
InclusionDelay: 1,
}
}
base.PreviousEpochAttestations = atts
beaconState, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
slashedAttestedIndices := []primitives.ValidatorIndex{1413}
for _, i := range slashedAttestedIndices {
vs := beaconState.Validators()
vs[i].Slashed = true
require.Equal(t, nil, beaconState.SetValidators(vs))
}
vp, bp, err := New(context.Background(), beaconState)
require.NoError(t, err)
vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp)
require.NoError(t, err)
// Add some variances to target and head balances.
// See: https://github.com/prysmaticlabs/prysm/issues/5593
bp.PrevEpochTargetAttested /= 2
bp.PrevEpochHeadAttested = bp.PrevEpochHeadAttested * 2 / 3
rewards, penalties, err := AttestationsDelta(beaconState, bp, vp)
require.NoError(t, err)
attestedBalance, err := epoch.AttestingBalance(context.Background(), beaconState, atts)
require.NoError(t, err)
totalBalance, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
attestedIndices := []primitives.ValidatorIndex{55, 1339, 1746, 1811, 1569}
for _, i := range attestedIndices {
base, err := baseReward(beaconState, i)
require.NoError(t, err, "Could not get base reward")
// Base rewards for getting source right
wanted := attestedBalance*base/totalBalance +
bp.PrevEpochTargetAttested*base/totalBalance +
bp.PrevEpochHeadAttested*base/totalBalance
// Base rewards for proposer and attesters working together getting attestation
// on chain in the fastest manner
proposerReward := base / params.BeaconConfig().ProposerRewardQuotient
wanted += (base-proposerReward)*uint64(params.BeaconConfig().MinAttestationInclusionDelay) - 1
assert.Equal(t, wanted, rewards[i], "Unexpected reward balance for validator with index %d", i)
// Since all these validators attested, they shouldn't get penalized.
assert.Equal(t, uint64(0), penalties[i], "Unexpected penalty balance")
}
for _, i := range slashedAttestedIndices {
base, err := baseReward(beaconState, i)
assert.NoError(t, err, "Could not get base reward")
assert.Equal(t, uint64(0), rewards[i], "Unexpected slashed indices reward balance")
assert.Equal(t, 3*base, penalties[i], "Unexpected slashed indices penalty balance")
}
nonAttestedIndices := []primitives.ValidatorIndex{434, 677, 872, 791}
for _, i := range nonAttestedIndices {
base, err := baseReward(beaconState, i)
assert.NoError(t, err, "Could not get base reward")
wanted := 3 * base
// Since all these validators did not attest, they shouldn't get rewarded.
assert.Equal(t, uint64(0), rewards[i], "Unexpected reward balance")
// Base penalties for not attesting.
assert.Equal(t, wanted, penalties[i], "Unexpected penalty balance")
}
}
func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(2048)

View File

@@ -32,6 +32,9 @@ const (
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
AttesterSlashingReceived = 8
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
DataColumnSidecarReceived = 9
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
type AttesterSlashingReceivedData struct {
AttesterSlashing ethpb.AttSlashing
}
type DataColumnSidecarReceivedData struct {
DataColumn *ethpb.DataColumnSidecar
}
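Producers publish the new event the same way as the other operation-feed events: wrap the sidecar in the data struct and send it with the matching type. A hedged sketch; the feed wiring shown here (an event feed with a Send method and the feed.Event envelope) mirrors the existing pattern and is an assumption, not part of this change:
// operationFeedSender is a minimal stand-in for the notifier's feed; any
// implementation with a compatible Send works the same way (assumption).
type operationFeedSender interface {
	Send(value interface{}) int
}

// notifyDataColumnSidecarReceived is an illustrative sketch, not part of this change.
func notifyDataColumnSidecarReceived(f operationFeedSender, sc *ethpb.DataColumnSidecar) {
	f.Send(&feed.Event{
		Type: DataColumnSidecarReceived,
		Data: &DataColumnSidecarReceivedData{DataColumn: sc},
	})
}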

View File

@@ -82,47 +82,6 @@ func AttestationCommittees(ctx context.Context, st state.ReadOnlyBeaconState, at
return committees, nil
}
// BeaconCommittees returns the list of all beacon committees for a given state at a given slot.
func BeaconCommittees(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) ([][]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
activeCount, err := ActiveValidatorCount(ctx, state, epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute active validator count")
}
committeesPerSlot := SlotCommitteeCount(activeCount)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return nil, errors.Wrap(err, "could not get seed")
}
committees := make([][]primitives.ValidatorIndex, committeesPerSlot)
var activeIndices []primitives.ValidatorIndex
for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
committee, err := committeeCache.Committee(ctx, slot, seed, idx)
if err != nil {
return nil, errors.Wrap(err, "could not interface with committee cache")
}
if committee != nil {
committees[idx] = committee
continue
}
if len(activeIndices) == 0 {
activeIndices, err = ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return nil, errors.Wrap(err, "could not get active indices")
}
}
committee, err = BeaconCommittee(ctx, activeIndices, seed, slot, idx)
if err != nil {
return nil, errors.Wrap(err, "could not compute beacon committee")
}
committees[idx] = committee
}
return committees, nil
}
// BeaconCommitteeFromState returns the crosslink committee of a given slot and committee index. This
// is a spec implementation where state is used as an argument. In case state retrieval
// becomes expensive, consider using BeaconCommittee below.
@@ -294,22 +253,36 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
if err := verifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
// Retrieve active validator count for the specified epoch.
activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
if err != nil {
return nil, err
}
// Determine the number of committees per slot based on the number of active validator indices.
numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)
startSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, err
}
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
vals := make(map[primitives.ValidatorIndex]struct{})
for _, v := range validators {
vals[v] = struct{}{}
}
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
// Compute committee assignments for each slot in the epoch.
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
committees, err := BeaconCommittees(ctx, state, slot)
if err != nil {
return nil, errors.Wrap(err, "could not compute beacon committees")
}
for j, committee := range committees {
// Compute committees for the current slot.
for j := uint64(0); j < numCommitteesPerSlot; j++ {
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
if err != nil {
return nil, err
}
for _, vIndex := range committee {
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
continue
@@ -323,6 +296,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
}
}
}
return assignments, nil
}
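Committee assignments are now derived one committee at a time via BeaconCommitteeFromState instead of materializing every committee for the slot. A minimal hedged usage sketch; the signature is read off the surrounding diff and the helper name and single validator index are placeholders:
// assignmentFor is an illustrative sketch, not part of this change.
func assignmentFor(ctx context.Context, st state.BeaconState, epoch primitives.Epoch, idx primitives.ValidatorIndex) (*CommitteeAssignment, error) {
	assignments, err := CommitteeAssignments(ctx, st, epoch, []primitives.ValidatorIndex{idx})
	if err != nil {
		return nil, err
	}
	return assignments[idx], nil
}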
@@ -337,6 +311,24 @@ func VerifyBitfieldLength(bf bitfield.Bitfield, committeeSize uint64) error {
return nil
}
// VerifyAttestationBitfieldLengths verifies that an attestation's aggregation bitfield is
// a valid length matching the size of the committee.
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att ethpb.Att) error {
committee, err := BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return errors.Wrap(err, "could not retrieve beacon committees")
}
if committee == nil {
return errors.New("no committee exist for this attestation")
}
if err := VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
return errors.Wrap(err, "failed to verify aggregation bitfield")
}
return nil
}
// ShuffledIndices uses input beacon state and returns the shuffled indices of the input epoch,
// the shuffled indices then can be used to break up into committees.
func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {

View File

@@ -18,7 +18,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -403,12 +402,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
helpers.ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
att := tt.attestation
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.GetData().Slot, att.GetData().CommitteeIndex)
require.NoError(t, err)
require.NotNil(t, committee)
err = helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee)))
err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
if tt.verificationFailure {
assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
} else {
@@ -755,27 +749,3 @@ func TestAttestationCommittees(t *testing.T) {
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
})
}
func TestBeaconCommittees(t *testing.T) {
prevConfig := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(prevConfig)
c := params.BeaconConfig().Copy()
c.MinGenesisActiveValidatorCount = 128
c.SlotsPerEpoch = 4
c.TargetCommitteeSize = 16
params.OverrideBeaconConfig(c)
state, _ := util.DeterministicGenesisState(t, 256)
activeCount, err := helpers.ActiveValidatorCount(context.Background(), state, 0)
require.NoError(t, err)
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
committees, err := helpers.BeaconCommittees(context.Background(), state, 0)
require.NoError(t, err)
require.Equal(t, committeesPerSlot, uint64(len(committees)))
for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, 0, idx)
require.NoError(t, err)
require.DeepEqual(t, committees[idx], committee)
}
}

View File

@@ -0,0 +1,21 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["helpers.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,245 @@
package peerdas
import (
"encoding/binary"
cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
const (
// Number of field elements per extended blob
fieldElementsPerExtBlob = 2 * cKzg4844.FieldElementsPerBlob
// Bytes per cell
bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement
// Number of cells in the extended matrix
extendedMatrixSize = fieldparams.MaxBlobsPerBlock * cKzg4844.CellsPerExtBlob
)
type (
extendedMatrix []cKzg4844.Cell
cellCoordinate struct {
blobIndex uint64
cellID uint64
}
)
var (
errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
errCellNotFound = errors.New("cell not found (should never happen)")
errCurveOrder = errors.New("could not set bls curve order as big int")
errBlsFieldElementNil = errors.New("bls field element is nil")
errBlsFieldElementBiggerThanCurveOrder = errors.New("bls field element higher than curve order")
errBlsFieldElementDoesNotFit = errors.New("bls field element does not fit in BytesPerFieldElement")
)
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
// Compute the custodied subnets.
subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
if err != nil {
return nil, errors.Wrap(err, "custody subnets")
}
columnsPerSubnet := cKzg4844.CellsPerExtBlob / dataColumnSidecarSubnetCount
// Knowing the subnet IDs and the number of columns per subnet, select all the columns the node should custody.
// Columns belonging to the same subnet are strided by the subnet count: subnet `s` custodies columns s, s+subnetCount, s+2*subnetCount, ...
columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
for i := uint64(0); i < columnsPerSubnet; i++ {
for subnetId := range subnetIds {
columnIndex := dataColumnSidecarSubnetCount*i + subnetId
columnIndices[columnIndex] = true
}
}
return columnIndices, nil
}
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
// Check if the custody subnet count is larger than the data column sidecar subnet count.
if custodySubnetCount > dataColumnSidecarSubnetCount {
return nil, errCustodySubnetCountTooLarge
}
// First, compute the subnet IDs that the node should participate in.
subnetIds := make(map[uint64]bool, custodySubnetCount)
for i := uint64(0); uint64(len(subnetIds)) < custodySubnetCount; i++ {
nodeIdUInt256, nextNodeIdUInt256 := new(uint256.Int), new(uint256.Int)
nodeIdUInt256.SetBytes(nodeId.Bytes())
nextNodeIdUInt256.Add(nodeIdUInt256, uint256.NewInt(i))
nextNodeIdUInt64 := nextNodeIdUInt256.Uint64()
nextNodeId := bytesutil.Uint64ToBytesLittleEndian(nextNodeIdUInt64)
hashedNextNodeId := hash.Hash(nextNodeId)
subnetId := binary.LittleEndian.Uint64(hashedNextNodeId[:8]) % dataColumnSidecarSubnetCount
if _, exists := subnetIds[subnetId]; !exists {
subnetIds[subnetId] = true
}
}
return subnetIds, nil
}
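Taken together, the two helpers map a node ID to custody subnets and then to strided column sets: with columnsPerSubnet = CellsPerExtBlob / DataColumnSidecarSubnetCount, a custodied subnet s yields columns s, s+subnetCount, s+2*subnetCount, and so on. A minimal within-package sketch; the custody subnet count of 2, the helper name, and the sort import are assumptions for illustration:
// sortedCustodyColumns is an illustrative sketch, not part of this change.
func sortedCustodyColumns(nodeID enode.ID) ([]uint64, error) {
	// Two custody subnets is a placeholder; real callers take the count from config.
	columns, err := CustodyColumns(nodeID, 2)
	if err != nil {
		return nil, err
	}
	sorted := make([]uint64, 0, len(columns))
	for column := range columns {
		sorted = append(sorted, column)
	}
	// With, say, 128 cells per extended blob and 32 subnets, each custodied
	// subnet contributes 128/32 = 4 strided columns: s, s+32, s+64, s+96.
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted, nil
}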
// computeExtendedMatrix computes the extended matrix from the blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#compute_extended_matrix
func computeExtendedMatrix(blobs []cKzg4844.Blob) (extendedMatrix, error) {
matrix := make(extendedMatrix, 0, extendedMatrixSize)
for i := range blobs {
// Chunk a non-extended blob into cells representing the corresponding extended blob.
blob := &blobs[i]
cells, err := cKzg4844.ComputeCells(blob)
if err != nil {
return nil, errors.Wrap(err, "compute cells for blob")
}
matrix = append(matrix, cells[:]...)
}
return matrix, nil
}
// recoverMatrix recovers the extended matrix from some cells.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func recoverMatrix(cellFromCoordinate map[cellCoordinate]cKzg4844.Cell, blobCount uint64) (extendedMatrix, error) {
matrix := make(extendedMatrix, 0, extendedMatrixSize)
for blobIndex := uint64(0); blobIndex < blobCount; blobIndex++ {
// Filter all cells that belong to the current blob.
cellIds := make([]uint64, 0, cKzg4844.CellsPerExtBlob)
for coordinate := range cellFromCoordinate {
if coordinate.blobIndex == blobIndex {
cellIds = append(cellIds, coordinate.cellID)
}
}
// Retrieve cells corresponding to all `cellIds`.
cellIdsCount := len(cellIds)
cells := make([]cKzg4844.Cell, 0, cellIdsCount)
for _, cellId := range cellIds {
coordinate := cellCoordinate{blobIndex: blobIndex, cellID: cellId}
cell, ok := cellFromCoordinate[coordinate]
if !ok {
return matrix, errCellNotFound
}
cells = append(cells, cell)
}
// Recover all cells.
allCellsForRow, err := cKzg4844.RecoverAllCells(cellIds, cells)
if err != nil {
return matrix, errors.Wrap(err, "recover all cells")
}
matrix = append(matrix, allCellsForRow[:]...)
}
return matrix, nil
}
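Both functions lay the matrix out as a flat, blob-major slice, so the cell for coordinate (blobIndex, cellID) lives at blobIndex*CellsPerExtBlob + cellID; with 128 cells per extended blob, blob 2 / cell 5 sits at index 2*128 + 5 = 261. A tiny sketch of that indexing (pure arithmetic, no KZG calls; the helper is illustrative):
// cellFlatIndex is an illustrative sketch, not part of this change: the flat
// position of a cell inside the blob-major matrix built by computeExtendedMatrix.
func cellFlatIndex(c cellCoordinate) uint64 {
	return c.blobIndex*uint64(cKzg4844.CellsPerExtBlob) + c.cellID
}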
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func dataColumnSidecars(signedBlock interfaces.SignedBeaconBlock, blobs []cKzg4844.Blob) ([]ethpb.DataColumnSidecar, error) {
blobsCount := len(blobs)
// Get the signed block header.
signedBlockHeader, err := signedBlock.Header()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}
// Get the block body.
block := signedBlock.Block()
blockBody := block.Body()
// Get the blob KZG commitments.
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Compute the KZG commitments inclusion proof.
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
}
// Compute cells and proofs.
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
for i := range blobs {
blob := &blobs[i]
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndProofs(blob)
if err != nil {
return nil, errors.Wrap(err, "compute cells and proofs")
}
cells = append(cells, blobCells)
proofs = append(proofs, blobProofs)
}
// Get the column sidecars.
sidecars := make([]ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
column := make([]cKzg4844.Cell, 0, blobsCount)
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
cell := cells[rowIndex][columnIndex]
column = append(column, cell)
kzgProof := proofs[rowIndex][columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}
columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
cell := column[i]
cellBytes := make([]byte, 0, bytesPerCell)
for _, fieldElement := range cell {
cellBytes = append(cellBytes, fieldElement[:]...)
}
columnBytes = append(columnBytes, cellBytes)
}
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
}
sidecars = append(sidecars, ethpb.DataColumnSidecar{
ColumnIndex: columnIndex,
DataColumn: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProof: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
})
}
return sidecars, nil
}
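End to end, a node would build every column sidecar for a block's blobs and then keep only the columns it custodies. A minimal within-package sketch; the custody subnet count of 4 and the helper name are placeholders:
// custodiedSidecars is an illustrative sketch, not part of this change.
func custodiedSidecars(nodeID enode.ID, block interfaces.SignedBeaconBlock, blobs []cKzg4844.Blob) ([]ethpb.DataColumnSidecar, error) {
	sidecars, err := dataColumnSidecars(block, blobs)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars")
	}
	custody, err := CustodyColumns(nodeID, 4)
	if err != nil {
		return nil, errors.Wrap(err, "custody columns")
	}
	kept := make([]ethpb.DataColumnSidecar, 0, len(custody))
	for i := range sidecars {
		if custody[sidecars[i].ColumnIndex] {
			kept = append(kept, sidecars[i])
		}
	}
	return kept, nil
}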

View File

@@ -29,8 +29,6 @@ import (
"go.opencensus.io/trace"
)
type customProcessingFn func(context.Context, state.BeaconState) error
// ExecuteStateTransition defines the procedure for a state transition function.
//
// Note: This method differs from the spec pseudocode as it uses a batch signature verification.
@@ -175,7 +173,18 @@ func ProcessSlotsIfPossible(ctx context.Context, state state.BeaconState, target
return state, nil
}
// ProcessSlots includes core slot processing as well as a cache
// ProcessSlots processes through skip slots and applies the epoch transition when needed
//
// Spec pseudocode definition:
//
// def process_slots(state: BeaconState, slot: Slot) -> None:
// assert state.slot < slot
// while state.slot < slot:
// process_slot(state)
// # Process epoch on the start slot of the next epoch
// if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
// process_epoch(state)
// state.slot = Slot(state.slot + 1)
func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.Slot) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlots")
defer span.End()
@@ -222,63 +231,42 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
defer func() {
SkipSlotCache.MarkNotInProgress(key)
}()
state, err = ProcessSlotsCore(ctx, span, state, slot, cacheBestBeaconStateOnErrFn(highestSlot, key))
if err != nil {
return nil, err
}
if highestSlot < state.Slot() {
SkipSlotCache.Put(ctx, key, state)
}
return state, nil
}
func cacheBestBeaconStateOnErrFn(highestSlot primitives.Slot, key [32]byte) customProcessingFn {
return func(ctx context.Context, state state.BeaconState) error {
for state.Slot() < slot {
if ctx.Err() != nil {
tracing.AnnotateError(span, ctx.Err())
// Cache last best value.
if highestSlot < state.Slot() {
SkipSlotCache.Put(ctx, key, state)
}
return ctx.Err()
}
return nil
}
}
// ProcessSlotsCore processes through skip slots and applies the epoch transition when needed
//
// Spec pseudocode definition:
//
// def process_slots(state: BeaconState, slot: Slot) -> None:
// assert state.slot < slot
// while state.slot < slot:
// process_slot(state)
// # Process epoch on the start slot of the next epoch
// if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
// process_epoch(state)
// state.slot = Slot(state.slot + 1)
func ProcessSlotsCore(ctx context.Context, span *trace.Span, state state.BeaconState, slot primitives.Slot, fn customProcessingFn) (state.BeaconState, error) {
var err error
for state.Slot() < slot {
if fn != nil {
if err = fn(ctx, state); err != nil {
tracing.AnnotateError(span, err)
return nil, err
if SkipSlotCache.Put(ctx, key, state); err != nil {
log.WithError(err).Error("Failed to put skip slot cache value")
}
}
return nil, ctx.Err()
}
state, err = ProcessSlot(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process slot")
}
state, err = ProcessEpoch(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
if time.CanProcessEpoch(state) {
if state.Version() == version.Phase0 {
state, err = ProcessEpochPrecompute(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch with optimizations")
}
} else if state.Version() <= version.Deneb {
if err = altair.ProcessEpoch(ctx, state); err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
} else {
if err = electra.ProcessEpoch(ctx, state); err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
}
}
if err := state.SetSlot(state.Slot() + 1); err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "failed to increment state slot")
@@ -290,46 +278,25 @@ func ProcessSlotsCore(ctx context.Context, span *trace.Span, state state.BeaconS
return nil, errors.Wrap(err, "failed to upgrade state")
}
}
return state, nil
}
// ProcessEpoch is a wrapper on fork specific epoch processing
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
var err error
if time.CanProcessEpoch(state) {
if state.Version() == version.Electra {
if err = electra.ProcessEpoch(ctx, state); err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
} else if state.Version() >= version.Altair {
if err = altair.ProcessEpoch(ctx, state); err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
} else {
state, err = ProcessEpochPrecompute(ctx, state)
if err != nil {
return nil, errors.Wrap(err, "could not process epoch with optimizations")
}
}
if highestSlot < state.Slot() {
SkipSlotCache.Put(ctx, key, state)
}
return state, err
return state, nil
}
// UpgradeState upgrades the state to the next version if possible.
func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.UpgradeState")
defer span.End()
var err error
upgraded := false
if time.CanUpgradeToAltair(state.Slot()) {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
upgraded = true
}
if time.CanUpgradeToBellatrix(state.Slot()) {
@@ -338,7 +305,6 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
tracing.AnnotateError(span, err)
return nil, err
}
upgraded = true
}
if time.CanUpgradeToCapella(state.Slot()) {
@@ -347,7 +313,6 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
tracing.AnnotateError(span, err)
return nil, err
}
upgraded = true
}
if time.CanUpgradeToDeneb(state.Slot()) {
@@ -356,7 +321,6 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
tracing.AnnotateError(span, err)
return nil, err
}
upgraded = true
}
if time.CanUpgradeToElectra(state.Slot()) {
@@ -365,13 +329,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
tracing.AnnotateError(span, err)
return nil, err
}
upgraded = true
}
if upgraded {
log.Debugf("upgraded state to %s", version.String(state.Version()))
}
return state, nil
}
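With the epoch dispatch folded back into ProcessSlots, a caller simply advances the state to the target slot and the fork-specific epoch transition plus any due upgrade happen along the way. A small hedged usage sketch; the helper name is illustrative and the slots helpers are assumed to be imported here:
// advanceToNextEpoch is an illustrative sketch, not part of this change.
func advanceToNextEpoch(ctx context.Context, st state.BeaconState) (state.BeaconState, error) {
	target, err := slots.EpochStart(slots.ToEpoch(st.Slot()) + 1)
	if err != nil {
		return nil, err
	}
	return ProcessSlots(ctx, st, target)
}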

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
@@ -327,18 +328,20 @@ func ProcessBlockForStateRoot(
if err != nil {
return nil, err
}
if state.Version() >= version.Capella {
state, err = b.ProcessWithdrawals(state, executionData)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
if blk.IsBlinded() {
state, err = b.ProcessPayloadHeader(state, executionData)
} else {
state, err = b.ProcessPayload(state, executionData)
}
state, err = b.ProcessPayload(state, blk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not process execution data")
}
}
if err := VerifyBlobCommitmentCount(blk); err != nil {
return nil, err
}
randaoReveal := signed.Block().Body().RandaoReveal()
state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
if err != nil {
@@ -374,6 +377,20 @@ func ProcessBlockForStateRoot(
return state, nil
}
func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
if blk.Version() < version.Deneb {
return nil
}
kzgs, err := blk.Body().BlobKzgCommitments()
if err != nil {
return err
}
if len(kzgs) > field_params.MaxBlobsPerBlock {
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
}
return nil
}
// This calls altair block operations.
func altairOperations(
ctx context.Context,

View File

@@ -2,11 +2,13 @@ package transition_test
import (
"context"
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -210,3 +212,15 @@ func TestProcessBlockDifferentVersion(t *testing.T) {
_, _, err = transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "state and block are different version. 0 != 1", err)
}
func TestVerifyBlobCommitmentCount(t *testing.T) {
b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
rb, err := blocks.NewBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, transition.VerifyBlobCommitmentCount(rb))
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, field_params.MaxBlobsPerBlock+1)}}
rb, err = blocks.NewBeaconBlock(b)
require.NoError(t, err)
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", field_params.MaxBlobsPerBlock+1), transition.VerifyBlobCommitmentCount(rb))
}

View File

@@ -246,6 +246,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
return verification.BlobSidecarNoop(ro)
}
// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
expected := blobNamer{root: root, index: idx}
encoded, err := afero.ReadFile(bs.fs, expected.path())
if err != nil {
return nil, err
}
s := &ethpb.DataColumnSidecar{}
if err := s.UnmarshalSSZ(encoded); err != nil {
return nil, err
}
return s, nil
}
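Reads mirror the blob path: the sidecar file is loaded from the per-root directory and decoded from SSZ. A minimal hedged usage sketch; the helper name, root, and index are placeholders:
// readColumnCommitments is an illustrative sketch, not part of this change.
func readColumnCommitments(bs *BlobStorage, root [32]byte, idx uint64) ([][]byte, error) {
	sidecar, err := bs.GetColumn(root, idx)
	if err != nil {
		return nil, err
	}
	return sidecar.KzgCommitments, nil
}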
// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
rootDir := blobNamer{root: root}.dir()

View File

@@ -84,15 +84,11 @@ func (s *mockEngine) callCount(method string) int {
}
func mockParseUintList(t *testing.T, data json.RawMessage) []uint64 {
var list []string
var list []uint64
if err := json.Unmarshal(data, &list); err != nil {
t.Fatalf("failed to parse uint list: %v", err)
}
uints := make([]uint64, len(list))
for i, u := range list {
uints[i] = hexutil.MustDecodeUint64(u)
}
return uints
return list
}
func mockParseHexByteList(t *testing.T, data json.RawMessage) []hexutil.Bytes {
@@ -121,7 +117,7 @@ func TestParseRequest(t *testing.T) {
ctx := context.Background()
cases := []struct {
method string
hexArgs []string // uint64 as hex
uintArgs []uint64
byteArgs []hexutil.Bytes
}{
{
@@ -139,28 +135,26 @@ func TestParseRequest(t *testing.T) {
},
},
{
method: GetPayloadBodiesByRangeV1,
hexArgs: []string{hexutil.EncodeUint64(0), hexutil.EncodeUint64(1)},
method: GetPayloadBodiesByRangeV1,
uintArgs: []uint64{0, 1},
},
{
method: GetPayloadBodiesByRangeV2,
hexArgs: []string{hexutil.EncodeUint64(math.MaxUint64), hexutil.EncodeUint64(1)},
method: GetPayloadBodiesByRangeV2,
uintArgs: []uint64{math.MaxUint64, 1},
},
}
for _, c := range cases {
t.Run(c.method, func(t *testing.T) {
cli, srv := newMockEngine(t)
srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, _ *http.Request) {
srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
require.Equal(t, c.method, msg.Method)
nr := uint64(len(c.byteArgs))
if len(c.byteArgs) > 0 {
require.DeepEqual(t, c.byteArgs, mockParseHexByteList(t, msg.Params))
}
if len(c.hexArgs) > 0 {
if len(c.uintArgs) > 0 {
rang := mockParseUintList(t, msg.Params)
for i, r := range rang {
require.Equal(t, c.hexArgs[i], hexutil.EncodeUint64(r))
}
require.DeepEqual(t, c.uintArgs, rang)
nr = rang[1]
}
mockWriteResult(t, w, msg, make([]*pb.ExecutionPayloadBody, nr))
@@ -171,18 +165,18 @@ func TestParseRequest(t *testing.T) {
if len(c.byteArgs) > 0 {
args = []interface{}{c.byteArgs}
}
if len(c.hexArgs) > 0 {
args = make([]interface{}, len(c.hexArgs))
for i := range c.hexArgs {
args[i] = c.hexArgs[i]
if len(c.uintArgs) > 0 {
args = make([]interface{}, len(c.uintArgs))
for i := range c.uintArgs {
args[i] = c.uintArgs[i]
}
}
require.NoError(t, cli.CallContext(ctx, &result, c.method, args...))
if len(c.byteArgs) > 0 {
require.Equal(t, len(c.byteArgs), len(result))
}
if len(c.hexArgs) > 0 {
require.Equal(t, int(hexutil.MustDecodeUint64(c.hexArgs[1])), len(result))
if len(c.uintArgs) > 0 {
require.Equal(t, int(c.uintArgs[1]), len(result))
}
})
}
@@ -209,7 +203,7 @@ func TestCallCount(t *testing.T) {
for _, c := range cases {
t.Run(c.method, func(t *testing.T) {
cli, srv := newMockEngine(t)
srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, _ *http.Request) {
srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
mockWriteResult(t, w, msg, nil)
})
for i := 0; i < c.count; i++ {

View File

@@ -5,7 +5,6 @@ import (
"sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -161,7 +160,7 @@ func computeRanges(hbns []hashBlockNumber) []byRangeReq {
func (r *blindedBlockReconstructor) requestBodiesByRange(ctx context.Context, client RPCClient, method string, req byRangeReq) error {
result := make([]*pb.ExecutionPayloadBody, 0)
if err := client.CallContext(ctx, &result, method, hexutil.EncodeUint64(req.start), hexutil.EncodeUint64(req.count)); err != nil {
if err := client.CallContext(ctx, &result, method, req.start, req.count); err != nil {
return err
}
if uint64(len(result)) != req.count {

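A minimal sketch of the by-range call site after this change, with the method name passed in as in the surrounding code. The result element type is json.RawMessage here purely to keep the example self-contained; the real code decodes into pb.ExecutionPayloadBody.

package engine

import (
	"context"
	"encoding/json"

	"github.com/ethereum/go-ethereum/rpc"
)

// fetchBodiesByRange is an illustrative helper, not part of the diff. It mirrors
// requestBodiesByRange above: start and count are now passed as plain uint64
// values instead of hexutil.EncodeUint64 strings.
func fetchBodiesByRange(ctx context.Context, cli *rpc.Client, method string, start, count uint64) ([]json.RawMessage, error) {
	result := make([]json.RawMessage, 0, count)
	if err := cli.CallContext(ctx, &result, method, start, count); err != nil {
		return nil, err
	}
	return result, nil
}
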
View File

@@ -676,18 +676,3 @@ func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) (
}
return f.TargetRootForEpoch(targetNode.root, epoch)
}
// ParentRoot returns the block root of the parent node if it is in forkchoice.
// The exception is for the finalized checkpoint root which we return the zero
// hash.
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
// Return the zero hash for the tree root
if n.parent == nil {
return [32]byte{}, nil
}
return n.parent.root, nil
}

View File

@@ -861,29 +861,3 @@ func TestForkChoiceSlot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}
func TestForkchoiceParentRoot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
root1 := [32]byte{'a'}
st, root, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
root2 := [32]byte{'b'}
st, root, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
root, err = f.ParentRoot(root2)
require.NoError(t, err)
require.Equal(t, root1, root)
_, err = f.ParentRoot([32]byte{'c'})
require.ErrorIs(t, err, ErrNilNode)
zeroHash := [32]byte{}
root, err = f.ParentRoot(zeroHash)
require.NoError(t, err)
require.Equal(t, zeroHash, root)
}

View File

@@ -80,7 +80,6 @@ type FastGetter interface {
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
UnrealizedJustifiedPayloadBlockHash() [32]byte
Weight(root [32]byte) (uint64, error)
ParentRoot(root [32]byte) ([32]byte, error)
}
// Setter allows to set forkchoice information

View File

@@ -169,10 +169,3 @@ func (ro *ROForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch
defer ro.l.RUnlock()
return ro.getter.TargetRootForEpoch(root, epoch)
}
// ParentRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.ParentRoot(root)
}

View File

@@ -37,7 +37,6 @@ const (
slotCalled
lastRootCalled
targetRootForEpochCalled
parentRootCalled
)
func _discard(t *testing.T, e error) {
@@ -292,8 +291,3 @@ func (ro *mockROForkchoice) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) (
ro.calls = append(ro.calls, targetRootForEpochCalled)
return [32]byte{}, nil
}
func (ro *mockROForkchoice) ParentRoot(_ [32]byte) ([32]byte, error) {
ro.calls = append(ro.calls, parentRootCalled)
return [32]byte{}, nil
}

View File

@@ -73,21 +73,6 @@ func configureBuilderCircuitBreaker(cliCtx *cli.Context) error {
return err
}
}
if cliCtx.IsSet(flags.MinBuilderBid.Name) {
c := params.BeaconConfig().Copy()
c.MinBuilderBid = cliCtx.Uint64(flags.MinBuilderBid.Name)
if err := params.SetActive(c); err != nil {
return err
}
}
if cliCtx.IsSet(flags.MinBuilderDiff.Name) {
c := params.BeaconConfig().Copy()
c.MinBuilderDiff = cliCtx.Uint64(flags.MinBuilderDiff.Name)
if err := params.SetActive(c); err != nil {
return err
}
}
return nil
}

View File

@@ -46,6 +46,7 @@ go_library(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",

View File

@@ -47,11 +47,11 @@ type quicProtocol uint16
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
// return early if discv5 isn't running
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
// been rotated.
func (s *Service) RefreshPersistentSubnets() {
// return early if discv5 isn't running
if s.dv5Listener == nil || !s.isInitialized() {
return
}
@@ -60,6 +60,10 @@ func (s *Service) RefreshENR() {
log.WithError(err).Error("Could not initialize persistent subnets")
return
}
if err := initializePersistentColumnSubnets(s.dv5Listener.LocalNode().ID()); err != nil {
log.WithError(err).Error("Could not initialize persistent column subnets")
return
}
bitV := bitfield.NewBitvector64()
committees := cache.SubnetIDs.GetAllSubnets()

View File

@@ -435,7 +435,7 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
return id
}
func TestRefreshENR_ForkBoundaries(t *testing.T) {
func TestRefreshPersistentSubnets_ForkBoundaries(t *testing.T) {
params.SetupTestConfigCleanup(t)
// Clean up caches after usage.
defer cache.SubnetIDs.EmptyAllCaches()
@@ -601,7 +601,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.svcBuilder(t)
s.RefreshENR()
s.RefreshPersistentSubnets()
tt.postValidation(t, s)
s.dv5Listener.Close()
cache.SubnetIDs.EmptyAllCaches()

View File

@@ -22,6 +22,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
SyncCommitteeSubnetTopicFormat: func() proto.Message { return &ethpb.SyncCommitteeMessage{} },
BlsToExecutionChangeSubnetTopicFormat: func() proto.Message { return &ethpb.SignedBLSToExecutionChange{} },
BlobSubnetTopicFormat: func() proto.Message { return &ethpb.BlobSidecar{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
}
// GossipTopicMappings is a function to return the assigned data type

View File

@@ -3,6 +3,7 @@ package p2p
import (
"context"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/connmgr"
@@ -81,8 +82,9 @@ type PeerManager interface {
PeerID() peer.ID
Host() host.Host
ENR() *enr.Record
NodeID() enode.ID
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshENR()
RefreshPersistentSubnets()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

View File

@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
formatting := []interface{}{digest}
// Special case for attestation subnets which have a second formatting placeholder.
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */)
}

View File

@@ -43,6 +43,9 @@ const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -65,6 +68,9 @@ const (
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root. New in PeerDAS.
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.

View File

@@ -226,7 +226,7 @@ func (s *Service) Start() {
}
// Initialize metadata according to the
// current epoch.
s.RefreshENR()
s.RefreshPersistentSubnets()
// Periodic functions.
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
@@ -234,7 +234,7 @@ func (s *Service) Start() {
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
async.RunEvery(s.ctx, 1*time.Minute, func() {
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
@@ -358,6 +358,15 @@ func (s *Service) ENR() *enr.Record {
return s.dv5Listener.Self().Record()
}
// NodeID returns the local node's node ID
// for discovery.
func (s *Service) NodeID() enode.ID {
if s.dv5Listener == nil {
return [32]byte{}
}
return s.dv5Listener.Self().ID()
}
// DiscoveryAddresses represents our enr addresses as multiaddresses.
func (s *Service) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
if s.dv5Listener == nil {

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -206,6 +207,25 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
return nil
}
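// initializePersistentColumnSubnets computes the data column subnets this node
// must custody from its node ID and caches them, unless a non-expired entry is
// already present in the column subnet cache.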
func initializePersistentColumnSubnets(id enode.ID) error {
_, ok, expTime := cache.ColumnSubnetIDs.GetColumnSubnets()
if ok && expTime.After(time.Now()) {
return nil
}
subsMap, err := peerdas.CustodyColumnSubnets(id, params.BeaconConfig().CustodyRequirement)
if err != nil {
return err
}
subs := make([]uint64, 0, len(subsMap))
for sub := range subsMap {
subs = append(subs, sub)
}
cache.ColumnSubnetIDs.AddColumnSubnets(subs)
return nil
}
// Spec pseudocode definition:
//
// def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:

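A small sketch (not part of the diff, assumptions flagged in the comments) showing that the same helper can derive the custody column subnets for any node ID, e.g. a remote peer, since the derivation depends only on the ID and the configured custody requirement.

import (
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

// custodySubnetsForNode is illustrative only. It reuses the calls from
// initializePersistentColumnSubnets above; CustodyColumnSubnets is assumed to
// return a set keyed by subnet index, as the range over its keys suggests.
func custodySubnetsForNode(id enode.ID) ([]uint64, error) {
	subsMap, err := peerdas.CustodyColumnSubnets(id, params.BeaconConfig().CustodyRequirement)
	if err != nil {
		return nil, err
	}
	subs := make([]uint64, 0, len(subsMap))
	for sub := range subsMap {
		subs = append(subs, sub)
	}
	return subs, nil
}
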
View File

@@ -3,6 +3,7 @@ package testing
import (
"context"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/control"
@@ -55,6 +56,11 @@ func (_ *FakeP2P) ENR() *enr.Record {
return new(enr.Record)
}
// NodeID returns the node id of the local peer.
func (_ *FakeP2P) NodeID() enode.ID {
return [32]byte{}
}
// DiscoveryAddresses -- fake
func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
@@ -65,8 +71,8 @@ func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
return false, nil
}
// RefreshENR mocks the p2p func.
func (_ *FakeP2P) RefreshENR() {}
// RefreshPersistentSubnets mocks the p2p func.
func (_ *FakeP2P) RefreshPersistentSubnets() {}
// LeaveTopic -- fake.
func (_ *FakeP2P) LeaveTopic(_ string) error {

View File

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -39,6 +40,11 @@ func (m MockPeerManager) ENR() *enr.Record {
return m.Enr
}
// NodeID .
func (m MockPeerManager) NodeID() enode.ID {
return [32]byte{}
}
// DiscoveryAddresses .
func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
if m.FailDiscoveryAddr {
@@ -47,8 +53,8 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return m.DiscoveryAddr, nil
}
// RefreshENR .
func (_ MockPeerManager) RefreshENR() {}
// RefreshPersistentSubnets .
func (_ MockPeerManager) RefreshPersistentSubnets() {}
// FindPeersWithSubnet .
func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {

View File

@@ -16,11 +16,6 @@ import (
log "github.com/sirupsen/logrus"
)
const (
MockRawPeerId0 = "16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR"
MockRawPeerId1 = "16Uiu2HAm4HgJ9N1o222xK61o7LSgToYWoAy1wNTJRkh9gLZapVAy"
)
// MockPeersProvider implements PeersProvider for testing.
type MockPeersProvider struct {
lock sync.Mutex
@@ -55,7 +50,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
},
})
// Pretend we are connected to two peers
id0, err := peer.Decode(MockRawPeerId0)
id0, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
if err != nil {
log.WithError(err).Debug("Cannot decode")
}
@@ -66,7 +61,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
m.peers.Add(createENR(), id0, ma0, network.DirInbound)
m.peers.SetConnectionState(id0, peers.PeerConnected)
m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
id1, err := peer.Decode(MockRawPeerId1)
id1, err := peer.Decode("16Uiu2HAm4HgJ9N1o222xK61o7LSgToYWoAy1wNTJRkh9gLZapVAy")
if err != nil {
log.WithError(err).Debug("Cannot decode")
}

View File

@@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
core "github.com/libp2p/go-libp2p/core"
@@ -263,6 +264,11 @@ func (_ *TestP2P) ENR() *enr.Record {
return new(enr.Record)
}
// NodeID returns the node id of the local peer.
func (_ *TestP2P) NodeID() enode.ID {
return [32]byte{}
}
// DiscoveryAddresses --
func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
@@ -360,8 +366,8 @@ func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
return false, nil
}
// RefreshENR mocks the p2p func.
func (_ *TestP2P) RefreshENR() {}
// RefreshPersistentSubnets mocks the p2p func.
func (_ *TestP2P) RefreshPersistentSubnets() {}
// ForkDigest mocks the p2p func.
func (p *TestP2P) ForkDigest() ([4]byte, error) {

View File

@@ -30,6 +30,9 @@ const (
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
GossipBlobSidecarMessage = "blob_sidecar"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// Topic Formats
//
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
@@ -52,4 +55,6 @@ const (
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
// BlobSubnetTopicFormat is the topic format for the blob subnet.
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
)

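A runnable sketch of how the new format expands into a concrete topic string. The GossipProtocolAndDigest prefix is assumed to be "/eth2/%x/", consistent with the other subnet topic formats in this file.

package main

import "fmt"

// Assumed prefix; in the real package DataColumnSubnetTopicFormat is
// GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d".
const dataColumnSubnetTopicFormat = "/eth2/%x/" + "data_column_sidecar" + "_%d"

func main() {
	digest := [4]byte{0x01, 0x02, 0x03, 0x04} // fork digest
	fmt.Printf(dataColumnSubnetTopicFormat+"\n", digest, 7)
	// Output: /eth2/01020304/data_column_sidecar_7
}
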
View File

@@ -9,10 +9,15 @@ var (
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
ErrGeneric = errors.New("internal service error")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")
ErrDataColumnLTMinRequest = errors.New("data column epoch < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")
ErrResourceUnavailable = errors.New("resource requested unavailable")
ErrInvalidColumnIndex = errors.New("invalid column index requested")
)

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"beacon.go",
"errors.go",
"log.go",
"service.go",
@@ -21,8 +20,6 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -31,7 +28,6 @@ go_library(
"//beacon-chain/sync:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",

View File

@@ -1,128 +0,0 @@
package core
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// Retrieve chain head information from the DB and the current beacon state.
func (s *Service) ChainHead(ctx context.Context) (*ethpb.ChainHead, *RpcError) {
headBlock, err := s.HeadFetcher.HeadBlock(ctx)
if err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "could not get head block"),
Reason: Internal,
}
}
if err := consensusblocks.BeaconBlockIsNil(headBlock); err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "head block of chain was nil"),
Reason: NotFound,
}
}
optimisticStatus, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "could not get optimistic status"),
Reason: Internal,
}
}
headBlockRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "could not get head block root"),
Reason: Internal,
}
}
validGenesis := false
validateCP := func(cp *ethpb.Checkpoint, name string) error {
if bytesutil.ToBytes32(cp.Root) == params.BeaconConfig().ZeroHash && cp.Epoch == 0 {
if validGenesis {
return nil
}
// Retrieve genesis block in the event we have genesis checkpoints.
genBlock, err := s.BeaconDB.GenesisBlock(ctx)
if err != nil || consensusblocks.BeaconBlockIsNil(genBlock) != nil {
return errors.New("could not get genesis block")
}
validGenesis = true
return nil
}
b, err := s.BeaconDB.Block(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
return errors.Errorf("could not get %s block: %v", name, err)
}
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return errors.Errorf("could not get %s block: %v", name, err)
}
return nil
}
finalizedCheckpoint := s.FinalizedFetcher.FinalizedCheckpt()
if err := validateCP(finalizedCheckpoint, "finalized"); err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not get finalized checkpoint"),
Reason: Internal,
}
}
justifiedCheckpoint := s.FinalizedFetcher.CurrentJustifiedCheckpt()
if err := validateCP(justifiedCheckpoint, "justified"); err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not get current justified checkpoint"),
Reason: Internal,
}
}
prevJustifiedCheckpoint := s.FinalizedFetcher.PreviousJustifiedCheckpt()
if err := validateCP(prevJustifiedCheckpoint, "prev justified"); err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not get previous justified checkpoint"),
Reason: Internal,
}
}
fSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "could not get epoch start slot from finalized checkpoint epoch"),
Reason: Internal,
}
}
jSlot, err := slots.EpochStart(justifiedCheckpoint.Epoch)
if err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "could not get epoch start slot from justified checkpoint epoch"),
Reason: Internal,
}
}
pjSlot, err := slots.EpochStart(prevJustifiedCheckpoint.Epoch)
if err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "could not get epoch start slot from prev justified checkpoint epoch"),
Reason: Internal,
}
}
return &ethpb.ChainHead{
HeadSlot: headBlock.Block().Slot(),
HeadEpoch: slots.ToEpoch(headBlock.Block().Slot()),
HeadBlockRoot: headBlockRoot[:],
FinalizedSlot: fSlot,
FinalizedEpoch: finalizedCheckpoint.Epoch,
FinalizedBlockRoot: finalizedCheckpoint.Root,
JustifiedSlot: jSlot,
JustifiedEpoch: justifiedCheckpoint.Epoch,
JustifiedBlockRoot: justifiedCheckpoint.Root,
PreviousJustifiedSlot: pjSlot,
PreviousJustifiedEpoch: prevJustifiedCheckpoint.Epoch,
PreviousJustifiedBlockRoot: prevJustifiedCheckpoint.Root,
OptimisticStatus: optimisticStatus,
}, nil
}

View File

@@ -4,7 +4,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
opfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
@@ -12,8 +11,6 @@ import (
)
type Service struct {
BeaconDB db.ReadOnlyDatabase
ChainInfoFetcher blockchain.ChainInfoFetcher
HeadFetcher blockchain.HeadFetcher
FinalizedFetcher blockchain.FinalizationFetcher
GenesisTimeFetcher blockchain.TimeFetcher

View File

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
beaconState "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -753,177 +752,3 @@ func subnetsFromCommittee(pubkey []byte, comm *ethpb.SyncCommittee) []uint64 {
}
return positions
}
// ValidatorParticipation retrieves the validator participation information for a given epoch,
// it returns the information about validator's participation rate in voting on the proof of stake
// rules based on their balance compared to the total active validator balance.
func (s *Service) ValidatorParticipation(
ctx context.Context,
requestedEpoch primitives.Epoch,
) (
*ethpb.ValidatorParticipationResponse,
*RpcError,
) {
currentSlot := s.GenesisTimeFetcher.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
if requestedEpoch > currentEpoch {
return nil, &RpcError{
Err: fmt.Errorf("cannot retrieve information about an epoch greater than current epoch, current epoch %d, requesting %d", currentEpoch, requestedEpoch),
Reason: BadRequest,
}
}
// Use the last slot of requested epoch to obtain current and previous epoch attestations.
// This ensures that we don't miss previous attestations when input requested epochs.
endSlot, err := slots.EpochEnd(requestedEpoch)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not get slot from requested epoch")}
}
// Get as close as we can to the end of the current epoch without going past the current slot.
// The above check ensures a future *epoch* isn't requested, but the end slot of the requested epoch could still
// be past the current slot. In that case, use the current slot as the best approximation of the requested epoch.
// Replayer will make sure the slot ultimately used is canonical.
if endSlot > currentSlot {
endSlot = currentSlot
}
// ReplayerBuilder ensures that a canonical chain is followed to the slot
beaconSt, err := s.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayBlocks(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrapf(err, "error replaying blocks for state at slot %d", endSlot)}
}
var v []*precompute.Validator
var b *precompute.Balance
if beaconSt.Version() == version.Phase0 {
v, b, err = precompute.New(ctx, beaconSt)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not set up pre compute instance")}
}
_, b, err = precompute.ProcessAttestations(ctx, beaconSt, v, b)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not pre compute attestations")}
}
} else if beaconSt.Version() >= version.Altair {
v, b, err = altair.InitializePrecomputeValidators(ctx, beaconSt)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not set up altair pre compute instance")}
}
_, b, err = altair.ProcessEpochParticipation(ctx, beaconSt, b, v)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Wrap(err, "could not pre compute attestations: %v")}
}
} else {
return nil, &RpcError{Reason: Internal, Err: fmt.Errorf("invalid state type retrieved with a version of %s", version.String(beaconSt.Version()))}
}
cp := s.FinalizedFetcher.FinalizedCheckpt()
p := &ethpb.ValidatorParticipationResponse{
Epoch: requestedEpoch,
Finalized: requestedEpoch <= cp.Epoch,
Participation: &ethpb.ValidatorParticipation{
// TODO(7130): Remove these three deprecated fields.
GlobalParticipationRate: float32(b.PrevEpochTargetAttested) / float32(b.ActivePrevEpoch),
VotedEther: b.PrevEpochTargetAttested,
EligibleEther: b.ActivePrevEpoch,
CurrentEpochActiveGwei: b.ActiveCurrentEpoch,
CurrentEpochAttestingGwei: b.CurrentEpochAttested,
CurrentEpochTargetAttestingGwei: b.CurrentEpochTargetAttested,
PreviousEpochActiveGwei: b.ActivePrevEpoch,
PreviousEpochAttestingGwei: b.PrevEpochAttested,
PreviousEpochTargetAttestingGwei: b.PrevEpochTargetAttested,
PreviousEpochHeadAttestingGwei: b.PrevEpochHeadAttested,
},
}
return p, nil
}
// ValidatorActiveSetChanges retrieves the active set changes for a given epoch.
//
// This data includes any activations, voluntary exits, and involuntary
// ejections.
func (s *Service) ValidatorActiveSetChanges(
ctx context.Context,
requestedEpoch primitives.Epoch,
) (
*ethpb.ActiveSetChanges,
*RpcError,
) {
currentEpoch := slots.ToEpoch(s.GenesisTimeFetcher.CurrentSlot())
if requestedEpoch > currentEpoch {
return nil, &RpcError{
Err: errors.Errorf("cannot retrieve information about an epoch in the future, current epoch %d, requesting %d", currentEpoch, requestedEpoch),
Reason: BadRequest,
}
}
slot, err := slots.EpochStart(requestedEpoch)
if err != nil {
return nil, &RpcError{Err: err, Reason: BadRequest}
}
requestedState, err := s.ReplayerBuilder.ReplayerForSlot(slot).ReplayBlocks(ctx)
if err != nil {
return nil, &RpcError{
Err: errors.Wrapf(err, "error replaying blocks for state at slot %d", slot),
Reason: Internal,
}
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, requestedState, coreTime.CurrentEpoch(requestedState))
if err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not get active validator count"),
Reason: Internal,
}
}
vs := requestedState.Validators()
activatedIndices := validators.ActivatedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
exitedIndices, err := validators.ExitedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs, activeValidatorCount)
if err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not determine exited validator indices"),
Reason: Internal,
}
}
slashedIndices := validators.SlashedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
ejectedIndices, err := validators.EjectedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs, activeValidatorCount)
if err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not determine ejected validator indices"),
Reason: Internal,
}
}
// Retrieve public keys for the indices.
activatedKeys := make([][]byte, len(activatedIndices))
exitedKeys := make([][]byte, len(exitedIndices))
slashedKeys := make([][]byte, len(slashedIndices))
ejectedKeys := make([][]byte, len(ejectedIndices))
for i, idx := range activatedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
activatedKeys[i] = pubkey[:]
}
for i, idx := range exitedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
exitedKeys[i] = pubkey[:]
}
for i, idx := range slashedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
slashedKeys[i] = pubkey[:]
}
for i, idx := range ejectedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
ejectedKeys[i] = pubkey[:]
}
return &ethpb.ActiveSetChanges{
Epoch: requestedEpoch,
ActivatedPublicKeys: activatedKeys,
ActivatedIndices: activatedIndices,
ExitedPublicKeys: exitedKeys,
ExitedIndices: exitedIndices,
SlashedPublicKeys: slashedKeys,
SlashedIndices: slashedIndices,
EjectedPublicKeys: ejectedKeys,
EjectedIndices: ejectedIndices,
}, nil
}

View File

@@ -70,7 +70,7 @@ func (s *Service) endpoints(
endpoints = append(endpoints, s.eventsEndpoints()...)
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater, coreService)...)
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(stater, coreService)...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService)...)
if enableDebug {
endpoints = append(endpoints, s.debugEndpoints(stater)...)
}
@@ -983,15 +983,6 @@ func (s *Service) prysmBeaconEndpoints(
handler: server.GetIndividualVotes,
methods: []string{http.MethodPost},
},
{
template: "/prysm/v1/beacon/chain_head",
name: namespace + ".GetChainHead",
middleware: []mux.MiddlewareFunc{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
},
handler: server.GetChainHead,
methods: []string{http.MethodGet},
},
}
}
@@ -1069,52 +1060,32 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
}
}
func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *core.Service) []endpoint {
func (*Service) prysmValidatorEndpoints(coreService *core.Service) []endpoint {
server := &validatorprysm.Server{
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
Stater: stater,
CoreService: coreService,
CoreService: coreService,
}
const namespace = "prysm.validator"
return []endpoint{
{
template: "/prysm/validators/performance",
name: namespace + ".GetPerformance",
name: namespace + ".GetValidatorPerformance",
middleware: []mux.MiddlewareFunc{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
},
handler: server.GetPerformance,
handler: server.GetValidatorPerformance,
methods: []string{http.MethodPost},
},
{
template: "/prysm/v1/validators/performance",
name: namespace + ".GetPerformance",
name: namespace + ".GetValidatorPerformance",
middleware: []mux.MiddlewareFunc{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
},
handler: server.GetPerformance,
handler: server.GetValidatorPerformance,
methods: []string{http.MethodPost},
},
{
template: "/prysm/v1/validators/participation",
name: namespace + ".GetParticipation",
middleware: []mux.MiddlewareFunc{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
},
handler: server.GetParticipation,
methods: []string{http.MethodGet},
},
{
template: "/prysm/v1/validators/active_set_changes",
name: namespace + ".GetActiveSetChanges",
middleware: []mux.MiddlewareFunc{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
},
handler: server.GetActiveSetChanges,
methods: []string{http.MethodGet},
},
}
}

View File

@@ -114,7 +114,6 @@ func Test_endpoints(t *testing.T) {
"/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
"/prysm/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
"/prysm/v1/beacon/chain_head": {http.MethodGet},
}
prysmNodeRoutes := map[string][]string{
@@ -125,10 +124,8 @@ func Test_endpoints(t *testing.T) {
}
prysmValidatorRoutes := map[string][]string{
"/prysm/validators/performance": {http.MethodPost},
"/prysm/v1/validators/performance": {http.MethodPost},
"/prysm/v1/validators/participation": {http.MethodGet},
"/prysm/v1/validators/active_set_changes": {http.MethodGet},
"/prysm/validators/performance": {http.MethodPost},
"/prysm/v1/validators/performance": {http.MethodPost},
}
s := &Service{cfg: &Config{}}

View File

@@ -116,11 +116,9 @@ func (s *Server) getBlockV2Ssz(w http.ResponseWriter, blk interfaces.ReadOnlySig
result, err := s.getBlockResponseBodySsz(blk)
if err != nil {
httputil.HandleError(w, "Could not get signed beacon block: "+err.Error(), http.StatusInternalServerError)
return
}
if result == nil {
httputil.HandleError(w, fmt.Sprintf("Unknown block type %T", blk), http.StatusInternalServerError)
return
}
w.Header().Set(api.VersionHeader, version.String(blk.Version()))
httputil.WriteSsz(w, result, "beacon_block.ssz")
@@ -151,11 +149,6 @@ func (s *Server) getBlockV2Json(ctx context.Context, w http.ResponseWriter, blk
result, err := s.getBlockResponseBodyJson(ctx, blk)
if err != nil {
httputil.HandleError(w, "Error processing request: "+err.Error(), http.StatusInternalServerError)
return
}
if result == nil {
httputil.HandleError(w, fmt.Sprintf("Unknown block type %T", blk), http.StatusInternalServerError)
return
}
w.Header().Set(api.VersionHeader, result.Version)
httputil.WriteJson(w, result)

View File

@@ -52,14 +52,10 @@ go_test(
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/httputil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -154,32 +154,3 @@ func (s *Server) GetIndividualVotes(w http.ResponseWriter, r *http.Request) {
}
httputil.WriteJson(w, response)
}
// GetChainHead retrieves information about the head of the beacon chain from
// the view of the beacon chain node.
func (s *Server) GetChainHead(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetChainHead")
defer span.End()
ch, rpcError := s.CoreService.ChainHead(ctx)
if rpcError != nil {
httputil.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
return
}
response := &structs.ChainHead{
HeadSlot: fmt.Sprintf("%d", ch.HeadSlot),
HeadEpoch: fmt.Sprintf("%d", ch.HeadEpoch),
HeadBlockRoot: hexutil.Encode(ch.HeadBlockRoot),
FinalizedSlot: fmt.Sprintf("%d", ch.FinalizedSlot),
FinalizedEpoch: fmt.Sprintf("%d", ch.FinalizedEpoch),
FinalizedBlockRoot: hexutil.Encode(ch.FinalizedBlockRoot),
JustifiedSlot: fmt.Sprintf("%d", ch.JustifiedSlot),
JustifiedEpoch: fmt.Sprintf("%d", ch.JustifiedEpoch),
JustifiedBlockRoot: hexutil.Encode(ch.JustifiedBlockRoot),
PreviousJustifiedSlot: fmt.Sprintf("%d", ch.PreviousJustifiedSlot),
PreviousJustifiedEpoch: fmt.Sprintf("%d", ch.PreviousJustifiedEpoch),
PreviousJustifiedBlockRoot: hexutil.Encode(ch.PreviousJustifiedBlockRoot),
OptimisticStatus: ch.OptimisticStatus,
}
httputil.WriteJson(w, response)
}

View File

@@ -19,15 +19,11 @@ import (
dbTest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
mockstategen "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen/mock"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -231,11 +227,11 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) {
require.NoError(t, beaconState.SetBlockRoots(br))
att2.Data.Target.Root = rt[:]
att2.Data.BeaconBlockRoot = newRt[:]
err = beaconState.AppendPreviousEpochAttestations(&ethpb.PendingAttestation{
err = beaconState.AppendPreviousEpochAttestations(&eth.PendingAttestation{
Data: att1.Data, AggregationBits: bf, InclusionDelay: 1,
})
require.NoError(t, err)
err = beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{
err = beaconState.AppendCurrentEpochAttestations(&eth.PendingAttestation{
Data: att2.Data, AggregationBits: bf, InclusionDelay: 1,
})
require.NoError(t, err)
@@ -662,211 +658,3 @@ func TestServer_GetIndividualVotes_CapellaEndOfEpoch(t *testing.T) {
}
assert.DeepEqual(t, want, resp, "Unexpected response")
}
// ensures that if any of the checkpoints are zero-valued, an error will be generated without genesis being present
func TestServer_GetChainHead_NoGenesis(t *testing.T) {
db := dbTest.SetupDB(t)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
genBlock := util.NewBeaconBlock()
genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength)
util.SaveBlock(t, context.Background(), db, genBlock)
gRoot, err := genBlock.Block.HashTreeRoot()
require.NoError(t, err)
cases := []struct {
name string
zeroSetter func(val *ethpb.Checkpoint) error
}{
{
name: "zero-value prev justified",
zeroSetter: s.SetPreviousJustifiedCheckpoint,
},
{
name: "zero-value current justified",
zeroSetter: s.SetCurrentJustifiedCheckpoint,
},
{
name: "zero-value finalized",
zeroSetter: s.SetFinalizedCheckpoint,
},
}
finalized := &ethpb.Checkpoint{Epoch: 1, Root: gRoot[:]}
prevJustified := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
justified := &ethpb.Checkpoint{Epoch: 3, Root: gRoot[:]}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require.NoError(t, s.SetPreviousJustifiedCheckpoint(prevJustified))
require.NoError(t, s.SetCurrentJustifiedCheckpoint(justified))
require.NoError(t, s.SetFinalizedCheckpoint(finalized))
require.NoError(t, c.zeroSetter(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}))
})
wsb, err := blocks.NewSignedBeaconBlock(genBlock)
require.NoError(t, err)
s := &Server{
CoreService: &core.Service{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
FinalizedFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint(),
},
OptimisticModeFetcher: &chainMock.ChainService{},
},
}
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetChainHead(writer, request)
require.Equal(t, http.StatusInternalServerError, writer.Code)
require.StringContains(t, "could not get genesis block", writer.Body.String())
}
}
func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) {
db := dbTest.SetupDB(t)
bs, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, bs.SetSlot(1))
require.NoError(t, bs.SetPreviousJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
require.NoError(t, bs.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
require.NoError(t, bs.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
genBlock := util.NewBeaconBlock()
genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength)
util.SaveBlock(t, context.Background(), db, genBlock)
gRoot, err := genBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), gRoot))
wsb, err := blocks.NewSignedBeaconBlock(genBlock)
require.NoError(t, err)
s := &Server{
CoreService: &core.Service{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: bs},
FinalizedFetcher: &chainMock.ChainService{
FinalizedCheckPoint: bs.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: bs.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: bs.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
},
}
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetChainHead(writer, request)
require.Equal(t, http.StatusInternalServerError, writer.Code)
require.StringContains(t, "ould not get finalized block", writer.Body.String())
}
func TestServer_GetChainHead_NoHeadBlock(t *testing.T) {
s := &Server{
CoreService: &core.Service{
HeadFetcher: &chainMock.ChainService{Block: nil},
OptimisticModeFetcher: &chainMock.ChainService{},
},
}
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetChainHead(writer, request)
require.Equal(t, http.StatusNotFound, writer.Code)
require.StringContains(t, "head block of chain was nil", writer.Body.String())
}
func TestServer_GetChainHead(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
db := dbTest.SetupDB(t)
genBlock := util.NewBeaconBlock()
genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength)
util.SaveBlock(t, context.Background(), db, genBlock)
gRoot, err := genBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), gRoot))
finalizedBlock := util.NewBeaconBlock()
finalizedBlock.Block.Slot = 1
finalizedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
util.SaveBlock(t, context.Background(), db, finalizedBlock)
fRoot, err := finalizedBlock.Block.HashTreeRoot()
require.NoError(t, err)
justifiedBlock := util.NewBeaconBlock()
justifiedBlock.Block.Slot = 2
justifiedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)
util.SaveBlock(t, context.Background(), db, justifiedBlock)
jRoot, err := justifiedBlock.Block.HashTreeRoot()
require.NoError(t, err)
prevJustifiedBlock := util.NewBeaconBlock()
prevJustifiedBlock.Block.Slot = 3
prevJustifiedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)
util.SaveBlock(t, context.Background(), db, prevJustifiedBlock)
pjRoot, err := prevJustifiedBlock.Block.HashTreeRoot()
require.NoError(t, err)
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 1,
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 3, Root: pjRoot[:]},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 2, Root: jRoot[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: fRoot[:]},
})
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.Slot, err = slots.EpochStart(st.PreviousJustifiedCheckpoint().Epoch)
require.NoError(t, err)
b.Block.Slot++
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
s := &Server{
CoreService: &core.Service{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: st},
OptimisticModeFetcher: &chainMock.ChainService{},
FinalizedFetcher: &chainMock.ChainService{
FinalizedCheckPoint: st.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: st.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: st.PreviousJustifiedCheckpoint()},
},
}
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetChainHead(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
var ch *structs.ChainHead
err = json.NewDecoder(writer.Body).Decode(&ch)
require.NoError(t, err)
assert.Equal(t, "3", ch.PreviousJustifiedEpoch, "Unexpected PreviousJustifiedEpoch")
assert.Equal(t, "2", ch.JustifiedEpoch, "Unexpected JustifiedEpoch")
assert.Equal(t, "1", ch.FinalizedEpoch, "Unexpected FinalizedEpoch")
assert.Equal(t, "24", ch.PreviousJustifiedSlot, "Unexpected PreviousJustifiedSlot")
assert.Equal(t, "16", ch.JustifiedSlot, "Unexpected JustifiedSlot")
assert.Equal(t, "8", ch.FinalizedSlot, "Unexpected FinalizedSlot")
assert.DeepEqual(t, hexutil.Encode(pjRoot[:]), ch.PreviousJustifiedBlockRoot, "Unexpected PreviousJustifiedBlockRoot")
assert.DeepEqual(t, hexutil.Encode(jRoot[:]), ch.JustifiedBlockRoot, "Unexpected JustifiedBlockRoot")
assert.DeepEqual(t, hexutil.Encode(fRoot[:]), ch.FinalizedBlockRoot, "Unexpected FinalizedBlockRoot")
assert.Equal(t, false, ch.OptimisticStatus)
}

View File

@@ -19,12 +19,15 @@ go_library(
"//api/pagination:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/execution:go_default_library",

View File

@@ -7,12 +7,13 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/pagination"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
@@ -243,9 +244,91 @@ func (bs *Server) listBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksR
// the most recent finalized and justified slots.
// DEPRECATED: This endpoint is superseded by the /eth/v1/beacon API endpoint
func (bs *Server) GetChainHead(ctx context.Context, _ *emptypb.Empty) (*ethpb.ChainHead, error) {
ch, err := bs.CoreService.ChainHead(ctx)
if err != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not retrieve chain head: %v", err.Err)
}
return ch, nil
return bs.chainHeadRetrieval(ctx)
}
// Retrieve chain head information from the DB and the current beacon state.
func (bs *Server) chainHeadRetrieval(ctx context.Context) (*ethpb.ChainHead, error) {
headBlock, err := bs.HeadFetcher.HeadBlock(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get head block")
}
optimisticStatus, err := bs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get optimistic status")
}
if err := consensusblocks.BeaconBlockIsNil(headBlock); err != nil {
return nil, status.Errorf(codes.NotFound, "Head block of chain was nil: %v", err)
}
headBlockRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head block root: %v", err)
}
validGenesis := false
validateCP := func(cp *ethpb.Checkpoint, name string) error {
if bytesutil.ToBytes32(cp.Root) == params.BeaconConfig().ZeroHash && cp.Epoch == 0 {
if validGenesis {
return nil
}
// Retrieve genesis block in the event we have genesis checkpoints.
genBlock, err := bs.BeaconDB.GenesisBlock(ctx)
if err != nil || consensusblocks.BeaconBlockIsNil(genBlock) != nil {
return status.Error(codes.Internal, "Could not get genesis block")
}
validGenesis = true
return nil
}
b, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
return status.Errorf(codes.Internal, "Could not get %s block: %v", name, err)
}
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return status.Errorf(codes.Internal, "Could not get %s block: %v", name, err)
}
return nil
}
finalizedCheckpoint := bs.FinalizationFetcher.FinalizedCheckpt()
if err := validateCP(finalizedCheckpoint, "finalized"); err != nil {
return nil, err
}
justifiedCheckpoint := bs.FinalizationFetcher.CurrentJustifiedCheckpt()
if err := validateCP(justifiedCheckpoint, "justified"); err != nil {
return nil, err
}
prevJustifiedCheckpoint := bs.FinalizationFetcher.PreviousJustifiedCheckpt()
if err := validateCP(prevJustifiedCheckpoint, "prev justified"); err != nil {
return nil, err
}
fSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not get epoch start slot from finalized checkpoint epoch")
}
jSlot, err := slots.EpochStart(justifiedCheckpoint.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not get epoch start slot from justified checkpoint epoch")
}
pjSlot, err := slots.EpochStart(prevJustifiedCheckpoint.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not get epoch start slot from prev justified checkpoint epoch")
}
return &ethpb.ChainHead{
HeadSlot: headBlock.Block().Slot(),
HeadEpoch: slots.ToEpoch(headBlock.Block().Slot()),
HeadBlockRoot: headBlockRoot[:],
FinalizedSlot: fSlot,
FinalizedEpoch: finalizedCheckpoint.Epoch,
FinalizedBlockRoot: finalizedCheckpoint.Root,
JustifiedSlot: jSlot,
JustifiedEpoch: justifiedCheckpoint.Epoch,
JustifiedBlockRoot: justifiedCheckpoint.Root,
PreviousJustifiedSlot: pjSlot,
PreviousJustifiedEpoch: prevJustifiedCheckpoint.Epoch,
PreviousJustifiedBlockRoot: prevJustifiedCheckpoint.Root,
OptimisticStatus: optimisticStatus,
}, nil
}

View File

@@ -8,7 +8,6 @@ import (
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
dbTest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -69,19 +68,16 @@ func TestServer_GetChainHead_NoGenesis(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(genBlock)
require.NoError(t, err)
bs := &Server{
CoreService: &core.Service{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
FinalizedFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint(),
},
OptimisticModeFetcher: &chainMock.ChainService{},
},
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
FinalizationFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}
_, err = bs.GetChainHead(context.Background(), nil)
require.ErrorContains(t, "could not get genesis block", err)
require.ErrorContains(t, "Could not get genesis block", err)
}
}
@@ -106,30 +102,26 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) {
require.NoError(t, err)
bs := &Server{
CoreService: &core.Service{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
FinalizedFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
},
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
FinalizationFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
OptimisticModeFetcher: &chainMock.ChainService{},
}
_, err = bs.GetChainHead(context.Background(), nil)
require.ErrorContains(t, "could not get finalized block", err)
require.ErrorContains(t, "Could not get finalized block", err)
}
func TestServer_GetChainHead_NoHeadBlock(t *testing.T) {
bs := &Server{
CoreService: &core.Service{
HeadFetcher: &chainMock.ChainService{Block: nil},
OptimisticModeFetcher: &chainMock.ChainService{},
},
HeadFetcher: &chainMock.ChainService{Block: nil},
OptimisticModeFetcher: &chainMock.ChainService{},
}
_, err := bs.GetChainHead(context.Background(), nil)
assert.ErrorContains(t, "head block of chain was nil", err)
assert.ErrorContains(t, "Head block of chain was nil", err)
}
func TestServer_GetChainHead(t *testing.T) {
@@ -180,15 +172,13 @@ func TestServer_GetChainHead(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
bs := &Server{
CoreService: &core.Service{
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
OptimisticModeFetcher: &chainMock.ChainService{},
FinalizedFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
},
BeaconDB: db,
HeadFetcher: &chainMock.ChainService{Block: wsb, State: s},
OptimisticModeFetcher: &chainMock.ChainService{},
FinalizationFetcher: &chainMock.ChainService{
FinalizedCheckPoint: s.FinalizedCheckpoint(),
CurrentJustifiedCheckPoint: s.CurrentJustifiedCheckpoint(),
PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
}
head, err := bs.GetChainHead(context.Background(), nil)

View File

@@ -7,9 +7,12 @@ import (
"strconv"
"github.com/prysmaticlabs/prysm/v5/api/pagination"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/cmd"
@@ -389,7 +392,7 @@ func (bs *Server) GetValidator(
func (bs *Server) GetValidatorActiveSetChanges(
ctx context.Context, req *ethpb.GetValidatorActiveSetChangesRequest,
) (*ethpb.ActiveSetChanges, error) {
currentEpoch := slots.ToEpoch(bs.CoreService.GenesisTimeFetcher.CurrentSlot())
currentEpoch := slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
var requestedEpoch primitives.Epoch
switch q := req.QueryFilter.(type) {
@@ -400,12 +403,72 @@ func (bs *Server) GetValidatorActiveSetChanges(
default:
requestedEpoch = currentEpoch
}
as, err := bs.CoreService.ValidatorActiveSetChanges(ctx, requestedEpoch)
if err != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not retrieve validator active set changes: %v", err.Err)
if requestedEpoch > currentEpoch {
return nil, status.Errorf(
codes.InvalidArgument,
errEpoch,
currentEpoch,
requestedEpoch,
)
}
return as, nil
s, err := slots.EpochStart(requestedEpoch)
if err != nil {
return nil, err
}
requestedState, err := bs.ReplayerBuilder.ReplayerForSlot(s).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", s, err))
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, requestedState, coreTime.CurrentEpoch(requestedState))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get active validator count: %v", err)
}
vs := requestedState.Validators()
activatedIndices := validators.ActivatedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
exitedIndices, err := validators.ExitedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs, activeValidatorCount)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine exited validator indices: %v", err)
}
slashedIndices := validators.SlashedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
ejectedIndices, err := validators.EjectedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs, activeValidatorCount)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine ejected validator indices: %v", err)
}
// Retrieve public keys for the indices.
activatedKeys := make([][]byte, len(activatedIndices))
exitedKeys := make([][]byte, len(exitedIndices))
slashedKeys := make([][]byte, len(slashedIndices))
ejectedKeys := make([][]byte, len(ejectedIndices))
for i, idx := range activatedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
activatedKeys[i] = pubkey[:]
}
for i, idx := range exitedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
exitedKeys[i] = pubkey[:]
}
for i, idx := range slashedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
slashedKeys[i] = pubkey[:]
}
for i, idx := range ejectedIndices {
pubkey := requestedState.PubkeyAtIndex(idx)
ejectedKeys[i] = pubkey[:]
}
return &ethpb.ActiveSetChanges{
Epoch: requestedEpoch,
ActivatedPublicKeys: activatedKeys,
ActivatedIndices: activatedIndices,
ExitedPublicKeys: exitedKeys,
ExitedIndices: exitedIndices,
SlashedPublicKeys: slashedKeys,
SlashedIndices: slashedIndices,
EjectedPublicKeys: ejectedKeys,
EjectedIndices: ejectedIndices,
}, nil
}
// GetValidatorParticipation retrieves the validator participation information for a given epoch,
@@ -414,7 +477,7 @@ func (bs *Server) GetValidatorActiveSetChanges(
func (bs *Server) GetValidatorParticipation(
ctx context.Context, req *ethpb.GetValidatorParticipationRequest,
) (*ethpb.ValidatorParticipationResponse, error) {
currentSlot := bs.CoreService.GenesisTimeFetcher.CurrentSlot()
currentSlot := bs.GenesisTimeFetcher.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
var requestedEpoch primitives.Epoch
@@ -426,11 +489,79 @@ func (bs *Server) GetValidatorParticipation(
default:
requestedEpoch = currentEpoch
}
vp, err := bs.CoreService.ValidatorParticipation(ctx, requestedEpoch)
if err != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not retrieve validator participation: %v", err.Err)
if requestedEpoch > currentEpoch {
return nil, status.Errorf(
codes.InvalidArgument,
"Cannot retrieve information about an epoch greater than current epoch, current epoch %d, requesting %d",
currentEpoch,
requestedEpoch,
)
}
return vp, nil
// Use the last slot of requested epoch to obtain current and previous epoch attestations.
// This ensures that we don't miss previous-epoch attestations for the requested epoch.
endSlot, err := slots.EpochEnd(requestedEpoch)
if err != nil {
return nil, err
}
// Get as close as we can to the end of the current epoch without going past the current slot.
// The above check ensures a future *epoch* isn't requested, but the end slot of the requested epoch could still
// be past the current slot. In that case, use the current slot as the best approximation of the requested epoch.
// Replayer will make sure the slot ultimately used is canonical.
if endSlot > currentSlot {
endSlot = currentSlot
}
// ReplayerBuilder ensures that a canonical chain is followed to the slot
beaconState, err := bs.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", endSlot, err))
}
var v []*precompute.Validator
var b *precompute.Balance
if beaconState.Version() == version.Phase0 {
v, b, err = precompute.New(ctx, beaconState)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up pre compute instance: %v", err)
}
_, b, err = precompute.ProcessAttestations(ctx, beaconState, v, b)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
} else if beaconState.Version() >= version.Altair {
v, b, err = altair.InitializePrecomputeValidators(ctx, beaconState)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up altair pre compute instance: %v", err)
}
_, b, err = altair.ProcessEpochParticipation(ctx, beaconState, b, v)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
} else {
return nil, status.Errorf(codes.Internal, "Invalid state type retrieved with a version of %d", beaconState.Version())
}
cp := bs.FinalizationFetcher.FinalizedCheckpt()
p := &ethpb.ValidatorParticipationResponse{
Epoch: requestedEpoch,
Finalized: requestedEpoch <= cp.Epoch,
Participation: &ethpb.ValidatorParticipation{
// TODO(7130): Remove these three deprecated fields.
GlobalParticipationRate: float32(b.PrevEpochTargetAttested) / float32(b.ActivePrevEpoch),
VotedEther: b.PrevEpochTargetAttested,
EligibleEther: b.ActivePrevEpoch,
CurrentEpochActiveGwei: b.ActiveCurrentEpoch,
CurrentEpochAttestingGwei: b.CurrentEpochAttested,
CurrentEpochTargetAttestingGwei: b.CurrentEpochTargetAttested,
PreviousEpochActiveGwei: b.ActivePrevEpoch,
PreviousEpochAttestingGwei: b.PrevEpochAttested,
PreviousEpochTargetAttestingGwei: b.PrevEpochTargetAttested,
PreviousEpochHeadAttestingGwei: b.PrevEpochHeadAttested,
},
}
return p, nil
}
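For intuition on the deprecated GlobalParticipationRate field above, a minimal arithmetic sketch in Go (balances are invented example values, in gwei):
package main

import "fmt"

func main() {
	// Assumed example balances: 96 ETH of previous-epoch target-attesting
	// balance out of 128 ETH active in the previous epoch.
	prevEpochTargetAttested := uint64(96_000_000_000) // gwei
	activePrevEpoch := uint64(128_000_000_000)        // gwei
	rate := float32(prevEpochTargetAttested) / float32(activePrevEpoch)
	fmt.Println(rate) // 0.75
}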
// GetValidatorQueue retrieves the current validator queue information.

View File

@@ -54,13 +54,11 @@ func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing
require.NoError(t, err)
require.NoError(t, st.SetSlot(0))
bs := &Server{
CoreService: &core.Service{
BeaconDB: beaconDB,
GenesisTimeFetcher: &mock.ChainService{},
HeadFetcher: &mock.ChainService{
State: st,
},
GenesisTimeFetcher: &mock.ChainService{},
HeadFetcher: &mock.ChainService{
State: st,
},
BeaconDB: beaconDB,
}
wanted := errNoEpochInfoError
@@ -68,7 +66,7 @@ func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing
ctx,
&ethpb.GetValidatorActiveSetChangesRequest{
QueryFilter: &ethpb.GetValidatorActiveSetChangesRequest_Epoch{
Epoch: slots.ToEpoch(bs.CoreService.GenesisTimeFetcher.CurrentSlot()) + 1,
Epoch: slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
},
},
)
@@ -1031,7 +1029,7 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(0)
epochs := primitives.Epoch(10)
epochs := 10
numVals := uint64(10)
beaconDB := dbTest.SetupDB(t)
@@ -1067,7 +1065,7 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
}
res, err := bs.ListValidators(context.Background(), req)
require.NoError(t, err)
assert.Equal(t, int(numVals), len(res.ValidatorList))
assert.Equal(t, epochs, len(res.ValidatorList))
vals := st.Validators()
want := make([]*ethpb.Validators_ValidatorContainer, 0)
@@ -1079,7 +1077,7 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
}
req = &ethpb.ListValidatorsRequest{
QueryFilter: &ethpb.ListValidatorsRequest_Epoch{
Epoch: epochs,
Epoch: 10,
},
}
res, err = bs.ListValidators(context.Background(), req)
@@ -1285,12 +1283,10 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
bs := &Server{
CoreService: &core.Service{
FinalizedFetcher: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
},
GenesisTimeFetcher: &mock.ChainService{},
FinalizationFetcher: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
},
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetValidatorActiveSetChanges(ctx, &ethpb.GetValidatorActiveSetChangesRequest{
@@ -1480,25 +1476,27 @@ func TestServer_GetValidatorQueue_PendingExit(t *testing.T) {
}
func TestServer_GetValidatorParticipation_CannotRequestFutureEpoch(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(0))
bs := &Server{
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{},
BeaconDB: beaconDB,
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{},
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
}
wanted := "cannot retrieve information about an epoch"
wanted := "Cannot retrieve information about an epoch"
_, err = bs.GetValidatorParticipation(
ctx,
&ethpb.GetValidatorParticipationRequest{
QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{
Epoch: slots.ToEpoch(bs.CoreService.GenesisTimeFetcher.CurrentSlot()) + 1,
Epoch: slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
},
},
)
@@ -1551,20 +1549,18 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
CoreService: &core.Service{
HeadFetcher: m,
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
BeaconDB: beaconDB,
HeadFetcher: m,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bRoot: true,
},
},
FinalizationFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -1632,20 +1628,18 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
CoreService: &core.Service{
HeadFetcher: m,
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
BeaconDB: beaconDB,
HeadFetcher: m,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bRoot: true,
},
},
FinalizationFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -1750,15 +1744,13 @@ func runGetValidatorParticipationCurrentAndPrevEpoch(t *testing.T, genState stat
m := &mock.ChainService{State: genState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
BeaconDB: beaconDB,
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
},
BeaconDB: beaconDB,
HeadFetcher: m,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizationFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
}
addDefaultReplayerBuilder(bs, beaconDB)

View File

@@ -128,12 +128,13 @@ func TestNodeServer_GetPeer(t *testing.T) {
}
ethpb.RegisterNodeServer(server, ns)
reflection.Register(server)
firstPeer := peersProvider.Peers().All()[0]
res, err := ns.GetPeer(context.Background(), &ethpb.PeerRequest{PeerId: mockP2p.MockRawPeerId0})
res, err := ns.GetPeer(context.Background(), &ethpb.PeerRequest{PeerId: firstPeer.String()})
require.NoError(t, err)
assert.Equal(t, "16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR" /* first peer's raw id */, res.PeerId, "Unexpected peer ID")
assert.Equal(t, firstPeer.String(), res.PeerId, "Unexpected peer ID")
assert.Equal(t, int(ethpb.PeerDirection_INBOUND), int(res.Direction), "Expected 1st peer to be an inbound connection")
assert.Equal(t, int(ethpb.ConnectionState_CONNECTED), int(res.ConnectionState), "Expected peer to be connected")
assert.Equal(t, ethpb.ConnectionState_CONNECTED, res.ConnectionState, "Expected peer to be connected")
}
func TestNodeServer_ListPeers(t *testing.T) {
@@ -148,25 +149,8 @@ func TestNodeServer_ListPeers(t *testing.T) {
res, err := ns.ListPeers(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 2, len(res.Peers))
var (
firstPeer *ethpb.Peer
secondPeer *ethpb.Peer
)
for _, p := range res.Peers {
if p.PeerId == mockP2p.MockRawPeerId0 {
firstPeer = p
}
if p.PeerId == mockP2p.MockRawPeerId1 {
secondPeer = p
}
}
assert.NotNil(t, firstPeer)
assert.NotNil(t, secondPeer)
assert.Equal(t, int(ethpb.PeerDirection_INBOUND), int(firstPeer.Direction))
assert.Equal(t, int(ethpb.PeerDirection_OUTBOUND), int(secondPeer.Direction))
assert.Equal(t, int(ethpb.PeerDirection_INBOUND), int(res.Peers[0].Direction))
assert.Equal(t, ethpb.PeerDirection_OUTBOUND, res.Peers[1].Direction)
}
func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {

View File

@@ -66,12 +66,18 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, att *ethpb.Atte
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
defer span.End()
committeeIndex, err := att.GetCommitteeIndex()
if err != nil {
return nil, err
if att.GetData().CommitteeIndex != 0 {
return nil, status.Errorf(codes.InvalidArgument, "Committee index must be set to 0")
}
committeeIndices := helpers.CommitteeIndices(att.CommitteeBits)
if len(committeeIndices) == 0 {
return nil, status.Errorf(codes.InvalidArgument, "Committee bits has no bit set")
}
if len(committeeIndices) > 1 {
return nil, status.Errorf(codes.InvalidArgument, "Committee bits has more than one bit set")
}
resp, err := vs.proposeAtt(ctx, att, committeeIndex)
resp, err := vs.proposeAtt(ctx, att, committeeIndices[0])
if err != nil {
return nil, err
}
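For reference, a minimal sketch of an attestation that passes the checks above: the data's CommitteeIndex stays 0 and exactly one committee bit is set. It assumes the Prysm packages already used in this diff (ethpb, primitives, helpers) and go-bitfield's SetBitAt on the committee bit vector.
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	cb := primitives.NewAttestationCommitteeBits()
	cb.SetBitAt(3, true) // this attestation votes for committee 3 only
	att := &ethpb.AttestationElectra{
		Data:          &ethpb.AttestationData{Slot: 10, CommitteeIndex: 0},
		CommitteeBits: cb,
	}
	fmt.Println(helpers.CommitteeIndices(att.CommitteeBits)) // [3]
}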

View File

@@ -113,7 +113,7 @@ func TestProposeAttestation(t *testing.T) {
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "attestation data's committee index must be 0 but was 1", err)
assert.ErrorContains(t, "Committee index must be set to 0", err)
})
t.Run("Electra - no committee bit set", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
@@ -131,7 +131,7 @@ func TestProposeAttestation(t *testing.T) {
CommitteeBits: primitives.NewAttestationCommitteeBits(),
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "exactly 1 committee index must be set but 0 were set", err)
assert.ErrorContains(t, "Committee bits has no bit set", err)
})
t.Run("Electra - multiple committee bits set", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
@@ -152,7 +152,7 @@ func TestProposeAttestation(t *testing.T) {
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "exactly 1 committee index must be set but 2 were set", err)
assert.ErrorContains(t, "Committee bits has more than one bit set", err)
})
}

View File

@@ -277,8 +277,8 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
var sidecars []*ethpb.BlobSidecar
if block.IsBlinded() {
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
} else if block.Version() >= version.Deneb {
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
} else {
sidecars, err = vs.handleUnblindedBlock(block, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -345,12 +345,13 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
return copiedBlock, sidecars, nil
}
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
rawBlobs, proofs, err := blobsAndProofs(req)
if err != nil {
return nil, err
// handleUnblindedBlock processes unblinded beacon blocks.
func (vs *Server) handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
dbBlockContents := req.GetDeneb()
if dbBlockContents == nil {
return nil, nil
}
return BuildBlobSidecars(block, rawBlobs, proofs)
return BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
@@ -501,16 +502,3 @@ func (vs *Server) SubmitValidatorRegistrations(ctx context.Context, reg *ethpb.S
return &emptypb.Empty{}, nil
}
func blobsAndProofs(req *ethpb.GenericSignedBeaconBlock) ([][]byte, [][]byte, error) {
switch {
case req.GetDeneb() != nil:
dbBlockContents := req.GetDeneb()
return dbBlockContents.Blobs, dbBlockContents.KzgProofs, nil
case req.GetElectra() != nil:
dbBlockContents := req.GetElectra()
return dbBlockContents.Blobs, dbBlockContents.KzgProofs, nil
default:
return nil, nil, errors.Errorf("unknown request type provided: %T", req)
}
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -113,12 +112,18 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
if err != nil {
return nil, err
}
sorted, err := deduped.sort()
sorted, err := deduped.sortByProfitability()
if err != nil {
return nil, err
}
atts = sorted.limitToMaxAttestations()
return vs.filterAttestationBySignature(ctx, atts, latestState)
atts, err = vs.filterAttestationBySignature(ctx, atts, latestState)
if err != nil {
return nil, err
}
return atts, nil
}
// filter separates attestation list into two groups: valid and invalid attestations.
@@ -138,6 +143,14 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
return validAtts, invalidAtts
}
// sortByProfitability orders attestations by highest slot and by highest aggregation bit count.
func (a proposerAtts) sortByProfitability() (proposerAtts, error) {
if len(a) < 2 {
return a, nil
}
return a.sortByProfitabilityUsingMaxCover()
}
// sortByProfitabilityUsingMaxCover orders attestations by highest slot and by highest aggregation bit count.
// Duplicate bits are counted only once, using max-cover algorithm.
func (a proposerAtts) sortByProfitabilityUsingMaxCover() (proposerAtts, error) {
@@ -205,143 +218,6 @@ func (a proposerAtts) sortByProfitabilityUsingMaxCover() (proposerAtts, error) {
return sortedAtts, nil
}
// sort attestations as follows:
//
// - all attestations selected by max-cover are taken, leftover attestations are discarded
// (with current parameters all bits of a leftover attestation are already covered by selected attestations)
// - selected attestations are ordered by slot, with higher slot coming first
// - within a slot, all top attestations (one per committee) are ordered before any second-best attestations, second-best before third-best etc.
// - within top/second-best/etc. attestations (one per committee), attestations are ordered by bit count, with higher bit count coming first
func (a proposerAtts) sort() (proposerAtts, error) {
if len(a) < 2 {
return a, nil
}
if features.Get().EnableCommitteeAwarePacking {
return a.sortBySlotAndCommittee()
}
return a.sortByProfitabilityUsingMaxCover()
}
// Separate attestations by slot, as slot number takes higher precedence when sorting.
// Also separate by committee index because max-cover will prefer attestations for the same
// committee with disjoint bits over attestations for different committees with overlapping
// bits, even though same bits for different committees are separate votes.
func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
type slotAtts struct {
candidates map[primitives.CommitteeIndex]proposerAtts
selected map[primitives.CommitteeIndex]proposerAtts
leftover map[primitives.CommitteeIndex]proposerAtts
}
var slots []primitives.Slot
attsBySlot := map[primitives.Slot]*slotAtts{}
for _, att := range a {
slot := att.GetData().Slot
ci := att.GetData().CommitteeIndex
if _, ok := attsBySlot[slot]; !ok {
attsBySlot[slot] = &slotAtts{}
attsBySlot[slot].candidates = make(map[primitives.CommitteeIndex]proposerAtts)
slots = append(slots, slot)
}
attsBySlot[slot].candidates[ci] = append(attsBySlot[slot].candidates[ci], att)
}
var err error
for _, sa := range attsBySlot {
sa.selected = make(map[primitives.CommitteeIndex]proposerAtts)
sa.leftover = make(map[primitives.CommitteeIndex]proposerAtts)
for ci, committeeAtts := range sa.candidates {
sa.selected[ci], err = committeeAtts.sortByProfitabilityUsingMaxCover_committeeAwarePacking()
if err != nil {
return nil, err
}
}
}
var sortedAtts proposerAtts
sort.Slice(slots, func(i, j int) bool {
return slots[i] > slots[j]
})
for _, slot := range slots {
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].selected)...)
}
for _, slot := range slots {
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].leftover)...)
}
return sortedAtts, nil
}
// sortByProfitabilityUsingMaxCover_committeeAwarePacking orders attestations by highest aggregation bit count.
// Duplicate bits are counted only once, using max-cover algorithm.
func (a proposerAtts) sortByProfitabilityUsingMaxCover_committeeAwarePacking() (proposerAtts, error) {
if len(a) < 2 {
return a, nil
}
candidates := make([]*bitfield.Bitlist64, len(a))
for i := 0; i < len(a); i++ {
var err error
candidates[i], err = a[i].GetAggregationBits().ToBitlist64()
if err != nil {
return nil, err
}
}
// Add selected candidates on top, those that are not selected - append at bottom.
selectedKeys, _, err := aggregation.MaxCover(candidates, len(candidates), true /* allowOverlaps */)
if err != nil {
log.WithError(err).Debug("MaxCover aggregation failed")
return a, nil
}
// Pick selected attestations first, leftover attestations will be appended at the end.
// Both lists will be sorted by number of bits set.
selected := make(proposerAtts, selectedKeys.Count())
for i, key := range selectedKeys.BitIndices() {
selected[i] = a[key]
}
sort.Slice(selected, func(i, j int) bool {
return selected[i].GetAggregationBits().Count() > selected[j].GetAggregationBits().Count()
})
return selected, nil
}
// sortSlotAttestations assumes each proposerAtts value in the map is ordered by profitability.
// The function takes the first attestation from each value, orders these attestations by bit count
// and places them at the start of the resulting slice. It then takes the second attestation for each value,
// orders these attestations by bit count and appends them to the end.
// It continues this pattern until all attestations are processed.
func sortSlotAttestations(slotAtts map[primitives.CommitteeIndex]proposerAtts) proposerAtts {
attCount := 0
for _, committeeAtts := range slotAtts {
attCount += len(committeeAtts)
}
sorted := make([]ethpb.Att, 0, attCount)
processedCount := 0
index := 0
for processedCount < attCount {
var atts []ethpb.Att
for _, committeeAtts := range slotAtts {
if len(committeeAtts) > index {
atts = append(atts, committeeAtts[index])
}
}
sort.Slice(atts, func(i, j int) bool {
return atts[i].GetAggregationBits().Count() > atts[j].GetAggregationBits().Count()
})
sorted = append(sorted, atts...)
processedCount += len(atts)
index++
}
return sorted
}
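To make the round-robin ordering described above concrete, here is a toy, self-contained illustration using plain bit counts instead of attestations (not the actual helper, just the pattern it documents): rank-1 entries across committees come first, ordered by bit count, then rank-2 entries, and so on.
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Bit counts per committee, already ordered best-first within a committee.
	byCommittee := map[int][]int{
		1: {6, 2}, // committee 1: attestations with 6 and 2 bits set
		2: {4, 3}, // committee 2: attestations with 4 and 3 bits set
	}
	var out []int
	for rank := 0; ; rank++ {
		var round []int
		for _, counts := range byCommittee {
			if rank < len(counts) {
				round = append(round, counts[rank])
			}
		}
		if len(round) == 0 {
			break
		}
		sort.Sort(sort.Reverse(sort.IntSlice(round)))
		out = append(out, round...)
	}
	fmt.Println(out) // [6 4 3 2]
}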
// limitToMaxAttestations limits attestations to maximum attestations per block.
func (a proposerAtts) limitToMaxAttestations() proposerAtts {
if len(a) == 0 {

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/go-bitfield"
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
@@ -20,7 +19,31 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestProposer_ProposerAtts_sort(t *testing.T) {
func TestProposer_ProposerAtts_sortByProfitability(t *testing.T) {
atts := proposerAtts([]ethpb.Att{
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11100000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11000000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11100000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11110000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11100000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11000000}}),
})
want := proposerAtts([]ethpb.Att{
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11110000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11100000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11000000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11100000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11100000}}),
util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11000000}}),
})
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
}
func TestProposer_ProposerAtts_sortByProfitabilityUsingMaxCover(t *testing.T) {
type testData struct {
slot primitives.Slot
bits bitfield.Bitlist
@@ -37,7 +60,7 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
t.Run("no atts", func(t *testing.T) {
atts := getAtts([]testData{})
want := getAtts([]testData{})
atts, err := atts.sort()
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
@@ -51,7 +74,7 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
want := getAtts([]testData{
{4, bitfield.Bitlist{0b11100000, 0b1}},
})
atts, err := atts.sort()
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
@@ -67,7 +90,7 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
{4, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
})
atts, err := atts.sort()
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
@@ -85,7 +108,7 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
{4, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
})
atts, err := atts.sort()
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
@@ -106,7 +129,7 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
{1, bitfield.Bitlist{0b00001100, 0b1}},
{1, bitfield.Bitlist{0b11001000, 0b1}},
})
atts, err := atts.sort()
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
@@ -131,14 +154,14 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
{1, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
})
atts, err := atts.sort()
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
t.Run("follows max-cover", func(t *testing.T) {
t.Run("selected and non selected atts sorted by bit count", func(t *testing.T) {
// Items at slot 4 must first be split into two lists by max-cover, with
// 0b10000011 scoring higher (as it provides more info in addition to already selected
// attestations) than 0b11100001 (despite naive bit count suggesting otherwise). Then,
@@ -163,7 +186,7 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
{1, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
})
atts, err := atts.sort()
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
@@ -171,241 +194,6 @@ func TestProposer_ProposerAtts_sort(t *testing.T) {
})
}
func TestProposer_ProposerAtts_committeeAwareSort(t *testing.T) {
type testData struct {
slot primitives.Slot
bits bitfield.Bitlist
}
getAtts := func(data []testData) proposerAtts {
var atts proposerAtts
for _, att := range data {
atts = append(atts, util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: att.slot}, AggregationBits: att.bits}))
}
return atts
}
t.Run("no atts", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
atts := getAtts([]testData{})
want := getAtts([]testData{})
atts, err := atts.sort()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
t.Run("single att", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
atts := getAtts([]testData{
{4, bitfield.Bitlist{0b11100000, 0b1}},
})
want := getAtts([]testData{
{4, bitfield.Bitlist{0b11100000, 0b1}},
})
atts, err := atts.sort()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
t.Run("single att per slot", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
atts := getAtts([]testData{
{1, bitfield.Bitlist{0b11000000, 0b1}},
{4, bitfield.Bitlist{0b11100000, 0b1}},
})
want := getAtts([]testData{
{4, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
})
atts, err := atts.sort()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
t.Run("two atts on one of the slots", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
atts := getAtts([]testData{
{1, bitfield.Bitlist{0b11000000, 0b1}},
{4, bitfield.Bitlist{0b11100000, 0b1}},
{4, bitfield.Bitlist{0b11110000, 0b1}},
})
want := getAtts([]testData{
{4, bitfield.Bitlist{0b11110000, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
})
atts, err := atts.sort()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
t.Run("compare to native sort", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
// The max-cover based approach will select 0b00001100 instead, despite lower bit count
// (since it has two new/unknown bits).
t.Run("max-cover", func(t *testing.T) {
atts := getAtts([]testData{
{1, bitfield.Bitlist{0b11000011, 0b1}},
{1, bitfield.Bitlist{0b11001000, 0b1}},
{1, bitfield.Bitlist{0b00001100, 0b1}},
})
want := getAtts([]testData{
{1, bitfield.Bitlist{0b11000011, 0b1}},
{1, bitfield.Bitlist{0b00001100, 0b1}},
})
atts, err := atts.sort()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
})
t.Run("multiple slots", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
atts := getAtts([]testData{
{2, bitfield.Bitlist{0b11100000, 0b1}},
{4, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
{4, bitfield.Bitlist{0b11110000, 0b1}},
{1, bitfield.Bitlist{0b11100000, 0b1}},
{3, bitfield.Bitlist{0b11000000, 0b1}},
})
want := getAtts([]testData{
{4, bitfield.Bitlist{0b11110000, 0b1}},
{3, bitfield.Bitlist{0b11000000, 0b1}},
{2, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11100000, 0b1}},
})
atts, err := atts.sort()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
t.Run("follows max-cover", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
// Items at slot 4 must be first split into two lists by max-cover, with
// 0b10000011 being selected and 0b11100001 being leftover (despite naive bit count suggesting otherwise).
atts := getAtts([]testData{
{4, bitfield.Bitlist{0b00000001, 0b1}},
{4, bitfield.Bitlist{0b11100001, 0b1}},
{1, bitfield.Bitlist{0b11000000, 0b1}},
{2, bitfield.Bitlist{0b11100000, 0b1}},
{4, bitfield.Bitlist{0b10000011, 0b1}},
{4, bitfield.Bitlist{0b11111000, 0b1}},
{1, bitfield.Bitlist{0b11100000, 0b1}},
{3, bitfield.Bitlist{0b11000000, 0b1}},
})
want := getAtts([]testData{
{4, bitfield.Bitlist{0b11111000, 0b1}},
{4, bitfield.Bitlist{0b10000011, 0b1}},
{3, bitfield.Bitlist{0b11000000, 0b1}},
{2, bitfield.Bitlist{0b11100000, 0b1}},
{1, bitfield.Bitlist{0b11100000, 0b1}},
})
atts, err := atts.sort()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
}
func TestProposer_sort_DifferentCommittees(t *testing.T) {
t.Run("one att per committee", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
c1_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11111000, 0b1}, Data: &ethpb.AttestationData{CommitteeIndex: 1}})
c2_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11100000, 0b1}, Data: &ethpb.AttestationData{CommitteeIndex: 2}})
atts := proposerAtts{c1_a1, c2_a1}
atts, err := atts.sort()
require.NoError(t, err)
want := proposerAtts{c1_a1, c2_a1}
assert.DeepEqual(t, want, atts)
})
t.Run("multiple atts per committee", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
c1_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11111100, 0b1}, Data: &ethpb.AttestationData{CommitteeIndex: 1}})
c1_a2 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000010, 0b1}, Data: &ethpb.AttestationData{CommitteeIndex: 1}})
c2_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11110000, 0b1}, Data: &ethpb.AttestationData{CommitteeIndex: 2}})
c2_a2 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11100000, 0b1}, Data: &ethpb.AttestationData{CommitteeIndex: 2}})
atts := proposerAtts{c1_a1, c1_a2, c2_a1, c2_a2}
atts, err := atts.sort()
require.NoError(t, err)
want := proposerAtts{c1_a1, c2_a1, c1_a2}
assert.DeepEqual(t, want, atts)
})
t.Run("multiple atts per committee, multiple slots", func(t *testing.T) {
feat := features.Get()
feat.EnableCommitteeAwarePacking = true
reset := features.InitWithReset(feat)
defer reset()
s2_c1_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11111100, 0b1}, Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 1}})
s2_c1_a2 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000010, 0b1}, Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 1}})
s2_c2_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11110000, 0b1}, Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 2}})
s2_c2_a2 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11000000, 0b1}, Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 2}})
s1_c1_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11111100, 0b1}, Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}})
s1_c1_a2 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000010, 0b1}, Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}})
s1_c2_a1 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11110000, 0b1}, Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 2}})
s1_c2_a2 := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11000000, 0b1}, Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 2}})
// Arrange in some random order
atts := proposerAtts{s1_c1_a1, s2_c1_a2, s1_c2_a2, s2_c2_a2, s1_c2_a1, s2_c2_a1, s1_c1_a2, s2_c1_a1}
atts, err := atts.sort()
require.NoError(t, err)
want := proposerAtts{s2_c1_a1, s2_c2_a1, s2_c1_a2, s1_c1_a1, s1_c2_a1, s1_c1_a2}
assert.DeepEqual(t, want, atts)
})
}
func TestProposer_ProposerAtts_dedup(t *testing.T) {
data1 := util.HydrateAttestationData(&ethpb.AttestationData{
Slot: 4,

View File

@@ -96,27 +96,6 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
// Compare payload values between local and builder. Default to the local value if it is higher.
localValueGwei := primitives.WeiToGwei(local.Bid)
builderValueGwei := primitives.WeiToGwei(bid.Value())
minBid := primitives.Gwei(params.BeaconConfig().MinBuilderBid)
// Use local block if min bid is not attained
if builderValueGwei < minBid {
log.WithFields(logrus.Fields{
"minBuilderBid": minBid,
"builderGweiValue": builderValueGwei,
}).Warn("Proposer: using local execution payload because min bid not attained")
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
}
// Use local block if min difference is not attained
minDiff := localValueGwei + primitives.Gwei(params.BeaconConfig().MinBuilderDiff)
if builderValueGwei < minDiff {
log.WithFields(logrus.Fields{
"localGweiValue": localValueGwei,
"minBidDiff": minDiff,
"builderGweiValue": builderValueGwei,
}).Warn("Proposer: using local execution payload because min difference with local value was not attained")
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
}
// Use builder payload if the following is true:
// builder_bid_value * builderBoostFactor(default 100) > local_block_value * (local-block-value-boost + 100)
boost := primitives.Gwei(params.BeaconConfig().LocalBlockValueBoost)
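The builder-vs-local decision above boils down to integer arithmetic; a self-contained sketch (shouldUseBuilder and all constants are invented for illustration, mirroring the comments in this hunk rather than the real setExecutionData code):
package main

import "fmt"

// shouldUseBuilder mirrors the checks above: min bid, min difference with the local value,
// then builder_bid_value * builderBoostFactor > local_block_value * (local-block-value-boost + 100).
func shouldUseBuilder(localGwei, builderGwei, minBid, minDiff, boostFactor, localBoost uint64) bool {
	if builderGwei < minBid { // min bid not attained -> use local payload
		return false
	}
	if builderGwei < localGwei+minDiff { // min difference not attained -> use local payload
		return false
	}
	return builderGwei*boostFactor > localGwei*(localBoost+100)
}

func main() {
	// Example: local bid 2 gwei, builder bid 3 gwei, default boost factor 100,
	// local block value boost 10%: 3*100 = 300 > 2*(10+100) = 220, so the builder wins.
	fmt.Println(shouldUseBuilder(2, 3, 0, 0, 100, 10)) // true
}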
@@ -242,15 +221,6 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv
return nil, fmt.Errorf("incorrect parent hash %#x != %#x", header.ParentHash(), h.BlockHash())
}
reg, err := vs.BlockBuilder.RegistrationByValidatorID(ctx, idx)
if err != nil {
log.WithError(err).Warn("Proposer: failed to get registration by validator ID, could not check gas limit")
} else {
if reg.GasLimit != header.GasLimit() {
return nil, fmt.Errorf("incorrect header gas limit %d != %d", reg.GasLimit, header.GasLimit())
}
}
t, err := slots.ToTime(uint64(vs.TimeFetcher.GenesisTime().Unix()), slot)
if err != nil {
return nil, err

View File

@@ -420,41 +420,7 @@ func TestServer_setExecutionData(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
require.LogsContain(t, hook, "\"Proposer: using local execution payload because min difference with local value was not attained\" builderGweiValue=1 localGweiValue=2")
})
t.Run("Builder configured. Builder block does not achieve min bid", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.MinBuilderBid = 5
params.OverrideBeaconConfig(cfg)
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
elBid := primitives.Uint64ToWei(2 * 1e9)
ed, err := blocks.NewWrappedExecutionData(&v1.ExecutionPayloadCapella{BlockNumber: 3})
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, GetPayloadResponse: &blocks.GetPayloadResponse{ExecutionData: ed, Bid: elBid}}
b := blk.Block()
res, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderBid, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
_, err = builderBid.Header()
require.NoError(t, err)
builderKzgCommitments, err := builderBid.BlobKzgCommitments()
if builderBid.Version() >= version.Deneb {
require.NoError(t, err)
}
require.DeepEqual(t, [][]uint8{}, builderKzgCommitments)
_, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor)
require.NoError(t, err)
require.IsNil(t, bundle)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
require.LogsContain(t, hook, "\"Proposer: using local execution payload because min bid not attained\" builderGweiValue=1 minBuilderBid=5")
cfg.MinBuilderBid = 0
params.OverrideBeaconConfig(cfg)
require.LogsContain(t, hook, "builderGweiValue=1 localBoostPercentage=0 localGweiValue=2")
})
t.Run("Builder configured. Local block and local boost has higher value", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
@@ -720,29 +686,6 @@ func TestServer_getPayloadHeader(t *testing.T) {
Signature: sk.Sign(srCapella[:]).Marshal(),
}
incorrectGasLimitBid := &ethpb.BuilderBid{
Header: &v1.ExecutionPayloadHeader{
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength),
ParentHash: params.BeaconConfig().ZeroHash[:],
Timestamp: uint64(tiCapella.Unix()),
GasLimit: 100,
},
Pubkey: sk.PublicKey().Marshal(),
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
}
signedIncorrectGasLimitBid :=
&ethpb.SignedBuilderBid{
Message: incorrectGasLimitBid,
Signature: sk.Sign(srCapella[:]).Marshal(),
}
require.NoError(t, err)
tests := []struct {
name string
@@ -855,21 +798,6 @@ func TestServer_getPayloadHeader(t *testing.T) {
},
err: "is different from head block version",
},
{
name: "incorrect gas limit",
mock: &builderTest.MockBuilderService{
Bid: signedIncorrectGasLimitBid,
},
fetcher: &blockchainTest.ChainService{
Block: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
wb.SetSlot(primitives.Slot(params.BeaconConfig().BellatrixForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
return wb
}(),
},
err: "incorrect header gas limit 0 != 100",
},
{
name: "different bid version during hard fork",
mock: &builderTest.MockBuilderService{
@@ -888,18 +816,9 @@ func TestServer_getPayloadHeader(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
vs := &Server{BeaconDB: dbTest.SetupDB(t), BlockBuilder: tc.mock, HeadFetcher: tc.fetcher, TimeFetcher: &blockchainTest.ChainService{
vs := &Server{BlockBuilder: tc.mock, HeadFetcher: tc.fetcher, TimeFetcher: &blockchainTest.ChainService{
Genesis: genesis,
}}
regCache := cache.NewRegistrationCache()
regCache.UpdateIndexToRegisteredMap(context.Background(), map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1{
0: {
GasLimit: 0,
FeeRecipient: make([]byte, 20),
Pubkey: make([]byte, 48),
},
})
tc.mock.RegistrationCache = regCache
hb, err := vs.HeadFetcher.HeadBlock(context.Background())
require.NoError(t, err)
bid, err := vs.getPayloadHeaderFromBuilder(context.Background(), hb.Block().Slot(), 0)

View File

@@ -59,26 +59,14 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
slot := blk.Slot()
vIdx := blk.ProposerIndex()
headRoot := blk.ParentRoot()
return vs.getLocalPayloadFromEngine(ctx, st, headRoot, slot, vIdx)
}
// This returns the local execution payload for a slot, proposer ID, and parent root, assuming the payload ID is cached.
// If the payload ID is not cached, the function will prepare a new payload through local EL engine and return it by using the head state.
func (vs *Server) getLocalPayloadFromEngine(
ctx context.Context,
st state.BeaconState,
parentRoot [32]byte,
slot primitives.Slot,
proposerId primitives.ValidatorIndex) (*consensusblocks.GetPayloadResponse, error) {
logFields := logrus.Fields{
"validatorIndex": proposerId,
"validatorIndex": vIdx,
"slot": slot,
"headRoot": fmt.Sprintf("%#x", parentRoot),
"headRoot": fmt.Sprintf("%#x", headRoot),
}
payloadId, ok := vs.PayloadIDCache.PayloadID(slot, parentRoot)
payloadId, ok := vs.PayloadIDCache.PayloadID(slot, headRoot)
val, tracked := vs.TrackedValidatorsCache.Validator(proposerId)
val, tracked := vs.TrackedValidatorsCache.Validator(vIdx)
if !tracked {
logrus.WithFields(logFields).Warn("could not find tracked proposer index")
}
@@ -147,7 +135,7 @@ func (vs *Server) getLocalPayloadFromEngine(
PrevRandao: random,
SuggestedFeeRecipient: val.FeeRecipient[:],
Withdrawals: withdrawals,
ParentBeaconBlockRoot: parentRoot[:],
ParentBeaconBlockRoot: headRoot[:],
})
if err != nil {
return nil, err

View File

@@ -879,6 +879,33 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
},
err: "blob KZG commitments don't match number of blobs or KZG proofs",
},
{
name: "electra block no blob",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
sb := &ethpb.SignedBeaconBlockContentsElectra{
Block: &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{})},
},
}
blk := &ethpb.GenericSignedBeaconBlock_Electra{Electra: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "electra block some blob",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
sb := &ethpb.SignedBeaconBlockContentsElectra{
Block: &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{})},
},
KzgProofs: [][]byte{{0x01}, {0x02}, {0x03}},
Blobs: [][]byte{{0x01}, {0x02}, {0x03}},
}
blk := &ethpb.GenericSignedBeaconBlock_Electra{Electra: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "blind deneb block some blobs",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
@@ -916,57 +943,6 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
useBuilder: true,
err: "unblind sidecars failed: commitment value doesn't match block",
},
{
name: "electra block no blob",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
sb := &ethpb.SignedBeaconBlockContentsElectra{
Block: &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{})},
},
}
blk := &ethpb.GenericSignedBeaconBlock_Electra{Electra: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "electra block some blob",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
sb := &ethpb.SignedBeaconBlockContentsElectra{
Block: &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte("kc"), 48), bytesutil.PadTo([]byte("kc1"), 48), bytesutil.PadTo([]byte("kc2"), 48)},
}),
},
},
KzgProofs: [][]byte{{0x01}, {0x02}, {0x03}},
Blobs: [][]byte{{0x01}, {0x02}, {0x03}},
}
blk := &ethpb.GenericSignedBeaconBlock_Electra{Electra: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "electra block some blob (kzg and blob count mismatch)",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
sb := &ethpb.SignedBeaconBlockContentsElectra{
Block: &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte("kc"), 48), bytesutil.PadTo([]byte("kc1"), 48)},
}),
},
},
KzgProofs: [][]byte{{0x01}, {0x02}, {0x03}},
Blobs: [][]byte{{0x01}, {0x02}, {0x03}},
}
blk := &ethpb.GenericSignedBeaconBlock_Electra{Electra: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
err: "blob KZG commitments don't match number of blobs or KZG proofs",
},
}
for _, tt := range tests {
@@ -3116,15 +3092,3 @@ func TestProposer_GetParentHeadState(t *testing.T) {
require.LogsContain(t, hook, "late block attempted reorg failed")
})
}
func TestProposer_ElectraBlobsAndProofs(t *testing.T) {
electraContents := &ethpb.SignedBeaconBlockContentsElectra{Block: &ethpb.SignedBeaconBlockElectra{}}
electraContents.KzgProofs = make([][]byte, 10)
electraContents.Blobs = make([][]byte, 10)
genBlock := &ethpb.GenericSignedBeaconBlock{Block: &ethpb.GenericSignedBeaconBlock_Electra{Electra: electraContents}}
blobs, proofs, err := blobsAndProofs(genBlock)
require.NoError(t, err)
require.Equal(t, 10, len(blobs))
require.Equal(t, 10, len(proofs))
}

View File

@@ -49,7 +49,7 @@ func BenchmarkProposerAtts_sortByProfitability(b *testing.B) {
for i, att := range atts {
attsCopy[i] = att.(*ethpb.Attestation).Copy()
}
_, err := attsCopy.sort()
_, err := attsCopy.sortByProfitability()
require.NoError(b, err, "Could not sort attestations by profitability")
}

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"server.go",
"validator_performance.go",
],
@@ -11,17 +10,9 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network/httputil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -29,42 +20,23 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"handlers_test.go",
"validator_performance_test.go",
],
srcs = ["validator_performance_test.go"],
embed = [":go_default_library"],
deps = [
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -1,117 +0,0 @@
package validator
import (
"fmt"
"net/http"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/network/httputil"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
// GetParticipation retrieves the validator participation information for a given epoch.
// It returns the validators' participation rate in voting on proof-of-stake rules,
// based on their attesting balance compared to the total active validator balance.
func (s *Server) GetParticipation(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetParticipation")
defer span.End()
stateId := mux.Vars(r)["state_id"]
if stateId == "" {
httputil.HandleError(w, "state_id is required in URL params", http.StatusBadRequest)
return
}
st, err := s.Stater.State(ctx, []byte(stateId))
if err != nil {
shared.WriteStateFetchError(w, err)
return
}
stEpoch := slots.ToEpoch(st.Slot())
vp, rpcError := s.CoreService.ValidatorParticipation(ctx, stEpoch)
if rpcError != nil {
httputil.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
return
}
response := &structs.GetValidatorParticipationResponse{
Epoch: fmt.Sprintf("%d", vp.Epoch),
Finalized: vp.Finalized,
Participation: &structs.ValidatorParticipation{
GlobalParticipationRate: fmt.Sprintf("%f", vp.Participation.GlobalParticipationRate),
VotedEther: fmt.Sprintf("%d", vp.Participation.VotedEther),
EligibleEther: fmt.Sprintf("%d", vp.Participation.EligibleEther),
CurrentEpochActiveGwei: fmt.Sprintf("%d", vp.Participation.CurrentEpochActiveGwei),
CurrentEpochAttestingGwei: fmt.Sprintf("%d", vp.Participation.CurrentEpochAttestingGwei),
CurrentEpochTargetAttestingGwei: fmt.Sprintf("%d", vp.Participation.CurrentEpochTargetAttestingGwei),
PreviousEpochActiveGwei: fmt.Sprintf("%d", vp.Participation.PreviousEpochActiveGwei),
PreviousEpochAttestingGwei: fmt.Sprintf("%d", vp.Participation.PreviousEpochAttestingGwei),
PreviousEpochTargetAttestingGwei: fmt.Sprintf("%d", vp.Participation.PreviousEpochTargetAttestingGwei),
PreviousEpochHeadAttestingGwei: fmt.Sprintf("%d", vp.Participation.PreviousEpochHeadAttestingGwei),
},
}
httputil.WriteJson(w, response)
}
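A hypothetical test-style sketch of calling the handler above: it assumes a *Server wired with a Stater, CoreService and replayer as in the tests elsewhere in this diff, and shows how the state_id route variable is injected via gorilla/mux's SetURLVars test helper.
func callGetParticipation(t *testing.T, s *Server) {
	req := httptest.NewRequest(http.MethodGet, "http://example.com/participation", nil)
	req = mux.SetURLVars(req, map[string]string{"state_id": "head"})
	rec := httptest.NewRecorder()
	s.GetParticipation(rec, req)
	require.Equal(t, http.StatusOK, rec.Code)
}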
// GetActiveSetChanges retrieves the active set changes for a given epoch.
//
// This data includes any activations, voluntary exits, and involuntary
// ejections.
func (s *Server) GetActiveSetChanges(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetActiveSetChanges")
defer span.End()
stateId := mux.Vars(r)["state_id"]
if stateId == "" {
httputil.HandleError(w, "state_id is required in URL params", http.StatusBadRequest)
return
}
st, err := s.Stater.State(ctx, []byte(stateId))
if err != nil {
shared.WriteStateFetchError(w, err)
return
}
stEpoch := slots.ToEpoch(st.Slot())
as, rpcError := s.CoreService.ValidatorActiveSetChanges(ctx, stEpoch)
if rpcError != nil {
httputil.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
return
}
response := &structs.ActiveSetChanges{
Epoch: fmt.Sprintf("%d", as.Epoch),
ActivatedPublicKeys: byteSlice2dToStringSlice(as.ActivatedPublicKeys),
ActivatedIndices: uint64SliceToStringSlice(as.ActivatedIndices),
ExitedPublicKeys: byteSlice2dToStringSlice(as.ExitedPublicKeys),
ExitedIndices: uint64SliceToStringSlice(as.ExitedIndices),
SlashedPublicKeys: byteSlice2dToStringSlice(as.SlashedPublicKeys),
SlashedIndices: uint64SliceToStringSlice(as.SlashedIndices),
EjectedPublicKeys: byteSlice2dToStringSlice(as.EjectedPublicKeys),
EjectedIndices: uint64SliceToStringSlice(as.EjectedIndices),
}
httputil.WriteJson(w, response)
}
func byteSlice2dToStringSlice(byteArrays [][]byte) []string {
s := make([]string, len(byteArrays))
for i, b := range byteArrays {
s[i] = hexutil.Encode(b)
}
return s
}
func uint64SliceToStringSlice(indices []primitives.ValidatorIndex) []string {
s := make([]string, len(indices))
for i, u := range indices {
s[i] = fmt.Sprintf("%d", u)
}
return s
}
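If called directly, the two conversion helpers above yield 0x-prefixed hex strings for public keys and decimal strings for indices, e.g.:
fmt.Println(byteSlice2dToStringSlice([][]byte{{0xaa, 0xbb}}))             // [0xaabb]
fmt.Println(uint64SliceToStringSlice([]primitives.ValidatorIndex{7, 42})) // [7 42]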

View File

@@ -1,558 +0,0 @@
package validator
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
dbTest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
mockstategen "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen/mock"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
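// addDefaultReplayerBuilder wires the core service with a canonical-history replayer
// backed by the given history accessor, using mocks that treat every block as canonical
// and report a current slot near MaxUint64.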
func addDefaultReplayerBuilder(s *Server, h stategen.HistoryAccessor) {
cc := &mockstategen.CanonicalChecker{Is: true, Err: nil}
cs := &mockstategen.CurrentSlotter{Slot: math.MaxUint64 - 1}
s.CoreService.ReplayerBuilder = stategen.NewCanonicalHistory(h, cc, cs)
}
func TestServer_GetValidatorParticipation_NoState(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(0))
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 4)
s := &Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{},
},
}
url := "http://example.com" + fmt.Sprintf("%d", slots.ToEpoch(s.CoreService.GenesisTimeFetcher.CurrentSlot())+1)
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetParticipation(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
require.StringContains(t, "state_id is required in URL params", writer.Body.String())
}
func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
helpers.ClearCache()
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
validatorCount := uint64(32)
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: bytesutil.ToBytes(uint64(i), 48),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
atts := []*ethpb.PendingAttestation{{
Data: util.HydrateAttestationData(&ethpb.AttestationData{}),
InclusionDelay: 1,
AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
}}
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(8))
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetBalances(balances))
require.NoError(t, headState.AppendCurrentEpochAttestations(atts[0]))
require.NoError(t, headState.AppendPreviousEpochAttestations(atts[0]))
b := util.NewBeaconBlock()
b.Block.Slot = 8
util.SaveBlock(t, ctx, beaconDB, b)
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, params.BeaconConfig().ZeroHash))
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 4)
s := &Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
BeaconDB: beaconDB,
CoreService: &core.Service{
HeadFetcher: m,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
},
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bRoot: true,
},
},
}
addDefaultReplayerBuilder(s, beaconDB)
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetParticipation(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := &structs.GetValidatorParticipationResponse{
Participation: &structs.ValidatorParticipation{
GlobalParticipationRate: fmt.Sprintf("%f", float32(params.BeaconConfig().EffectiveBalanceIncrement)/float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance)),
VotedEther: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
EligibleEther: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
CurrentEpochActiveGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
CurrentEpochAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
CurrentEpochTargetAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
PreviousEpochActiveGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
PreviousEpochAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
PreviousEpochTargetAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
PreviousEpochHeadAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
},
}
var vp *structs.GetValidatorParticipationResponse
err = json.NewDecoder(writer.Body).Decode(&vp)
require.NoError(t, err)
// Compare the response with the expected values
assert.Equal(t, true, vp.Finalized, "Incorrect validator participation response")
assert.Equal(t, *want.Participation, *vp.Participation, "Incorrect validator participation response")
}
func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.BeaconConfig())
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
validatorCount := uint64(100)
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: bytesutil.ToBytes(uint64(i), 48),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
atts := []*ethpb.PendingAttestation{{
Data: util.HydrateAttestationData(&ethpb.AttestationData{}),
InclusionDelay: 1,
AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
}}
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(0))
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetBalances(balances))
require.NoError(t, headState.AppendCurrentEpochAttestations(atts[0]))
require.NoError(t, headState.AppendPreviousEpochAttestations(atts[0]))
b := util.NewBeaconBlock()
util.SaveBlock(t, ctx, beaconDB, b)
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, params.BeaconConfig().ZeroHash))
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 4)
s := &Server{
BeaconDB: beaconDB,
Stater: &testutil.MockStater{
BeaconState: st,
},
CoreService: &core.Service{
HeadFetcher: m,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
},
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bRoot: true,
},
},
}
addDefaultReplayerBuilder(s, beaconDB)
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetParticipation(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := &structs.GetValidatorParticipationResponse{
Participation: &structs.ValidatorParticipation{
GlobalParticipationRate: fmt.Sprintf("%f", float32(params.BeaconConfig().EffectiveBalanceIncrement)/float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance)),
VotedEther: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
EligibleEther: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
CurrentEpochActiveGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
CurrentEpochAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
CurrentEpochTargetAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
PreviousEpochActiveGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
PreviousEpochAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
PreviousEpochTargetAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
PreviousEpochHeadAttestingGwei: fmt.Sprintf("%d", params.BeaconConfig().EffectiveBalanceIncrement),
},
}
var vp *structs.GetValidatorParticipationResponse
err = json.NewDecoder(writer.Body).Decode(&vp)
require.NoError(t, err)
assert.DeepEqual(t, true, vp.Finalized, "Incorrect validator participation response")
assert.DeepEqual(t, want.Participation, vp.Participation, "Incorrect validator participation response")
}
func TestServer_GetValidatorParticipation_CurrentAndPrevEpochWithBits(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.BeaconConfig())
transition.SkipSlotCache.Disable()
t.Run("altair", func(t *testing.T) {
validatorCount := uint64(32)
genState, _ := util.DeterministicGenesisStateAltair(t, validatorCount)
c, err := altair.NextSyncCommittee(context.Background(), genState)
require.NoError(t, err)
require.NoError(t, genState.SetCurrentSyncCommittee(c))
bits := make([]byte, validatorCount)
for i := range bits {
bits[i] = 0xff
}
require.NoError(t, genState.SetCurrentParticipationBits(bits))
require.NoError(t, genState.SetPreviousParticipationBits(bits))
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockAltair())
assert.NoError(t, err)
runGetValidatorParticipationCurrentEpoch(t, genState, gb)
})
t.Run("bellatrix", func(t *testing.T) {
validatorCount := uint64(32)
genState, _ := util.DeterministicGenesisStateBellatrix(t, validatorCount)
c, err := altair.NextSyncCommittee(context.Background(), genState)
require.NoError(t, err)
require.NoError(t, genState.SetCurrentSyncCommittee(c))
bits := make([]byte, validatorCount)
for i := range bits {
bits[i] = 0xff
}
require.NoError(t, genState.SetCurrentParticipationBits(bits))
require.NoError(t, genState.SetPreviousParticipationBits(bits))
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
assert.NoError(t, err)
runGetValidatorParticipationCurrentEpoch(t, genState, gb)
})
t.Run("capella", func(t *testing.T) {
validatorCount := uint64(32)
genState, _ := util.DeterministicGenesisStateCapella(t, validatorCount)
c, err := altair.NextSyncCommittee(context.Background(), genState)
require.NoError(t, err)
require.NoError(t, genState.SetCurrentSyncCommittee(c))
bits := make([]byte, validatorCount)
for i := range bits {
bits[i] = 0xff
}
require.NoError(t, genState.SetCurrentParticipationBits(bits))
require.NoError(t, genState.SetPreviousParticipationBits(bits))
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
assert.NoError(t, err)
runGetValidatorParticipationCurrentEpoch(t, genState, gb)
})
}
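// runGetValidatorParticipationCurrentEpoch saves the given genesis state and block,
// serves GetParticipation for state_id "head", and expects full participation
// (a global participation rate of 1.0) in both the current and previous epoch.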
func runGetValidatorParticipationCurrentEpoch(t *testing.T, genState state.BeaconState, gb interfaces.SignedBeaconBlock) {
helpers.ClearCache()
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
validatorCount := uint64(32)
gsr, err := genState.HashTreeRoot(ctx)
require.NoError(t, err)
gb, err = blocktest.SetBlockStateRoot(gb, gsr)
require.NoError(t, err)
require.NoError(t, err)
gRoot, err := gb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, genState, gRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, gb))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
m := &mock.ChainService{State: genState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
s := &Server{
BeaconDB: beaconDB,
Stater: &testutil.MockStater{
BeaconState: genState,
},
CoreService: &core.Service{
HeadFetcher: m,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
},
}
addDefaultReplayerBuilder(s, beaconDB)
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetParticipation(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := &structs.GetValidatorParticipationResponse{
Participation: &structs.ValidatorParticipation{
GlobalParticipationRate: "1.000000",
VotedEther: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
EligibleEther: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
CurrentEpochActiveGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
CurrentEpochAttestingGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
CurrentEpochTargetAttestingGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
PreviousEpochActiveGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
PreviousEpochAttestingGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
PreviousEpochTargetAttestingGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
PreviousEpochHeadAttestingGwei: fmt.Sprintf("%d", validatorCount*params.BeaconConfig().MaxEffectiveBalance),
},
}
var vp *structs.GetValidatorParticipationResponse
err = json.NewDecoder(writer.Body).Decode(&vp)
require.NoError(t, err)
assert.DeepEqual(t, true, vp.Finalized, "Incorrect validator participation response")
assert.DeepEqual(t, *want.Participation, *vp.Participation, "Incorrect validator participation response")
}
func TestServer_GetValidatorActiveSetChanges_NoState(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 4)
s := &Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
CoreService: &core.Service{
BeaconDB: beaconDB,
GenesisTimeFetcher: &mock.ChainService{},
HeadFetcher: &mock.ChainService{
State: st,
},
},
}
url := "http://example.com" + fmt.Sprintf("%d", slots.ToEpoch(s.CoreService.GenesisTimeFetcher.CurrentSlot())+1)
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"state_id": ""})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetActiveSetChanges(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
require.StringContains(t, "state_id is required in URL params", writer.Body.String())
}
func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
validators := make([]*ethpb.Validator, 8)
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(0))
require.NoError(t, headState.SetValidators(validators))
for i := 0; i < len(validators); i++ {
activationEpoch := params.BeaconConfig().FarFutureEpoch
withdrawableEpoch := params.BeaconConfig().FarFutureEpoch
exitEpoch := params.BeaconConfig().FarFutureEpoch
slashed := false
balance := params.BeaconConfig().MaxEffectiveBalance
// Mark indices divisible by two as activated.
if i%2 == 0 {
activationEpoch = 0
} else if i%3 == 0 {
// Mark indices divisible by 3 as slashed.
withdrawableEpoch = params.BeaconConfig().EpochsPerSlashingsVector
slashed = true
} else if i%5 == 0 {
// Mark indices divisible by 5 as exited.
exitEpoch = 0
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
} else if i%7 == 0 {
// Mark indices divisible by 7 as ejected.
exitEpoch = 0
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
balance = params.BeaconConfig().EjectionBalance
}
err := headState.UpdateValidatorAtIndex(primitives.ValidatorIndex(i), &ethpb.Validator{
ActivationEpoch: activationEpoch,
PublicKey: pubKey(uint64(i)),
EffectiveBalance: balance,
WithdrawalCredentials: make([]byte, 32),
WithdrawableEpoch: withdrawableEpoch,
Slashed: slashed,
ExitEpoch: exitEpoch,
})
require.NoError(t, err)
}
b := util.NewBeaconBlock()
util.SaveBlock(t, ctx, beaconDB, b)
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 4)
s := &Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
CoreService: &core.Service{
FinalizedFetcher: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
},
GenesisTimeFetcher: &mock.ChainService{},
},
}
addDefaultReplayerBuilder(s, beaconDB)
url := "http://example.com"
request := httptest.NewRequest(http.MethodGet, url, nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "genesis"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetActiveSetChanges(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
wantedActive := []string{
hexutil.Encode(pubKey(0)),
hexutil.Encode(pubKey(2)),
hexutil.Encode(pubKey(4)),
hexutil.Encode(pubKey(6)),
}
wantedActiveIndices := []string{"0", "2", "4", "6"}
wantedExited := []string{
hexutil.Encode(pubKey(5)),
}
wantedExitedIndices := []string{"5"}
wantedSlashed := []string{
hexutil.Encode(pubKey(3)),
}
wantedSlashedIndices := []string{"3"}
wantedEjected := []string{
hexutil.Encode(pubKey(7)),
}
wantedEjectedIndices := []string{"7"}
want := &structs.ActiveSetChanges{
Epoch: "0",
ActivatedPublicKeys: wantedActive,
ActivatedIndices: wantedActiveIndices,
ExitedPublicKeys: wantedExited,
ExitedIndices: wantedExitedIndices,
SlashedPublicKeys: wantedSlashed,
SlashedIndices: wantedSlashedIndices,
EjectedPublicKeys: wantedEjected,
EjectedIndices: wantedEjectedIndices,
}
var as *structs.ActiveSetChanges
err = json.NewDecoder(writer.Body).Decode(&as)
require.NoError(t, err)
require.DeepEqual(t, *want, *as)
}
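// pubKey builds a deterministic BLSPubkeyLength-byte key whose first eight bytes are
// the little-endian encoding of i, giving each test validator a distinct public key.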
func pubKey(i uint64) []byte {
pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
binary.LittleEndian.PutUint64(pubKey, i)
return pubKey
}


@@ -1,17 +1,9 @@
package validator
import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
)
type Server struct {
BeaconDB db.ReadOnlyDatabase
Stater lookup.Stater
CanonicalFetcher blockchain.CanonicalFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ChainInfoFetcher blockchain.ChainInfoFetcher
CoreService *core.Service
CoreService *core.Service


@@ -13,9 +13,9 @@ import (
"go.opencensus.io/trace"
)
// GetPerformance is an HTTP handler for GetPerformance.
func (s *Server) GetPerformance(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetPerformance")
// GetValidatorPerformance is an HTTP handler for GetValidatorPerformance.
func (s *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetValidatorPerformance")
defer span.End()
var req structs.GetValidatorPerformanceRequest


@@ -35,7 +35,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
},
}
srv := httptest.NewServer(http.HandlerFunc(vs.GetPerformance))
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", nil)
client := &http.Client{}
@@ -86,7 +86,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
err = json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetPerformance))
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
@@ -151,7 +151,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
err = json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetPerformance))
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
@@ -216,7 +216,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
err = json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetPerformance))
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
@@ -278,7 +278,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
err := json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetPerformance))
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
@@ -340,7 +340,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
err := json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetPerformance))
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
@@ -402,7 +402,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
err := json.NewEncoder(&buf).Encode(request)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(vs.GetPerformance))
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)


@@ -215,7 +215,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
coreService := &core.Service{
BeaconDB: s.cfg.BeaconDB,
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
@@ -226,7 +225,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
StateGen: s.cfg.StateGen,
P2P: s.cfg.Broadcaster,
FinalizedFetcher: s.cfg.FinalizationFetcher,
ReplayerBuilder: ch,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
validatorServer := &validatorv1alpha1.Server{


@@ -199,7 +199,6 @@ type ReadOnlyWithdrawals interface {
NextWithdrawalValidatorIndex() (primitives.ValidatorIndex, error)
NextWithdrawalIndex() (uint64, error)
PendingBalanceToWithdraw(idx primitives.ValidatorIndex) (uint64, error)
PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error)
NumPendingPartialWithdrawals() (uint64, error)
HasPendingBalanceToWithdraw(idx primitives.ValidatorIndex) (bool, error)
}


@@ -3,7 +3,8 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"beacon_state.go",
"beacon_state_mainnet.go",
"beacon_state_minimal.go", # keep
"doc.go",
"error.go",
"getters_attestation.go",


@@ -0,0 +1,203 @@
//go:build !minimal
package state_native
import (
"encoding/json"
"sync"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/fieldtrie"
customtypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// BeaconState defines a struct containing utilities for the Ethereum Beacon Chain state, defining
// getters and setters for its respective values and helpful functions such as HashTreeRoot().
type BeaconState struct {
version int
genesisTime uint64
genesisValidatorsRoot [32]byte
slot primitives.Slot
fork *ethpb.Fork
latestBlockHeader *ethpb.BeaconBlockHeader
blockRoots customtypes.BlockRoots
blockRootsMultiValue *MultiValueBlockRoots
stateRoots customtypes.StateRoots
stateRootsMultiValue *MultiValueStateRoots
historicalRoots customtypes.HistoricalRoots
historicalSummaries []*ethpb.HistoricalSummary
eth1Data *ethpb.Eth1Data
eth1DataVotes []*ethpb.Eth1Data
eth1DepositIndex uint64
validators []*ethpb.Validator
validatorsMultiValue *MultiValueValidators
balances []uint64
balancesMultiValue *MultiValueBalances
randaoMixes customtypes.RandaoMixes
randaoMixesMultiValue *MultiValueRandaoMixes
slashings []uint64
previousEpochAttestations []*ethpb.PendingAttestation
currentEpochAttestations []*ethpb.PendingAttestation
previousEpochParticipation []byte
currentEpochParticipation []byte
justificationBits bitfield.Bitvector4
previousJustifiedCheckpoint *ethpb.Checkpoint
currentJustifiedCheckpoint *ethpb.Checkpoint
finalizedCheckpoint *ethpb.Checkpoint
inactivityScores []uint64
inactivityScoresMultiValue *MultiValueInactivityScores
currentSyncCommittee *ethpb.SyncCommittee
nextSyncCommittee *ethpb.SyncCommittee
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader
latestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella
latestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb
latestExecutionPayloadHeaderElectra *enginev1.ExecutionPayloadHeaderElectra
nextWithdrawalIndex uint64
nextWithdrawalValidatorIndex primitives.ValidatorIndex
// Electra fields
depositRequestsStartIndex uint64
depositBalanceToConsume primitives.Gwei
exitBalanceToConsume primitives.Gwei
earliestExitEpoch primitives.Epoch
consolidationBalanceToConsume primitives.Gwei
earliestConsolidationEpoch primitives.Epoch
pendingBalanceDeposits []*ethpb.PendingBalanceDeposit // pending_balance_deposits: List[PendingBalanceDeposit, PENDING_BALANCE_DEPOSITS_LIMIT]
pendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal // pending_partial_withdrawals: List[PartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT]
pendingConsolidations []*ethpb.PendingConsolidation // pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT]
id uint64
lock sync.RWMutex
dirtyFields map[types.FieldIndex]bool
dirtyIndices map[types.FieldIndex][]uint64
stateFieldLeaves map[types.FieldIndex]*fieldtrie.FieldTrie
rebuildTrie map[types.FieldIndex]bool
valMapHandler *stateutil.ValidatorMapHandler
validatorIndexCache *finalizedValidatorIndexCache
merkleLayers [][][]byte
sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
}
type beaconStateMarshalable struct {
Version int `json:"version" yaml:"version"`
GenesisTime uint64 `json:"genesis_time" yaml:"genesis_time"`
GenesisValidatorsRoot [32]byte `json:"genesis_validators_root" yaml:"genesis_validators_root"`
Slot primitives.Slot `json:"slot" yaml:"slot"`
Fork *ethpb.Fork `json:"fork" yaml:"fork"`
LatestBlockHeader *ethpb.BeaconBlockHeader `json:"latest_block_header" yaml:"latest_block_header"`
BlockRoots customtypes.BlockRoots `json:"block_roots" yaml:"block_roots"`
StateRoots customtypes.StateRoots `json:"state_roots" yaml:"state_roots"`
HistoricalRoots customtypes.HistoricalRoots `json:"historical_roots" yaml:"historical_roots"`
HistoricalSummaries []*ethpb.HistoricalSummary `json:"historical_summaries" yaml:"historical_summaries"`
Eth1Data *ethpb.Eth1Data `json:"eth_1_data" yaml:"eth_1_data"`
Eth1DataVotes []*ethpb.Eth1Data `json:"eth_1_data_votes" yaml:"eth_1_data_votes"`
Eth1DepositIndex uint64 `json:"eth_1_deposit_index" yaml:"eth_1_deposit_index"`
Validators []*ethpb.Validator `json:"validators" yaml:"validators"`
Balances []uint64 `json:"balances" yaml:"balances"`
RandaoMixes customtypes.RandaoMixes `json:"randao_mixes" yaml:"randao_mixes"`
Slashings []uint64 `json:"slashings" yaml:"slashings"`
PreviousEpochAttestations []*ethpb.PendingAttestation `json:"previous_epoch_attestations" yaml:"previous_epoch_attestations"`
CurrentEpochAttestations []*ethpb.PendingAttestation `json:"current_epoch_attestations" yaml:"current_epoch_attestations"`
PreviousEpochParticipation []byte `json:"previous_epoch_participation" yaml:"previous_epoch_participation"`
CurrentEpochParticipation []byte `json:"current_epoch_participation" yaml:"current_epoch_participation"`
JustificationBits bitfield.Bitvector4 `json:"justification_bits" yaml:"justification_bits"`
PreviousJustifiedCheckpoint *ethpb.Checkpoint `json:"previous_justified_checkpoint" yaml:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *ethpb.Checkpoint `json:"current_justified_checkpoint" yaml:"current_justified_checkpoint"`
FinalizedCheckpoint *ethpb.Checkpoint `json:"finalized_checkpoint" yaml:"finalized_checkpoint"`
InactivityScores []uint64 `json:"inactivity_scores" yaml:"inactivity_scores"`
CurrentSyncCommittee *ethpb.SyncCommittee `json:"current_sync_committee" yaml:"current_sync_committee"`
NextSyncCommittee *ethpb.SyncCommittee `json:"next_sync_committee" yaml:"next_sync_committee"`
LatestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader `json:"latest_execution_payload_header" yaml:"latest_execution_payload_header"`
LatestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella `json:"latest_execution_payload_header_capella" yaml:"latest_execution_payload_header_capella"`
LatestExecutionPayloadHeaderDeneb *enginev1.ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header_deneb" yaml:"latest_execution_payload_header_deneb"`
LatestExecutionPayloadHeaderElectra *enginev1.ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header_electra" yaml:"latest_execution_payload_header_electra"`
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
DepositRequestsStartIndex uint64 `json:"deposit_requests_start_index" yaml:"deposit_requests_start_index"`
DepositBalanceToConsume primitives.Gwei `json:"deposit_balance_to_consume" yaml:"deposit_balance_to_consume"`
ExitBalanceToConsume primitives.Gwei `json:"exit_balance_to_consume" yaml:"exit_balance_to_consume"`
EarliestExitEpoch primitives.Epoch `json:"earliest_exit_epoch" yaml:"earliest_exit_epoch"`
ConsolidationBalanceToConsume primitives.Gwei `json:"consolidation_balance_to_consume" yaml:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch primitives.Epoch `json:"earliest_consolidation_epoch" yaml:"earliest_consolidation_epoch"`
PendingBalanceDeposits []*ethpb.PendingBalanceDeposit `json:"pending_balance_deposits" yaml:"pending_balance_deposits"`
PendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal `json:"pending_partial_withdrawals" yaml:"pending_partial_withdrawals"`
PendingConsolidations []*ethpb.PendingConsolidation `json:"pending_consolidations" yaml:"pending_consolidations"`
}
func (b *BeaconState) MarshalJSON() ([]byte, error) {
var bRoots customtypes.BlockRoots
var sRoots customtypes.StateRoots
var mixes customtypes.RandaoMixes
var balances []uint64
var inactivityScores []uint64
var vals []*ethpb.Validator
if features.Get().EnableExperimentalState {
bRoots = b.blockRootsMultiValue.Value(b)
sRoots = b.stateRootsMultiValue.Value(b)
mixes = b.randaoMixesMultiValue.Value(b)
balances = b.balancesMultiValue.Value(b)
inactivityScores = b.inactivityScoresMultiValue.Value(b)
vals = b.validatorsMultiValue.Value(b)
} else {
bRoots = b.blockRoots
sRoots = b.stateRoots
mixes = b.randaoMixes
balances = b.balances
inactivityScores = b.inactivityScores
vals = b.validators
}
marshalable := &beaconStateMarshalable{
Version: b.version,
GenesisTime: b.genesisTime,
GenesisValidatorsRoot: b.genesisValidatorsRoot,
Slot: b.slot,
Fork: b.fork,
LatestBlockHeader: b.latestBlockHeader,
BlockRoots: bRoots,
StateRoots: sRoots,
HistoricalRoots: b.historicalRoots,
HistoricalSummaries: b.historicalSummaries,
Eth1Data: b.eth1Data,
Eth1DataVotes: b.eth1DataVotes,
Eth1DepositIndex: b.eth1DepositIndex,
Validators: vals,
Balances: balances,
RandaoMixes: mixes,
Slashings: b.slashings,
PreviousEpochAttestations: b.previousEpochAttestations,
CurrentEpochAttestations: b.currentEpochAttestations,
PreviousEpochParticipation: b.previousEpochParticipation,
CurrentEpochParticipation: b.currentEpochParticipation,
JustificationBits: b.justificationBits,
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
FinalizedCheckpoint: b.finalizedCheckpoint,
InactivityScores: inactivityScores,
CurrentSyncCommittee: b.currentSyncCommittee,
NextSyncCommittee: b.nextSyncCommittee,
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader,
LatestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella,
LatestExecutionPayloadHeaderDeneb: b.latestExecutionPayloadHeaderDeneb,
LatestExecutionPayloadHeaderElectra: b.latestExecutionPayloadHeaderElectra,
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
DepositRequestsStartIndex: b.depositRequestsStartIndex,
DepositBalanceToConsume: b.depositBalanceToConsume,
ExitBalanceToConsume: b.exitBalanceToConsume,
EarliestExitEpoch: b.earliestExitEpoch,
ConsolidationBalanceToConsume: b.consolidationBalanceToConsume,
EarliestConsolidationEpoch: b.earliestConsolidationEpoch,
PendingBalanceDeposits: b.pendingBalanceDeposits,
PendingPartialWithdrawals: b.pendingPartialWithdrawals,
PendingConsolidations: b.pendingConsolidations,
}
return json.Marshal(marshalable)
}
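MarshalJSON above picks each slice's source based on the EnableExperimentalState feature flag: when the flag is set, the multi-value containers are resolved through their Value(b) accessors, otherwise the plain slice fields are used. A minimal, self-contained sketch of that pattern with hypothetical stand-in types (not the Prysm API):

package main

import (
	"encoding/json"
	"fmt"
)

// multiValue stands in for Prysm's multi-value slice containers, which resolve
// a shared, copy-on-write slice for a particular state instance.
type multiValue struct{ data []uint64 }

func (m *multiValue) Value() []uint64 { return m.data }

type state struct {
	experimental bool
	balances     []uint64
	balancesMV   *multiValue
}

func (s *state) MarshalJSON() ([]byte, error) {
	// Choose the slice source depending on the feature flag, mirroring the
	// conditional at the top of the real MarshalJSON.
	balances := s.balances
	if s.experimental {
		balances = s.balancesMV.Value()
	}
	return json.Marshal(struct {
		Balances []uint64 `json:"balances"`
	}{Balances: balances})
}

func main() {
	st := &state{experimental: true, balancesMV: &multiValue{data: []uint64{32, 31}}}
	out, _ := st.MarshalJSON()
	fmt.Println(string(out)) // {"balances":[32,31]}
}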

Some files were not shown because too many files have changed in this diff.