mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-02-01 08:35:24 -05:00

Compare commits: e2e-debugg...use-delay (1 commit)
Commit: 9de4eec82a
@@ -503,77 +503,3 @@ func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
 func (s *SignedBlindedBeaconBlockFulu) SigString() string {
 	return s.Signature
 }
-
-// ----------------------------------------------------------------------------
-// Gloas
-// ----------------------------------------------------------------------------
-
-type ExecutionPayloadBid struct {
-	ParentBlockHash        string `json:"parent_block_hash"`
-	ParentBlockRoot        string `json:"parent_block_root"`
-	BlockHash              string `json:"block_hash"`
-	PrevRandao             string `json:"prev_randao"`
-	FeeRecipient           string `json:"fee_recipient"`
-	GasLimit               string `json:"gas_limit"`
-	BuilderIndex           string `json:"builder_index"`
-	Slot                   string `json:"slot"`
-	Value                  string `json:"value"`
-	ExecutionPayment       string `json:"execution_payment"`
-	BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
-}
-
-type SignedExecutionPayloadBid struct {
-	Message   *ExecutionPayloadBid `json:"message"`
-	Signature string               `json:"signature"`
-}
-
-type PayloadAttestationData struct {
-	BeaconBlockRoot   string `json:"beacon_block_root"`
-	Slot              string `json:"slot"`
-	PayloadPresent    bool   `json:"payload_present"`
-	BlobDataAvailable bool   `json:"blob_data_available"`
-}
-
-type PayloadAttestation struct {
-	AggregationBits string                  `json:"aggregation_bits"`
-	Data            *PayloadAttestationData `json:"data"`
-	Signature       string                  `json:"signature"`
-}
-
-type BeaconBlockBodyGloas struct {
-	RandaoReveal              string                        `json:"randao_reveal"`
-	Eth1Data                  *Eth1Data                     `json:"eth1_data"`
-	Graffiti                  string                        `json:"graffiti"`
-	ProposerSlashings         []*ProposerSlashing           `json:"proposer_slashings"`
-	AttesterSlashings         []*AttesterSlashingElectra    `json:"attester_slashings"`
-	Attestations              []*AttestationElectra         `json:"attestations"`
-	Deposits                  []*Deposit                    `json:"deposits"`
-	VoluntaryExits            []*SignedVoluntaryExit        `json:"voluntary_exits"`
-	SyncAggregate             *SyncAggregate                `json:"sync_aggregate"`
-	BLSToExecutionChanges     []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
-	SignedExecutionPayloadBid *SignedExecutionPayloadBid    `json:"signed_execution_payload_bid"`
-	PayloadAttestations       []*PayloadAttestation         `json:"payload_attestations"`
-}
-
-type BeaconBlockGloas struct {
-	Slot          string                `json:"slot"`
-	ProposerIndex string                `json:"proposer_index"`
-	ParentRoot    string                `json:"parent_root"`
-	StateRoot     string                `json:"state_root"`
-	Body          *BeaconBlockBodyGloas `json:"body"`
-}
-
-type SignedBeaconBlockGloas struct {
-	Message   *BeaconBlockGloas `json:"message"`
-	Signature string            `json:"signature"`
-}
-
-var _ SignedMessageJsoner = &SignedBeaconBlockGloas{}
-
-func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
-	return json.Marshal(s.Message)
-}
-
-func (s *SignedBeaconBlockGloas) SigString() string {
-	return s.Signature
-}
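The structs removed above are the JSON API representations used by the beacon API handlers: byte fields travel as 0x-prefixed hex strings and numeric fields as decimal strings. A minimal, self-contained sketch of the wire shape implied by those json tags (values are made up for illustration, and only a few fields are reproduced):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed local copy of the removed ExecutionPayloadBid, kept only to show the JSON shape.
type ExecutionPayloadBid struct {
	ParentBlockHash string `json:"parent_block_hash"`
	Slot            string `json:"slot"`
	Value           string `json:"value"`
}

func main() {
	bid := ExecutionPayloadBid{
		ParentBlockHash: "0x6a5c", // illustrative hex, not a real 32-byte hash
		Slot:            "12345",
		Value:           "1000000000",
	}
	out, err := json.Marshal(bid)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"parent_block_hash":"0x6a5c","slot":"12345","value":"1000000000"}
}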
@@ -268,8 +268,6 @@ func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock)
 		return SignedBlindedBeaconBlockFuluFromConsensus(pbStruct)
 	case *eth.SignedBeaconBlockFulu:
 		return SignedBeaconBlockFuluFromConsensus(pbStruct)
-	case *eth.SignedBeaconBlockGloas:
-		return SignedBeaconBlockGloasFromConsensus(pbStruct)
 	default:
 		return nil, ErrUnsupportedConversion
 	}
@@ -2887,379 +2885,3 @@ func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBe
 		Signature: hexutil.Encode(b.Signature),
 	}, nil
 }
-
-// ----------------------------------------------------------------------------
-// Gloas
-// ----------------------------------------------------------------------------
-
-func SignedBeaconBlockGloasFromConsensus(b *eth.SignedBeaconBlockGloas) (*SignedBeaconBlockGloas, error) {
-	block, err := BeaconBlockGloasFromConsensus(b.Block)
-	if err != nil {
-		return nil, err
-	}
-	return &SignedBeaconBlockGloas{
-		Message:   block,
-		Signature: hexutil.Encode(b.Signature),
-	}, nil
-}
-
-func BeaconBlockGloasFromConsensus(b *eth.BeaconBlockGloas) (*BeaconBlockGloas, error) {
-	payloadAttestations := make([]*PayloadAttestation, len(b.Body.PayloadAttestations))
-	for i, pa := range b.Body.PayloadAttestations {
-		payloadAttestations[i] = PayloadAttestationFromConsensus(pa)
-	}
-
-	return &BeaconBlockGloas{
-		Slot:          fmt.Sprintf("%d", b.Slot),
-		ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
-		ParentRoot:    hexutil.Encode(b.ParentRoot),
-		StateRoot:     hexutil.Encode(b.StateRoot),
-		Body: &BeaconBlockBodyGloas{
-			RandaoReveal:              hexutil.Encode(b.Body.RandaoReveal),
-			Eth1Data:                  Eth1DataFromConsensus(b.Body.Eth1Data),
-			Graffiti:                  hexutil.Encode(b.Body.Graffiti),
-			ProposerSlashings:         ProposerSlashingsFromConsensus(b.Body.ProposerSlashings),
-			AttesterSlashings:         AttesterSlashingsElectraFromConsensus(b.Body.AttesterSlashings),
-			Attestations:              AttsElectraFromConsensus(b.Body.Attestations),
-			Deposits:                  DepositsFromConsensus(b.Body.Deposits),
-			VoluntaryExits:            SignedExitsFromConsensus(b.Body.VoluntaryExits),
-			SyncAggregate:             SyncAggregateFromConsensus(b.Body.SyncAggregate),
-			BLSToExecutionChanges:     SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
-			SignedExecutionPayloadBid: SignedExecutionPayloadBidFromConsensus(b.Body.SignedExecutionPayloadBid),
-			PayloadAttestations:       payloadAttestations,
-		},
-	}, nil
-}
-
-func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *SignedExecutionPayloadBid {
-	return &SignedExecutionPayloadBid{
-		Message:   ExecutionPayloadBidFromConsensus(b.Message),
-		Signature: hexutil.Encode(b.Signature),
-	}
-}
-
-func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
-	return &ExecutionPayloadBid{
-		ParentBlockHash:        hexutil.Encode(b.ParentBlockHash),
-		ParentBlockRoot:        hexutil.Encode(b.ParentBlockRoot),
-		BlockHash:              hexutil.Encode(b.BlockHash),
-		PrevRandao:             hexutil.Encode(b.PrevRandao),
-		FeeRecipient:           hexutil.Encode(b.FeeRecipient),
-		GasLimit:               fmt.Sprintf("%d", b.GasLimit),
-		BuilderIndex:           fmt.Sprintf("%d", b.BuilderIndex),
-		Slot:                   fmt.Sprintf("%d", b.Slot),
-		Value:                  fmt.Sprintf("%d", b.Value),
-		ExecutionPayment:       fmt.Sprintf("%d", b.ExecutionPayment),
-		BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
-	}
-}
-
-func PayloadAttestationFromConsensus(pa *eth.PayloadAttestation) *PayloadAttestation {
-	return &PayloadAttestation{
-		AggregationBits: hexutil.Encode(pa.AggregationBits),
-		Data:            PayloadAttestationDataFromConsensus(pa.Data),
-		Signature:       hexutil.Encode(pa.Signature),
-	}
-}
-
-func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *PayloadAttestationData {
-	return &PayloadAttestationData{
-		BeaconBlockRoot:   hexutil.Encode(d.BeaconBlockRoot),
-		Slot:              fmt.Sprintf("%d", d.Slot),
-		PayloadPresent:    d.PayloadPresent,
-		BlobDataAvailable: d.BlobDataAvailable,
-	}
-}
-
-func (b *SignedBeaconBlockGloas) ToConsensus() (*eth.SignedBeaconBlockGloas, error) {
-	if b == nil {
-		return nil, errNilValue
-	}
-
-	sig, err := bytesutil.DecodeHexWithLength(b.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-	block, err := b.Message.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Message")
-	}
-	return &eth.SignedBeaconBlockGloas{
-		Block:     block,
-		Signature: sig,
-	}, nil
-}
-
-func (b *BeaconBlockGloas) ToConsensus() (*eth.BeaconBlockGloas, error) {
-	if b == nil {
-		return nil, errNilValue
-	}
-	if b.Body == nil {
-		return nil, server.NewDecodeError(errNilValue, "Body")
-	}
-	if b.Body.Eth1Data == nil {
-		return nil, server.NewDecodeError(errNilValue, "Body.Eth1Data")
-	}
-	if b.Body.SyncAggregate == nil {
-		return nil, server.NewDecodeError(errNilValue, "Body.SyncAggregate")
-	}
-	if b.Body.SignedExecutionPayloadBid == nil {
-		return nil, server.NewDecodeError(errNilValue, "Body.SignedExecutionPayloadBid")
-	}
-
-	slot, err := strconv.ParseUint(b.Slot, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Slot")
-	}
-	proposerIndex, err := strconv.ParseUint(b.ProposerIndex, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ProposerIndex")
-	}
-	parentRoot, err := bytesutil.DecodeHexWithLength(b.ParentRoot, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ParentRoot")
-	}
-	stateRoot, err := bytesutil.DecodeHexWithLength(b.StateRoot, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "StateRoot")
-	}
-	body, err := b.Body.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Body")
-	}
-	return &eth.BeaconBlockGloas{
-		Slot:          primitives.Slot(slot),
-		ProposerIndex: primitives.ValidatorIndex(proposerIndex),
-		ParentRoot:    parentRoot,
-		StateRoot:     stateRoot,
-		Body:          body,
-	}, nil
-}
-
-func (b *BeaconBlockBodyGloas) ToConsensus() (*eth.BeaconBlockBodyGloas, error) {
-	if b == nil {
-		return nil, errNilValue
-	}
-
-	randaoReveal, err := bytesutil.DecodeHexWithLength(b.RandaoReveal, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "RandaoReveal")
-	}
-	depositRoot, err := bytesutil.DecodeHexWithLength(b.Eth1Data.DepositRoot, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Eth1Data.DepositRoot")
-	}
-	depositCount, err := strconv.ParseUint(b.Eth1Data.DepositCount, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Eth1Data.DepositCount")
-	}
-	blockHash, err := bytesutil.DecodeHexWithLength(b.Eth1Data.BlockHash, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Eth1Data.BlockHash")
-	}
-	graffiti, err := bytesutil.DecodeHexWithLength(b.Graffiti, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Graffiti")
-	}
-	proposerSlashings, err := ProposerSlashingsToConsensus(b.ProposerSlashings)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ProposerSlashings")
-	}
-	attesterSlashings, err := AttesterSlashingsElectraToConsensus(b.AttesterSlashings)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "AttesterSlashings")
-	}
-	atts, err := AttsElectraToConsensus(b.Attestations)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Attestations")
-	}
-	deposits, err := DepositsToConsensus(b.Deposits)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Deposits")
-	}
-	exits, err := SignedExitsToConsensus(b.VoluntaryExits)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "VoluntaryExits")
-	}
-	syncCommitteeBits, err := bytesutil.DecodeHexWithLength(b.SyncAggregate.SyncCommitteeBits, fieldparams.SyncAggregateSyncCommitteeBytesLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SyncAggregate.SyncCommitteeBits")
-	}
-	syncCommitteeSig, err := bytesutil.DecodeHexWithLength(b.SyncAggregate.SyncCommitteeSignature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SyncAggregate.SyncCommitteeSignature")
-	}
-	blsChanges, err := SignedBLSChangesToConsensus(b.BLSToExecutionChanges)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "BLSToExecutionChanges")
-	}
-	signedBid, err := b.SignedExecutionPayloadBid.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SignedExecutionPayloadBid")
-	}
-	payloadAttestations, err := PayloadAttestationsToConsensus(b.PayloadAttestations)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "PayloadAttestations")
-	}
-
-	return &eth.BeaconBlockBodyGloas{
-		RandaoReveal: randaoReveal,
-		Eth1Data: &eth.Eth1Data{
-			DepositRoot:  depositRoot,
-			DepositCount: depositCount,
-			BlockHash:    blockHash,
-		},
-		Graffiti:          graffiti,
-		ProposerSlashings: proposerSlashings,
-		AttesterSlashings: attesterSlashings,
-		Attestations:      atts,
-		Deposits:          deposits,
-		VoluntaryExits:    exits,
-		SyncAggregate: &eth.SyncAggregate{
-			SyncCommitteeBits:      syncCommitteeBits,
-			SyncCommitteeSignature: syncCommitteeSig,
-		},
-		BlsToExecutionChanges:     blsChanges,
-		SignedExecutionPayloadBid: signedBid,
-		PayloadAttestations:       payloadAttestations,
-	}, nil
-}
-
-func (b *SignedExecutionPayloadBid) ToConsensus() (*eth.SignedExecutionPayloadBid, error) {
-	if b == nil {
-		return nil, errNilValue
-	}
-	sig, err := bytesutil.DecodeHexWithLength(b.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-	message, err := b.Message.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Message")
-	}
-	return &eth.SignedExecutionPayloadBid{
-		Message:   message,
-		Signature: sig,
-	}, nil
-}
-
-func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
-	if b == nil {
-		return nil, errNilValue
-	}
-	parentBlockHash, err := bytesutil.DecodeHexWithLength(b.ParentBlockHash, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ParentBlockHash")
-	}
-	parentBlockRoot, err := bytesutil.DecodeHexWithLength(b.ParentBlockRoot, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ParentBlockRoot")
-	}
-	blockHash, err := bytesutil.DecodeHexWithLength(b.BlockHash, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "BlockHash")
-	}
-	prevRandao, err := bytesutil.DecodeHexWithLength(b.PrevRandao, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "PrevRandao")
-	}
-	feeRecipient, err := bytesutil.DecodeHexWithLength(b.FeeRecipient, fieldparams.FeeRecipientLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "FeeRecipient")
-	}
-	gasLimit, err := strconv.ParseUint(b.GasLimit, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "GasLimit")
-	}
-	builderIndex, err := strconv.ParseUint(b.BuilderIndex, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "BuilderIndex")
-	}
-	slot, err := strconv.ParseUint(b.Slot, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Slot")
-	}
-	value, err := strconv.ParseUint(b.Value, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Value")
-	}
-	executionPayment, err := strconv.ParseUint(b.ExecutionPayment, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ExecutionPayment")
-	}
-	blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
-	}
-	return &eth.ExecutionPayloadBid{
-		ParentBlockHash:        parentBlockHash,
-		ParentBlockRoot:        parentBlockRoot,
-		BlockHash:              blockHash,
-		PrevRandao:             prevRandao,
-		FeeRecipient:           feeRecipient,
-		GasLimit:               gasLimit,
-		BuilderIndex:           primitives.BuilderIndex(builderIndex),
-		Slot:                   primitives.Slot(slot),
-		Value:                  primitives.Gwei(value),
-		ExecutionPayment:       primitives.Gwei(executionPayment),
-		BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
-	}, nil
-}
-
-func PayloadAttestationsToConsensus(pa []*PayloadAttestation) ([]*eth.PayloadAttestation, error) {
-	if pa == nil {
-		return nil, errNilValue
-	}
-	result := make([]*eth.PayloadAttestation, len(pa))
-	for i, p := range pa {
-		converted, err := p.ToConsensus()
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
-		}
-		result[i] = converted
-	}
-	return result, nil
-}
-
-func (p *PayloadAttestation) ToConsensus() (*eth.PayloadAttestation, error) {
-	if p == nil {
-		return nil, errNilValue
-	}
-	aggregationBits, err := hexutil.Decode(p.AggregationBits)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "AggregationBits")
-	}
-	data, err := p.Data.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Data")
-	}
-	sig, err := bytesutil.DecodeHexWithLength(p.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-	return &eth.PayloadAttestation{
-		AggregationBits: aggregationBits,
-		Data:            data,
-		Signature:       sig,
-	}, nil
-}
-
-func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, error) {
-	if d == nil {
-		return nil, errNilValue
-	}
-	beaconBlockRoot, err := bytesutil.DecodeHexWithLength(d.BeaconBlockRoot, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "BeaconBlockRoot")
-	}
-	slot, err := strconv.ParseUint(d.Slot, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Slot")
-	}
-	return &eth.PayloadAttestationData{
-		BeaconBlockRoot:   beaconBlockRoot,
-		Slot:              primitives.Slot(slot),
-		PayloadPresent:    d.PayloadPresent,
-		BlobDataAvailable: d.BlobDataAvailable,
-	}, nil
-}
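The conversion helpers removed above all follow one decode/encode pattern: byte fields travel as 0x hex strings (hexutil.Encode / bytesutil.DecodeHexWithLength with an expected length from fieldparams) and integer fields travel as decimal strings (fmt.Sprintf("%d", ...) / strconv.ParseUint) before being cast to typed primitives such as primitives.Slot. A small, self-contained sketch of that round trip using only hexutil and strconv; the length checks and server.NewDecodeError wrapping of the real helpers are omitted:

package main

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// API string -> raw consensus values.
	rootBytes, err := hexutil.Decode("0x00ff") // illustrative, not a full 32-byte root
	if err != nil {
		panic(err)
	}
	slot, err := strconv.ParseUint("42", 10, 64)
	if err != nil {
		panic(err)
	}

	// Raw consensus values -> API strings, mirroring the *FromConsensus helpers.
	fmt.Println(hexutil.Encode(rootBytes), fmt.Sprintf("%d", slot))
	// Output: 0x00ff 42
}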
@@ -3,6 +3,7 @@ package altair
 import (
 	"context"
 
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v7/config/params"
@@ -23,7 +24,7 @@ func ProcessPreGenesisDeposits(
 	if err != nil {
 		return nil, errors.Wrap(err, "could not process deposit")
 	}
-	beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
+	beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
 	if err != nil {
 		return nil, err
 	}
@@ -36,7 +37,7 @@ func ProcessDeposits(
 	beaconState state.BeaconState,
 	deposits []*ethpb.Deposit,
 ) (state.BeaconState, error) {
-	allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
+	allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
 	if err != nil {
 		return nil, err
 	}
@@ -81,7 +82,7 @@ func ProcessDeposits(
 //	signature=deposit.data.signature,
 //	)
 func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
-	if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
+	if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
 		if deposit == nil || deposit.Data == nil {
 			return nil, err
 		}
@@ -121,7 +122,7 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSi
 	index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
 	if !ok {
 		if !allSignaturesVerified {
-			valid, err := helpers.IsValidDepositSignature(data)
+			valid, err := blocks.IsValidDepositSignature(data)
 			if err != nil {
 				return nil, err
 			}
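These call-site updates track a move of the deposit helpers (VerifyDeposit, IsValidDepositSignature, BatchVerifyDepositsSignatures, ActivateValidatorWithEffectiveBalance) out of beacon-chain/core/helpers and into beacon-chain/core/blocks, so the altair package now imports and calls blocks. A hedged sketch of the resulting call shape; the wrapper function and package name are hypothetical, only the blocks.* calls come from the hunks above:

package altairsketch

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// verifyDepositSketch shows the post-refactor call path: deposit verification
// now lives in the blocks package rather than helpers.
func verifyDepositSketch(st state.BeaconState, dep *ethpb.Deposit) (bool, error) {
	// Merkle proof of the deposit against the state's eth1 data.
	if err := blocks.VerifyDeposit(st, dep); err != nil {
		return false, err
	}
	// BLS signature check over the deposit data.
	return blocks.IsValidDepositSignature(dep.Data)
}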
@@ -5,6 +5,7 @@ go_library(
     srcs = [
         "attestation.go",
         "attester_slashing.go",
+        "deposit.go",
         "error.go",
         "eth1_data.go",
         "exit.go",
@@ -20,7 +21,6 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks",
     visibility = ["//visibility:public"],
     deps = [
-        "//beacon-chain/core/gloas:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
@@ -33,6 +33,8 @@ go_library(
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//container/slice:go_default_library",
+        "//container/trie:go_default_library",
+        "//contracts/deposit:go_default_library",
         "//crypto/bls:go_default_library",
         "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
@@ -59,6 +61,7 @@ go_test(
        "attester_slashing_test.go",
        "block_operations_fuzz_test.go",
        "block_regression_test.go",
+       "deposit_test.go",
        "eth1_data_test.go",
        "exit_test.go",
        "exports_test.go",
@@ -87,6 +90,7 @@ go_test(
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
+       "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//crypto/hash:go_default_library",
@@ -3,7 +3,6 @@ package blocks
 import (
 	"testing"
 
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -319,7 +318,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
 		fuzzer.Fuzz(deposit)
 		s, err := state_native.InitializeFromProtoUnsafePhase0(state)
 		require.NoError(t, err)
-		err = helpers.VerifyDeposit(s, deposit)
+		err = VerifyDeposit(s, deposit)
 		_ = err
 		fuzz.FreeMemory(i)
 	}
@@ -1,4 +1,4 @@
-package helpers
+package blocks
 
 import (
 	"context"
@@ -1,9 +1,9 @@
-package helpers_test
+package blocks_test
 
 import (
 	"testing"
 
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -45,7 +45,7 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
 
 	deposit.Proof = proof
 	require.NoError(t, err)
-	verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
+	verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
 	require.NoError(t, err)
 	require.Equal(t, true, verified)
 }
@@ -68,7 +68,7 @@ func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {
 
 	deposit.Proof = proof
 	require.NoError(t, err)
-	verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
+	verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
 	require.NoError(t, err)
 	require.Equal(t, false, verified)
 }
@@ -99,7 +99,7 @@ func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
 	})
 	require.NoError(t, err)
 	want := "deposit root did not verify"
-	err = helpers.VerifyDeposit(beaconState, deposit)
+	err = blocks.VerifyDeposit(beaconState, deposit)
 	require.ErrorContains(t, want, err)
 }
 
@@ -123,7 +123,7 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
 	require.NoError(t, err)
 	sig := sk.Sign(sr[:])
 	depositData.Signature = sig.Marshal()
-	valid, err := helpers.IsValidDepositSignature(depositData)
+	valid, err := blocks.IsValidDepositSignature(depositData)
 	require.NoError(t, err)
 	require.Equal(t, true, valid)
 }
@@ -163,7 +163,7 @@ func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
 	sig2 := sk2.Sign(sr2[:])
 	pendingDeposit2.Signature = sig2.Marshal()
 
-	verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
+	verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
 	require.NoError(t, err)
 	require.Equal(t, true, verified)
 }
@@ -174,7 +174,7 @@ func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
 		WithdrawalCredentials: make([]byte, 32),
 		Signature:             make([]byte, 96),
 	}
-	verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
+	verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
 	require.NoError(t, err)
 	require.Equal(t, false, verified)
 }
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -12,7 +11,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
-	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
 	"google.golang.org/protobuf/proto"
@@ -128,16 +126,7 @@ func processProposerSlashing(
 	if exitInfo == nil {
 		return nil, errors.New("exit info is required to process proposer slashing")
 	}
-
 	var err error
-	// [New in Gloas:EIP7732]: remove the BuilderPendingPayment corresponding to the slashed proposer within 2 epoch window
-	if beaconState.Version() >= version.Gloas {
-		err = gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header)
-		if err != nil {
-			return nil, err
-		}
-	}
-
 	beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
 	if err != nil {
 		return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
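The removed branch was the only Gloas-specific step in processProposerSlashing: under EIP-7732 the slashed proposer's BuilderPendingPayment inside the two-epoch window is dropped before the validator is slashed. A condensed restatement of the deleted lines, shown only for reference (the real code assigned to an already-declared err; after this change the function calls validators.SlashValidator unconditionally):

	// [New in Gloas:EIP7732] (deleted by this diff): clear the slashed proposer's
	// pending builder payment before slashing.
	if beaconState.Version() >= version.Gloas {
		if err := gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header); err != nil {
			return nil, err
		}
	}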
@@ -14,11 +14,13 @@ go_library(
     srcs = [
         "transition.go",
        "upgrade.go",
        "validator.go",
+       "withdrawals.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/altair:go_default_library",
+       "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/epoch:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
@@ -41,6 +43,8 @@ go_library(
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
+       "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
+       "@com_github_ethereum_go_ethereum//common/math:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -60,11 +64,13 @@ go_test(
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",
+       "withdrawals_test.go",
    ],
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
+       "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
@@ -77,12 +83,16 @@ go_test(
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
+       "//testing/assert:go_default_library",
        "//testing/fuzz:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
+       "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
+       "@com_github_sirupsen_logrus//:go_default_library",
+       "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
 )
@@ -3,14 +3,19 @@ package electra
 import (
 	"bytes"
 	"context"
+	"fmt"
 
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	"github.com/OffchainLabs/prysm/v7/config/params"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
 	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
+	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/pkg/errors"
 )
 
@@ -90,6 +95,217 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
 	return nil
 }
 
+// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
+// calls to the beacon state.
+//
+//	def process_consolidation_request(
+//	    state: BeaconState,
+//	    consolidation_request: ConsolidationRequest
+//	) -> None:
+//	    if is_valid_switch_to_compounding_request(state, consolidation_request):
+//	        validator_pubkeys = [v.pubkey for v in state.validators]
+//	        request_source_pubkey = consolidation_request.source_pubkey
+//	        source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
+//	        switch_to_compounding_validator(state, source_index)
+//	        return
+//
+//	    # Verify that source != target, so a consolidation cannot be used as an exit.
+//	    if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
+//	        return
+//	    # If the pending consolidations queue is full, consolidation requests are ignored
+//	    if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
+//	        return
+//	    # If there is too little available consolidation churn limit, consolidation requests are ignored
+//	    if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
+//	        return
+//
+//	    validator_pubkeys = [v.pubkey for v in state.validators]
+//	    # Verify pubkeys exists
+//	    request_source_pubkey = consolidation_request.source_pubkey
+//	    request_target_pubkey = consolidation_request.target_pubkey
+//	    if request_source_pubkey not in validator_pubkeys:
+//	        return
+//	    if request_target_pubkey not in validator_pubkeys:
+//	        return
+//	    source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
+//	    target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
+//	    source_validator = state.validators[source_index]
+//	    target_validator = state.validators[target_index]
+//
+//	    # Verify source withdrawal credentials
+//	    has_correct_credential = has_execution_withdrawal_credential(source_validator)
+//	    is_correct_source_address = (
+//	        source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
+//	    )
+//	    if not (has_correct_credential and is_correct_source_address):
+//	        return
+//
+//	    # Verify that target has compounding withdrawal credentials
+//	    if not has_compounding_withdrawal_credential(target_validator):
+//	        return
+//
+//	    # Verify the source and the target are active
+//	    current_epoch = get_current_epoch(state)
+//	    if not is_active_validator(source_validator, current_epoch):
+//	        return
+//	    if not is_active_validator(target_validator, current_epoch):
+//	        return
+//	    # Verify exits for source and target have not been initiated
+//	    if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
+//	        return
+//	    if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
+//	        return
+//
+//	    # Verify the source has been active long enough
+//	    if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
+//	        return
+//
+//	    # Verify the source has no pending withdrawals in the queue
+//	    if get_pending_balance_to_withdraw(state, source_index) > 0:
+//	        return
+//	    # Initiate source validator exit and append pending consolidation
+//	    source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
+//	        state, source_validator.effective_balance
+//	    )
+//	    source_validator.withdrawable_epoch = Epoch(
+//	        source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+//	    )
+//	    state.pending_consolidations.append(PendingConsolidation(
+//	        source_index=source_index,
+//	        target_index=target_index
+//	    ))
+func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
+	if len(reqs) == 0 || st == nil {
+		return nil
+	}
+	curEpoch := slots.ToEpoch(st.Slot())
+	ffe := params.BeaconConfig().FarFutureEpoch
+	minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
+	pcLimit := params.BeaconConfig().PendingConsolidationsLimit
+
+	for _, cr := range reqs {
+		if cr == nil {
+			return errors.New("nil consolidation request")
+		}
+		if ctx.Err() != nil {
+			return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
+		}
+		if IsValidSwitchToCompoundingRequest(st, cr) {
+			srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
+			if !ok {
+				log.Error("Failed to find source validator index")
+				continue
+			}
+			if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
+				log.WithError(err).Error("Failed to switch to compounding validator")
+			}
+			continue
+		}
+		sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
+		targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
+		if sourcePubkey == targetPubkey {
+			continue
+		}
+
+		if npc, err := st.NumPendingConsolidations(); err != nil {
+			return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
+		} else if npc >= pcLimit {
+			continue
+		}
+
+		activeBal, err := helpers.TotalActiveBalance(st)
+		if err != nil {
+			return err
+		}
+		churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
+		if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
+			continue
+		}
+
+		srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
+		if !ok {
+			continue
+		}
+		tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
+		if !ok {
+			continue
+		}
+
+		srcV, err := st.ValidatorAtIndex(srcIdx)
+		if err != nil {
+			return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
+		}
+
+		roSrcV, err := state_native.NewValidator(srcV)
+		if err != nil {
+			return err
+		}
+
+		tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
+		if err != nil {
+			return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
+		}
+
+		// Verify source withdrawal credentials
+		if !roSrcV.HasExecutionWithdrawalCredentials() {
+			continue
+		}
+		// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
+		if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
+			continue
+		}
+
+		// Target validator must have their withdrawal credentials set appropriately.
+		if !tgtV.HasCompoundingWithdrawalCredentials() {
+			continue
+		}
+
+		// Both validators must be active.
+		if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
+			continue
+		}
+		// Neither validator is exiting.
+		if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
+			continue
+		}
+
+		e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
+		if overflow {
+			log.Error("Overflow when adding activation epoch and shard committee period")
+			continue
+		}
+		if uint64(curEpoch) < e {
+			continue
+		}
+		hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
+		if err != nil {
+			log.WithError(err).Error("Failed to fetch pending balance to withdraw")
+			continue
+		}
+		if hasBal {
+			continue
+		}
+
+		// Initiate the exit of the source validator.
+		exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
+		if err != nil {
+			log.WithError(err).Error("Failed to compute consolidation epoch")
+			continue
+		}
+		srcV.ExitEpoch = exitEpoch
+		srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
+		if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
+			return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
+		}
+
+		if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
+			return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
+		}
+	}
+
+	return nil
+}
+
 // IsValidSwitchToCompoundingRequest returns true if the given consolidation request is valid for switching to compounding.
 //
 // Spec code:
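ProcessConsolidationRequests mutates the passed state in place and only returns an error for conditions that should abort processing; per-request validation failures are skipped with continue. A minimal sketch of how a caller might drive it, assuming the requests were already extracted from an execution payload's requests bundle; the wrapper name and error message are hypothetical, the import paths come from this diff:

package callersketch

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	"github.com/pkg/errors"
)

// applyConsolidations is a hypothetical wrapper around the function added above.
func applyConsolidations(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
	if err := electra.ProcessConsolidationRequests(ctx, st, reqs); err != nil {
		return errors.Wrap(err, "could not process consolidation requests")
	}
	return nil
}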
@@ -8,6 +8,8 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	"github.com/OffchainLabs/prysm/v7/config/params"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -201,6 +203,275 @@ func TestProcessPendingConsolidations(t *testing.T) {
 	}
 }
 
+func TestProcessConsolidationRequests(t *testing.T) {
+	tests := []struct {
+		name     string
+		state    state.BeaconState
+		reqs     []*enginev1.ConsolidationRequest
+		validate func(*testing.T, state.BeaconState)
+		wantErr  bool
+	}{
+		{
+			name: "nil request",
+			state: func() state.BeaconState {
+				st := &eth.BeaconStateElectra{}
+				s, err := state_native.InitializeFromProtoElectra(st)
+				require.NoError(t, err)
+				return s
+			}(),
+			reqs: []*enginev1.ConsolidationRequest{nil},
+			validate: func(t *testing.T, st state.BeaconState) {
+				require.DeepEqual(t, st, st)
+			},
+			wantErr: true,
+		},
+		{
+			name: "one valid request",
+			state: func() state.BeaconState {
+				st := &eth.BeaconStateElectra{
+					Slot:       params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
+					Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
+				}
+				// Validator scenario setup. See comments in reqs section.
+				st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
+				st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
+				st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
+				st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
+				st.Validators[13].ExitEpoch = 10
+				st.Validators[16].ExitEpoch = 10
+				st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
+					{
+						Index:  17,
+						Amount: 100,
+					},
+				}
+				s, err := state_native.InitializeFromProtoElectra(st)
+				require.NoError(t, err)
+				return s
+			}(),
+			reqs: []*enginev1.ConsolidationRequest{
+				// Source doesn't have withdrawal credentials.
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
+					SourcePubkey:  []byte("val_3"),
+					TargetPubkey:  []byte("val_4"),
+				},
+				// Source withdrawal credentials don't match the consolidation address.
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
+					SourcePubkey:  []byte("val_5"),
+					TargetPubkey:  []byte("val_6"),
+				},
+				// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
+					SourcePubkey:  []byte("val_7"),
+					TargetPubkey:  []byte("val_8"),
+				},
+				// Source is inactive.
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
+					SourcePubkey:  []byte("val_9"),
+					TargetPubkey:  []byte("val_10"),
+				},
+				// Target is inactive.
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
+					SourcePubkey:  []byte("val_11"),
+					TargetPubkey:  []byte("val_12"),
+				},
+				// Source is exiting.
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
+					SourcePubkey:  []byte("val_13"),
+					TargetPubkey:  []byte("val_14"),
+				},
+				// Target is exiting.
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
+					SourcePubkey:  []byte("val_15"),
+					TargetPubkey:  []byte("val_16"),
+				},
+				// Source doesn't exist
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
+					SourcePubkey:  []byte("INVALID"),
+					TargetPubkey:  []byte("val_0"),
+				},
+				// Target doesn't exist
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
+					SourcePubkey:  []byte("val_0"),
+					TargetPubkey:  []byte("INVALID"),
+				},
+				// Source == target
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
+					SourcePubkey:  []byte("val_0"),
+					TargetPubkey:  []byte("val_0"),
+				},
+				// Has pending partial withdrawal
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
+					SourcePubkey:  []byte("val_17"),
+					TargetPubkey:  []byte("val_1"),
+				},
+				// Valid consolidation request. This should be last to ensure invalid requests do
+				// not end the processing early.
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
+					SourcePubkey:  []byte("val_1"),
+					TargetPubkey:  []byte("val_2"),
+				},
+			},
+			validate: func(t *testing.T, st state.BeaconState) {
+				// Verify a pending consolidation is created.
+				numPC, err := st.NumPendingConsolidations()
+				require.NoError(t, err)
+				require.Equal(t, uint64(1), numPC)
+				pcs, err := st.PendingConsolidations()
+				require.NoError(t, err)
+				require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
+				require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)
+
+				// Verify the source validator is exiting.
+				src, err := st.ValidatorAtIndex(1)
+				require.NoError(t, err)
+				require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
+				require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
+			},
+		},
+		{
+			name: "pending consolidations limit reached",
+			state: func() state.BeaconState {
+				st := &eth.BeaconStateElectra{
+					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
+					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
+				}
+				s, err := state_native.InitializeFromProtoElectra(st)
+				require.NoError(t, err)
+				return s
+			}(),
+			reqs: []*enginev1.ConsolidationRequest{
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
+					SourcePubkey:  []byte("val_1"),
+					TargetPubkey:  []byte("val_2"),
+				},
+			},
+			validate: func(t *testing.T, st state.BeaconState) {
+				// Verify no pending consolidation is created.
+				numPC, err := st.NumPendingConsolidations()
+				require.NoError(t, err)
+				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
+
+				// Verify the source validator is not exiting.
+				src, err := st.ValidatorAtIndex(1)
+				require.NoError(t, err)
+				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
+				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
+			},
+		},
+		{
+			name: "pending consolidations limit reached during processing",
+			state: func() state.BeaconState {
+				st := &eth.BeaconStateElectra{
+					Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
+					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
+					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
+				}
+				s, err := state_native.InitializeFromProtoElectra(st)
+				require.NoError(t, err)
+				return s
+			}(),
+			reqs: []*enginev1.ConsolidationRequest{
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
+					SourcePubkey:  []byte("val_1"),
+					TargetPubkey:  []byte("val_2"),
+				},
+				{
+					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
+					SourcePubkey:  []byte("val_3"),
+					TargetPubkey:  []byte("val_4"),
+				},
+			},
+			validate: func(t *testing.T, st state.BeaconState) {
+				// Verify a pending consolidation is created.
+				numPC, err := st.NumPendingConsolidations()
+				require.NoError(t, err)
+				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
+
+				// The first consolidation was appended.
+				pcs, err := st.PendingConsolidations()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
|
||||||
|
require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)
|
||||||
|
|
||||||
|
// Verify the second source validator is not exiting.
|
||||||
|
src, err := st.ValidatorAtIndex(3)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
|
||||||
|
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pending consolidations limit reached and compounded consolidation after",
|
||||||
|
state: func() state.BeaconState {
|
||||||
|
st := ð.BeaconStateElectra{
|
||||||
|
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
|
||||||
|
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
|
||||||
|
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
|
||||||
|
}
|
||||||
|
// To allow compounding consolidation requests.
|
||||||
|
st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||||
|
s, err := state_native.InitializeFromProtoElectra(st)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return s
|
||||||
|
}(),
|
||||||
|
reqs: []*enginev1.ConsolidationRequest{
|
||||||
|
{
|
||||||
|
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
|
||||||
|
SourcePubkey: []byte("val_1"),
|
||||||
|
TargetPubkey: []byte("val_2"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
|
||||||
|
SourcePubkey: []byte("val_3"),
|
||||||
|
TargetPubkey: []byte("val_3"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
validate: func(t *testing.T, st state.BeaconState) {
|
||||||
|
// Verify a pending consolidation is created.
|
||||||
|
numPC, err := st.NumPendingConsolidations()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
|
||||||
|
|
||||||
|
// Verify that the last consolidation was included
|
||||||
|
src, err := st.ValidatorAtIndex(3)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !tt.wantErr {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
if tt.validate != nil {
|
||||||
|
tt.validate(t, tt.state)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
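The first validate callback above pins down the relation this commit leans on: a consolidated source validator becomes withdrawable exactly MinValidatorWithdrawabilityDelay epochs after its exit epoch. A minimal, self-contained sketch of that relation follows; the delay constant is illustrative, whereas Prysm reads it from params.BeaconConfig().

package main

import "fmt"

// Sketch only: epochs are plain uint64 here and the delay value is
// illustrative, not the configured network value.
const minValidatorWithdrawabilityDelay = 256

// withdrawableEpochAfterExit mirrors the assertion in the test:
// withdrawable_epoch = exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY.
func withdrawableEpochAfterExit(exitEpoch uint64) uint64 {
	return exitEpoch + minValidatorWithdrawabilityDelay
}

func main() {
	exit := uint64(1000)
	fmt.Println(withdrawableEpochAfterExit(exit)) // 1256
}
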
func TestIsValidSwitchToCompoundingRequest(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, 1)
	t.Run("nil source pubkey", func(t *testing.T) {
@@ -3,6 +3,7 @@ package electra
 import (
 	"context"
 
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -12,6 +13,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/contracts/deposit"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
+	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -35,7 +37,7 @@ func ProcessDeposits(
 	defer span.End()
 	// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
 	// individual deposits with signature verification enabled.
-	allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
+	allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
 	}
@@ -80,7 +82,7 @@ func ProcessDeposits(
 //	signature=deposit.data.signature,
 //	)
 func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
-	if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
+	if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
 		if deposit == nil || deposit.Data == nil {
 			return nil, err
 		}
@@ -375,7 +377,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
 		return nil
 	}
 
-	allSignaturesVerified, err := helpers.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
+	allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
 	if err != nil {
 		return errors.Wrap(err, "batch signature verification failed")
 	}
@@ -384,7 +386,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
 		validSig := allSignaturesVerified
 
 		if !allSignaturesVerified {
-			validSig, err = helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
+			validSig, err = blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
 				PublicKey:             bytesutil.SafeCopyBytes(pd.PublicKey),
 				WithdrawalCredentials: bytesutil.SafeCopyBytes(pd.WithdrawalCredentials),
 				Amount:                pd.Amount,
@@ -439,7 +441,7 @@ func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *eth
 	defer span.End()
 	index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
 	if !ok {
-		verified, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
+		verified, err := blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
 			PublicKey:             bytesutil.SafeCopyBytes(deposit.PublicKey),
 			WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
 			Amount:                deposit.Amount,
@@ -535,3 +537,62 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
 	validator.EffectiveBalance = min(amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement), maxEffectiveBalance)
 	return validator, nil
 }
+
+// ProcessDepositRequests is a function as part of electra to process execution layer deposits
+func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
+	_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
+	defer span.End()
+
+	if len(requests) == 0 {
+		return beaconState, nil
+	}
+
+	var err error
+	for _, receipt := range requests {
+		beaconState, err = processDepositRequest(beaconState, receipt)
+		if err != nil {
+			return nil, errors.Wrap(err, "could not apply deposit request")
+		}
+	}
+	return beaconState, nil
+}
+
+// processDepositRequest processes the specific deposit request
+// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
+//
+//	# Set deposit request start index
+//	if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
+//	    state.deposit_requests_start_index = deposit_request.index
+//
+//	# Create pending deposit
+//	state.pending_deposits.append(PendingDeposit(
+//	    pubkey=deposit_request.pubkey,
+//	    withdrawal_credentials=deposit_request.withdrawal_credentials,
+//	    amount=deposit_request.amount,
+//	    signature=deposit_request.signature,
+//	    slot=state.slot,
+//	))
+func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (state.BeaconState, error) {
+	requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get deposit requests start index")
+	}
+	if request == nil {
+		return nil, errors.New("nil deposit request")
+	}
+	if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
+		if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
+			return nil, errors.Wrap(err, "could not set deposit requests start index")
+		}
+	}
+	if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
+		PublicKey:             bytesutil.SafeCopyBytes(request.Pubkey),
+		WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
+		Amount:                request.Amount,
+		Signature:             bytesutil.SafeCopyBytes(request.Signature),
+		Slot:                  beaconState.Slot(),
+	}); err != nil {
+		return nil, errors.Wrap(err, "could not append deposit request")
+	}
+	return beaconState, nil
+}
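The process_deposit_request pseudocode above copies every field of the execution-layer request into a PendingDeposit and stamps it with the current slot. Below is a minimal sketch of that mapping using plain stand-in structs; it is not Prysm's generated protobuf code or state API, just the field-for-field copy the spec describes.

package main

import "fmt"

// Plain stand-ins for the protobuf messages; a sketch, not Prysm's types.
type DepositRequest struct {
	Pubkey                []byte
	WithdrawalCredentials []byte
	Amount                uint64
	Signature             []byte
	Index                 uint64
}

type PendingDeposit struct {
	PublicKey             []byte
	WithdrawalCredentials []byte
	Amount                uint64
	Signature             []byte
	Slot                  uint64
}

// toPendingDeposit mirrors the append in process_deposit_request: each field is
// copied from the request and the deposit is stamped with the state's slot.
func toPendingDeposit(req DepositRequest, stateSlot uint64) PendingDeposit {
	return PendingDeposit{
		PublicKey:             append([]byte(nil), req.Pubkey...),
		WithdrawalCredentials: append([]byte(nil), req.WithdrawalCredentials...),
		Amount:                req.Amount,
		Signature:             append([]byte(nil), req.Signature...),
		Slot:                  stateSlot,
	}
}

func main() {
	pd := toPendingDeposit(DepositRequest{Amount: 32_000_000_000}, 42)
	fmt.Println(pd.Amount, pd.Slot) // 32000000000 42
}
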
@@ -6,6 +6,7 @@ import (
 
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
@@ -14,6 +15,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/crypto/bls"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
+	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/testing/require"
 	"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -359,6 +361,60 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
 	})
 }
 
+func TestProcessDepositRequests(t *testing.T) {
+	st, _ := util.DeterministicGenesisStateElectra(t, 1)
+	sk, err := bls.RandKey()
+	require.NoError(t, err)
+	require.NoError(t, st.SetDepositRequestsStartIndex(1))
+
+	t.Run("empty requests continues", func(t *testing.T) {
+		newSt, err := electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
+		require.NoError(t, err)
+		require.DeepEqual(t, newSt, st)
+	})
+	t.Run("nil request errors", func(t *testing.T) {
+		_, err = electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
+		require.ErrorContains(t, "nil deposit request", err)
+	})
+
+	vals := st.Validators()
+	vals[0].PublicKey = sk.PublicKey().Marshal()
+	vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
+	require.NoError(t, st.SetValidators(vals))
+	bals := st.Balances()
+	bals[0] = params.BeaconConfig().MinActivationBalance + 2000
+	require.NoError(t, st.SetBalances(bals))
+	require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pbd as the determinitstic state populates this already
+	withdrawalCred := make([]byte, 32)
+	withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
+	depositMessage := &eth.DepositMessage{
+		PublicKey:             sk.PublicKey().Marshal(),
+		Amount:                1000,
+		WithdrawalCredentials: withdrawalCred,
+	}
+	domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
+	require.NoError(t, err)
+	sr, err := signing.ComputeSigningRoot(depositMessage, domain)
+	require.NoError(t, err)
+	sig := sk.Sign(sr[:])
+	requests := []*enginev1.DepositRequest{
+		{
+			Pubkey:                depositMessage.PublicKey,
+			Index:                 0,
+			WithdrawalCredentials: depositMessage.WithdrawalCredentials,
+			Amount:                depositMessage.Amount,
+			Signature:             sig.Marshal(),
+		},
+	}
+	st, err = electra.ProcessDepositRequests(t.Context(), st, requests)
+	require.NoError(t, err)
+
+	pbd, err := st.PendingDeposits()
+	require.NoError(t, err)
+	require.Equal(t, 1, len(pbd))
+	require.Equal(t, uint64(1000), pbd[0].Amount)
+}
+
 func TestProcessDeposit_Electra_Simple(t *testing.T) {
 	deps, _, err := util.DeterministicDepositsAndKeysSameValidator(3)
 	require.NoError(t, err)
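The deposits.go hunks above route signature checks through the blocks package while keeping the batch-first strategy: try to verify every deposit signature in one aggregate check, and only fall back to per-deposit verification when that check fails. A minimal sketch of the control flow follows; the verifier callbacks are hypothetical, not the blocks package API.

package main

import "fmt"

// verifyAll sketches the batch-then-fallback pattern used by ProcessDeposits:
// batchVerify and verifyOne are hypothetical callbacks supplied by the caller.
func verifyAll(sigs [][]byte, batchVerify func([][]byte) bool, verifyOne func([]byte) bool) []bool {
	ok := make([]bool, len(sigs))
	if batchVerify(sigs) {
		// Fast path: one aggregate check covers every deposit.
		for i := range ok {
			ok[i] = true
		}
		return ok
	}
	// Slow path: re-check each signature individually so a single bad
	// deposit does not invalidate the rest.
	for i, s := range sigs {
		ok[i] = verifyOne(s)
	}
	return ok
}

func main() {
	sigs := [][]byte{{0x01}, {0x02}}
	res := verifyAll(sigs,
		func([][]byte) bool { return false },        // pretend the batch check failed
		func(s []byte) bool { return s[0] == 0x01 }, // accept only the first signature
	)
	fmt.Println(res) // [true false]
}
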
@@ -1,4 +1,4 @@
-package requests
+package electra
 
 import (
 	"bytes"
@@ -88,7 +88,7 @@ import (
 //	withdrawable_epoch=withdrawable_epoch,
 //	))
 func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
-	ctx, span := trace.StartSpan(ctx, "requests.ProcessWithdrawalRequests")
+	ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
 	defer span.End()
 	currentEpoch := slots.ToEpoch(st.Slot())
 	if len(wrs) == 0 {
@@ -1,9 +1,9 @@
-package requests_test
+package electra_test
 
 import (
 	"testing"
 
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -289,7 +289,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 
-			got, err := requests.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
+			got, err := electra.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -2,29 +2,18 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 
 go_library(
     name = "go_default_library",
-    srcs = [
-        "bid.go",
-        "payload_attestation.go",
-        "pending_payment.go",
-        "proposer_slashing.go",
-    ],
+    srcs = ["bid.go"],
     importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
     visibility = ["//visibility:public"],
     deps = [
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
-        "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/state:go_default_library",
-        "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
-        "//consensus-types:go_default_library",
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/interfaces:go_default_library",
-        "//consensus-types/primitives:go_default_library",
         "//crypto/bls:go_default_library",
         "//crypto/bls/common:go_default_library",
-        "//crypto/hash:go_default_library",
-        "//encoding/bytesutil:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
@@ -33,20 +22,12 @@ go_library(
 
 go_test(
     name = "go_default_test",
-    srcs = [
-        "bid_test.go",
-        "payload_attestation_test.go",
-        "pending_payment_test.go",
-        "proposer_slashing_test.go",
-    ],
+    srcs = ["bid_test.go"],
     embed = [":go_default_library"],
     deps = [
-        "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
-        "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
         "//config/params:go_default_library",
-        "//consensus-types/blocks:go_default_library",
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//crypto/bls:go_default_library",
@@ -57,10 +38,8 @@ go_test(
         "//proto/prysm/v1alpha1/validator-client:go_default_library",
         "//runtime/version:go_default_library",
         "//testing/require:go_default_library",
-        "//testing/util:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_prysmaticlabs_fastssz//:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
     ],
 )
@@ -1,253 +0,0 @@
|
|||||||
package gloas
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"slices"
|
|
||||||
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
|
||||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
||||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
|
||||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProcessPayloadAttestations validates payload attestations in a block body.
|
|
||||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
|
||||||
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
|
|
||||||
//
|
|
||||||
// data = payload_attestation.data
|
|
||||||
// assert data.beacon_block_root == state.latest_block_header.parent_root
|
|
||||||
// assert data.slot + 1 == state.slot
|
|
||||||
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
|
|
||||||
// assert is_valid_indexed_payload_attestation(state, indexed)
|
|
||||||
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
|
|
||||||
atts, err := body.PayloadAttestations()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "failed to get payload attestations from block body")
|
|
||||||
}
|
|
||||||
if len(atts) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
header := st.LatestBlockHeader()
|
|
||||||
|
|
||||||
for i, att := range atts {
|
|
||||||
data := att.Data
|
|
||||||
if !bytes.Equal(data.BeaconBlockRoot, header.ParentRoot) {
|
|
||||||
return fmt.Errorf("payload attestation %d has wrong parent: got %x want %x", i, data.BeaconBlockRoot, header.ParentRoot)
|
|
||||||
}
|
|
||||||
|
|
||||||
dataSlot, err := data.Slot.SafeAdd(1)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "payload attestation %d has invalid slot addition", i)
|
|
||||||
}
|
|
||||||
if dataSlot != st.Slot() {
|
|
||||||
return fmt.Errorf("payload attestation %d has wrong slot: got %d want %d", i, data.Slot+1, st.Slot())
|
|
||||||
}
|
|
||||||
|
|
||||||
indexed, err := indexedPayloadAttestation(ctx, st, att)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "payload attestation %d failed to convert to indexed form", i)
|
|
||||||
}
|
|
||||||
if err := validIndexedPayloadAttestation(st, indexed); err != nil {
|
|
||||||
return errors.Wrapf(err, "payload attestation %d failed to verify indexed form", i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// indexedPayloadAttestation converts a payload attestation into its indexed form.
|
|
||||||
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
|
|
||||||
committee, err := payloadCommittee(ctx, st, att.Data.Slot)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
indices := make([]primitives.ValidatorIndex, 0, len(committee))
|
|
||||||
for i, idx := range committee {
|
|
||||||
if att.AggregationBits.BitAt(uint64(i)) {
|
|
||||||
indices = append(indices, idx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
slices.Sort(indices)
|
|
||||||
|
|
||||||
return &consensus_types.IndexedPayloadAttestation{
|
|
||||||
AttestingIndices: indices,
|
|
||||||
Data: att.Data,
|
|
||||||
Signature: att.Signature,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
|
|
||||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
|
||||||
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
|
|
||||||
//
|
|
||||||
// epoch = compute_epoch_at_slot(slot)
|
|
||||||
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
|
|
||||||
// indices = []
|
|
||||||
// committees_per_slot = get_committee_count_per_slot(state, epoch)
|
|
||||||
// for i in range(committees_per_slot):
|
|
||||||
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
|
|
||||||
// indices.extend(committee)
|
|
||||||
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
|
|
||||||
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
|
|
||||||
epoch := slots.ToEpoch(slot)
|
|
||||||
seed, err := ptcSeed(st, epoch, slot)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
activeCount, err := helpers.ActiveValidatorCount(ctx, st, epoch)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
|
|
||||||
out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
|
|
||||||
|
|
||||||
for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
|
|
||||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
|
|
||||||
}
|
|
||||||
out = append(out, committee...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ptcSeed computes the seed for the payload timeliness committee.
|
|
||||||
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
|
|
||||||
seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
|
|
||||||
if err != nil {
|
|
||||||
return [32]byte{}, err
|
|
||||||
}
|
|
||||||
return hash.Hash(append(seed[:], bytesutil.Bytes8(uint64(slot))...)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// selectByBalance selects a balance-weighted subset of input candidates.
|
|
||||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
|
||||||
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
|
|
||||||
// Note: shuffle_indices is false for PTC.
|
|
||||||
//
|
|
||||||
// total = len(indices); selected = []; i = 0
|
|
||||||
// while len(selected) < size:
|
|
||||||
// next = i % total
|
|
||||||
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
|
|
||||||
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
|
|
||||||
// selected.append(indices[next])
|
|
||||||
// i += 1
|
|
||||||
func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
|
|
||||||
if len(candidates) == 0 {
|
|
||||||
return nil, errors.New("no candidates for balance weighted selection")
|
|
||||||
}
|
|
||||||
|
|
||||||
hashFunc := hash.CustomSHA256Hasher()
|
|
||||||
// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
|
|
||||||
var buf [40]byte
|
|
||||||
copy(buf[:], seed[:])
|
|
||||||
maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
|
|
||||||
|
|
||||||
selected := make([]primitives.ValidatorIndex, 0, count)
|
|
||||||
total := uint64(len(candidates))
|
|
||||||
for i := uint64(0); uint64(len(selected)) < count; i++ {
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return nil, ctx.Err()
|
|
||||||
}
|
|
||||||
idx := candidates[i%total]
|
|
||||||
ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
selected = append(selected, idx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return selected, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// acceptByBalance determines if a validator is accepted based on its effective balance.
|
|
||||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
|
||||||
// compute_balance_weighted_acceptance(state, index, seed, i):
|
|
||||||
//
|
|
||||||
// MAX_RANDOM_VALUE = 2**16 - 1
|
|
||||||
// random_bytes = hash(seed + uint_to_bytes(i // 16))
|
|
||||||
// offset = i % 16 * 2
|
|
||||||
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
|
|
||||||
// effective_balance = state.validators[index].effective_balance
|
|
||||||
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
|
|
||||||
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
|
|
||||||
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
|
|
||||||
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
|
|
||||||
random := hashFunc(seedBuf)
|
|
||||||
offset := (round % 16) * 2
|
|
||||||
randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec
|
|
||||||
|
|
||||||
val, err := st.ValidatorAtIndex(idx)
|
|
||||||
if err != nil {
|
|
||||||
return false, errors.Wrapf(err, "validator %d", idx)
|
|
||||||
}
|
|
||||||
|
|
||||||
return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
|
|
||||||
}
|
|
||||||
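The compute_balance_weighted_acceptance pseudocode above accepts a validator when effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value, i.e. with probability proportional to its effective balance. A worked sketch of that inequality follows; the constants are illustrative values matching the spec comment, not reads from the beacon config.

package main

import "fmt"

const (
	maxRandomValue             = 1<<16 - 1         // 2**16 - 1, per the pseudocode
	maxEffectiveBalanceElectra = 2048_000_000_000  // 2048 ETH in Gwei, illustrative
)

// accepted mirrors the acceptance inequality from the spec comment above.
func accepted(effectiveBalance, randomValue uint64) bool {
	return effectiveBalance*maxRandomValue >= maxEffectiveBalanceElectra*randomValue
}

func main() {
	// A 32 ETH validator is accepted only for small random draws:
	// roughly 32/2048 of the 16-bit range, i.e. draws up to about 1023.
	fmt.Println(accepted(32_000_000_000, 1000)) // true
	fmt.Println(accepted(32_000_000_000, 2000)) // false
}
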
|
|
||||||
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
|
|
||||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
|
||||||
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
|
|
||||||
//
|
|
||||||
// indices = indexed_payload_attestation.attesting_indices
|
|
||||||
// return len(indices) > 0 and indices == sorted(indices) and
|
|
||||||
// bls.FastAggregateVerify(
|
|
||||||
// [state.validators[i].pubkey for i in indices],
|
|
||||||
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
|
|
||||||
// indexed_payload_attestation.signature,
|
|
||||||
// )
|
|
||||||
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
|
|
||||||
indices := att.AttestingIndices
|
|
||||||
if len(indices) == 0 || !slices.IsSorted(indices) {
|
|
||||||
return errors.New("attesting indices empty or unsorted")
|
|
||||||
}
|
|
||||||
|
|
||||||
pubkeys := make([]bls.PublicKey, len(indices))
|
|
||||||
for i, idx := range indices {
|
|
||||||
val, err := st.ValidatorAtIndexReadOnly(idx)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "validator %d", idx)
|
|
||||||
}
|
|
||||||
keyBytes := val.PublicKey()
|
|
||||||
key, err := bls.PublicKeyFromBytes(keyBytes[:])
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "pubkey %d", idx)
|
|
||||||
}
|
|
||||||
pubkeys[i] = key
|
|
||||||
}
|
|
||||||
|
|
||||||
domain, err := signing.Domain(st.Fork(), slots.ToEpoch(att.Data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
root, err := signing.ComputeSigningRoot(att.Data, domain)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sig, err := bls.SignatureFromBytes(att.Signature)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !sig.FastAggregateVerify(pubkeys, root) {
|
|
||||||
return errors.New("invalid signature")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,305 +0,0 @@
|
|||||||
package gloas_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/OffchainLabs/go-bitfield"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
|
|
||||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
|
||||||
testutil "github.com/OffchainLabs/prysm/v7/testing/util"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestProcessPayloadAttestations_WrongParent(t *testing.T) {
|
|
||||||
setupTestConfig(t)
|
|
||||||
|
|
||||||
_, pk := newKey(t)
|
|
||||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
|
||||||
require.NoError(t, st.SetSlot(2))
|
|
||||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
|
||||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
|
||||||
|
|
||||||
att := ð.PayloadAttestation{
|
|
||||||
Data: ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: bytes.Repeat([]byte{0xbb}, 32),
|
|
||||||
Slot: 1,
|
|
||||||
},
|
|
||||||
AggregationBits: bitfield.NewBitvector512(),
|
|
||||||
Signature: make([]byte, 96),
|
|
||||||
}
|
|
||||||
body := buildBody(t, att)
|
|
||||||
|
|
||||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
|
||||||
require.ErrorContains(t, "wrong parent", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessPayloadAttestations_WrongSlot(t *testing.T) {
|
|
||||||
setupTestConfig(t)
|
|
||||||
|
|
||||||
_, pk := newKey(t)
|
|
||||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
|
||||||
require.NoError(t, st.SetSlot(3))
|
|
||||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
|
||||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
|
||||||
|
|
||||||
att := ð.PayloadAttestation{
|
|
||||||
Data: ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: parentRoot,
|
|
||||||
Slot: 1,
|
|
||||||
},
|
|
||||||
AggregationBits: bitfield.NewBitvector512(),
|
|
||||||
Signature: make([]byte, 96),
|
|
||||||
}
|
|
||||||
body := buildBody(t, att)
|
|
||||||
|
|
||||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
|
||||||
require.ErrorContains(t, "wrong slot", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessPayloadAttestations_InvalidSignature(t *testing.T) {
|
|
||||||
setupTestConfig(t)
|
|
||||||
|
|
||||||
_, pk1 := newKey(t)
|
|
||||||
sk2, pk2 := newKey(t)
|
|
||||||
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
|
|
||||||
st := newTestState(t, vals, 2)
|
|
||||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
|
||||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
|
||||||
|
|
||||||
attData := ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: parentRoot,
|
|
||||||
Slot: 1,
|
|
||||||
}
|
|
||||||
att := ð.PayloadAttestation{
|
|
||||||
Data: attData,
|
|
||||||
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
|
|
||||||
Signature: signAttestation(t, st, attData, []common.SecretKey{sk2}),
|
|
||||||
}
|
|
||||||
body := buildBody(t, att)
|
|
||||||
|
|
||||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
|
||||||
require.ErrorContains(t, "failed to verify indexed form", err)
|
|
||||||
require.ErrorContains(t, "invalid signature", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessPayloadAttestations_EmptyAggregationBits(t *testing.T) {
|
|
||||||
setupTestConfig(t)
|
|
||||||
|
|
||||||
_, pk := newKey(t)
|
|
||||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
|
||||||
require.NoError(t, st.SetSlot(2))
|
|
||||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
|
||||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
|
||||||
|
|
||||||
attData := ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: parentRoot,
|
|
||||||
Slot: 1,
|
|
||||||
}
|
|
||||||
att := ð.PayloadAttestation{
|
|
||||||
Data: attData,
|
|
||||||
AggregationBits: bitfield.NewBitvector512(),
|
|
||||||
Signature: make([]byte, 96),
|
|
||||||
}
|
|
||||||
body := buildBody(t, att)
|
|
||||||
|
|
||||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
|
||||||
require.ErrorContains(t, "failed to verify indexed form", err)
|
|
||||||
require.ErrorContains(t, "attesting indices empty or unsorted", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
|
|
||||||
helpers.ClearCache()
|
|
||||||
setupTestConfig(t)
|
|
||||||
|
|
||||||
sk1, pk1 := newKey(t)
|
|
||||||
sk2, pk2 := newKey(t)
|
|
||||||
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
|
|
||||||
|
|
||||||
st := newTestState(t, vals, 2)
|
|
||||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
|
||||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
|
||||||
|
|
||||||
attData := ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: parentRoot,
|
|
||||||
Slot: 1,
|
|
||||||
}
|
|
||||||
aggBits := bitfield.NewBitvector512()
|
|
||||||
aggBits.SetBitAt(0, true)
|
|
||||||
aggBits.SetBitAt(1, true)
|
|
||||||
|
|
||||||
att := ð.PayloadAttestation{
|
|
||||||
Data: attData,
|
|
||||||
AggregationBits: aggBits,
|
|
||||||
Signature: signAttestation(t, st, attData, []common.SecretKey{sk1, sk2}),
|
|
||||||
}
|
|
||||||
body := buildBody(t, att)
|
|
||||||
|
|
||||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessPayloadAttestations_MultipleAttestations(t *testing.T) {
|
|
||||||
helpers.ClearCache()
|
|
||||||
setupTestConfig(t)
|
|
||||||
|
|
||||||
sk1, pk1 := newKey(t)
|
|
||||||
sk2, pk2 := newKey(t)
|
|
||||||
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
|
|
||||||
|
|
||||||
st := newTestState(t, vals, 2)
|
|
||||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
|
||||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
|
||||||
|
|
||||||
attData1 := ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: parentRoot,
|
|
||||||
Slot: 1,
|
|
||||||
}
|
|
||||||
attData2 := ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: parentRoot,
|
|
||||||
Slot: 1,
|
|
||||||
}
|
|
||||||
|
|
||||||
att1 := ð.PayloadAttestation{
|
|
||||||
Data: attData1,
|
|
||||||
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
|
|
||||||
Signature: signAttestation(t, st, attData1, []common.SecretKey{sk1}),
|
|
||||||
}
|
|
||||||
att2 := ð.PayloadAttestation{
|
|
||||||
Data: attData2,
|
|
||||||
AggregationBits: setBits(bitfield.NewBitvector512(), 1),
|
|
||||||
Signature: signAttestation(t, st, attData2, []common.SecretKey{sk2}),
|
|
||||||
}
|
|
||||||
|
|
||||||
body := buildBody(t, att1, att2)
|
|
||||||
|
|
||||||
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
|
|
||||||
setupTestConfig(t)
|
|
||||||
|
|
||||||
_, pk := newKey(t)
|
|
||||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
|
||||||
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
|
|
||||||
require.NoError(t, st.SetLatestBlockHeader(ð.BeaconBlockHeader{ParentRoot: parentRoot}))
|
|
||||||
|
|
||||||
attData := ð.PayloadAttestationData{
|
|
||||||
BeaconBlockRoot: parentRoot,
|
|
||||||
Slot: 0,
|
|
||||||
}
|
|
||||||
att := ð.PayloadAttestation{
|
|
||||||
Data: attData,
|
|
||||||
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
|
|
||||||
Signature: make([]byte, 96),
|
|
||||||
}
|
|
||||||
body := buildBody(t, att)
|
|
||||||
|
|
||||||
errState := &validatorLookupErrState{
|
|
||||||
BeaconState: st,
|
|
||||||
errIndex: 0,
|
|
||||||
}
|
|
||||||
err := gloas.ProcessPayloadAttestations(t.Context(), errState, body)
|
|
||||||
require.ErrorContains(t, "failed to verify indexed form", err)
|
|
||||||
require.ErrorContains(t, "validator 0", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
|
|
||||||
st, err := testutil.NewBeaconState()
|
|
||||||
require.NoError(t, err)
|
|
||||||
for _, v := range vals {
|
|
||||||
require.NoError(t, st.AppendValidator(v))
|
|
||||||
require.NoError(t, st.AppendBalance(v.EffectiveBalance))
|
|
||||||
}
|
|
||||||
require.NoError(t, st.SetSlot(slot))
|
|
||||||
require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
|
|
||||||
return st
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupTestConfig(t *testing.T) {
|
|
||||||
params.SetupTestConfigCleanup(t)
|
|
||||||
cfg := params.BeaconConfig().Copy()
|
|
||||||
cfg.SlotsPerEpoch = 1
|
|
||||||
cfg.MaxEffectiveBalanceElectra = cfg.MaxEffectiveBalance
|
|
||||||
params.OverrideBeaconConfig(cfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildBody(t *testing.T, atts ...*eth.PayloadAttestation) interfaces.ReadOnlyBeaconBlockBody {
|
|
||||||
body := ð.BeaconBlockBodyGloas{
|
|
||||||
PayloadAttestations: atts,
|
|
||||||
RandaoReveal: make([]byte, 96),
|
|
||||||
Eth1Data: ð.Eth1Data{},
|
|
||||||
Graffiti: make([]byte, 32),
|
|
||||||
ProposerSlashings: []*eth.ProposerSlashing{},
|
|
||||||
AttesterSlashings: []*eth.AttesterSlashingElectra{},
|
|
||||||
Attestations: []*eth.AttestationElectra{},
|
|
||||||
Deposits: []*eth.Deposit{},
|
|
||||||
VoluntaryExits: []*eth.SignedVoluntaryExit{},
|
|
||||||
SyncAggregate: ð.SyncAggregate{},
|
|
||||||
BlsToExecutionChanges: []*eth.SignedBLSToExecutionChange{},
|
|
||||||
}
|
|
||||||
wrapped, err := blocks.NewBeaconBlockBody(body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return wrapped
|
|
||||||
}
|
|
||||||
|
|
||||||
func setBits(bits bitfield.Bitvector512, idx uint64) bitfield.Bitvector512 {
|
|
||||||
bits.SetBitAt(idx, true)
|
|
||||||
return bits
|
|
||||||
}
|
|
||||||
|
|
||||||
func activeValidator(pub []byte) *eth.Validator {
|
|
||||||
return ð.Validator{
|
|
||||||
PublicKey: pub,
|
|
||||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
|
||||||
WithdrawalCredentials: make([]byte, 32),
|
|
||||||
ActivationEligibilityEpoch: 0,
|
|
||||||
ActivationEpoch: 0,
|
|
||||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
|
||||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newKey(t *testing.T) (common.SecretKey, []byte) {
|
|
||||||
sk, err := bls.RandKey()
|
|
||||||
require.NoError(t, err)
|
|
||||||
return sk, sk.PublicKey().Marshal()
|
|
||||||
}
|
|
||||||
|
|
||||||
func signAttestation(t *testing.T, st state.ReadOnlyBeaconState, data *eth.PayloadAttestationData, sks []common.SecretKey) []byte {
|
|
||||||
domain, err := signing.Domain(st.Fork(), slots.ToEpoch(data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
|
|
||||||
require.NoError(t, err)
|
|
||||||
root, err := signing.ComputeSigningRoot(data, domain)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
sigs := make([]common.Signature, len(sks))
|
|
||||||
for i, sk := range sks {
|
|
||||||
sigs[i] = sk.Sign(root[:])
|
|
||||||
}
|
|
||||||
agg := bls.AggregateSignatures(sigs)
|
|
||||||
return agg.Marshal()
|
|
||||||
}
|
|
||||||
|
|
||||||
type validatorLookupErrState struct {
|
|
||||||
state.BeaconState
|
|
||||||
errIndex primitives.ValidatorIndex
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidatorAtIndexReadOnly is overridden to simulate a missing validator lookup.
|
|
||||||
func (s *validatorLookupErrState) ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
|
|
||||||
if idx == s.errIndex {
|
|
||||||
return nil, state.ErrNilValidatorsInState
|
|
||||||
}
|
|
||||||
return s.BeaconState.ValidatorAtIndexReadOnly(idx)
|
|
||||||
}
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
package gloas
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
|
||||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
|
|
||||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
|
||||||
// def process_builder_pending_payments(state: BeaconState) -> None:
|
|
||||||
//
|
|
||||||
// quorum = get_builder_payment_quorum_threshold(state)
|
|
||||||
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
|
|
||||||
// if payment.weight >= quorum:
|
|
||||||
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
|
||||||
//
|
|
||||||
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
|
|
||||||
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
|
||||||
// state.builder_pending_payments = old_payments + new_payments
|
|
||||||
func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
|
||||||
quorum, err := builderQuorumThreshold(state)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not compute builder payment quorum threshold")
|
|
||||||
}
|
|
||||||
|
|
||||||
payments, err := state.BuilderPendingPayments()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not get builder pending payments")
|
|
||||||
}
|
|
||||||
|
|
||||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
|
||||||
var withdrawals []*ethpb.BuilderPendingWithdrawal
|
|
||||||
for _, payment := range payments[:slotsPerEpoch] {
|
|
||||||
if quorum > payment.Weight {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
withdrawals = append(withdrawals, payment.Withdrawal)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := state.AppendBuilderPendingWithdrawals(withdrawals); err != nil {
|
|
||||||
return errors.Wrap(err, "could not append builder pending withdrawals")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := state.RotateBuilderPendingPayments(); err != nil {
|
|
||||||
return errors.Wrap(err, "could not rotate builder pending payments")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// builderQuorumThreshold calculates the quorum threshold for builder payments.
|
|
||||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
|
||||||
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
|
|
||||||
//
|
|
||||||
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
|
||||||
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
|
|
||||||
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
|
|
||||||
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
|
|
||||||
activeBalance, err := helpers.TotalActiveBalance(state)
|
|
||||||
if err != nil {
|
|
||||||
return 0, errors.Wrap(err, "could not get total active balance")
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg := params.BeaconConfig()
|
|
||||||
slotsPerEpoch := uint64(cfg.SlotsPerEpoch)
|
|
||||||
numerator := cfg.BuilderPaymentThresholdNumerator
|
|
||||||
denominator := cfg.BuilderPaymentThresholdDenominator
|
|
||||||
|
|
||||||
activeBalancePerSlot := activeBalance / slotsPerEpoch
|
|
||||||
quorum := (activeBalancePerSlot * numerator) / denominator
|
|
||||||
return primitives.Gwei(quorum), nil
|
|
||||||
}
|
|
||||||
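The quorum threshold above is the per-slot share of total active balance scaled by a configured numerator/denominator pair. A worked sketch with illustrative inputs follows; the 1/2 fraction and the stake figure are assumptions for the example, not the configured values.

package main

import "fmt"

// builderQuorum mirrors the arithmetic in the spec comment above:
// per-slot active balance scaled by a configured fraction.
func builderQuorum(totalActiveBalance, slotsPerEpoch, numerator, denominator uint64) uint64 {
	perSlot := totalActiveBalance / slotsPerEpoch
	return perSlot * numerator / denominator
}

func main() {
	// 1,000,000 ETH of active stake (in Gwei), 32 slots per epoch, 1/2 threshold.
	q := builderQuorum(1_000_000_000_000_000, 32, 1, 2)
	fmt.Println(q) // 15625000000000 Gwei, about 15,625 ETH
}
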
@@ -1,119 +0,0 @@
package gloas

import (
	"slices"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderQuorumThreshold(t *testing.T) {
	helpers.ClearCache()
	cfg := params.BeaconConfig()

	validators := []*ethpb.Validator{
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
	}
	st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{Validators: validators})
	require.NoError(t, err)

	got, err := builderQuorumThreshold(st)
	require.NoError(t, err)

	total := uint64(len(validators)) * cfg.MaxEffectiveBalance
	perSlot := total / uint64(cfg.SlotsPerEpoch)
	want := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
	require.Equal(t, primitives.Gwei(want), got)
}

func TestProcessBuilderPendingPayments(t *testing.T) {
	helpers.ClearCache()
	cfg := params.BeaconConfig()

	buildPayments := func(weights ...primitives.Gwei) []*ethpb.BuilderPendingPayment {
		p := make([]*ethpb.BuilderPendingPayment, 2*int(cfg.SlotsPerEpoch))
		for i := range p {
			p[i] = &ethpb.BuilderPendingPayment{
				Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
			}
		}
		for i, w := range weights {
			p[i].Weight = w
			p[i].Withdrawal.Amount = 1
		}
		return p
	}

	validators := []*ethpb.Validator{
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
	}
	pbSt, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
	require.NoError(t, err)

	total := uint64(len(validators)) * cfg.MaxEffectiveBalance
	perSlot := total / uint64(cfg.SlotsPerEpoch)
	quorum := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
	slotsPerEpoch := int(cfg.SlotsPerEpoch)

	t.Run("append qualifying withdrawals", func(t *testing.T) {
		payments := buildPayments(primitives.Gwei(quorum+1), primitives.Gwei(quorum+2))
		st := &testProcessState{BeaconState: pbSt, payments: payments}

		require.NoError(t, ProcessBuilderPendingPayments(st))
		require.Equal(t, 2, len(st.withdrawals))
		require.Equal(t, payments[0].Withdrawal, st.withdrawals[0])
		require.Equal(t, payments[1].Withdrawal, st.withdrawals[1])

		require.Equal(t, 2*slotsPerEpoch, len(st.payments))
		for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
			require.Equal(t, primitives.Gwei(0), st.payments[i].Weight)
			require.Equal(t, primitives.Gwei(0), st.payments[i].Withdrawal.Amount)
			require.Equal(t, 20, len(st.payments[i].Withdrawal.FeeRecipient))
		}
	})

	t.Run("no withdrawals when below quorum", func(t *testing.T) {
		payments := buildPayments(primitives.Gwei(quorum - 1))
		st := &testProcessState{BeaconState: pbSt, payments: payments}

		require.NoError(t, ProcessBuilderPendingPayments(st))
		require.Equal(t, 0, len(st.withdrawals))
	})
}

type testProcessState struct {
	state.BeaconState
	payments    []*ethpb.BuilderPendingPayment
	withdrawals []*ethpb.BuilderPendingWithdrawal
}

func (t *testProcessState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
	return t.payments, nil
}

func (t *testProcessState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
	t.withdrawals = append(t.withdrawals, withdrawals...)
	return nil
}

func (t *testProcessState) RotateBuilderPendingPayments() error {
	slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
	rotated := slices.Clone(t.payments[slotsPerEpoch:])
	for range slotsPerEpoch {
		rotated = append(rotated, &ethpb.BuilderPendingPayment{
			Withdrawal: &ethpb.BuilderPendingWithdrawal{
				FeeRecipient: make([]byte, 20),
			},
		})
	}
	t.payments = rotated
	return nil
}
@@ -1,43 +0,0 @@
package gloas

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
)

// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
//	slot = header_1.slot
//	proposal_epoch = compute_epoch_at_slot(slot)
//	if proposal_epoch == get_current_epoch(state):
//	    payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
//	    state.builder_pending_payments[payment_index] = BuilderPendingPayment()
//	elif proposal_epoch == get_previous_epoch(state):
//	    payment_index = slot % SLOTS_PER_EPOCH
//	    state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
	proposalEpoch := slots.ToEpoch(header.Slot)
	currentEpoch := time.CurrentEpoch(st)
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch

	var paymentIndex primitives.Slot
	if proposalEpoch == currentEpoch {
		paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch
	} else if proposalEpoch+1 == currentEpoch {
		paymentIndex = header.Slot % slotsPerEpoch
	} else {
		return nil
	}

	if err := st.ClearBuilderPendingPayment(paymentIndex); err != nil {
		return errors.Wrap(err, "could not clear builder pending payment")
	}

	return nil
}
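
Because builder_pending_payments is a two-epoch ring buffer, the index to clear depends on whether the proposal slot falls in the current or the previous epoch. A minimal, self-contained sketch of just that index arithmetic (slot values are made up for illustration):

package main

import "fmt"

func main() {
	const slotsPerEpoch = 32
	// Hypothetical slots, chosen only to exercise the branches.
	stateSlot := uint64(65)  // state is in epoch 2
	headerSlot := uint64(64) // proposal is also in epoch 2

	currentEpoch := stateSlot / slotsPerEpoch
	proposalEpoch := headerSlot / slotsPerEpoch

	switch {
	case proposalEpoch == currentEpoch:
		fmt.Println("index:", slotsPerEpoch+headerSlot%slotsPerEpoch) // second half of the buffer
	case proposalEpoch+1 == currentEpoch:
		fmt.Println("index:", headerSlot%slotsPerEpoch) // first half of the buffer
	default:
		fmt.Println("too old, nothing to clear")
	}
}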
@@ -1,112 +0,0 @@
package gloas

import (
	"bytes"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestRemoveBuilderPendingPayment_CurrentEpoch(t *testing.T) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	stateSlot := slotsPerEpoch*2 + 1
	headerSlot := slotsPerEpoch * 2

	st := newGloasStateWithPayments(t, stateSlot)
	paymentIndex := int(slotsPerEpoch + headerSlot%slotsPerEpoch)

	setPendingPayment(t, st, paymentIndex, 123)

	err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
	require.NoError(t, err)

	got := getPendingPayment(t, st, paymentIndex)
	require.NotNil(t, got.Withdrawal)
	require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
	require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_PreviousEpoch(t *testing.T) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	stateSlot := slotsPerEpoch*2 + 1
	headerSlot := slotsPerEpoch + 7

	st := newGloasStateWithPayments(t, stateSlot)
	paymentIndex := int(headerSlot % slotsPerEpoch)

	setPendingPayment(t, st, paymentIndex, 456)

	err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
	require.NoError(t, err)

	got := getPendingPayment(t, st, paymentIndex)
	require.NotNil(t, got.Withdrawal)
	require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
	require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_OlderThanTwoEpoch(t *testing.T) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	stateSlot := slotsPerEpoch*4 + 1 // current epoch far ahead
	headerSlot := slotsPerEpoch * 2  // two epochs behind

	st := newGloasStateWithPayments(t, stateSlot)
	paymentIndex := int(headerSlot % slotsPerEpoch)

	original := getPendingPayment(t, st, paymentIndex)

	err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
	require.NoError(t, err)

	after := getPendingPayment(t, st, paymentIndex)
	require.DeepEqual(t, original.Withdrawal.FeeRecipient, after.Withdrawal.FeeRecipient)
	require.Equal(t, original.Withdrawal.Amount, after.Withdrawal.Amount)
}

func newGloasStateWithPayments(t *testing.T, slot primitives.Slot) state.BeaconState {
	t.Helper()

	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	paymentCount := int(slotsPerEpoch * 2)
	payments := make([]*eth.BuilderPendingPayment, paymentCount)
	for i := range payments {
		payments[i] = &eth.BuilderPendingPayment{
			Withdrawal: &eth.BuilderPendingWithdrawal{
				FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
				Amount:       1,
			},
		}
	}

	st, err := state_native.InitializeFromProtoUnsafeGloas(&eth.BeaconStateGloas{
		Slot:                   slot,
		BuilderPendingPayments: payments,
	})
	require.NoError(t, err)
	return st
}

func setPendingPayment(t *testing.T, st state.BeaconState, index int, amount uint64) {
	t.Helper()

	payment := &eth.BuilderPendingPayment{
		Withdrawal: &eth.BuilderPendingWithdrawal{
			FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
			Amount:       primitives.Gwei(amount),
		},
	}
	require.NoError(t, st.SetBuilderPendingPayment(primitives.Slot(index), payment))
}

func getPendingPayment(t *testing.T, st state.BeaconState, index int) *eth.BuilderPendingPayment {
	t.Helper()

	stateProto := st.ToProtoUnsafe().(*eth.BeaconStateGloas)

	return stateProto.BuilderPendingPayments[index]
}
@@ -6,7 +6,6 @@ go_library(
        "attestation.go",
        "beacon_committee.go",
        "block.go",
-       "deposit.go",
        "genesis.go",
        "legacy.go",
        "log.go",
@@ -24,7 +23,6 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/cache:go_default_library",
-       "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -33,7 +31,6 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
        "//container/trie:go_default_library",
-       "//contracts/deposit:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
@@ -57,7 +54,6 @@ go_test(
        "attestation_test.go",
        "beacon_committee_test.go",
        "block_test.go",
-       "deposit_test.go",
        "legacy_test.go",
        "private_access_fuzz_noop_test.go", # keep
        "private_access_test.go",
@@ -76,7 +72,6 @@ go_test(
    tags = ["CI_race_detection"],
    deps = [
        "//beacon-chain/cache:go_default_library",
-       "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -85,8 +80,6 @@ go_test(
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
-       "//container/trie:go_default_library",
-       "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -1,60 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "consolidations.go",
        "deposits.go",
        "log.go",
        "withdrawals.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//common/math:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "consolidations_test.go",
        "deposits_test.go",
        "withdrawals_test.go",
    ],
    deps = [
        ":go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
@@ -1,365 +0,0 @@
package requests

import (
	"bytes"
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	prysmMath "github.com/OffchainLabs/prysm/v7/math"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/pkg/errors"
)

// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
//	def process_consolidation_request(
//	    state: BeaconState,
//	    consolidation_request: ConsolidationRequest
//	) -> None:
//	    if is_valid_switch_to_compounding_request(state, consolidation_request):
//	        validator_pubkeys = [v.pubkey for v in state.validators]
//	        request_source_pubkey = consolidation_request.source_pubkey
//	        source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//	        switch_to_compounding_validator(state, source_index)
//	        return
//
//	    # Verify that source != target, so a consolidation cannot be used as an exit.
//	    if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
//	        return
//	    # If the pending consolidations queue is full, consolidation requests are ignored
//	    if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
//	        return
//	    # If there is too little available consolidation churn limit, consolidation requests are ignored
//	    if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
//	        return
//
//	    validator_pubkeys = [v.pubkey for v in state.validators]
//	    # Verify pubkeys exists
//	    request_source_pubkey = consolidation_request.source_pubkey
//	    request_target_pubkey = consolidation_request.target_pubkey
//	    if request_source_pubkey not in validator_pubkeys:
//	        return
//	    if request_target_pubkey not in validator_pubkeys:
//	        return
//	    source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//	    target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
//	    source_validator = state.validators[source_index]
//	    target_validator = state.validators[target_index]
//
//	    # Verify source withdrawal credentials
//	    has_correct_credential = has_execution_withdrawal_credential(source_validator)
//	    is_correct_source_address = (
//	        source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
//	    )
//	    if not (has_correct_credential and is_correct_source_address):
//	        return
//
//	    # Verify that target has compounding withdrawal credentials
//	    if not has_compounding_withdrawal_credential(target_validator):
//	        return
//
//	    # Verify the source and the target are active
//	    current_epoch = get_current_epoch(state)
//	    if not is_active_validator(source_validator, current_epoch):
//	        return
//	    if not is_active_validator(target_validator, current_epoch):
//	        return
//	    # Verify exits for source and target have not been initiated
//	    if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
//	        return
//	    if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
//	        return
//
//	    # Verify the source has been active long enough
//	    if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
//	        return
//
//	    # Verify the source has no pending withdrawals in the queue
//	    if get_pending_balance_to_withdraw(state, source_index) > 0:
//	        return
//	    # Initiate source validator exit and append pending consolidation
//	    source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
//	        state, source_validator.effective_balance
//	    )
//	    source_validator.withdrawable_epoch = Epoch(
//	        source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
//	    )
//	    state.pending_consolidations.append(PendingConsolidation(
//	        source_index=source_index,
//	        target_index=target_index
//	    ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
	ctx, span := trace.StartSpan(ctx, "requests.ProcessConsolidationRequests")
	defer span.End()

	if len(reqs) == 0 || st == nil {
		return nil
	}
	curEpoch := slots.ToEpoch(st.Slot())
	ffe := params.BeaconConfig().FarFutureEpoch
	minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
	pcLimit := params.BeaconConfig().PendingConsolidationsLimit

	for _, cr := range reqs {
		if cr == nil {
			return errors.New("nil consolidation request")
		}
		if ctx.Err() != nil {
			return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
		}

		if isValidSwitchToCompoundingRequest(st, cr) {
			srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
			if !ok {
				log.Error("Failed to find source validator index")
				continue
			}
			if err := switchToCompoundingValidator(st, srcIdx); err != nil {
				log.WithError(err).Error("Failed to switch to compounding validator")
			}
			continue
		}

		sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
		targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
		if sourcePubkey == targetPubkey {
			continue
		}

		if npc, err := st.NumPendingConsolidations(); err != nil {
			return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
		} else if npc >= pcLimit {
			continue
		}

		activeBal, err := helpers.TotalActiveBalance(st)
		if err != nil {
			return err
		}
		churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
		if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
			continue
		}

		srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
		if !ok {
			continue
		}
		tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
		if !ok {
			continue
		}

		srcV, err := st.ValidatorAtIndex(srcIdx)
		if err != nil {
			return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
		}

		roSrcV, err := state_native.NewValidator(srcV)
		if err != nil {
			return err
		}

		tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
		if err != nil {
			return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
		}

		// Verify source withdrawal credentials.
		if !roSrcV.HasExecutionWithdrawalCredentials() {
			continue
		}
		// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address.
		if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
			continue
		}

		// Target validator must have their withdrawal credentials set appropriately.
		if !tgtV.HasCompoundingWithdrawalCredentials() {
			continue
		}

		// Both validators must be active.
		if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
			continue
		}
		// Neither validator is exiting.
		if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
			continue
		}

		e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
		if overflow {
			log.Error("Overflow when adding activation epoch and shard committee period")
			continue
		}
		if uint64(curEpoch) < e {
			continue
		}

		hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
		if err != nil {
			log.WithError(err).Error("Failed to fetch pending balance to withdraw")
			continue
		}
		if hasBal {
			continue
		}

		exitEpoch, err := computeConsolidationEpochAndUpdateChurn(st, primitives.Gwei(srcV.EffectiveBalance))
		if err != nil {
			log.WithError(err).Error("Failed to compute consolidation epoch")
			continue
		}
		srcV.ExitEpoch = exitEpoch
		srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
		if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
			return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
		}

		if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
			return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
		}
	}

	return nil
}

func isValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.ConsolidationRequest) bool {
	if req.SourcePubkey == nil || req.TargetPubkey == nil {
		return false
	}

	if !bytes.Equal(req.SourcePubkey, req.TargetPubkey) {
		return false
	}

	srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.SourcePubkey))
	if !ok {
		return false
	}
	srcV, err := st.ValidatorAtIndexReadOnly(srcIdx)
	if err != nil {
		return false
	}

	sourceAddress := req.SourceAddress
	withdrawalCreds := srcV.GetWithdrawalCredentials()
	if len(withdrawalCreds) != 32 || len(sourceAddress) != 20 || !bytes.HasSuffix(withdrawalCreds, sourceAddress) {
		return false
	}

	if !srcV.HasETH1WithdrawalCredentials() {
		return false
	}

	curEpoch := slots.ToEpoch(st.Slot())
	if !helpers.IsActiveValidatorUsingTrie(srcV, curEpoch) {
		return false
	}

	if srcV.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
		return false
	}

	return true
}

func switchToCompoundingValidator(st state.BeaconState, idx primitives.ValidatorIndex) error {
	v, err := st.ValidatorAtIndex(idx)
	if err != nil {
		return err
	}
	if len(v.WithdrawalCredentials) == 0 {
		return errors.New("validator has no withdrawal credentials")
	}

	v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
	if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
		return err
	}
	return queueExcessActiveBalance(st, idx)
}

func queueExcessActiveBalance(st state.BeaconState, idx primitives.ValidatorIndex) error {
	bal, err := st.BalanceAtIndex(idx)
	if err != nil {
		return err
	}

	if bal > params.BeaconConfig().MinActivationBalance {
		if err := st.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
			return err
		}
		excessBalance := bal - params.BeaconConfig().MinActivationBalance
		val, err := st.ValidatorAtIndexReadOnly(idx)
		if err != nil {
			return err
		}
		pk := val.PublicKey()
		return st.AppendPendingDeposit(&eth.PendingDeposit{
			PublicKey:             pk[:],
			WithdrawalCredentials: val.GetWithdrawalCredentials(),
			Amount:                excessBalance,
			Signature:             common.InfiniteSignature[:],
			Slot:                  params.BeaconConfig().GenesisSlot,
		})
	}
	return nil
}

func computeConsolidationEpochAndUpdateChurn(st state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
	earliestEpoch, err := st.EarliestConsolidationEpoch()
	if err != nil {
		return 0, err
	}
	earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(st.Slot())))

	activeBal, err := helpers.TotalActiveBalance(st)
	if err != nil {
		return 0, err
	}
	perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))

	var consolidationBalanceToConsume primitives.Gwei
	if earliestEpoch < earliestConsolidationEpoch {
		consolidationBalanceToConsume = perEpochConsolidationChurn
	} else {
		consolidationBalanceToConsume, err = st.ConsolidationBalanceToConsume()
		if err != nil {
			return 0, err
		}
	}

	if consolidationBalance > consolidationBalanceToConsume {
		balanceToProcess := consolidationBalance - consolidationBalanceToConsume
		additionalEpochs, err := prysmMath.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
		if err != nil {
			return 0, err
		}
		additionalEpochs++
		earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
		consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
	}

	if err := st.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
		return 0, err
	}
	if err := st.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
		return 0, err
	}

	return earliestConsolidationEpoch, nil
}
@@ -1,316 +0,0 @@
package requests_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
	num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
	vals := make([]*eth.Validator, num)
	for i := range vals {
		wd := make([]byte, 32)
		wd[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
		wd[31] = byte(i)

		vals[i] = &eth.Validator{
			ActivationEpoch:       primitives.Epoch(0),
			EffectiveBalance:      params.BeaconConfig().MinActivationBalance,
			ExitEpoch:             params.BeaconConfig().FarFutureEpoch,
			PublicKey:             fmt.Appendf(nil, "val_%d", i),
			WithdrawableEpoch:     params.BeaconConfig().FarFutureEpoch,
			WithdrawalCredentials: wd,
		}
	}
	if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
		vals = append(vals, &eth.Validator{
			ActivationEpoch:  primitives.Epoch(0),
			ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
		})
	}
	return vals
}

func TestProcessConsolidationRequests(t *testing.T) {
	tests := []struct {
		name     string
		state    state.BeaconState
		reqs     []*enginev1.ConsolidationRequest
		validate func(*testing.T, state.BeaconState)
		wantErr  bool
	}{
		{
			name: "nil request",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{nil},
			validate: func(t *testing.T, st state.BeaconState) {
				require.DeepEqual(t, st, st)
			},
			wantErr: true,
		},
		{
			name: "one valid request",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Slot:       params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
					Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
				}
				// Validator scenario setup. See comments in reqs section.
				st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
				st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
				st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
				st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
				st.Validators[13].ExitEpoch = 10
				st.Validators[16].ExitEpoch = 10
				st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
					{
						Index:  17,
						Amount: 100,
					},
				}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				// Source doesn't have withdrawal credentials.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_3"),
					TargetPubkey:  []byte("val_4"),
				},
				// Source withdrawal credentials don't match the consolidation address.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
					SourcePubkey:  []byte("val_5"),
					TargetPubkey:  []byte("val_6"),
				},
				// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
					SourcePubkey:  []byte("val_7"),
					TargetPubkey:  []byte("val_8"),
				},
				// Source is inactive.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
					SourcePubkey:  []byte("val_9"),
					TargetPubkey:  []byte("val_10"),
				},
				// Target is inactive.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
					SourcePubkey:  []byte("val_11"),
					TargetPubkey:  []byte("val_12"),
				},
				// Source is exiting.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
					SourcePubkey:  []byte("val_13"),
					TargetPubkey:  []byte("val_14"),
				},
				// Target is exiting.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
					SourcePubkey:  []byte("val_15"),
					TargetPubkey:  []byte("val_16"),
				},
				// Source doesn't exist
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("INVALID"),
					TargetPubkey:  []byte("val_0"),
				},
				// Target doesn't exist
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("val_0"),
					TargetPubkey:  []byte("INVALID"),
				},
				// Source == target
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("val_0"),
					TargetPubkey:  []byte("val_0"),
				},
				// Has pending partial withdrawal
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("val_17"),
					TargetPubkey:  []byte("val_1"),
				},
				// Valid consolidation request. This should be last to ensure invalid requests do
				// not end the processing early.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify a pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, uint64(1), numPC)
				pcs, err := st.PendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
				require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)

				// Verify the source validator is exiting.
				src, err := st.ValidatorAtIndex(1)
				require.NoError(t, err)
				require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
				require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
			},
			wantErr: false,
		},
		{
			name: "pending consolidations limit reached",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
				}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify no pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

				// Verify the source validator is not exiting.
				src, err := st.ValidatorAtIndex(1)
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
			},
			wantErr: false,
		},
		{
			name: "pending consolidations limit reached during processing",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
				}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
					SourcePubkey:  []byte("val_3"),
					TargetPubkey:  []byte("val_4"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify a pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

				// The first consolidation was appended.
				pcs, err := st.PendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
				require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)

				// Verify the second source validator is not exiting.
				src, err := st.ValidatorAtIndex(3)
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
			},
			wantErr: false,
		},
		{
			name: "pending consolidations limit reached and compounded consolidation after",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
				}
				// To allow compounding consolidation requests.
				st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
					SourcePubkey:  []byte("val_3"),
					TargetPubkey:  []byte("val_3"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify a pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

				// Verify that the last consolidation was included
				src, err := st.ValidatorAtIndex(3)
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := requests.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
			if (err != nil) != tt.wantErr {
				t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !tt.wantErr {
				require.NoError(t, err)
			}
			if tt.validate != nil {
				tt.validate(t, tt.state)
			}
		})
	}
}
@@ -1,73 +0,0 @@
package requests

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/pkg/errors"
)

// ProcessDepositRequests processes execution layer deposits requests.
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, reqs []*enginev1.DepositRequest) (state.BeaconState, error) {
	_, span := trace.StartSpan(ctx, "requests.ProcessDepositRequests")
	defer span.End()

	if len(reqs) == 0 {
		return beaconState, nil
	}

	var err error
	for _, req := range reqs {
		beaconState, err = processDepositRequest(beaconState, req)
		if err != nil {
			return nil, errors.Wrap(err, "could not apply deposit request")
		}
	}
	return beaconState, nil
}

// processDepositRequest processes the specific deposit request
//
//	def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//	    # Set deposit request start index
//	    if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
//	        state.deposit_requests_start_index = deposit_request.index
//
//	    # Create pending deposit
//	    state.pending_deposits.append(PendingDeposit(
//	        pubkey=deposit_request.pubkey,
//	        withdrawal_credentials=deposit_request.withdrawal_credentials,
//	        amount=deposit_request.amount,
//	        signature=deposit_request.signature,
//	        slot=state.slot,
//	    ))
func processDepositRequest(beaconState state.BeaconState, req *enginev1.DepositRequest) (state.BeaconState, error) {
	requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
	if err != nil {
		return nil, errors.Wrap(err, "could not get deposit requests start index")
	}
	if req == nil {
		return nil, errors.New("nil deposit request")
	}
	if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
		if err := beaconState.SetDepositRequestsStartIndex(req.Index); err != nil {
			return nil, errors.Wrap(err, "could not set deposit requests start index")
		}
	}
	if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
		PublicKey:             bytesutil.SafeCopyBytes(req.Pubkey),
		WithdrawalCredentials: bytesutil.SafeCopyBytes(req.WithdrawalCredentials),
		Amount:                req.Amount,
		Signature:             bytesutil.SafeCopyBytes(req.Signature),
		Slot:                  beaconState.Slot(),
	}); err != nil {
		return nil, errors.Wrap(err, "could not append deposit request")
	}
	return beaconState, nil
}
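
The mapping from an execution-layer DepositRequest to a PendingDeposit is one-to-one, with only the slot taken from the state. A minimal, hypothetical caller sketch under that assumption (all request values below are placeholders, not real deposit data):

	// Hypothetical usage inside the requests package; `st` is an existing
	// Electra-or-later beacon state obtained elsewhere.
	req := &enginev1.DepositRequest{
		Pubkey:                make([]byte, 48),
		WithdrawalCredentials: make([]byte, 32),
		Amount:                32_000_000_000, // 32 ETH in gwei
		Signature:             make([]byte, 96),
		Index:                 0,
	}
	st, err := ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{req})
	if err != nil {
		// The only hard failures are nil requests or state mutation errors.
		log.WithError(err).Error("could not process deposit request")
	}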
@@ -1,70 +0,0 @@
package requests_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessDepositRequests(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, 1)
	sk, err := bls.RandKey()
	require.NoError(t, err)
	require.NoError(t, st.SetDepositRequestsStartIndex(1))

	t.Run("empty requests continues", func(t *testing.T) {
		newSt, err := requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
		require.NoError(t, err)
		require.DeepEqual(t, newSt, st)
	})
	t.Run("nil request errors", func(t *testing.T) {
		_, err = requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
		require.ErrorContains(t, "nil deposit request", err)
	})

	vals := st.Validators()
	vals[0].PublicKey = sk.PublicKey().Marshal()
	vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
	require.NoError(t, st.SetValidators(vals))
	bals := st.Balances()
	bals[0] = params.BeaconConfig().MinActivationBalance + 2000
	require.NoError(t, st.SetBalances(bals))
	require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pbd as the deterministic state populates this already
	withdrawalCred := make([]byte, 32)
	withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
	depositMessage := &eth.DepositMessage{
		PublicKey:             sk.PublicKey().Marshal(),
		Amount:                1000,
		WithdrawalCredentials: withdrawalCred,
	}
	domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
	require.NoError(t, err)
	sr, err := signing.ComputeSigningRoot(depositMessage, domain)
	require.NoError(t, err)
	sig := sk.Sign(sr[:])
	reqs := []*enginev1.DepositRequest{
		{
			Pubkey:                depositMessage.PublicKey,
			Index:                 0,
			WithdrawalCredentials: depositMessage.WithdrawalCredentials,
			Amount:                depositMessage.Amount,
			Signature:             sig.Marshal(),
		},
	}
	st, err = requests.ProcessDepositRequests(t.Context(), st, reqs)
	require.NoError(t, err)

	pbd, err := st.PendingDeposits()
	require.NoError(t, err)
	require.Equal(t, 1, len(pbd))
	require.Equal(t, uint64(1000), pbd[0].Amount)
	require.DeepEqual(t, bytesutil.SafeCopyBytes(reqs[0].Pubkey), pbd[0].PublicKey)
}
@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package requests

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/requests")
@@ -27,7 +27,6 @@ go_library(
        "//beacon-chain/core/execution:go_default_library",
        "//beacon-chain/core/fulu:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
-       "//beacon-chain/core/requests:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition/interop:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
@@ -72,7 +71,6 @@ go_test(
        "state_test.go",
        "trailing_slot_state_cache_test.go",
        "transition_fuzz_test.go",
-       "transition_gloas_test.go",
        "transition_no_verify_sig_test.go",
        "transition_test.go",
    ],
@@ -108,7 +106,6 @@ go_test(
        "//time/slots:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
-       "@com_github_stretchr_testify//require:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -6,7 +6,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
-	coreRequests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
	v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -98,7 +97,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
			return nil, electra.NewExecReqError("nil deposit request")
		}
	}
-	st, err = coreRequests.ProcessDepositRequests(ctx, st, requests.Deposits)
+	st, err = electra.ProcessDepositRequests(ctx, st, requests.Deposits)
	if err != nil {
		return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
	}
@@ -108,7 +107,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
			return nil, electra.NewExecReqError("nil withdrawal request")
		}
	}
-	st, err = coreRequests.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
+	st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
	if err != nil {
		return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
	}
@@ -117,7 +116,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
			return nil, electra.NewExecReqError("nil consolidation request")
		}
	}
-	if err := coreRequests.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
+	if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
		return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
	}
	return st, nil
@@ -142,18 +142,6 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
 	); err != nil {
 		return nil, err
 	}

-	// Spec v1.6.1 (pseudocode):
-	// # [New in Gloas:EIP7732]
-	// # Unset the next payload availability
-	// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
-	if state.Version() >= version.Gloas {
-		index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
-		if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
-			return nil, err
-		}
-	}
-
 	return state, nil
 }

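The hunk above removes the per-slot clearing of the next slot's availability bit, and the deleted test file that follows exercises exactly that arithmetic. A minimal self-contained sketch of the bit bookkeeping (helper name and error text are illustrative, not the Prysm state API):

package sketch

import "fmt"

// clearNextSlotAvailability clears the availability bit for slot+1 in a
// SLOTS_PER_HISTORICAL_ROOT-sized bitfield packed eight slots per byte,
// mirroring the spec pseudocode quoted in the removed comment.
func clearNextSlotAvailability(availability []byte, slot, slotsPerHistoricalRoot uint64) error {
	idx := (slot + 1) % slotsPerHistoricalRoot // wraps back to bit 0 at the end of the window
	byteIdx := idx / 8
	if byteIdx >= uint64(len(availability)) {
		return fmt.Errorf("bit index %d (byte index %d) out of range for length %d", idx, byteIdx, len(availability))
	}
	availability[byteIdx] &^= 1 << (idx % 8) // clear only this slot's bit
	return nil
}

With slot 10 and a bitfield of all 0xFF bytes this clears bit 11 (byte 1 drops from 0xFF to 0xF7), and at slot SLOTS_PER_HISTORICAL_ROOT-1 it wraps and clears bit 0 (byte 0 becomes 0xFE), which is what the two removed TestProcessSlot_Gloas tests below assert.
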
@@ -1,141 +0,0 @@
|
|||||||
package transition
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
|
||||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
|
||||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
|
||||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestProcessSlot_GloasClearsNextPayloadAvailability(t *testing.T) {
|
|
||||||
slot := primitives.Slot(10)
|
|
||||||
cfg := params.BeaconConfig()
|
|
||||||
nextIdx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
|
|
||||||
byteIdx := nextIdx / 8
|
|
||||||
bitMask := byte(1 << (nextIdx % 8))
|
|
||||||
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
|
|
||||||
st := newGloasState(t, slot, availability)
|
|
||||||
|
|
||||||
_, err := ProcessSlot(context.Background(), st)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
post := st.ToProto().(*ethpb.BeaconStateGloas)
|
|
||||||
require.Equal(t, byte(0xFF)&^bitMask, post.ExecutionPayloadAvailability[byteIdx])
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessSlot_GloasClearsNextPayloadAvailability_Wrap(t *testing.T) {
|
|
||||||
cfg := params.BeaconConfig()
|
|
||||||
slot := primitives.Slot(cfg.SlotsPerHistoricalRoot - 1)
|
|
||||||
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
|
|
||||||
st := newGloasState(t, slot, availability)
|
|
||||||
|
|
||||||
_, err := ProcessSlot(context.Background(), st)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
post := st.ToProto().(*ethpb.BeaconStateGloas)
|
|
||||||
require.Equal(t, byte(0xFE), post.ExecutionPayloadAvailability[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessSlot_GloasAvailabilityUpdateError(t *testing.T) {
|
|
||||||
slot := primitives.Slot(7)
|
|
||||||
availability := make([]byte, 1)
|
|
||||||
st := newGloasState(t, slot, availability)
|
|
||||||
|
|
||||||
_, err := ProcessSlot(context.Background(), st)
|
|
||||||
cfg := params.BeaconConfig()
|
|
||||||
idx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
|
|
||||||
byteIdx := idx / 8
|
|
||||||
require.EqualError(t, err, fmt.Sprintf(
|
|
||||||
"bit index %d (byte index %d) out of range for execution payload availability length %d",
|
|
||||||
idx, byteIdx, len(availability),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) state.BeaconState {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
cfg := params.BeaconConfig()
|
|
||||||
protoState := ðpb.BeaconStateGloas{
|
|
||||||
Slot: slot,
|
|
||||||
LatestBlockHeader: testBeaconBlockHeader(),
|
|
||||||
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
|
||||||
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
|
||||||
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
|
|
||||||
ExecutionPayloadAvailability: availability,
|
|
||||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
|
|
||||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
|
||||||
ParentBlockHash: make([]byte, 32),
|
|
||||||
ParentBlockRoot: make([]byte, 32),
|
|
||||||
BlockHash: make([]byte, 32),
|
|
||||||
PrevRandao: make([]byte, 32),
|
|
||||||
FeeRecipient: make([]byte, 20),
|
|
||||||
BlobKzgCommitmentsRoot: make([]byte, 32),
|
|
||||||
},
|
|
||||||
Eth1Data: ðpb.Eth1Data{
|
|
||||||
DepositRoot: make([]byte, 32),
|
|
||||||
BlockHash: make([]byte, 32),
|
|
||||||
},
|
|
||||||
PreviousEpochParticipation: []byte{},
|
|
||||||
CurrentEpochParticipation: []byte{},
|
|
||||||
JustificationBits: []byte{0},
|
|
||||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
|
||||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
|
||||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
|
||||||
CurrentSyncCommittee: ðpb.SyncCommittee{},
|
|
||||||
NextSyncCommittee: ðpb.SyncCommittee{},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range protoState.BlockRoots {
|
|
||||||
protoState.BlockRoots[i] = make([]byte, 32)
|
|
||||||
}
|
|
||||||
for i := range protoState.StateRoots {
|
|
||||||
protoState.StateRoots[i] = make([]byte, 32)
|
|
||||||
}
|
|
||||||
for i := range protoState.RandaoMixes {
|
|
||||||
protoState.RandaoMixes[i] = make([]byte, 32)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range protoState.BuilderPendingPayments {
|
|
||||||
protoState.BuilderPendingPayments[i] = ðpb.BuilderPendingPayment{
|
|
||||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
|
||||||
FeeRecipient: make([]byte, 20),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pubkeys := make([][]byte, cfg.SyncCommitteeSize)
|
|
||||||
for i := range pubkeys {
|
|
||||||
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
|
|
||||||
}
|
|
||||||
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
|
|
||||||
protoState.CurrentSyncCommittee = ðpb.SyncCommittee{
|
|
||||||
Pubkeys: pubkeys,
|
|
||||||
AggregatePubkey: aggPubkey,
|
|
||||||
}
|
|
||||||
protoState.NextSyncCommittee = ðpb.SyncCommittee{
|
|
||||||
Pubkeys: pubkeys,
|
|
||||||
AggregatePubkey: aggPubkey,
|
|
||||||
}
|
|
||||||
|
|
||||||
st, err := state_native.InitializeFromProtoGloas(protoState)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, version.Gloas, st.Version())
|
|
||||||
return st
|
|
||||||
}
|
|
||||||
|
|
||||||
func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
|
|
||||||
return ðpb.BeaconBlockHeader{
|
|
||||||
ParentRoot: make([]byte, 32),
|
|
||||||
StateRoot: make([]byte, 32),
|
|
||||||
BodyRoot: make([]byte, 32),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -512,11 +512,6 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "data column sidecars file path open")
|
return nil, errors.Wrap(err, "data column sidecars file path open")
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
if closeErr := file.Close(); closeErr != nil {
|
|
||||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during Get")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Read file metadata.
|
// Read file metadata.
|
||||||
metadata, err := dcs.metadata(file)
|
metadata, err := dcs.metadata(file)
|
||||||
|
|||||||
@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
|||||||
return subscribed
|
return subscribed
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
func TestUpdateCustodyInfo(t *testing.T) {
|
func TestUpdateCustodyInfo(t *testing.T) {
|
||||||
ctx := t.Context()
|
ctx := t.Context()
|
||||||
|
|
||||||
|
|||||||
@@ -22,10 +22,6 @@ var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
|
|||||||
// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
|
// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
|
||||||
var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")
|
var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")
|
||||||
|
|
||||||
// ErrStateDiffIncompatible is returned when state-diff feature is enabled
|
|
||||||
// but the database was created without state-diff support.
|
|
||||||
var ErrStateDiffIncompatible = errors.New("state-diff feature enabled but database was created without state-diff support")
|
|
||||||
|
|
||||||
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
|
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
|
||||||
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
|
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
|
||||||
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
|
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
|
||||||
|
|||||||
@@ -42,10 +42,6 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
|
|||||||
if err := s.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
|
if err := s.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
|
||||||
return errors.Wrap(err, "could not save genesis block root")
|
return errors.Wrap(err, "could not save genesis block root")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.initializeStateDiff(0, genesisState); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to initialize state diff for genesis")
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -203,47 +203,17 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := kv.startStateDiff(ctx); err != nil {
|
if features.Get().EnableStateDiff {
|
||||||
if errors.Is(err, ErrStateDiffIncompatible) {
|
sdCache, err := newStateDiffCache(kv)
|
||||||
return kv, err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
return nil, err
|
kv.stateDiffCache = sdCache
|
||||||
}
|
}
|
||||||
|
|
||||||
return kv, nil
|
return kv, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv *Store) startStateDiff(ctx context.Context) error {
|
|
||||||
if !features.Get().EnableStateDiff {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Check if offset already exists (existing state-diff database).
|
|
||||||
hasOffset, err := kv.hasStateDiffOffset()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasOffset {
|
|
||||||
// Existing state-diff database - restarts not yet supported.
|
|
||||||
return errors.New("restarting with existing state-diff database not yet supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if this is a new database (no head block).
|
|
||||||
headBlock, err := kv.HeadBlock(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not get head block")
|
|
||||||
}
|
|
||||||
|
|
||||||
if headBlock == nil {
|
|
||||||
// New database - will be initialized later during checkpoint/genesis sync.
|
|
||||||
// stateDiffCache stays nil until SaveOrigin or SaveGenesisData initializes it.
|
|
||||||
log.Info("State-diff enabled: will be initialized during checkpoint or genesis sync")
|
|
||||||
} else {
|
|
||||||
// Existing database without state-diff - return store with error for caller to handle.
|
|
||||||
return ErrStateDiffIncompatible
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearDB removes the previously stored database in the data directory.
|
// ClearDB removes the previously stored database in the data directory.
|
||||||
func (s *Store) ClearDB() error {
|
func (s *Store) ClearDB() error {
|
||||||
if err := s.Close(); err != nil {
|
if err := s.Close(); err != nil {
|
||||||
|
|||||||
@@ -1053,10 +1053,6 @@ func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
 		return nil, err
 	}

-	if uint64(slot) < s.getOffset() {
-		return nil, ErrSlotBeforeOffset
-	}
-
 	st, err := s.stateByDiff(ctx, slot)
 	if err != nil {
 		return nil, err
@@ -1074,10 +1070,6 @@ func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
 		return false, err
 	}

-	if uint64(slot) < s.getOffset() {
-		return false, ErrSlotBeforeOffset
-	}
-
 	stateLvl := computeLevel(s.getOffset(), slot)
 	return stateLvl != -1, nil
 }

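Both guards removed in the hunk above reduce to one precondition: a slot below the recorded diff-tree offset predates the first full snapshot, so it cannot be reconstructed from diffs. A schematic of that check (names are illustrative; the callback stands in for stateByDiff):

package sketch

import "errors"

// ErrSlotBeforeOffset mirrors the sentinel error used by the store.
var ErrSlotBeforeOffset = errors.New("slot is before state-diff root offset")

// stateBySlotGuarded rejects slots outside the diff tree before doing any work,
// which is the behaviour the left-hand side adds to getStateUsingStateDiff and
// hasStateUsingStateDiff.
func stateBySlotGuarded[T any](offset, slot uint64, byDiff func(uint64) (T, error)) (T, error) {
	var zero T
	if slot < offset {
		return zero, ErrSlotBeforeOffset
	}
	return byDiff(slot)
}
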
@@ -24,7 +24,7 @@ const (
 */

 // SlotInDiffTree returns whether the given slot is a saving point in the diff tree.
-// If it is, it also returns the offset and level in the tree.
+// It it is, it also returns the offset and level in the tree.
 func (s *Store) SlotInDiffTree(slot primitives.Slot) (uint64, int, error) {
 	offset := s.getOffset()
 	if uint64(slot) < offset {

@@ -25,7 +25,7 @@ func newStateDiffCache(s *Store) (*stateDiffCache, error) {
 			return bbolt.ErrBucketNotFound
 		}

-		offsetBytes := bucket.Get(offsetKey)
+		offsetBytes := bucket.Get([]byte("offset"))
 		if offsetBytes == nil {
 			return errors.New("state diff cache: offset not found")
 		}

@@ -9,19 +9,17 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
-	"github.com/OffchainLabs/prysm/v7/config/features"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/hdiff"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/math"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
-	pkgerrors "github.com/pkg/errors"
 	"go.etcd.io/bbolt"
 )

 var (
 	offsetKey = []byte("offset")
-	ErrSlotBeforeOffset = errors.New("slot is before state-diff root offset")
+	ErrSlotBeforeOffset = errors.New("slot is before root offset")
 )

 func makeKeyForStateDiffTree(level int, slot uint64) []byte {
@@ -75,9 +73,6 @@ func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (an

 // computeLevel computes the level in the diff tree. Returns -1 in case slot should not be in tree.
 func computeLevel(offset uint64, slot primitives.Slot) int {
-	if uint64(slot) < offset {
-		return -1
-	}
 	rel := uint64(slot) - offset
 	for i, exp := range flags.Get().StateDiffExponents {
 		if exp < 2 || exp >= 64 {
@@ -124,66 +119,6 @@ func (s *Store) getOffset() uint64 {
|
|||||||
return s.stateDiffCache.getOffset()
|
return s.stateDiffCache.getOffset()
|
||||||
}
|
}
|
||||||
|
|
||||||
// hasStateDiffOffset checks if the state-diff offset has been set in the database.
|
|
||||||
// This is used to detect if an existing database has state-diff enabled.
|
|
||||||
func (s *Store) hasStateDiffOffset() (bool, error) {
|
|
||||||
var hasOffset bool
|
|
||||||
err := s.db.View(func(tx *bbolt.Tx) error {
|
|
||||||
bucket := tx.Bucket(stateDiffBucket)
|
|
||||||
if bucket == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
hasOffset = bucket.Get(offsetKey) != nil
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
return hasOffset, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// initializeStateDiff sets up the state-diff schema for a new database.
|
|
||||||
// This should be called during checkpoint sync or genesis sync.
|
|
||||||
func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.ReadOnlyBeaconState) error {
|
|
||||||
// Return early if the feature is not set
|
|
||||||
if !features.Get().EnableStateDiff {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Only reinitialize if the offset is different
|
|
||||||
if s.stateDiffCache != nil {
|
|
||||||
if s.stateDiffCache.getOffset() == uint64(slot) {
|
|
||||||
log.WithField("offset", slot).Warning("Ignoring state diff cache reinitialization")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Write offset directly to the database (without using cache which doesn't exist yet).
|
|
||||||
err := s.db.Update(func(tx *bbolt.Tx) error {
|
|
||||||
bucket := tx.Bucket(stateDiffBucket)
|
|
||||||
if bucket == nil {
|
|
||||||
return bbolt.ErrBucketNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
offsetBytes := make([]byte, 8)
|
|
||||||
binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
|
|
||||||
return bucket.Put(offsetKey, offsetBytes)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return pkgerrors.Wrap(err, "failed to set offset")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the state diff cache (this will read the offset from the database).
|
|
||||||
sdCache, err := newStateDiffCache(s)
|
|
||||||
if err != nil {
|
|
||||||
return pkgerrors.Wrap(err, "failed to create state diff cache")
|
|
||||||
}
|
|
||||||
s.stateDiffCache = sdCache
|
|
||||||
|
|
||||||
// Save the initial state as a full snapshot.
|
|
||||||
if err := s.saveFullSnapshot(initialState); err != nil {
|
|
||||||
return pkgerrors.Wrap(err, "failed to save initial snapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
log.WithField("offset", slot).Info("Initialized state-diff cache")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func keyForSnapshot(v int) ([]byte, error) {
|
func keyForSnapshot(v int) ([]byte, error) {
|
||||||
switch v {
|
switch v {
|
||||||
case version.Fulu:
|
case version.Fulu:
|
||||||
|
|||||||
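The functions removed in the hunk above persist the diff-tree offset as a little-endian uint64 under the "offset" key of the state-diff bucket, and the cache constructor reads it back on startup. A minimal bbolt round-trip illustrating that encoding (the bucket name here is a stand-in for the store's own bucket constant):

package sketch

import (
	"encoding/binary"

	"go.etcd.io/bbolt"
)

var (
	stateDiffBucket = []byte("state-diff") // stand-in name; the store defines its own bucket constant
	offsetKey       = []byte("offset")
)

// writeOffset persists the first diff-tree slot the way the removed
// initializeStateDiff did: a little-endian uint64 under the "offset" key.
func writeOffset(db *bbolt.DB, slot uint64) error {
	return db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(stateDiffBucket)
		if err != nil {
			return err
		}
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, slot)
		return b.Put(offsetKey, buf)
	})
}

// readOffset is the counterpart used when the state-diff cache is built.
func readOffset(db *bbolt.DB) (offset uint64, found bool, err error) {
	err = db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket(stateDiffBucket)
		if b == nil {
			return nil
		}
		if v := b.Get(offsetKey); v != nil {
			offset, found = binary.LittleEndian.Uint64(v), true
		}
		return nil
	})
	return offset, found, err
}
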
@@ -43,12 +43,8 @@ func TestStateDiff_ComputeLevel(t *testing.T) {

 	offset := db.getOffset()

-	// should be -1. slot < offset
-	lvl := computeLevel(10, primitives.Slot(9))
-	require.Equal(t, -1, lvl)
-
 	// 2 ** 21
-	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
+	lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
 	require.Equal(t, 0, lvl)

 	// 2 ** 21 * 3

@@ -1395,23 +1395,6 @@ func TestStore_CanSaveRetrieveStateUsingStateDiff(t *testing.T) {
|
|||||||
require.IsNil(t, readSt)
|
require.IsNil(t, readSt)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("slot before offset", func(t *testing.T) {
|
|
||||||
db := setupDB(t)
|
|
||||||
setDefaultStateDiffExponents()
|
|
||||||
|
|
||||||
err := setOffsetInDB(db, 10)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
r := bytesutil.ToBytes32([]byte{'A'})
|
|
||||||
ss := ðpb.StateSummary{Slot: 9, Root: r[:]}
|
|
||||||
err = db.SaveStateSummary(t.Context(), ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
st, err := db.getStateUsingStateDiff(t.Context(), r)
|
|
||||||
require.ErrorIs(t, err, ErrSlotBeforeOffset)
|
|
||||||
require.IsNil(t, st)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Full state snapshot", func(t *testing.T) {
|
t.Run("Full state snapshot", func(t *testing.T) {
|
||||||
t.Run("using state summary", func(t *testing.T) {
|
t.Run("using state summary", func(t *testing.T) {
|
||||||
for v := range version.All() {
|
for v := range version.All() {
|
||||||
@@ -1644,21 +1627,4 @@ func TestStore_HasStateUsingStateDiff(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("slot before offset", func(t *testing.T) {
|
|
||||||
db := setupDB(t)
|
|
||||||
setDefaultStateDiffExponents()
|
|
||||||
|
|
||||||
err := setOffsetInDB(db, 10)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
r := bytesutil.ToBytes32([]byte{'B'})
|
|
||||||
ss := ðpb.StateSummary{Slot: 0, Root: r[:]}
|
|
||||||
err = db.SaveStateSummary(t.Context(), ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
hasState, err := db.hasStateUsingStateDiff(t.Context(), r)
|
|
||||||
require.ErrorIs(t, err, ErrSlotBeforeOffset)
|
|
||||||
require.Equal(t, false, hasState)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -110,8 +110,6 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
 	if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
 		return errors.Wrap(err, "save finalized checkpoint")
 	}
-	if err := s.initializeStateDiff(state.Slot(), state); err != nil {
-		return errors.Wrap(err, "failed to initialize state diff")
-	}
 	return nil
 }

@@ -64,6 +64,7 @@ go_library(
|
|||||||
"//monitoring/tracing:go_default_library",
|
"//monitoring/tracing:go_default_library",
|
||||||
"//runtime:go_default_library",
|
"//runtime:go_default_library",
|
||||||
"//runtime/prereqs:go_default_library",
|
"//runtime/prereqs:go_default_library",
|
||||||
|
"//runtime/version:go_default_library",
|
||||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||||
"@com_github_pkg_errors//:go_default_library",
|
"@com_github_pkg_errors//:go_default_library",
|
||||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||||
|
|||||||
@@ -66,6 +66,7 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v7/monitoring/prometheus"
|
"github.com/OffchainLabs/prysm/v7/monitoring/prometheus"
|
||||||
"github.com/OffchainLabs/prysm/v7/runtime"
|
"github.com/OffchainLabs/prysm/v7/runtime"
|
||||||
"github.com/OffchainLabs/prysm/v7/runtime/prereqs"
|
"github.com/OffchainLabs/prysm/v7/runtime/prereqs"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
@@ -468,6 +469,10 @@ func (b *BeaconNode) OperationFeed() event.SubscriberSender {
|
|||||||
func (b *BeaconNode) Start() {
|
func (b *BeaconNode) Start() {
|
||||||
b.lock.Lock()
|
b.lock.Lock()
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"version": version.Version(),
|
||||||
|
}).Info("Starting beacon node")
|
||||||
|
|
||||||
b.services.StartAll()
|
b.services.StartAll()
|
||||||
|
|
||||||
stop := b.stop
|
stop := b.stop
|
||||||
@@ -535,12 +540,7 @@ func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store,
|
|||||||
log.WithField("databasePath", dbPath).Info("Checking DB")
|
log.WithField("databasePath", dbPath).Info("Checking DB")
|
||||||
|
|
||||||
d, err := kv.NewKVStore(ctx, dbPath)
|
d, err := kv.NewKVStore(ctx, dbPath)
|
||||||
if errors.Is(err, kv.ErrStateDiffIncompatible) {
|
if err != nil {
|
||||||
log.WithError(err).Warn("Disabling state-diff feature")
|
|
||||||
cfg := features.Get()
|
|
||||||
cfg.EnableStateDiff = false
|
|
||||||
features.Init(cfg)
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
|
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
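On the left-hand side of the openDB hunk above, an existing database without state-diff support is a soft failure: the error is logged, the feature flag is flipped off for this run, and the store that NewKVStore already returned keeps being used. A condensed sketch of that fallback, under the assumption that the identifiers are the ones node.go already imports (the wrapper name and logger setup are illustrative):

package sketch

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
	"github.com/OffchainLabs/prysm/v7/config/features"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "node") // illustrative; node.go has its own package logger

// openStoreWithFallback mirrors the tolerant open path: an incompatible database
// disables the state-diff feature instead of aborting startup.
func openStoreWithFallback(ctx context.Context, dbPath string) (*kv.Store, error) {
	d, err := kv.NewKVStore(ctx, dbPath)
	if errors.Is(err, kv.ErrStateDiffIncompatible) {
		log.WithError(err).Warn("Disabling state-diff feature")
		cfg := features.Get()
		cfg.EnableStateDiff = false
		features.Init(cfg)
		return d, nil // the store itself is usable, only the feature is off
	} else if err != nil {
		return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
	}
	return d, nil
}
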
@@ -68,6 +68,7 @@ func TestNodeClose_OK(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNodeStart_Ok(t *testing.T) {
|
func TestNodeStart_Ok(t *testing.T) {
|
||||||
|
hook := logTest.NewGlobal()
|
||||||
app := cli.App{}
|
app := cli.App{}
|
||||||
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
||||||
set := flag.NewFlagSet("test", 0)
|
set := flag.NewFlagSet("test", 0)
|
||||||
@@ -96,9 +97,11 @@ func TestNodeStart_Ok(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
time.Sleep(3 * time.Second)
|
time.Sleep(3 * time.Second)
|
||||||
node.Close()
|
node.Close()
|
||||||
|
require.LogsContain(t, hook, "Starting beacon node")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNodeStart_SyncChecker(t *testing.T) {
|
func TestNodeStart_SyncChecker(t *testing.T) {
|
||||||
|
hook := logTest.NewGlobal()
|
||||||
app := cli.App{}
|
app := cli.App{}
|
||||||
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
||||||
set := flag.NewFlagSet("test", 0)
|
set := flag.NewFlagSet("test", 0)
|
||||||
@@ -124,6 +127,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
|
|||||||
time.Sleep(3 * time.Second)
|
time.Sleep(3 * time.Second)
|
||||||
assert.NotNil(t, node.syncChecker.Svc)
|
assert.NotNil(t, node.syncChecker.Svc)
|
||||||
node.Close()
|
node.Close()
|
||||||
|
require.LogsContain(t, hook, "Starting beacon node")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestClearDB tests clearing the database
|
// TestClearDB tests clearing the database
|
||||||
|
|||||||
@@ -575,7 +575,7 @@ func (s *Service) beaconEndpoints(
|
|||||||
name: namespace + ".PublishBlockV2",
|
name: namespace + ".PublishBlockV2",
|
||||||
middleware: []middleware.Middleware{
|
middleware: []middleware.Middleware{
|
||||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||||
middleware.AcceptEncodingHeaderHandler(),
|
middleware.AcceptEncodingHeaderHandler(),
|
||||||
},
|
},
|
||||||
handler: server.PublishBlockV2,
|
handler: server.PublishBlockV2,
|
||||||
@@ -586,7 +586,7 @@ func (s *Service) beaconEndpoints(
|
|||||||
name: namespace + ".PublishBlindedBlockV2",
|
name: namespace + ".PublishBlindedBlockV2",
|
||||||
middleware: []middleware.Middleware{
|
middleware: []middleware.Middleware{
|
||||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||||
middleware.AcceptEncodingHeaderHandler(),
|
middleware.AcceptEncodingHeaderHandler(),
|
||||||
},
|
},
|
||||||
handler: server.PublishBlindedBlockV2,
|
handler: server.PublishBlindedBlockV2,
|
||||||
|
|||||||
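For the two publish endpoints above, the request body may be JSON or SSZ (application/octet-stream), while the Accept values differ between the sides of the hunk: the left also negotiates octet-stream responses, the right accepts only JSON. A hedged client sketch for the SSZ request path (URL and the consensus-version header name are assumptions, not taken from this diff):

package sketch

import (
	"bytes"
	"fmt"
	"net/http"
)

// publishSSZ posts an SSZ-encoded signed block to the V2 publish route.
func publishSSZ(sszBlock []byte, consensusVersion string) error {
	req, err := http.NewRequest(http.MethodPost, "http://localhost:3500/eth/v2/beacon/blocks", bytes.NewReader(sszBlock))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream") // api.OctetStreamMediaType
	req.Header.Set("Accept", "application/json")                // the only Accept value kept on the right-hand side
	req.Header.Set("Eth-Consensus-Version", consensusVersion)   // fork name, e.g. "fulu"; assumed header name
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("publish failed: %s", resp.Status)
	}
	return nil
}
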
@@ -8,6 +8,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/OffchainLabs/prysm/v7/api"
|
"github.com/OffchainLabs/prysm/v7/api"
|
||||||
@@ -25,6 +26,7 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||||
@@ -1042,27 +1044,112 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
|
|||||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
|
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var root []byte
|
||||||
blockID := r.PathValue("block_id")
|
blockID := r.PathValue("block_id")
|
||||||
if blockID == "" {
|
if blockID == "" {
|
||||||
httputil.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
|
httputil.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
root, err := s.Blocker.BlockRoot(ctx, []byte(blockID))
|
switch blockID {
|
||||||
if !shared.WriteBlockRootFetchError(w, err) {
|
case "head":
|
||||||
return
|
root, err = s.ChainInfoFetcher.HeadRoot(ctx)
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, "Could not retrieve head root: "+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if root == nil {
|
||||||
|
httputil.HandleError(w, "No head root was found", http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case "finalized":
|
||||||
|
finalized := s.ChainInfoFetcher.FinalizedCheckpt()
|
||||||
|
root = finalized.Root
|
||||||
|
case "genesis":
|
||||||
|
blk, err := s.BeaconDB.GenesisBlock(ctx)
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, "Could not retrieve genesis block: "+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||||
|
httputil.HandleError(w, "Could not find genesis block: "+err.Error(), http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
blkRoot, err := blk.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, "Could not hash genesis block: "+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
root = blkRoot[:]
|
||||||
|
default:
|
||||||
|
isHex := strings.HasPrefix(blockID, "0x")
|
||||||
|
if isHex {
|
||||||
|
blockIDBytes, err := hexutil.Decode(blockID)
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, "Could not decode block ID into bytes: "+err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(blockIDBytes) != fieldparams.RootLength {
|
||||||
|
httputil.HandleError(w, fmt.Sprintf("Block ID has length %d instead of %d", len(blockIDBytes), fieldparams.RootLength), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
blockID32 := bytesutil.ToBytes32(blockIDBytes)
|
||||||
|
blk, err := s.BeaconDB.Block(ctx, blockID32)
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, fmt.Sprintf("Could not retrieve block for block root %#x: %v", blockID, err), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||||
|
httputil.HandleError(w, "Could not find block: "+err.Error(), http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
root = blockIDBytes
|
||||||
|
} else {
|
||||||
|
slot, err := strconv.ParseUint(blockID, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, "Could not parse block ID: "+err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
hasRoots, roots, err := s.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, fmt.Sprintf("Could not retrieve blocks for slot %d: %v", slot, err), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !hasRoots {
|
||||||
|
httputil.HandleError(w, "Could not find any blocks with given slot", http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
root = roots[0][:]
|
||||||
|
if len(roots) == 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
for _, blockRoot := range roots {
|
||||||
|
canonical, err := s.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
|
||||||
|
if err != nil {
|
||||||
|
httputil.HandleError(w, "Could not determine if block root is canonical: "+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if canonical {
|
||||||
|
root = blockRoot[:]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
b32Root := bytesutil.ToBytes32(root)
|
||||||
|
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
|
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
response := &structs.BlockRootResponse{
|
response := &structs.BlockRootResponse{
|
||||||
Data: &structs.BlockRoot{
|
Data: &structs.BlockRoot{
|
||||||
Root: hexutil.Encode(root[:]),
|
Root: hexutil.Encode(root),
|
||||||
},
|
},
|
||||||
ExecutionOptimistic: isOptimistic,
|
ExecutionOptimistic: isOptimistic,
|
||||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, root),
|
Finalized: s.FinalizationFetcher.IsFinalized(ctx, b32Root),
|
||||||
}
|
}
|
||||||
httputil.WriteJson(w, response)
|
httputil.WriteJson(w, response)
|
||||||
}
|
}
|
||||||
|
|||||||
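The rewritten handler above resolves block_id in a fixed order: the names head, finalized and genesis, then a 0x-prefixed 32-byte root (verifying the block exists), then a decimal slot, preferring the canonical root when the slot has more than one block. A small client-side sketch exercising those forms, assuming the standard Beacon API route /eth/v1/beacon/blocks/{block_id}/root and a placeholder node address:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://localhost:3500" // hypothetical beacon node REST address
	ids := []string{
		"head",      // current head root from the chain-info fetcher
		"finalized", // root of the finalized checkpoint
		"genesis",   // hash tree root of the genesis block
		"0x" + "00000000000000000000000000000000000000000000000000000000000000aa", // explicit 32-byte root
		"105", // decimal slot; 404 when no block exists at that slot
	}
	for _, id := range ids {
		resp, err := http.Get(fmt.Sprintf("%s/eth/v1/beacon/blocks/%s/root", base, id))
		if err != nil {
			fmt.Println(id, "request failed:", err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%s -> %d %s\n", id, resp.StatusCode, body)
	}
}
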
@@ -26,8 +26,8 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
|
||||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||||
|
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||||
|
|||||||
@@ -50,14 +50,6 @@ import (
|
|||||||
"go.uber.org/mock/gomock"
|
"go.uber.org/mock/gomock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// fillGloasBlockTestData populates a Gloas block with non-zero test values for the
|
|
||||||
// Gloas-specific fields: SignedExecutionPayloadBid and PayloadAttestations.
|
|
||||||
func fillGloasBlockTestData(b *eth.SignedBeaconBlockGloas, numPayloadAttestations int) {
|
|
||||||
slot := b.Block.Slot
|
|
||||||
b.Block.Body.SignedExecutionPayloadBid = util.GenerateTestSignedExecutionPayloadBid(slot)
|
|
||||||
b.Block.Body.PayloadAttestations = util.GenerateTestPayloadAttestations(numPayloadAttestations, slot)
|
|
||||||
}
|
|
||||||
|
|
||||||
func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*eth.SignedBeaconBlock, []*eth.BeaconBlockContainer) {
|
func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*eth.SignedBeaconBlock, []*eth.BeaconBlockContainer) {
|
||||||
parentRoot := [32]byte{1, 2, 3}
|
parentRoot := [32]byte{1, 2, 3}
|
||||||
genBlk := util.NewBeaconBlock()
|
genBlk := util.NewBeaconBlock()
|
||||||
@@ -343,50 +335,6 @@ func TestGetBlockV2(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.DeepEqual(t, blk, b)
|
assert.DeepEqual(t, blk, b)
|
||||||
})
|
})
|
||||||
t.Run("gloas", func(t *testing.T) {
|
|
||||||
b := util.NewBeaconBlockGloas()
|
|
||||||
b.Block.Slot = 123
|
|
||||||
fillGloasBlockTestData(b, 2)
|
|
||||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
|
||||||
require.NoError(t, err)
|
|
||||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
|
||||||
mockChainService := &chainMock.ChainService{
|
|
||||||
FinalizedRoots: map[[32]byte]bool{},
|
|
||||||
}
|
|
||||||
s := &Server{
|
|
||||||
OptimisticModeFetcher: mockChainService,
|
|
||||||
FinalizationFetcher: mockChainService,
|
|
||||||
Blocker: mockBlockFetcher,
|
|
||||||
}
|
|
||||||
|
|
||||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}", nil)
|
|
||||||
request.SetPathValue("block_id", "head")
|
|
||||||
writer := httptest.NewRecorder()
|
|
||||||
writer.Body = &bytes.Buffer{}
|
|
||||||
|
|
||||||
s.GetBlockV2(writer, request)
|
|
||||||
require.Equal(t, http.StatusOK, writer.Code)
|
|
||||||
resp := &structs.GetBlockV2Response{}
|
|
||||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
|
||||||
assert.Equal(t, version.String(version.Gloas), resp.Version)
|
|
||||||
sbb := &structs.SignedBeaconBlockGloas{Message: &structs.BeaconBlockGloas{}}
|
|
||||||
require.NoError(t, json.Unmarshal(resp.Data.Message, sbb.Message))
|
|
||||||
sbb.Signature = resp.Data.Signature
|
|
||||||
blk, err := sbb.ToConsensus()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.DeepEqual(t, blk, b)
|
|
||||||
|
|
||||||
// Verify Gloas-specific fields are correctly serialized/deserialized
|
|
||||||
require.NotNil(t, blk.Block.Body.SignedExecutionPayloadBid)
|
|
||||||
assert.Equal(t, primitives.Slot(123), blk.Block.Body.SignedExecutionPayloadBid.Message.Slot)
|
|
||||||
assert.Equal(t, primitives.BuilderIndex(1), blk.Block.Body.SignedExecutionPayloadBid.Message.BuilderIndex)
|
|
||||||
require.Equal(t, 2, len(blk.Block.Body.PayloadAttestations))
|
|
||||||
for _, att := range blk.Block.Body.PayloadAttestations {
|
|
||||||
assert.Equal(t, primitives.Slot(123), att.Data.Slot)
|
|
||||||
assert.Equal(t, true, att.Data.PayloadPresent)
|
|
||||||
assert.Equal(t, true, att.Data.BlobDataAvailable)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
t.Run("execution optimistic", func(t *testing.T) {
|
t.Run("execution optimistic", func(t *testing.T) {
|
||||||
b := util.NewBeaconBlockBellatrix()
|
b := util.NewBeaconBlockBellatrix()
|
||||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||||
@@ -626,37 +574,6 @@ func TestGetBlockSSZV2(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
||||||
})
|
})
|
||||||
t.Run("gloas", func(t *testing.T) {
|
|
||||||
b := util.NewBeaconBlockGloas()
|
|
||||||
b.Block.Slot = 123
|
|
||||||
fillGloasBlockTestData(b, 2)
|
|
||||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
s := &Server{
|
|
||||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
|
||||||
}
|
|
||||||
|
|
||||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}", nil)
|
|
||||||
request.SetPathValue("block_id", "head")
|
|
||||||
request.Header.Set("Accept", api.OctetStreamMediaType)
|
|
||||||
writer := httptest.NewRecorder()
|
|
||||||
writer.Body = &bytes.Buffer{}
|
|
||||||
|
|
||||||
s.GetBlockV2(writer, request)
|
|
||||||
require.Equal(t, http.StatusOK, writer.Code)
|
|
||||||
assert.Equal(t, version.String(version.Gloas), writer.Header().Get(api.VersionHeader))
|
|
||||||
sszExpected, err := b.MarshalSSZ()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
|
||||||
|
|
||||||
// Verify SSZ round-trip preserves Gloas-specific fields
|
|
||||||
decoded := ð.SignedBeaconBlockGloas{}
|
|
||||||
require.NoError(t, decoded.UnmarshalSSZ(writer.Body.Bytes()))
|
|
||||||
require.NotNil(t, decoded.Block.Body.SignedExecutionPayloadBid)
|
|
||||||
assert.Equal(t, primitives.Slot(123), decoded.Block.Body.SignedExecutionPayloadBid.Message.Slot)
|
|
||||||
require.Equal(t, 2, len(decoded.Block.Body.PayloadAttestations))
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetBlockAttestationsV2(t *testing.T) {
|
func TestGetBlockAttestationsV2(t *testing.T) {
|
||||||
@@ -2592,10 +2509,6 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
|||||||
HeadFetcher: mockChainFetcher,
|
HeadFetcher: mockChainFetcher,
|
||||||
OptimisticModeFetcher: mockChainFetcher,
|
OptimisticModeFetcher: mockChainFetcher,
|
||||||
FinalizationFetcher: mockChainFetcher,
|
FinalizationFetcher: mockChainFetcher,
|
||||||
Blocker: &lookup.BeaconDbBlocker{
|
|
||||||
BeaconDB: beaconDB,
|
|
||||||
ChainInfoFetcher: mockChainFetcher,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
root, err := genBlk.Block.HashTreeRoot()
|
root, err := genBlk.Block.HashTreeRoot()
|
||||||
@@ -2611,7 +2524,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "bad formatting",
|
name: "bad formatting",
|
||||||
blockID: map[string]string{"block_id": "3bad0"},
|
blockID: map[string]string{"block_id": "3bad0"},
|
||||||
wantErr: "Invalid block ID",
|
wantErr: "Could not parse block ID",
|
||||||
wantCode: http.StatusBadRequest,
|
wantCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -2659,7 +2572,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "non-existent root",
|
name: "non-existent root",
|
||||||
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
|
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
|
||||||
wantErr: "Block not found",
|
wantErr: "Could not find block",
|
||||||
wantCode: http.StatusNotFound,
|
wantCode: http.StatusNotFound,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -2672,7 +2585,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "no block",
|
name: "no block",
|
||||||
blockID: map[string]string{"block_id": "105"},
|
blockID: map[string]string{"block_id": "105"},
|
||||||
wantErr: "Block not found",
|
wantErr: "Could not find any blocks with given slot",
|
||||||
wantCode: http.StatusNotFound,
|
wantCode: http.StatusNotFound,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -2720,10 +2633,6 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
|||||||
HeadFetcher: mockChainFetcher,
|
HeadFetcher: mockChainFetcher,
|
||||||
OptimisticModeFetcher: mockChainFetcher,
|
OptimisticModeFetcher: mockChainFetcher,
|
||||||
FinalizationFetcher: mockChainFetcher,
|
FinalizationFetcher: mockChainFetcher,
|
||||||
Blocker: &lookup.BeaconDbBlocker{
|
|
||||||
BeaconDB: beaconDB,
|
|
||||||
ChainInfoFetcher: mockChainFetcher,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||||
@@ -2759,10 +2668,6 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
|||||||
HeadFetcher: mockChainFetcher,
|
HeadFetcher: mockChainFetcher,
|
||||||
OptimisticModeFetcher: mockChainFetcher,
|
OptimisticModeFetcher: mockChainFetcher,
|
||||||
FinalizationFetcher: mockChainFetcher,
|
FinalizationFetcher: mockChainFetcher,
|
||||||
Blocker: &lookup.BeaconDbBlocker{
|
|
||||||
BeaconDB: beaconDB,
|
|
||||||
ChainInfoFetcher: mockChainFetcher,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
t.Run("true", func(t *testing.T) {
|
t.Run("true", func(t *testing.T) {
|
||||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ go_library(
|
|||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
deps = [
|
deps = [
|
||||||
"//api/server/structs:go_default_library",
|
"//api/server/structs:go_default_library",
|
||||||
"//config/fieldparams:go_default_library",
|
|
||||||
"//config/params:go_default_library",
|
"//config/params:go_default_library",
|
||||||
"//monitoring/tracing/trace:go_default_library",
|
"//monitoring/tracing/trace:go_default_library",
|
||||||
"//network/httputil:go_default_library",
|
"//network/httputil:go_default_library",
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||||
@@ -182,16 +181,6 @@ func prepareConfigSpec() (map[string]any, error) {
|
|||||||
data[tag] = convertValueForJSON(val, tag)
|
data[tag] = convertValueForJSON(val, tag)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add Fulu preset values. These are compile-time constants from fieldparams,
|
|
||||||
// not runtime configs, but are required by the /eth/v1/config/spec API.
|
|
||||||
data["NUMBER_OF_COLUMNS"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "NUMBER_OF_COLUMNS")
|
|
||||||
data["CELLS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "CELLS_PER_EXT_BLOB")
|
|
||||||
data["FIELD_ELEMENTS_PER_CELL"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.CellsPerBlob)), "FIELD_ELEMENTS_PER_CELL")
|
|
||||||
data["FIELD_ELEMENTS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(config.FieldElementsPerBlob*2), "FIELD_ELEMENTS_PER_EXT_BLOB")
|
|
||||||
data["KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH"] = convertValueForJSON(reflect.ValueOf(uint64(4)), "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH")
|
|
||||||
// UPDATE_TIMEOUT is derived from SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
|
||||||
data["UPDATE_TIMEOUT"] = convertValueForJSON(reflect.ValueOf(uint64(config.SlotsPerEpoch)*uint64(config.EpochsPerSyncCommitteePeriod)), "UPDATE_TIMEOUT")
|
|
||||||
|
|
||||||
return data, nil
|
return data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -83,7 +83,6 @@ func TestGetSpec(t *testing.T) {
|
|||||||
config.ElectraForkEpoch = 107
|
config.ElectraForkEpoch = 107
|
||||||
config.FuluForkVersion = []byte("FuluForkVersion")
|
config.FuluForkVersion = []byte("FuluForkVersion")
|
||||||
config.FuluForkEpoch = 109
|
config.FuluForkEpoch = 109
|
||||||
config.GloasForkEpoch = 110
|
|
||||||
config.BLSWithdrawalPrefixByte = byte('b')
|
config.BLSWithdrawalPrefixByte = byte('b')
|
||||||
config.ETH1AddressWithdrawalPrefixByte = byte('c')
|
config.ETH1AddressWithdrawalPrefixByte = byte('c')
|
||||||
config.GenesisDelay = 24
|
config.GenesisDelay = 24
|
||||||
@@ -133,12 +132,8 @@ func TestGetSpec(t *testing.T) {
|
|||||||
config.MinSyncCommitteeParticipants = 71
|
config.MinSyncCommitteeParticipants = 71
|
||||||
config.ProposerReorgCutoffBPS = primitives.BP(121)
|
config.ProposerReorgCutoffBPS = primitives.BP(121)
|
||||||
config.AttestationDueBPS = primitives.BP(122)
|
config.AttestationDueBPS = primitives.BP(122)
|
||||||
config.AggregateDueBPS = primitives.BP(123)
|
config.AggregrateDueBPS = primitives.BP(123)
|
||||||
config.ContributionDueBPS = primitives.BP(124)
|
config.ContributionDueBPS = primitives.BP(124)
|
||||||
config.AttestationDueBPSGloas = primitives.BP(126)
|
|
||||||
config.AggregateDueBPSGloas = primitives.BP(127)
|
|
||||||
config.SyncMessageDueBPSGloas = primitives.BP(128)
|
|
||||||
config.ContributionDueBPSGloas = primitives.BP(129)
|
|
||||||
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
|
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
|
||||||
config.TerminalBlockHashActivationEpoch = 72
|
config.TerminalBlockHashActivationEpoch = 72
|
||||||
config.TerminalTotalDifficulty = "73"
|
config.TerminalTotalDifficulty = "73"
|
||||||
@@ -175,8 +170,6 @@ func TestGetSpec(t *testing.T) {
|
|||||||
config.SyncMessageDueBPS = 103
|
config.SyncMessageDueBPS = 103
|
||||||
config.BuilderWithdrawalPrefixByte = byte('b')
|
config.BuilderWithdrawalPrefixByte = byte('b')
|
||||||
config.BuilderIndexSelfBuild = primitives.BuilderIndex(125)
|
config.BuilderIndexSelfBuild = primitives.BuilderIndex(125)
|
||||||
config.BuilderPaymentThresholdNumerator = 104
|
|
||||||
config.BuilderPaymentThresholdDenominator = 105
|
|
||||||
|
|
||||||
var dbp [4]byte
|
var dbp [4]byte
|
||||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||||
@@ -202,9 +195,6 @@ func TestGetSpec(t *testing.T) {
|
|||||||
var dbb [4]byte
|
var dbb [4]byte
|
||||||
copy(dbb[:], []byte{'0', '0', '0', '8'})
|
copy(dbb[:], []byte{'0', '0', '0', '8'})
|
||||||
config.DomainBeaconBuilder = dbb
|
config.DomainBeaconBuilder = dbb
|
||||||
var dptc [4]byte
|
|
||||||
copy(dptc[:], []byte{'0', '0', '0', '8'})
|
|
||||||
config.DomainPTCAttester = dptc
|
|
||||||
var dam [4]byte
|
var dam [4]byte
|
||||||
copy(dam[:], []byte{'1', '0', '0', '0'})
|
copy(dam[:], []byte{'1', '0', '0', '0'})
|
||||||
config.DomainApplicationMask = dam
|
config.DomainApplicationMask = dam
|
||||||
@@ -220,7 +210,7 @@ func TestGetSpec(t *testing.T) {
|
|||||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||||
data, ok := resp.Data.(map[string]any)
|
data, ok := resp.Data.(map[string]any)
|
||||||
require.Equal(t, true, ok)
|
require.Equal(t, true, ok)
|
||||||
assert.Equal(t, 192, len(data))
|
assert.Equal(t, 178, len(data))
|
||||||
for k, v := range data {
|
for k, v := range data {
|
||||||
t.Run(k, func(t *testing.T) {
|
 			t.Run(k, func(t *testing.T) {
 				switch k {
@@ -300,8 +290,6 @@ func TestGetSpec(t *testing.T) {
 					assert.Equal(t, "0x"+hex.EncodeToString([]byte("FuluForkVersion")), v)
 				case "FULU_FORK_EPOCH":
 					assert.Equal(t, "109", v)
-				case "GLOAS_FORK_EPOCH":
-					assert.Equal(t, "110", v)
 				case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
 					assert.Equal(t, "1000", v)
 				case "BLS_WITHDRAWAL_PREFIX":
@@ -424,8 +412,6 @@ func TestGetSpec(t *testing.T) {
 					assert.Equal(t, "0x30303036", v)
 				case "DOMAIN_AGGREGATE_AND_PROOF":
 					assert.Equal(t, "0x30303037", v)
-				case "DOMAIN_PTC_ATTESTER":
-					assert.Equal(t, "0x30303038", v)
 				case "DOMAIN_APPLICATION_MASK":
 					assert.Equal(t, "0x31303030", v)
 				case "DOMAIN_SYNC_COMMITTEE":
@@ -482,18 +468,10 @@ func TestGetSpec(t *testing.T) {
 					assert.Equal(t, "121", v)
 				case "ATTESTATION_DUE_BPS":
 					assert.Equal(t, "122", v)
-				case "AGGREGATE_DUE_BPS":
+				case "AGGREGRATE_DUE_BPS":
 					assert.Equal(t, "123", v)
 				case "CONTRIBUTION_DUE_BPS":
 					assert.Equal(t, "124", v)
-				case "ATTESTATION_DUE_BPS_GLOAS":
-					assert.Equal(t, "126", v)
-				case "AGGREGATE_DUE_BPS_GLOAS":
-					assert.Equal(t, "127", v)
-				case "SYNC_MESSAGE_DUE_BPS_GLOAS":
-					assert.Equal(t, "128", v)
-				case "CONTRIBUTION_DUE_BPS_GLOAS":
-					assert.Equal(t, "129", v)
 				case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
 					assert.Equal(t, "8", v)
 				case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
@@ -610,26 +588,10 @@ func TestGetSpec(t *testing.T) {
 					assert.Equal(t, "102", v)
 				case "SYNC_MESSAGE_DUE_BPS":
 					assert.Equal(t, "103", v)
-				case "BUILDER_PAYMENT_THRESHOLD_NUMERATOR":
-					assert.Equal(t, "104", v)
-				case "BUILDER_PAYMENT_THRESHOLD_DENOMINATOR":
-					assert.Equal(t, "105", v)
 				case "BLOB_SCHEDULE":
 					blobSchedule, ok := v.([]any)
 					assert.Equal(t, true, ok)
 					assert.Equal(t, 2, len(blobSchedule))
-				case "FIELD_ELEMENTS_PER_CELL":
-					assert.Equal(t, "64", v) // From fieldparams.CellsPerBlob
-				case "FIELD_ELEMENTS_PER_EXT_BLOB":
-					assert.Equal(t, "198", v) // FieldElementsPerBlob (99) * 2
-				case "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH":
-					assert.Equal(t, "4", v) // Preset value
-				case "CELLS_PER_EXT_BLOB":
-					assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
-				case "NUMBER_OF_COLUMNS":
-					assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
-				case "UPDATE_TIMEOUT":
-					assert.Equal(t, "1782", v) // SlotsPerEpoch (27) * EpochsPerSyncCommitteePeriod (66)
 				default:
 					t.Errorf("Incorrect key: %s", k)
 				}
@@ -26,30 +26,21 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
 	httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
 }
 
-// writeBlockIdError handles common block ID lookup errors.
-// Returns true if an error was handled and written to the response, false if no error.
-func writeBlockIdError(w http.ResponseWriter, err error, fallbackMsg string) bool {
-	if err == nil {
-		return false
-	}
+// WriteBlockFetchError writes an appropriate error based on the supplied argument.
+// The argument error should be a result of fetching block.
+func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
 	var blockNotFoundErr *lookup.BlockNotFoundError
 	if errors.As(err, &blockNotFoundErr) {
 		httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
-		return true
+		return false
 	}
 	var invalidBlockIdErr *lookup.BlockIdParseError
 	if errors.As(err, &invalidBlockIdErr) {
 		httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)
-		return true
+		return false
 	}
-	httputil.HandleError(w, fallbackMsg+": "+err.Error(), http.StatusInternalServerError)
-	return true
-}
-
-// WriteBlockFetchError writes an appropriate error based on the supplied argument.
-// The argument error should be a result of fetching block.
-func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
-	if writeBlockIdError(w, err, "Could not get block from block ID") {
+	if err != nil {
+		httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
 		return false
 	}
 	if err = blocks.BeaconBlockIsNil(blk); err != nil {
@@ -58,10 +49,3 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
 	}
 	return true
 }
-
-// WriteBlockRootFetchError writes an appropriate error based on the supplied argument.
-// The argument error should be a result of fetching block root.
-// Returns true if no error occurred, false otherwise.
-func WriteBlockRootFetchError(w http.ResponseWriter, err error) bool {
-	return !writeBlockIdError(w, err, "Could not get block root from block ID")
-}
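For context, here is a minimal sketch of how a handler could call WriteBlockFetchError as it appears on the right-hand side of this hunk. The handler name, the Blocker field, and the hard-coded block ID are illustrative assumptions, not code from this diff.

// Hypothetical handler; only the WriteBlockFetchError call mirrors the diff above.
func (s *Server) getBlock(w http.ResponseWriter, r *http.Request) {
	blockID := []byte("head") // in practice parsed from the request path
	blk, err := s.Blocker.Block(r.Context(), blockID)
	if !WriteBlockFetchError(w, blk, err) {
		return // an appropriate 404, 400 or 500 response has already been written
	}
	// ... serialize blk into the response ...
}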
@@ -105,59 +105,3 @@ func TestWriteBlockFetchError(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestWriteBlockRootFetchError tests the WriteBlockRootFetchError function
|
|
||||||
// to ensure that the correct error message and code are written to the response
|
|
||||||
// and that the function returns the correct boolean value.
|
|
||||||
func TestWriteBlockRootFetchError(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
err error
|
|
||||||
expectedMessage string
|
|
||||||
expectedCode int
|
|
||||||
expectedReturn bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "Nil error should return true",
|
|
||||||
err: nil,
|
|
||||||
expectedReturn: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "BlockNotFoundError should return 404",
|
|
||||||
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
|
|
||||||
expectedMessage: "Block not found",
|
|
||||||
expectedCode: http.StatusNotFound,
|
|
||||||
expectedReturn: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "BlockIdParseError should return 400",
|
|
||||||
err: &lookup.BlockIdParseError{},
|
|
||||||
expectedMessage: "Invalid block ID",
|
|
||||||
expectedCode: http.StatusBadRequest,
|
|
||||||
expectedReturn: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Generic error should return 500",
|
|
||||||
err: errors.New("database connection failed"),
|
|
||||||
expectedMessage: "Could not get block root from block ID",
|
|
||||||
expectedCode: http.StatusInternalServerError,
|
|
||||||
expectedReturn: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, c := range cases {
|
|
||||||
t.Run(c.name, func(t *testing.T) {
|
|
||||||
writer := httptest.NewRecorder()
|
|
||||||
result := WriteBlockRootFetchError(writer, c.err)
|
|
||||||
|
|
||||||
assert.Equal(t, c.expectedReturn, result, "incorrect return value")
|
|
||||||
if !c.expectedReturn {
|
|
||||||
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
|
|
||||||
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")
|
|
||||||
|
|
||||||
e := &httputil.DefaultJsonError{}
|
|
||||||
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -60,7 +60,6 @@ func (e BlockIdParseError) Error() string {
 // Blocker is responsible for retrieving blocks.
 type Blocker interface {
 	Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
-	BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error)
 	BlobSidecars(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
 	Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError)
 	DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
@@ -226,18 +225,6 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
 	return blk, nil
 }
 
-// BlockRoot returns the block root for a given identifier. The identifier can be one of:
-// - "head" (canonical head in node's view)
-// - "genesis"
-// - "finalized"
-// - "justified"
-// - <slot>
-// - <hex encoded block root with '0x' prefix>
-func (p *BeaconDbBlocker) BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error) {
-	root, _, err := p.resolveBlockID(ctx, string(id))
-	return root, err
-}
-
 // blobsContext holds common information needed for blob retrieval
 type blobsContext struct {
 	root [fieldparams.RootLength]byte
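The removed BlockRoot accepted the same flexible identifier forms the comment lists. A rough, hedged sketch of that classification logic (the real code simply delegates to resolveBlockID; the helper below is invented for illustration and assumes the strings and strconv packages):

// classifyBlockID shows the shape of the identifier dispatch, not the real implementation.
func classifyBlockID(id string) string {
	switch {
	case id == "head", id == "genesis", id == "finalized", id == "justified":
		return "named identifier"
	case strings.HasPrefix(id, "0x"):
		return "hex-encoded block root"
	default:
		if _, err := strconv.ParseUint(id, 10, 64); err == nil {
			return "slot number"
		}
		return "invalid"
	}
}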
@@ -168,111 +168,6 @@ func TestGetBlock(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockRoot(t *testing.T) {
|
|
||||||
beaconDB := testDB.SetupDB(t)
|
|
||||||
ctx := t.Context()
|
|
||||||
|
|
||||||
genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
|
|
||||||
canonicalRoots := make(map[[32]byte]bool)
|
|
||||||
|
|
||||||
for _, bContr := range blkContainers {
|
|
||||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
|
||||||
}
|
|
||||||
headBlock := blkContainers[len(blkContainers)-1]
|
|
||||||
|
|
||||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
fetcher := &BeaconDbBlocker{
|
|
||||||
BeaconDB: beaconDB,
|
|
||||||
ChainInfoFetcher: &mockChain.ChainService{
|
|
||||||
DB: beaconDB,
|
|
||||||
Block: wsb,
|
|
||||||
Root: headBlock.BlockRoot,
|
|
||||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: blkContainers[64].BlockRoot},
|
|
||||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{Root: blkContainers[32].BlockRoot},
|
|
||||||
CanonicalRoots: canonicalRoots,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
genesisRoot, err := genBlk.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
blockID []byte
|
|
||||||
want [32]byte
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "slot",
|
|
||||||
blockID: []byte("30"),
|
|
||||||
want: bytesutil.ToBytes32(blkContainers[30].BlockRoot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "bad formatting",
|
|
||||||
blockID: []byte("3bad0"),
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "head",
|
|
||||||
blockID: []byte("head"),
|
|
||||||
want: bytesutil.ToBytes32(headBlock.BlockRoot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "finalized",
|
|
||||||
blockID: []byte("finalized"),
|
|
||||||
want: bytesutil.ToBytes32(blkContainers[64].BlockRoot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "justified",
|
|
||||||
blockID: []byte("justified"),
|
|
||||||
want: bytesutil.ToBytes32(blkContainers[32].BlockRoot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "genesis",
|
|
||||||
blockID: []byte("genesis"),
|
|
||||||
want: genesisRoot,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "genesis root",
|
|
||||||
blockID: genesisRoot[:],
|
|
||||||
want: genesisRoot,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "root",
|
|
||||||
blockID: blkContainers[20].BlockRoot,
|
|
||||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "hex root",
|
|
||||||
blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
|
|
||||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "non-existent root",
|
|
||||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no block at slot",
|
|
||||||
blockID: []byte("105"),
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result, err := fetcher.BlockRoot(ctx, tt.blockID)
|
|
||||||
if tt.wantErr {
|
|
||||||
assert.NotEqual(t, err, nil, "no error has been returned")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.DeepEqual(t, tt.want, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBlobsErrorHandling(t *testing.T) {
|
func TestBlobsErrorHandling(t *testing.T) {
|
||||||
params.SetupTestConfigCleanup(t)
|
params.SetupTestConfigCleanup(t)
|
||||||
cfg := params.BeaconConfig().Copy()
|
cfg := params.BeaconConfig().Copy()
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ import (
 // MockBlocker is a fake implementation of lookup.Blocker.
 type MockBlocker struct {
 	BlockToReturn interfaces.ReadOnlySignedBeaconBlock
-	RootToReturn [32]byte
 	ErrorToReturn error
 	SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
 	RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
@@ -40,14 +39,6 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
 	return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
 }
 
-// BlockRoot --
-func (m *MockBlocker) BlockRoot(_ context.Context, _ []byte) ([32]byte, error) {
-	if m.ErrorToReturn != nil {
-		return [32]byte{}, m.ErrorToReturn
-	}
-	return m.RootToReturn, nil
-}
-
 // BlobSidecars --
 func (*MockBlocker) BlobSidecars(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
 	return nil, &core.RpcError{}
@@ -9,10 +9,6 @@ import (
 type writeOnlyGloasFields interface {
 	SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
 	SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
-	ClearBuilderPendingPayment(index primitives.Slot) error
-	RotateBuilderPendingPayments() error
-	AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
-	UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
 }
 
 type readOnlyGloasFields interface {
@@ -20,5 +16,4 @@ type readOnlyGloasFields interface {
 	IsActiveBuilder(primitives.BuilderIndex) (bool, error)
 	CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
 	LatestBlockHash() ([32]byte, error)
-	BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
 }
@@ -135,15 +135,3 @@ func (b *BeaconState) builderPendingBalanceToWithdraw(builderIndex primitives.Bu
 	}
 	return total
 }
-
-// BuilderPendingPayments returns a copy of the builder pending payments.
-func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
-	if b.version < version.Gloas {
-		return nil, errNotSupported("BuilderPendingPayments", b.version)
-	}
-
-	b.lock.RLock()
-	defer b.lock.RUnlock()
-
-	return b.builderPendingPaymentsVal(), nil
-}
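As a hedged usage sketch of the getter removed above: a caller reads the pending payments and handles the not-supported error that pre-Gloas states return. The helper name is invented, and it assumes the state interface exposes the accessor and that Weight is a Gwei value, as the tests in this compare suggest.

// Illustrative only; sums the weights of all builder pending payments.
func sumPendingPaymentWeights(st state.BeaconState) (primitives.Gwei, error) {
	payments, err := st.BuilderPendingPayments()
	if err != nil {
		return 0, err // pre-Gloas states report the accessor as not supported
	}
	var total primitives.Gwei
	for _, p := range payments {
		total += p.Weight
	}
	return total, nil
}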
@@ -157,12 +157,3 @@ func TestBuilderHelpers(t *testing.T) {
 		require.Equal(t, false, ok)
 	})
 }
-
-func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
-	stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
-	require.NoError(t, err)
-	st := stIface.(*state_native.BeaconState)
-
-	_, err = st.BuilderPendingPayments()
-	require.ErrorContains(t, "BuilderPendingPayments", err)
-}
@@ -725,13 +725,3 @@ func ProtobufBeaconStateFulu(s any) (*ethpb.BeaconStateFulu, error) {
 	}
 	return pbState, nil
 }
-
-// ProtobufBeaconStateGloas transforms an input into beacon state Gloas in the form of protobuf.
-// Error is returned if the input is not type protobuf beacon state.
-func ProtobufBeaconStateGloas(s any) (*ethpb.BeaconStateGloas, error) {
-	pbState, ok := s.(*ethpb.BeaconStateGloas)
-	if !ok {
-		return nil, errors.New("input is not type pb.BeaconStateGloas")
-	}
-	return pbState, nil
-}
@@ -113,100 +113,77 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
|||||||
defer b.lock.RUnlock()
|
defer b.lock.RUnlock()
|
||||||
|
|
||||||
withdrawals := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)
|
withdrawals := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)
|
||||||
withdrawalIndex := b.nextWithdrawalIndex
|
|
||||||
|
|
||||||
withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(withdrawalIndex, &withdrawals)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return withdrawals, processedPartialWithdrawalsCount, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
|
|
||||||
if b.version < version.Electra {
|
|
||||||
return withdrawalIndex, 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ws := *withdrawals
|
|
||||||
epoch := slots.ToEpoch(b.slot)
|
|
||||||
var processedPartialWithdrawalsCount uint64
|
|
||||||
for _, w := range b.pendingPartialWithdrawals {
|
|
||||||
if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
v, err := b.validatorAtIndexReadOnly(w.Index)
|
|
||||||
if err != nil {
|
|
||||||
return withdrawalIndex, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
|
|
||||||
}
|
|
||||||
vBal, err := b.balanceAtIndex(w.Index)
|
|
||||||
if err != nil {
|
|
||||||
return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
|
|
||||||
}
|
|
||||||
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
|
||||||
var totalWithdrawn uint64
|
|
||||||
for _, wi := range ws {
|
|
||||||
if wi.ValidatorIndex == w.Index {
|
|
||||||
totalWithdrawn += wi.Amount
|
|
||||||
}
|
|
||||||
}
|
|
||||||
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
|
|
||||||
if err != nil {
|
|
||||||
return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
|
|
||||||
}
|
|
||||||
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
|
|
||||||
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
|
||||||
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
|
|
||||||
ws = append(ws, &enginev1.Withdrawal{
|
|
||||||
Index: withdrawalIndex,
|
|
||||||
ValidatorIndex: w.Index,
|
|
||||||
Address: v.GetWithdrawalCredentials()[12:],
|
|
||||||
Amount: amount,
|
|
||||||
})
|
|
||||||
withdrawalIndex++
|
|
||||||
}
|
|
||||||
processedPartialWithdrawalsCount++
|
|
||||||
}
|
|
||||||
|
|
||||||
*withdrawals = ws
|
|
||||||
return withdrawalIndex, processedPartialWithdrawalsCount, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) error {
|
|
||||||
ws := *withdrawals
|
|
||||||
validatorIndex := b.nextWithdrawalValidatorIndex
|
validatorIndex := b.nextWithdrawalValidatorIndex
|
||||||
validatorsLen := b.validatorsLen()
|
withdrawalIndex := b.nextWithdrawalIndex
|
||||||
epoch := slots.ToEpoch(b.slot)
|
epoch := slots.ToEpoch(b.slot)
|
||||||
|
|
||||||
|
// Electra partial withdrawals functionality.
|
||||||
|
var processedPartialWithdrawalsCount uint64
|
||||||
|
if b.version >= version.Electra {
|
||||||
|
for _, w := range b.pendingPartialWithdrawals {
|
||||||
|
if w.WithdrawableEpoch > epoch || len(withdrawals) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
v, err := b.validatorAtIndexReadOnly(w.Index)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
|
||||||
|
}
|
||||||
|
vBal, err := b.balanceAtIndex(w.Index)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
|
||||||
|
}
|
||||||
|
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||||
|
var totalWithdrawn uint64
|
||||||
|
for _, wi := range withdrawals {
|
||||||
|
if wi.ValidatorIndex == w.Index {
|
||||||
|
totalWithdrawn += wi.Amount
|
||||||
|
}
|
||||||
|
}
|
||||||
|
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
|
||||||
|
}
|
||||||
|
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
|
||||||
|
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||||
|
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
|
||||||
|
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||||
|
Index: withdrawalIndex,
|
||||||
|
ValidatorIndex: w.Index,
|
||||||
|
Address: v.GetWithdrawalCredentials()[12:],
|
||||||
|
Amount: amount,
|
||||||
|
})
|
||||||
|
withdrawalIndex++
|
||||||
|
}
|
||||||
|
processedPartialWithdrawalsCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
validatorsLen := b.validatorsLen()
|
||||||
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||||
for range bound {
|
for range bound {
|
||||||
val, err := b.validatorAtIndexReadOnly(validatorIndex)
|
val, err := b.validatorAtIndexReadOnly(validatorIndex)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
|
return nil, 0, errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
|
||||||
}
|
}
|
||||||
balance, err := b.balanceAtIndex(validatorIndex)
|
balance, err := b.balanceAtIndex(validatorIndex)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
|
return nil, 0, errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
|
||||||
}
|
}
|
||||||
if b.version >= version.Electra {
|
if b.version >= version.Electra {
|
||||||
var partiallyWithdrawnBalance uint64
|
var partiallyWithdrawnBalance uint64
|
||||||
for _, w := range ws {
|
for _, w := range withdrawals {
|
||||||
if w.ValidatorIndex == validatorIndex {
|
if w.ValidatorIndex == validatorIndex {
|
||||||
partiallyWithdrawnBalance += w.Amount
|
partiallyWithdrawnBalance += w.Amount
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
balance, err = mathutil.Sub64(balance, partiallyWithdrawnBalance)
|
balance, err = mathutil.Sub64(balance, partiallyWithdrawnBalance)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
|
return nil, 0, errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if helpers.IsFullyWithdrawableValidator(val, balance, epoch, b.version) {
|
if helpers.IsFullyWithdrawableValidator(val, balance, epoch, b.version) {
|
||||||
ws = append(ws, &enginev1.Withdrawal{
|
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||||
Index: withdrawalIndex,
|
Index: withdrawalIndex,
|
||||||
ValidatorIndex: validatorIndex,
|
ValidatorIndex: validatorIndex,
|
||||||
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
||||||
@@ -214,7 +191,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
|
|||||||
})
|
})
|
||||||
withdrawalIndex++
|
withdrawalIndex++
|
||||||
} else if helpers.IsPartiallyWithdrawableValidator(val, balance, epoch, b.version) {
|
} else if helpers.IsPartiallyWithdrawableValidator(val, balance, epoch, b.version) {
|
||||||
ws = append(ws, &enginev1.Withdrawal{
|
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||||
Index: withdrawalIndex,
|
Index: withdrawalIndex,
|
||||||
ValidatorIndex: validatorIndex,
|
ValidatorIndex: validatorIndex,
|
||||||
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
||||||
@@ -222,7 +199,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
|
|||||||
})
|
})
|
||||||
withdrawalIndex++
|
withdrawalIndex++
|
||||||
}
|
}
|
||||||
if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
|
if uint64(len(withdrawals)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
validatorIndex += 1
|
validatorIndex += 1
|
||||||
@@ -231,8 +208,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
*withdrawals = ws
|
return withdrawals, processedPartialWithdrawalsCount, nil
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BeaconState) PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error) {
|
func (b *BeaconState) PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error) {
|
||||||
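The partial-withdrawal branch in this hunk caps each pending withdrawal at the balance above the minimum activation balance, after subtracting anything already withdrawn for the same validator in this payload. A small self-contained sketch with made-up Gwei amounts (the constant below stands in for params.BeaconConfig().MinActivationBalance):

// exampleWithdrawableAmount illustrates the cap applied per pending partial withdrawal.
func exampleWithdrawableAmount() uint64 {
	const minActivation uint64 = 32_000_000_000 // assumed 32 ETH in Gwei
	balance := uint64(33_100_000_000)           // 33.1 ETH left after earlier withdrawals in this payload
	requested := uint64(2_000_000_000)          // pending partial withdrawal of 2 ETH
	return min(balance-minActivation, requested) // 1.1 ETH, capped by the excess balance
}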
|
|||||||
@@ -4,71 +4,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
|
|
||||||
// front and appending slots per epoch empty payments to the end.
|
|
||||||
// This implements: state.builder_pending_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:] + [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
|
||||||
func (b *BeaconState) RotateBuilderPendingPayments() error {
|
|
||||||
if b.version < version.Gloas {
|
|
||||||
return errNotSupported("RotateBuilderPendingPayments", b.version)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.lock.Lock()
|
|
||||||
defer b.lock.Unlock()
|
|
||||||
|
|
||||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
|
||||||
copy(b.builderPendingPayments[:slotsPerEpoch], b.builderPendingPayments[slotsPerEpoch:2*slotsPerEpoch])
|
|
||||||
|
|
||||||
for i := slotsPerEpoch; i < primitives.Slot(len(b.builderPendingPayments)); i++ {
|
|
||||||
b.builderPendingPayments[i] = emptyBuilderPendingPayment
|
|
||||||
}
|
|
||||||
|
|
||||||
b.markFieldAsDirty(types.BuilderPendingPayments)
|
|
||||||
b.rebuildTrie[types.BuilderPendingPayments] = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// emptyBuilderPendingPayment is a shared zero-value payment used to clear entries.
|
|
||||||
var emptyBuilderPendingPayment = ðpb.BuilderPendingPayment{
|
|
||||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
|
||||||
FeeRecipient: make([]byte, 20),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
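The removed rotation implements the spec expression state.builder_pending_payments = payments[SLOTS_PER_EPOCH:] + [BuilderPendingPayment()] * SLOTS_PER_EPOCH over a fixed-size, two-epoch slice. A standalone sketch of the same shift-and-clear, using ints in place of payment objects:

// rotate shifts the second half of a 2*n slice into the first half and
// clears the second half, mirroring the in-place rotation removed above.
func rotate(payments []int, n int) {
	copy(payments[:n], payments[n:2*n])
	for i := n; i < len(payments); i++ {
		payments[i] = 0
	}
}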
|
|
||||||
// AppendBuilderPendingWithdrawals appends builder pending withdrawals to the beacon state.
|
|
||||||
// If the withdrawals slice is shared, it copies the slice first to preserve references.
|
|
||||||
func (b *BeaconState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
|
|
||||||
if b.version < version.Gloas {
|
|
||||||
return errNotSupported("AppendBuilderPendingWithdrawals", b.version)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(withdrawals) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
b.lock.Lock()
|
|
||||||
defer b.lock.Unlock()
|
|
||||||
|
|
||||||
pendingWithdrawals := b.builderPendingWithdrawals
|
|
||||||
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
|
|
||||||
pendingWithdrawals = make([]*ethpb.BuilderPendingWithdrawal, 0, len(b.builderPendingWithdrawals)+len(withdrawals))
|
|
||||||
pendingWithdrawals = append(pendingWithdrawals, b.builderPendingWithdrawals...)
|
|
||||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
|
|
||||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.builderPendingWithdrawals = append(pendingWithdrawals, withdrawals...)
|
|
||||||
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
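AppendBuilderPendingWithdrawals above follows the state package's copy-on-write convention: when the backing slice is shared with another state copy, clone it before appending and drop this state's shared reference. A simplified sketch of that idea, with a plain counter standing in for stateutil.Reference and ints standing in for withdrawal objects:

// appendCopyOnWrite clones dst before mutating it if other holders still reference it.
func appendCopyOnWrite(dst []int, extra []int, refs *int) []int {
	if *refs > 1 {
		cloned := make([]int, len(dst), len(dst)+len(extra))
		copy(cloned, dst)
		*refs-- // this holder no longer shares the old backing array
		dst = cloned
	}
	return append(dst, extra...)
}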
|
|
||||||
// SetExecutionPayloadBid sets the latest execution payload bid in the state.
|
// SetExecutionPayloadBid sets the latest execution payload bid in the state.
|
||||||
func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error {
|
func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error {
|
||||||
if b.version < version.Gloas {
|
if b.version < version.Gloas {
|
||||||
@@ -102,25 +43,6 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClearBuilderPendingPayment clears a builder pending payment at the specified index.
|
|
||||||
func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
|
|
||||||
if b.version < version.Gloas {
|
|
||||||
return errNotSupported("ClearBuilderPendingPayment", b.version)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.lock.Lock()
|
|
||||||
defer b.lock.Unlock()
|
|
||||||
|
|
||||||
if uint64(index) >= uint64(len(b.builderPendingPayments)) {
|
|
||||||
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", index, len(b.builderPendingPayments))
|
|
||||||
}
|
|
||||||
|
|
||||||
b.builderPendingPayments[index] = emptyBuilderPendingPayment
|
|
||||||
|
|
||||||
b.markFieldAsDirty(types.BuilderPendingPayments)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
|
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
|
||||||
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
|
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
|
||||||
if b.version < version.Gloas {
|
if b.version < version.Gloas {
|
||||||
@@ -139,25 +61,3 @@ func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *e
|
|||||||
b.markFieldAsDirty(types.BuilderPendingPayments)
|
b.markFieldAsDirty(types.BuilderPendingPayments)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateExecutionPayloadAvailabilityAtIndex updates the execution payload availability bit at a specific index.
|
|
||||||
func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error {
|
|
||||||
b.lock.Lock()
|
|
||||||
defer b.lock.Unlock()
|
|
||||||
|
|
||||||
byteIndex := idx / 8
|
|
||||||
bitIndex := idx % 8
|
|
||||||
|
|
||||||
if byteIndex >= uint64(len(b.executionPayloadAvailability)) {
|
|
||||||
return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", idx, byteIndex, len(b.executionPayloadAvailability))
|
|
||||||
}
|
|
||||||
|
|
||||||
if val != 0 {
|
|
||||||
b.executionPayloadAvailability[byteIndex] |= (1 << bitIndex)
|
|
||||||
} else {
|
|
||||||
b.executionPayloadAvailability[byteIndex] &^= (1 << bitIndex)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
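The removed availability setter addresses the bitfield byte by byte: byteIndex = idx / 8, bitIndex = idx % 8, then OR to set and AND-NOT to clear. A self-contained sketch of just that bit manipulation (assumes the fmt package for the error):

// setBit mirrors the bit twiddling used for the execution payload availability bitfield.
func setBit(field []byte, idx uint64, val byte) error {
	byteIndex, bitIndex := idx/8, idx%8
	if byteIndex >= uint64(len(field)) {
		return fmt.Errorf("bit index %d out of range for %d bytes", idx, len(field))
	}
	if val != 0 {
		field[byteIndex] |= 1 << bitIndex
	} else {
		field[byteIndex] &^= 1 << bitIndex
	}
	return nil
}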
|
|||||||
@@ -5,8 +5,6 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||||
@@ -140,191 +138,3 @@ func TestSetBuilderPendingPayment(t *testing.T) {
|
|||||||
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
|
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClearBuilderPendingPayment(t *testing.T) {
|
|
||||||
t.Run("previous fork returns expected error", func(t *testing.T) {
|
|
||||||
st := &BeaconState{version: version.Fulu}
|
|
||||||
err := st.ClearBuilderPendingPayment(0)
|
|
||||||
require.ErrorContains(t, "is not supported", err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("clears and marks dirty", func(t *testing.T) {
|
|
||||||
st := &BeaconState{
|
|
||||||
version: version.Gloas,
|
|
||||||
dirtyFields: make(map[types.FieldIndex]bool),
|
|
||||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 2),
|
|
||||||
}
|
|
||||||
st.builderPendingPayments[1] = ðpb.BuilderPendingPayment{
|
|
||||||
Weight: 2,
|
|
||||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
|
||||||
Amount: 99,
|
|
||||||
BuilderIndex: 1,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NoError(t, st.ClearBuilderPendingPayment(1))
|
|
||||||
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
|
|
||||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns error on out of range index", func(t *testing.T) {
|
|
||||||
st := &BeaconState{
|
|
||||||
version: version.Gloas,
|
|
||||||
dirtyFields: make(map[types.FieldIndex]bool),
|
|
||||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 1),
|
|
||||||
}
|
|
||||||
|
|
||||||
err := st.ClearBuilderPendingPayment(2)
|
|
||||||
|
|
||||||
require.ErrorContains(t, "out of range", err)
|
|
||||||
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRotateBuilderPendingPayments(t *testing.T) {
|
|
||||||
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
|
|
||||||
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
|
|
||||||
for i := range payments {
|
|
||||||
idx := uint64(i)
|
|
||||||
payments[i] = ðpb.BuilderPendingPayment{
|
|
||||||
Weight: primitives.Gwei(idx * 100e9),
|
|
||||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
|
||||||
FeeRecipient: make([]byte, 20),
|
|
||||||
Amount: primitives.Gwei(idx * 1e9),
|
|
||||||
BuilderIndex: primitives.BuilderIndex(idx + 100),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
statePb, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
|
||||||
BuilderPendingPayments: payments,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
st, ok := statePb.(*BeaconState)
|
|
||||||
require.Equal(t, true, ok)
|
|
||||||
|
|
||||||
oldPayments, err := st.BuilderPendingPayments()
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, st.RotateBuilderPendingPayments())
|
|
||||||
|
|
||||||
newPayments, err := st.BuilderPendingPayments()
|
|
||||||
require.NoError(t, err)
|
|
||||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
|
||||||
for i := range slotsPerEpoch {
|
|
||||||
require.DeepEqual(t, oldPayments[slotsPerEpoch+i], newPayments[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
|
|
||||||
payment := newPayments[i]
|
|
||||||
require.Equal(t, primitives.Gwei(0), payment.Weight)
|
|
||||||
require.Equal(t, 20, len(payment.Withdrawal.FeeRecipient))
|
|
||||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
|
||||||
require.Equal(t, primitives.BuilderIndex(0), payment.Withdrawal.BuilderIndex)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
|
|
||||||
st := &BeaconState{version: version.Electra}
|
|
||||||
err := st.RotateBuilderPendingPayments()
|
|
||||||
require.ErrorContains(t, "RotateBuilderPendingPayments", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
|
|
||||||
wd := ðpb.BuilderPendingWithdrawal{
|
|
||||||
FeeRecipient: make([]byte, 20),
|
|
||||||
Amount: 1,
|
|
||||||
BuilderIndex: 2,
|
|
||||||
}
|
|
||||||
statePb, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
|
||||||
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{wd},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
st, ok := statePb.(*BeaconState)
|
|
||||||
require.Equal(t, true, ok)
|
|
||||||
|
|
||||||
copied := st.Copy().(*BeaconState)
|
|
||||||
require.Equal(t, uint(2), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
|
|
||||||
|
|
||||||
appended := ðpb.BuilderPendingWithdrawal{
|
|
||||||
FeeRecipient: make([]byte, 20),
|
|
||||||
Amount: 4,
|
|
||||||
BuilderIndex: 5,
|
|
||||||
}
|
|
||||||
require.NoError(t, copied.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{appended}))
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(st.builderPendingWithdrawals))
|
|
||||||
require.Equal(t, 2, len(copied.builderPendingWithdrawals))
|
|
||||||
require.DeepEqual(t, wd, copied.builderPendingWithdrawals[0])
|
|
||||||
require.DeepEqual(t, appended, copied.builderPendingWithdrawals[1])
|
|
||||||
require.DeepEqual(t, wd, st.builderPendingWithdrawals[0])
|
|
||||||
require.Equal(t, uint(1), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
|
|
||||||
require.Equal(t, uint(1), copied.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAppendBuilderPendingWithdrawals(t *testing.T) {
|
|
||||||
st := &BeaconState{
|
|
||||||
version: version.Gloas,
|
|
||||||
dirtyFields: make(map[types.FieldIndex]bool),
|
|
||||||
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
|
|
||||||
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
|
|
||||||
},
|
|
||||||
builderPendingWithdrawals: make([]*ethpb.BuilderPendingWithdrawal, 0),
|
|
||||||
}
|
|
||||||
|
|
||||||
first := ðpb.BuilderPendingWithdrawal{Amount: 1}
|
|
||||||
second := ðpb.BuilderPendingWithdrawal{Amount: 2}
|
|
||||||
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{first, second}))
|
|
||||||
|
|
||||||
require.Equal(t, 2, len(st.builderPendingWithdrawals))
|
|
||||||
require.DeepEqual(t, first, st.builderPendingWithdrawals[0])
|
|
||||||
require.DeepEqual(t, second, st.builderPendingWithdrawals[1])
|
|
||||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAppendBuilderPendingWithdrawals_UnsupportedVersion(t *testing.T) {
|
|
||||||
st := &BeaconState{version: version.Electra}
|
|
||||||
err := st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{{}})
|
|
||||||
require.ErrorContains(t, "AppendBuilderPendingWithdrawals", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateExecutionPayloadAvailabilityAtIndex_SetAndClear(t *testing.T) {
|
|
||||||
st := newGloasStateWithAvailability(t, make([]byte, 1024))
|
|
||||||
|
|
||||||
otherIdx := uint64(8) // byte 1, bit 0
|
|
||||||
idx := uint64(9) // byte 1, bit 1
|
|
||||||
|
|
||||||
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(otherIdx, 1))
|
|
||||||
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
|
|
||||||
|
|
||||||
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1))
|
|
||||||
require.Equal(t, byte(0x03), st.executionPayloadAvailability[1])
|
|
||||||
|
|
||||||
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 0))
|
|
||||||
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateExecutionPayloadAvailabilityAtIndex_OutOfRange(t *testing.T) {
|
|
||||||
st := newGloasStateWithAvailability(t, make([]byte, 1024))
|
|
||||||
|
|
||||||
idx := uint64(len(st.executionPayloadAvailability)) * 8
|
|
||||||
err := st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1)
|
|
||||||
require.ErrorContains(t, "out of range", err)
|
|
||||||
|
|
||||||
for _, b := range st.executionPayloadAvailability {
|
|
||||||
if b != 0 {
|
|
||||||
t.Fatalf("execution payload availability mutated on error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconState {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
st, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
|
||||||
ExecutionPayloadAvailability: availability,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return st.(*BeaconState)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -137,7 +137,7 @@ func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
 		if ctx.Err() != nil {
 			return ctx.Err()
 		}
-		offset, lvl, err := s.beaconDB.SlotInDiffTree(slot)
+		_, lvl, err := s.beaconDB.SlotInDiffTree(slot)
 		if err != nil {
 			log.WithError(err).Errorf("could not determine if slot %d is in diff tree", slot)
 			continue
@@ -145,9 +145,6 @@ func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
 		if lvl == -1 {
 			continue
 		}
-		if uint64(slot) == offset {
-			continue
-		}
 		// The state needs to be saved.
 		// Try the epoch boundary cache first.
 		cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
@@ -16,7 +16,7 @@ go_library(
         "//beacon-chain/state:__subpackages__",
     ],
     deps = [
-        "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -3,7 +3,7 @@ package testing
 import (
 	"testing"
 
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -31,7 +31,7 @@ func GeneratePendingDeposit(t *testing.T, key common.SecretKey, amount uint64, w
 		Amount:    dm.Amount,
 		Signature: sig.Marshal(),
 	}
-	valid, err := helpers.IsValidDepositSignature(depositData)
+	valid, err := blocks.IsValidDepositSignature(depositData)
 	require.NoError(t, err)
 	require.Equal(t, true, valid)
 	return &ethpb.PendingDeposit{
@@ -148,7 +148,7 @@ func (b batch) ensureParent(expected [32]byte) error {
 func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
 	return &eth.BeaconBlocksByRangeRequest{
 		StartSlot: b.begin,
-		Count:     uint64(b.end.FlooredSubSlot(b.begin)),
+		Count:     uint64(b.end - b.begin),
 		Step:      1,
 	}
 }
@@ -156,7 +156,7 @@ func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
 func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
 	return &eth.BlobSidecarsByRangeRequest{
 		StartSlot: b.begin,
-		Count:     uint64(b.end.FlooredSubSlot(b.begin)),
+		Count:     uint64(b.end - b.begin),
 	}
 }
 
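The removed side of these hunks used a floored subtraction so that a batch with end earlier than begin yields a count of 0 instead of underflowing the unsigned slot type; the plain b.end - b.begin on the other side has no such guard, which is what the deleted tests below exercised. A sketch of the floored helper's shape (FlooredSubSlot appears only on the removed side of this diff; the function below is an illustration, not the repository's implementation):

// flooredSub returns a - b, or 0 when b > a, avoiding unsigned underflow.
func flooredSub(a, b uint64) uint64 {
	if b > a {
		return 0
	}
	return a - b
}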
@@ -10,93 +10,6 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBlockRequest(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
begin primitives.Slot
|
|
||||||
end primitives.Slot
|
|
||||||
expectedCount uint64
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "normal case",
|
|
||||||
begin: 100,
|
|
||||||
end: 200,
|
|
||||||
expectedCount: 100,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "end equals begin",
|
|
||||||
begin: 100,
|
|
||||||
end: 100,
|
|
||||||
expectedCount: 0,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "end less than begin (would underflow without check)",
|
|
||||||
begin: 200,
|
|
||||||
end: 100,
|
|
||||||
expectedCount: 0,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "zero values",
|
|
||||||
begin: 0,
|
|
||||||
end: 0,
|
|
||||||
expectedCount: 0,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "single slot",
|
|
||||||
begin: 0,
|
|
||||||
end: 1,
|
|
||||||
expectedCount: 1,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range cases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
b := batch{begin: tc.begin, end: tc.end}
|
|
||||||
req := b.blockRequest()
|
|
||||||
require.Equal(t, tc.expectedCount, req.Count)
|
|
||||||
require.Equal(t, tc.begin, req.StartSlot)
|
|
||||||
require.Equal(t, uint64(1), req.Step)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBlobRequest(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
begin primitives.Slot
|
|
||||||
end primitives.Slot
|
|
||||||
expectedCount uint64
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "normal case",
|
|
||||||
begin: 100,
|
|
||||||
end: 200,
|
|
||||||
expectedCount: 100,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "end equals begin",
|
|
||||||
begin: 100,
|
|
||||||
end: 100,
|
|
||||||
expectedCount: 0,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "end less than begin (would underflow without check)",
|
|
||||||
begin: 200,
|
|
||||||
end: 100,
|
|
||||||
expectedCount: 0,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range cases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
b := batch{begin: tc.begin, end: tc.end}
|
|
||||||
req := b.blobRequest()
|
|
||||||
require.Equal(t, tc.expectedCount, req.Count)
|
|
||||||
require.Equal(t, tc.begin, req.StartSlot)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSortBatchDesc(t *testing.T) {
|
func TestSortBatchDesc(t *testing.T) {
|
||||||
orderIn := []primitives.Slot{100, 10000, 1}
|
orderIn := []primitives.Slot{100, 10000, 1}
|
||||||
orderOut := []primitives.Slot{10000, 100, 1}
|
orderOut := []primitives.Slot{10000, 100, 1}
|
||||||
|
|||||||
@@ -4,6 +4,9 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
|
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
|
||||||
@@ -53,6 +56,32 @@ func (s *Service) verifierRoutine() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A routine that runs in the background to perform batch
|
||||||
|
// KZG verifications by draining the channel and processing all pending requests.
|
||||||
|
func (s *Service) kzgVerifierRoutine() {
|
||||||
|
for {
|
||||||
|
kzgBatch := make([]*kzgVerifier, 0, 1)
|
||||||
|
select {
|
||||||
|
case <-s.ctx.Done():
|
||||||
|
return
|
||||||
|
case kzg := <-s.kzgChan:
|
||||||
|
kzgBatch = append(kzgBatch, kzg)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-s.ctx.Done():
|
||||||
|
return
|
||||||
|
case kzg := <-s.kzgChan:
|
||||||
|
kzgBatch = append(kzgBatch, kzg)
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
verifyKzgBatch(kzgBatch)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
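The kzgVerifierRoutine added in this hunk blocks until one request arrives, then drains whatever else is already queued before verifying the whole batch at once. The same drain pattern in isolation, with a generic payload and invented names:

// drainBatch blocks for the first item, then takes everything already queued on ch.
func drainBatch(ctx context.Context, ch <-chan int) []int {
	batch := make([]int, 0, 1)
	select {
	case <-ctx.Done():
		return nil
	case v := <-ch:
		batch = append(batch, v)
	}
	for {
		select {
		case v := <-ch:
			batch = append(batch, v)
		default:
			return batch // nothing else pending; process the batch
		}
	}
}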
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
|
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
|
||||||
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
|
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
@@ -125,3 +154,71 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
|
|||||||
}
|
}
|
||||||
return aggSet, nil
|
return aggSet, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
|
||||||
|
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||||
|
|
||||||
|
resChan := make(chan error, 1)
|
||||||
|
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case s.kzgChan <- verificationSet:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return pubsub.ValidationIgnore, ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
|
||||||
|
case err := <-resChan:
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Trace("Could not perform batch verification")
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return pubsub.ValidationAccept, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
|
||||||
|
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
|
||||||
|
defer span.End()
|
||||||
|
start := time.Now()
|
||||||
|
if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
|
||||||
|
err = errors.Wrap(err, "could not verify")
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
return pubsub.ValidationReject, err
|
||||||
|
}
|
||||||
|
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
|
||||||
|
return pubsub.ValidationAccept, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
|
||||||
|
if len(kzgBatch) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
|
||||||
|
for _, kzgVerifier := range kzgBatch {
|
||||||
|
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
var verificationErr error
|
||||||
|
start := time.Now()
|
||||||
|
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
|
||||||
|
if err != nil {
|
||||||
|
verificationErr = errors.Wrap(err, "batch KZG verification failed")
|
||||||
|
} else {
|
||||||
|
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(float64(time.Since(start).Milliseconds()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send the same result to all verifiers in the batch
|
||||||
|
for _, verifier := range kzgBatch {
|
||||||
|
verifier.resChan <- verificationErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
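validateWithKzgBatchVerifier above hands a request to the batching routine under a slot-long timeout and, if the batched verification fails, re-verifies the columns on their own so one bad sidecar cannot poison the whole batch. A condensed sketch of that submit-and-fallback flow, with the channel payload and helpers simplified to placeholders:

// verifyBatched submits a result channel to the batcher, waits for the verdict,
// and falls back to a standalone check when the batch as a whole fails.
func verifyBatched(ctx context.Context, submit chan<- chan error, fallback func() error, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	res := make(chan error, 1)
	select {
	case submit <- res:
	case <-ctx.Done():
		return ctx.Err()
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-res:
		if err != nil {
			return fallback() // batch failed; re-verify this request alone
		}
	}
	return nil
}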
@@ -668,7 +668,7 @@ func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p
 
 func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
 	missStr := make([]string, 0, len(missing))
-	for _, k := range missing {
+	for k := range missing {
 		missStr = append(missStr, fmt.Sprintf("%#x", k))
 	}
 	return errors.Wrapf(errMissingBlobsForBlockCommitments,
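A technical note on this hunk: with missing declared as [][]byte, ranging with two values yields the byte slices themselves, while the single-value form on the right-hand side yields integer indices, so the %#x formatting would render index values rather than commitment bytes. A tiny illustration of the two range forms:

// formatMissing contrasts value iteration with index iteration over [][]byte.
func formatMissing() {
	missing := [][]byte{{0xaa, 0xbb}}
	for _, v := range missing {
		_ = fmt.Sprintf("%#x", v) // "0xaabb": the commitment bytes
	}
	for i := range missing {
		_ = fmt.Sprintf("%#x", i) // "0x0": the slice index
	}
}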
@@ -226,6 +226,8 @@ func (s *Service) Start() {
|
|||||||
|
|
||||||
// fetchOriginSidecars fetches origin sidecars
|
// fetchOriginSidecars fetches origin sidecars
|
||||||
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
|
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
|
||||||
|
const delay = 10 * time.Second // The delay between each attempt to fetch origin data column sidecars
|
||||||
|
|
||||||
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
||||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||||
return nil
|
return nil
|
||||||
@@ -258,7 +260,7 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
 	blockVersion := roBlock.Version()
 
 	if blockVersion >= version.Fulu {
-		if err := s.fetchOriginDataColumnSidecars(roBlock); err != nil {
+		if err := s.fetchOriginDataColumnSidecars(roBlock, delay); err != nil {
 			return errors.Wrap(err, "fetch origin columns")
 		}
 		return nil
@@ -412,7 +414,7 @@ func (s *Service) fetchOriginBlobSidecars(pids []peer.ID, rob blocks.ROBlock) er
 	return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
 }
 
-func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
+func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay time.Duration) error {
 	const (
 		errorMessage     = "Failed to fetch origin data column sidecars"
 		warningIteration = 10
@@ -499,6 +501,7 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
 		log := log.WithFields(logrus.Fields{
 			"attempt":        attempt,
 			"missingIndices": helpers.SortedPrettySliceFromMap(missingIndicesByRoot[root]),
+			"delay":          delay,
 		})
 
 		logFunc := log.Debug
@@ -507,6 +510,13 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock) error {
 		}
 
 		logFunc("Failed to fetch some origin data column sidecars, retrying later")
+
+		// Wait before retrying, respecting context cancellation.
+		select {
+		case <-s.ctx.Done():
+			return s.ctx.Err()
+		case <-time.After(delay):
+		}
 	}
 }
@@ -687,7 +687,10 @@ func TestFetchOriginColumns(t *testing.T) {
 	cfg.BlobSchedule = []params.BlobScheduleEntry{{Epoch: 0, MaxBlobsPerBlock: 10}}
 	params.OverrideBeaconConfig(cfg)
 
-	const blobCount = 1
+	const (
+		delay     = 0
+		blobCount = 1
+	)
 
 	t.Run("block has no commitments", func(t *testing.T) {
 		service := new(Service)
@@ -699,7 +702,7 @@ func TestFetchOriginColumns(t *testing.T) {
 		roBlock, err := blocks.NewROBlock(signedBlock)
 		require.NoError(t, err)
 
-		err = service.fetchOriginDataColumnSidecars(roBlock)
+		err = service.fetchOriginDataColumnSidecars(roBlock, delay)
 		require.NoError(t, err)
 	})
 
@@ -721,7 +724,7 @@ func TestFetchOriginColumns(t *testing.T) {
 		err := storage.Save(verifiedSidecars)
 		require.NoError(t, err)
 
-		err = service.fetchOriginDataColumnSidecars(roBlock)
+		err = service.fetchOriginDataColumnSidecars(roBlock, delay)
 		require.NoError(t, err)
 	})
 
@@ -826,7 +829,7 @@ func TestFetchOriginColumns(t *testing.T) {
 		attempt++
 	})
 
-	err = service.fetchOriginDataColumnSidecars(roBlock)
+	err = service.fetchOriginDataColumnSidecars(roBlock, delay)
 	require.NoError(t, err)
 
 	// Check all corresponding sidecars are saved in the store.
@@ -1,14 +1,339 @@
 package sync
 
 import (
+	"context"
+	"sync"
 	"testing"
+	"time"
+
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
+	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+	"github.com/OffchainLabs/prysm/v7/testing/assert"
 	"github.com/OffchainLabs/prysm/v7/testing/require"
 	"github.com/OffchainLabs/prysm/v7/testing/util"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
 )
 
+func TestValidateWithKzgBatchVerifier(t *testing.T) {
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	tests := []struct {
+		name           string
+		dataColumns    []blocks.RODataColumn
+		expectedResult pubsub.ValidationResult
+		expectError    bool
+	}{
+		{
+			name:           "single valid data column",
+			dataColumns:    createValidTestDataColumns(t, 1),
+			expectedResult: pubsub.ValidationAccept,
+			expectError:    false,
+		},
+		{
+			name:           "multiple valid data columns",
+			dataColumns:    createValidTestDataColumns(t, 3),
+			expectedResult: pubsub.ValidationAccept,
+			expectError:    false,
+		},
+		{
+			name:           "single invalid data column",
+			dataColumns:    createInvalidTestDataColumns(t, 1),
+			expectedResult: pubsub.ValidationReject,
+			expectError:    true,
+		},
+		{
+			name:           "empty data column slice",
+			dataColumns:    []blocks.RODataColumn{},
+			expectedResult: pubsub.ValidationAccept,
+			expectError:    false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := t.Context()
+
+			service := &Service{
+				ctx:     ctx,
+				kzgChan: make(chan *kzgVerifier, 100),
+			}
+			go service.kzgVerifierRoutine()
+
+			result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)
+
+			require.Equal(t, tt.expectedResult, result)
+			if tt.expectError {
+				assert.NotNil(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func TestVerifierRoutine(t *testing.T) {
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	t.Run("processes single request", func(t *testing.T) {
+		ctx := t.Context()
+
+		service := &Service{
+			ctx:     ctx,
+			kzgChan: make(chan *kzgVerifier, 100),
+		}
+		go service.kzgVerifierRoutine()
+
+		dataColumns := createValidTestDataColumns(t, 1)
+		resChan := make(chan error, 1)
+		service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
+
+		select {
+		case err := <-resChan:
+			require.NoError(t, err)
+		case <-time.After(time.Second):
+			t.Fatal("timeout waiting for verification result")
+		}
+	})
+
+	t.Run("batches multiple requests", func(t *testing.T) {
+		ctx := t.Context()
+
+		service := &Service{
+			ctx:     ctx,
+			kzgChan: make(chan *kzgVerifier, 100),
+		}
+		go service.kzgVerifierRoutine()
+
+		const numRequests = 5
+		resChans := make([]chan error, numRequests)
+
+		for i := range numRequests {
+			dataColumns := createValidTestDataColumns(t, 1)
+			resChan := make(chan error, 1)
+			resChans[i] = resChan
+			service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
+		}
+
+		for i := range numRequests {
+			select {
+			case err := <-resChans[i]:
+				require.NoError(t, err)
+			case <-time.After(time.Second):
+				t.Fatalf("timeout waiting for verification result %d", i)
+			}
+		}
+	})
+
+	t.Run("context cancellation stops routine", func(t *testing.T) {
+		ctx, cancel := context.WithCancel(context.Background())
+
+		service := &Service{
+			ctx:     ctx,
+			kzgChan: make(chan *kzgVerifier, 100),
+		}
+
+		routineDone := make(chan struct{})
+		go func() {
+			service.kzgVerifierRoutine()
+			close(routineDone)
+		}()
+
+		cancel()
+
+		select {
+		case <-routineDone:
+		case <-time.After(time.Second):
+			t.Fatal("timeout waiting for routine to exit")
+		}
+	})
+}
+
+func TestVerifyKzgBatch(t *testing.T) {
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	t.Run("all valid data columns succeed", func(t *testing.T) {
+		dataColumns := createValidTestDataColumns(t, 3)
+		resChan := make(chan error, 1)
+		kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}
+
+		verifyKzgBatch(kzgVerifiers)
+
+		select {
+		case err := <-resChan:
+			require.NoError(t, err)
+		case <-time.After(time.Second):
+			t.Fatal("timeout waiting for batch verification")
+		}
+	})
+
+	t.Run("invalid proofs fail entire batch", func(t *testing.T) {
+		validColumns := createValidTestDataColumns(t, 1)
+		invalidColumns := createInvalidTestDataColumns(t, 1)
+		allColumns := append(validColumns, invalidColumns...)
+
+		resChan := make(chan error, 1)
+		kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}
+
+		verifyKzgBatch(kzgVerifiers)
+
+		select {
+		case err := <-resChan:
+			assert.NotNil(t, err)
+		case <-time.After(time.Second):
+			t.Fatal("timeout waiting for batch verification")
+		}
+	})
+
+	t.Run("empty batch handling", func(t *testing.T) {
+		verifyKzgBatch([]*kzgVerifier{})
+	})
+}
+
+func TestKzgBatchVerifierConcurrency(t *testing.T) {
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	ctx := t.Context()
+
+	service := &Service{
+		ctx:     ctx,
+		kzgChan: make(chan *kzgVerifier, 100),
+	}
+	go service.kzgVerifierRoutine()
+
+	const numGoroutines = 10
+	const numRequestsPerGoroutine = 5
+
+	var wg sync.WaitGroup
+	wg.Add(numGoroutines)
+
+	// Multiple goroutines sending verification requests simultaneously
+	for i := range numGoroutines {
+		go func(goroutineID int) {
+			defer wg.Done()
+
+			for range numRequestsPerGoroutine {
+				dataColumns := createValidTestDataColumns(t, 1)
+				result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
+				require.Equal(t, pubsub.ValidationAccept, result)
+				require.NoError(t, err)
+			}
+		}(i)
+	}
+
+	wg.Wait()
+}
+
+func TestKzgBatchVerifierFallback(t *testing.T) {
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
+		ctx := t.Context()
+
+		service := &Service{
+			ctx:     ctx,
+			kzgChan: make(chan *kzgVerifier, 100),
+		}
+		go service.kzgVerifierRoutine()
+
+		validColumns := createValidTestDataColumns(t, 1)
+		invalidColumns := createInvalidTestDataColumns(t, 1)
+
+		result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
+		require.Equal(t, pubsub.ValidationAccept, result)
+		require.NoError(t, err)
+
+		result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
+		require.Equal(t, pubsub.ValidationReject, result)
+		assert.NotNil(t, err)
+	})
+
+	t.Run("empty data columns fallback", func(t *testing.T) {
+		ctx := t.Context()
+
+		service := &Service{
+			ctx:     ctx,
+			kzgChan: make(chan *kzgVerifier, 100),
+		}
+		go service.kzgVerifierRoutine()
+
+		result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
+		require.Equal(t, pubsub.ValidationAccept, result)
+		require.NoError(t, err)
+	})
+}
+
+func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
+	err := kzg.Start()
+	require.NoError(t, err)
+
+	params.SetupTestConfigCleanup(t)
+	cfg := params.BeaconConfig().Copy()
+	cfg.SecondsPerSlot = 0
+	params.OverrideBeaconConfig(cfg)
+
+	ctx, cancel := context.WithCancel(t.Context())
+	defer cancel()
+
+	service := &Service{
+		ctx:     ctx,
+		kzgChan: make(chan *kzgVerifier),
+	}
+	go service.kzgVerifierRoutine()
+
+	result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
+	require.Equal(t, pubsub.ValidationIgnore, result)
+	require.ErrorIs(t, err, context.DeadlineExceeded)
+
+	done := make(chan struct{})
+	go func() {
+		_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
+		close(done)
+	}()
+
+	select {
+	case <-done:
+	case <-time.After(500 * time.Millisecond):
+		t.Fatal("validateWithKzgBatchVerifier blocked")
+	}
+}
+
+func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
+	cancelledCtx, cancel := context.WithCancel(t.Context())
+	cancel()
+
+	service := &Service{
+		ctx:     context.Background(),
+		kzgChan: make(chan *kzgVerifier),
+	}
+
+	done := make(chan struct{})
+	go func() {
+		result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
+		require.Equal(t, pubsub.ValidationIgnore, result)
+		require.ErrorIs(t, err, context.Canceled)
+		close(done)
+	}()
+
+	select {
+	case <-done:
+	case <-time.After(500 * time.Millisecond):
+		t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
+	}
+
+	select {
+	case <-service.kzgChan:
+		t.Fatal("verificationSet was sent to kzgChan despite canceled context")
+	default:
+	}
+}
+
 func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
 	_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
 	if len(roSidecars) >= count {
@@ -77,13 +77,8 @@ func SendBeaconBlocksByRangeRequest(
 	}
 	defer closeStream(stream, log)
 
-	// Cap the slice capacity to MaxRequestBlock to prevent panic from invalid Count values.
-	// This guards against upstream bugs that may produce astronomically large Count values
-	// (e.g., due to unsigned integer underflow).
-	sliceCap := min(req.Count, params.MaxRequestBlock(slots.ToEpoch(tor.CurrentSlot())))
-
 	// Augment block processing function, if non-nil block processor is provided.
-	blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, sliceCap)
+	blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, req.Count)
 	process := func(blk interfaces.ReadOnlySignedBeaconBlock) error {
 		blocks = append(blocks, blk)
 		if blockProcessor != nil {
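The comments removed above explain why the capacity was previously capped: req.Count is an unsigned value supplied upstream, and an underflow there can turn a small request into a near-max uint64. A hypothetical illustration of that failure mode, not code from the repository:

// Hypothetical: how an unsigned underflow upstream could yield an absurd Count.
var startSlot, endSlot uint64 = 100, 90
count := endSlot - startSlot // wraps around to 18446744073709551606
// make([]T, 0, count) with such a capacity would panic or attempt to allocate
// far more memory than the node has, which is what the removed cap guarded against.
_ = count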
@@ -168,6 +168,7 @@ type Service struct {
 	syncContributionBitsOverlapLock  sync.RWMutex
 	syncContributionBitsOverlapCache *lru.Cache
 	signatureChan                    chan *signatureVerifier
+	kzgChan                          chan *kzgVerifier
 	clockWaiter                      startup.ClockWaiter
 	initialSyncComplete              chan struct{}
 	verifierWaiter                   *verification.InitializerWaiter
@@ -208,7 +209,10 @@ func NewService(ctx context.Context, opts ...Option) *Service {
 	}
 	// Initialize signature channel with configured limit
 	r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
+	// Initialize KZG channel with fixed buffer size of 100.
+	// This buffer size is designed to handle burst traffic of data column gossip messages:
+	// - Data columns arrive less frequently than attestations (default batchVerifierLimit=1000)
+	r.kzgChan = make(chan *kzgVerifier, 100)
 	// Correctly remove it from our seen pending block map.
 	// The eviction method always assumes that the mutex is held.
 	r.slotToPendingBlocks.OnEvicted(func(s string, i any) {
@@ -261,6 +265,7 @@ func (s *Service) Start() {
 	s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
 
 	go s.verifierRoutine()
+	go s.kzgVerifierRoutine()
 	go s.startDiscoveryAndSubscriptions()
 	go s.processDataColumnLogs()
 
@@ -144,9 +144,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	}
 
 	// [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
-	if err := verifier.SidecarKzgProofVerified(); err != nil {
-		return pubsub.ValidationReject, err
+	validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
+	if validationResult != pubsub.ValidationAccept {
+		return validationResult, err
 	}
+	// Mark KZG verification as satisfied since we did it via batch verifier
+	verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
+
 	// [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
 	// with valid header signature, sidecar inclusion proof, and kzg proof.
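The validateWithKzgBatchVerifier helper called above is defined elsewhere in the branch and is not part of this hunk. Judging by the tests earlier in this diff (ValidationIgnore with context.Canceled when the caller's context is already done, ValidationIgnore with context.DeadlineExceeded when no result arrives within roughly a slot), one plausible shape is sketched below; the timeout derivation and the exact flow are assumptions, not the actual Prysm implementation, and the separate validateUnbatchedColumnsKzg fallback path is not modeled:

// Sketch only; names reused from the diff, behavior inferred from the tests.
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
	// Bound the wait so a stalled verifier routine cannot block gossip validation forever.
	timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	resChan := make(chan error, 1)
	request := &kzgVerifier{dataColumns: columns, resChan: resChan}

	// Hand the work to the batching routine, giving up if the context ends first.
	select {
	case s.kzgChan <- request:
	case <-ctx.Done():
		return pubsub.ValidationIgnore, ctx.Err()
	}

	// Wait for the batched verification result.
	select {
	case err := <-resChan:
		if err != nil {
			return pubsub.ValidationReject, err
		}
		return pubsub.ValidationAccept, nil
	case <-ctx.Done():
		return pubsub.ValidationIgnore, ctx.Err()
	}
}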
@@ -71,7 +71,10 @@ func TestValidateDataColumn(t *testing.T) {
 		ctx:                 ctx,
 		newColumnsVerifier:  newDataColumnsVerifier,
 		seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
+		kzgChan:             make(chan *kzgVerifier, 100),
 	}
+	// Start the KZG verifier routine for batch verification
+	go service.kzgVerifierRoutine()
+
 	// Encode a `beaconBlock` message instead of expected.
 	buf := new(bytes.Buffer)
@@ -1027,10 +1027,10 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
 			sc:  signatureCache,
 			sr:  &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
 			hsp: &mockHeadStateProvider{
 				headRoot:          parentRoot[:],    // Same as parent
 				headSlot:          32,               // Epoch 1
 				headState:         fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
 				headStateReadOnly: nil,              // Should not use ReadOnly path
 			},
 			fc: &mockForkchoicer{
 				// Return same root for both to simulate same chain
@@ -1045,8 +1045,8 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
 	// Wrap to detect HeadState call
 	originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
 	wrappedHsp := &mockHeadStateProvider{
 		headRoot:  originalHsp.headRoot,
 		headSlot:  originalHsp.headSlot,
 		headState: originalHsp.headState,
 	}
 	initializer.shared.hsp = &headStateCallTracker{
@@ -1,3 +0,0 @@
-### Ignored
-
-- add `cmd` and all it's subcategories to the log.go generation process.
@@ -1,3 +0,0 @@
-### Added
-
-- Added a version log at startup to display the version of the build.
@@ -1,3 +0,0 @@
-### Fixed
-
-- Fixed a bug in `hack/check-logs.sh` where untracked files were ignored.
@@ -1,3 +0,0 @@
-### Ignored
-
-- Add `NewBeaconStateGloas()`.
@@ -1,3 +0,0 @@
-### Ignored
-
-- Added a field `path` for the ephemeral log file initialization log.
@@ -1,3 +0,0 @@
-### Added
-
-- Flag `--log.vmodule` to set per-package verbosity levels for logging.
@@ -1,3 +0,0 @@
-### Ignored
-
-- small touch ups on state diff code.
@@ -1,2 +0,0 @@
-### Fixed
-- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS.
@@ -1,3 +0,0 @@
-### Fixed
-
-- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch).
@@ -1,3 +0,0 @@
-### Fixed
-
-- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes.
@@ -1,3 +0,0 @@
-### Ignored
-
-- optimizing /eth/v1/beacon/blocks/{block_id}/root endpoint by reusing blocker lookup instead of duplicated logic.
@@ -1,3 +0,0 @@
-### Added
-
-- gloas block return support for /eth/v2/beacon/blocks/{block_id} and /eth/v1/beacon/blocks/{block_id}/root endpoints.
@@ -1,3 +0,0 @@
-### Ignored
-
-- delayed head evaluator check to mid epoch for e2e.
@@ -1,3 +0,0 @@
-### Ignored
-
-- updating phase 0 constants for ethspecify
@@ -1,3 +0,0 @@
-### Removed
-
-- Batching of KZG verification for incoming via gossip data column sidecars
@@ -1,2 +0,0 @@
-### Removed
-- `--disable-get-blobs-v2` flag from help.
@@ -1,3 +0,0 @@
-### Removed
-
-- Remove unused `delay` parameter from `fetchOriginDataColumnSidecars` function.
@@ -1,2 +0,0 @@
-### Fixed
-- Fix hashtree release builds.
Some files were not shown because too many files have changed in this diff.