Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: sync-valid...revert-140 (15 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 97f1c6f853 | |
| | adc875b20d | |
| | 8cd249c1c8 | |
| | 305d5850e7 | |
| | df3a9f218d | |
| | ae451a3a02 | |
| | 17561a6576 | |
| | b842b7ea01 | |
| | 9bbe12e28c | |
| | 0674cf64cc | |
| | 3413d05b34 | |
| | 070a765d24 | |
| | 8ac1647436 | |
| | dfe31c9242 | |
| | b7866be3a9 | |
@@ -50,7 +50,7 @@ func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
 			accepted := false
 			for _, acceptedType := range acceptedMediaTypes {
-				if strings.TrimSpace(contentType) == strings.TrimSpace(acceptedType) {
+				if strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(acceptedType)) {
 					accepted = true
 					break
 				}
@@ -31,7 +31,7 @@ func TestNormalizeQueryValuesHandler(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
+			req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -96,6 +96,11 @@ func TestContentTypeHandler(t *testing.T) {
 			expectedStatusCode: http.StatusOK,
 			isGet:              true,
 		},
+		{
+			name:               "Content type contains charset is ok",
+			contentType:        "application/json; charset=utf-8",
+			expectedStatusCode: http.StatusOK,
+		},
 	}

 	for _, tt := range tests {
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
+        "attestation.go",
         "churn.go",
         "consolidations.go",
         "deposits.go",
beacon-chain/core/electra/attestation.go (new file, 7 lines)
@@ -0,0 +1,7 @@
+package electra
+
+import "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
+
+var (
+	ProcessAttestationsNoVerifySignature = altair.ProcessAttestationsNoVerifySignature
+)
@@ -77,11 +77,11 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
 	}
 }

-// ProcessDepositReceipts is a function as part of electra to process execution layer deposits
-func ProcessDepositReceipts(ctx context.Context, beaconState state.BeaconState, receipts []*enginev1.DepositReceipt) (state.BeaconState, error) {
-	_, span := trace.StartSpan(ctx, "electra.ProcessDepositReceipts")
+// ProcessDepositRequests is a function as part of electra to process execution layer deposits
+func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
+	_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
 	defer span.End()
 	// TODO: replace with 6110 logic
-	// return b.ProcessDepositReceipts(beaconState, receipts)
+	// return b.ProcessDepositRequests(beaconState, requests)
 	return beaconState, nil
 }
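For orientation, a minimal sketch (not part of the diff) of how the renamed electra helpers are threaded through block processing, mirroring the calls in the `electraOperations` hunk later in this compare. The wrapper function, package name, and import paths are assumptions for illustration only.

```go
// Illustrative sketch only; package and function names are assumptions.
package sketch

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// applyElectraRequests threads execution-layer requests through the renamed
// helpers, mirroring the electraOperations hunk further down in this diff.
func applyElectraRequests(
	ctx context.Context,
	st state.BeaconState,
	wrs []*enginev1.WithdrawalRequest,
	drs []*enginev1.DepositRequest,
) (state.BeaconState, error) {
	st, err := electra.ProcessWithdrawalRequests(ctx, st, wrs)
	if err != nil {
		return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
	}
	// Currently a pass-through pending the EIP-6110 logic noted in the TODO above.
	st, err = electra.ProcessDepositRequests(ctx, st, drs)
	if err != nil {
		return nil, errors.Wrap(err, "could not process deposit requests")
	}
	return st, nil
}
```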
@@ -38,7 +38,7 @@ import (
 //	withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
 //	blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
 //	excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
-//	deposit_receipts_root=Root(),  # [New in Electra:EIP6110]
+//	deposit_requests_root=Root(),  # [New in Electra:EIP6110]
 //	withdrawal_requests_root=Root(),  # [New in Electra:EIP7002],
 //	)
 //
@@ -94,7 +94,7 @@ import (
 //	# Deep history valid from Capella onwards
 //	historical_summaries=pre.historical_summaries,
 //	# [New in Electra:EIP6110]
-//	deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,
+//	deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX,
 //	# [New in Electra:EIP7251]
 //	deposit_balance_to_consume=0,
 //	exit_balance_to_consume=0,
@@ -261,14 +261,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
 			WithdrawalsRoot: wdRoot,
 			ExcessBlobGas:   excessBlobGas,
 			BlobGasUsed:     blobGasUsed,
-			DepositReceiptsRoot:    bytesutil.Bytes32(0), // [New in Electra:EIP6110]
+			DepositRequestsRoot:    bytesutil.Bytes32(0), // [New in Electra:EIP6110]
 			WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
 		},
 		NextWithdrawalIndex:          wi,
 		NextWithdrawalValidatorIndex: vi,
 		HistoricalSummaries:          summaries,

-		DepositReceiptsStartIndex: params.BeaconConfig().UnsetDepositReceiptsStartIndex,
+		DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
 		DepositBalanceToConsume:   0,
 		ExitBalanceToConsume:      helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
 		EarliestExitEpoch:         earliestExitEpoch,
@@ -128,7 +128,7 @@ func TestUpgradeToElectra(t *testing.T) {
 		BlockHash:        prevHeader.BlockHash(),
 		TransactionsRoot: txRoot,
 		WithdrawalsRoot:  wdRoot,
-		DepositReceiptsRoot:    bytesutil.Bytes32(0),
+		DepositRequestsRoot:    bytesutil.Bytes32(0),
 		WithdrawalRequestsRoot: bytesutil.Bytes32(0),
 	}
 	require.DeepEqual(t, wanted, protoHeader)
@@ -145,9 +145,9 @@ func TestUpgradeToElectra(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, 0, len(summaries))

-	startIndex, err := mSt.DepositReceiptsStartIndex()
+	startIndex, err := mSt.DepositRequestsStartIndex()
 	require.NoError(t, err)
-	require.Equal(t, params.BeaconConfig().UnsetDepositReceiptsStartIndex, startIndex)
+	require.Equal(t, params.BeaconConfig().UnsetDepositRequestsStartIndex, startIndex)

 	balance, err := mSt.DepositBalanceToConsume()
 	require.NoError(t, err)
@@ -19,15 +19,15 @@ import (
 	"go.opencensus.io/trace"
 )

-// ProcessExecutionLayerWithdrawalRequests processes the validator withdrawals from the provided execution payload
+// ProcessWithdrawalRequests processes the validator withdrawals from the provided execution payload
 // into the beacon state triggered by the execution layer.
 //
 // Spec pseudocode definition:
 //
-//	def process_execution_layer_withdrawal_request(
+//	def process_withdrawal_request(
 //
 //	    state: BeaconState,
-//	    execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest
+//	    withdrawal_request: WithdrawalRequest
 //
 //	) -> None:
 //	    amount = execution_layer_withdrawal_request.amount
@@ -86,8 +86,8 @@ import (
 //	    amount=to_withdraw,
 //	    withdrawable_epoch=withdrawable_epoch,
 //	))
-func ProcessExecutionLayerWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.ExecutionLayerWithdrawalRequest) (state.BeaconState, error) {
-	ctx, span := trace.StartSpan(ctx, "electra.ProcessExecutionLayerWithdrawalRequests")
+func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
+	ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
 	defer span.End()
 	currentEpoch := slots.ToEpoch(st.Slot())
 	for _, wr := range wrs {
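As a quick illustration of the renamed request type (not part of the diff), here is a fixture built the way the updated tests in this compare build theirs; only fields visible in those tests are populated, and the package name and enginev1 import path are assumptions.

```go
// Illustrative sketch only; package name and import paths are assumptions.
package electrasketch

import (
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// sampleWithdrawalRequest builds the renamed request type the same way the
// tests in this diff do; only the fields those tests set are populated here.
func sampleWithdrawalRequest(pubkey []byte) *enginev1.WithdrawalRequest {
	return &enginev1.WithdrawalRequest{
		SourceAddress:   bytesutil.PadTo([]byte("sa"), 20),
		ValidatorPubkey: bytesutil.PadTo(pubkey, 48),
	}
}
```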
@@ -19,7 +19,7 @@ import (
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
func TestProcessWithdrawRequests(t *testing.T) {
|
||||
logHook := test.NewGlobal()
|
||||
source, err := hexutil.Decode("0xb20a608c624Ca5003905aA834De7156C68b2E1d0")
|
||||
require.NoError(t, err)
|
||||
@@ -30,7 +30,7 @@ func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
type args struct {
|
||||
st state.BeaconState
|
||||
wrs []*enginev1.ExecutionLayerWithdrawalRequest
|
||||
wrs []*enginev1.WithdrawalRequest
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -56,7 +56,7 @@ func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
@@ -121,7 +121,7 @@ func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
@@ -190,7 +190,7 @@ func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
@@ -227,7 +227,7 @@ func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
@@ -266,7 +266,7 @@ func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
@@ -284,9 +284,9 @@ func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
got, err := electra.ProcessExecutionLayerWithdrawalRequests(context.Background(), tt.args.st, tt.args.wrs)
|
||||
got, err := electra.ProcessWithdrawalRequests(context.Background(), tt.args.st, tt.args.wrs)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ProcessExecutionLayerWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
tt.wantFn(t, got)
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 )

@@ -91,6 +92,14 @@ func IsAggregated(attestation ethpb.Att) bool {
 //
 //	return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
 func ComputeSubnetForAttestation(activeValCount uint64, att ethpb.Att) uint64 {
+	if att.Version() >= version.Electra {
+		committeeIndex := 0
+		committeeIndices := att.CommitteeBitsVal().BitIndices()
+		if len(committeeIndices) > 0 {
+			committeeIndex = committeeIndices[0]
+		}
+		return ComputeSubnetFromCommitteeAndSlot(activeValCount, primitives.CommitteeIndex(committeeIndex), att.GetData().Slot)
+	}
 	return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
 }

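A small usage sketch (not in the diff) of the new Electra branch above: the subnet is derived from the first set bit of the committee bitfield rather than from `AttestationData.CommitteeIndex`. The package name, wrapper function, and the helpers import path are assumptions; the constructors and field names match the test further down in this compare.

```go
// Illustrative sketch only; package name and helpers import path are assumptions.
package helperssketch

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// electraSubnet mirrors the new branch above: for Electra attestations the
// committee index comes from the first set bit of the committee bitfield.
func electraSubnet(activeValCount uint64, slot primitives.Slot) uint64 {
	cb := primitives.NewAttestationCommitteeBits()
	cb.SetBitAt(4, true) // committee 4, as in the test below
	att := &ethpb.AttestationElectra{
		CommitteeBits: cb,
		Data:          &ethpb.AttestationData{Slot: slot},
	}
	return helpers.ComputeSubnetForAttestation(activeValCount, att)
}
```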
@@ -73,21 +73,37 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
att := ðpb.Attestation{
|
||||
AggregationBits: []byte{'A'},
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
Source: nil,
|
||||
Target: nil,
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(att.Data.Slot))
|
||||
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(34))
|
||||
require.NoError(t, err)
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
AggregationBits: []byte{'A'},
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
})
|
||||
t.Run("Electra", func(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(4, true)
|
||||
att := ðpb.AttestationElectra{
|
||||
AggregationBits: []byte{'A'},
|
||||
CommitteeBits: cb,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 34,
|
||||
BeaconBlockRoot: []byte{'C'},
|
||||
},
|
||||
Signature: []byte{'B'},
|
||||
}
|
||||
sub := helpers.ComputeSubnetForAttestation(valCount, att)
|
||||
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
|
||||
})
|
||||
}
|
||||
|
||||
func Test_ValidateAttestationTime(t *testing.T) {
|
||||
|
||||
@@ -227,7 +227,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
// # [Modified in Electra:EIP6110]
|
||||
// # Disable former deposit mechanism once all prior deposits are processed
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
|
||||
// if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
// else:
|
||||
@@ -245,7 +245,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||
// # [New in Electra:EIP7002:EIP7251]
|
||||
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
|
||||
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
|
||||
// for_ops(body.execution_payload.deposit_requests, process_deposit_requests) # [New in Electra:EIP6110]
|
||||
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
|
||||
func ProcessOperationsNoVerifyAttsSigs(
|
||||
ctx context.Context,
|
||||
@@ -401,7 +401,7 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
// # [Modified in Electra:EIP6110]
|
||||
// # Disable former deposit mechanism once all prior deposits are processed
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
|
||||
// if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
// else:
|
||||
@@ -419,7 +419,7 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||
// # [New in Electra:EIP7002:EIP7251]
|
||||
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
|
||||
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
|
||||
// for_ops(body.execution_payload.deposit_requests, process_deposit_requests) # [New in Electra:EIP6110]
|
||||
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
|
||||
func electraOperations(
|
||||
ctx context.Context,
|
||||
@@ -445,12 +445,12 @@ func electraOperations(
|
||||
if !ok {
|
||||
return nil, errors.New("could not cast execution data to electra execution data")
|
||||
}
|
||||
st, err = electra.ProcessExecutionLayerWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
|
||||
st, err = electra.ProcessWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
|
||||
}
|
||||
|
||||
st, err = electra.ProcessDepositReceipts(ctx, st, exe.DepositReceipts()) // TODO: EIP-6110 deposit changes.
|
||||
st, err = electra.ProcessDepositRequests(ctx, st, exe.DepositRequests()) // TODO: EIP-6110 deposit changes.
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit receipts")
|
||||
}
|
||||
|
||||
@@ -149,7 +149,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
DepositReceiptsRoot: make([]byte, 32),
|
||||
DepositRequestsRoot: make([]byte, 32),
|
||||
WithdrawalRequestsRoot: make([]byte, 32),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -631,7 +631,7 @@ func fullPayloadFromPayloadBody(
|
||||
Withdrawals: body.Withdrawals,
|
||||
ExcessBlobGas: ebg,
|
||||
BlobGasUsed: bgu,
|
||||
DepositReceipts: dr,
|
||||
DepositRequests: dr,
|
||||
WithdrawalRequests: wr,
|
||||
}) // We can't get the block value and don't care about the block value for this instance
|
||||
default:
|
||||
@@ -780,8 +780,8 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) {
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*pb.Withdrawal, 0),
|
||||
WithdrawalRequests: make([]*pb.ExecutionLayerWithdrawalRequest, 0),
|
||||
DepositReceipts: make([]*pb.DepositReceipt, 0),
|
||||
WithdrawalRequests: make([]*pb.WithdrawalRequest, 0),
|
||||
DepositRequests: make([]*pb.DepositRequest, 0),
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.Wrapf(ErrUnsupportedVersion, "version=%s", version.String(v))
|
||||
|
||||
@@ -1559,7 +1559,7 @@ func fixturesStruct() *payloadFixtures {
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
BlobGasUsed: 2,
|
||||
ExcessBlobGas: 3,
|
||||
DepositReceipts: dr,
|
||||
DepositRequests: dr,
|
||||
WithdrawalRequests: wr,
|
||||
}
|
||||
hexUint := hexutil.Uint64(1)
|
||||
|
||||
@@ -66,7 +66,7 @@ func payloadToBody(t *testing.T, ed interfaces.ExecutionData) *pb.ExecutionPaylo
|
||||
}
|
||||
eed, isElectra := ed.(interfaces.ExecutionDataElectra)
|
||||
if isElectra {
|
||||
body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositReceipts())
|
||||
body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositRequests())
|
||||
body.WithdrawalRequests = pb.ProtoWithdrawalRequestsToJson(eed.WithdrawalRequests())
|
||||
}
|
||||
return body
|
||||
|
||||
@@ -69,7 +69,6 @@ go_library(
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -197,7 +196,3 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
|
||||
return params.SetActive(c)
|
||||
}
|
||||
|
||||
func configureFastSSZHashingAlgorithm() {
|
||||
fastssz.EnableVectorizedHTR = true
|
||||
}
|
||||
|
||||
@@ -120,7 +120,6 @@ type BeaconNode struct {
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
blobRetentionEpochs primitives.Epoch
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
}
|
||||
@@ -278,8 +277,6 @@ func configureBeacon(cliCtx *cli.Context) error {
|
||||
return errors.Wrap(err, "could not configure execution setting")
|
||||
}
|
||||
|
||||
configureFastSSZHashingAlgorithm()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// Option for beacon node configuration.
|
||||
@@ -51,11 +50,3 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
|
||||
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.blobRetentionEpochs = e
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,7 +336,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with public filter
|
||||
// test with public filter
|
||||
cidr := "public"
|
||||
ip := "212.67.10.122"
|
||||
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
|
||||
@@ -348,7 +348,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
|
||||
t.Errorf("Expected multiaddress with ip %s to not be rejected since we allow public addresses", ip)
|
||||
}
|
||||
|
||||
ip = "192.168.1.0" //this is private and should fail
|
||||
ip = "192.168.1.0" // this is private and should fail
|
||||
multiAddress, err = ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
|
||||
require.NoError(t, err)
|
||||
valid = s.InterceptAddrDial("", multiAddress)
|
||||
@@ -356,7 +356,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
|
||||
t.Errorf("Expected multiaddress with ip %s to be rejected since we are only allowing public addresses", ip)
|
||||
}
|
||||
|
||||
//test with public allow filter, with a public address added to the deny list
|
||||
// test with public allow filter, with a public address added to the deny list
|
||||
invalidPublicIp := "212.67.10.122"
|
||||
validPublicIp := "91.65.69.69"
|
||||
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: "public", DenyListCIDR: []string{"212.67.89.112/16"}})
|
||||
@@ -384,7 +384,7 @@ func TestService_InterceptAddrDial_Private(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "private"
|
||||
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
|
||||
require.NoError(t, err)
|
||||
@@ -413,7 +413,7 @@ func TestService_InterceptAddrDial_AllowPrivate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "private"
|
||||
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
|
||||
require.NoError(t, err)
|
||||
@@ -442,7 +442,7 @@ func TestService_InterceptAddrDial_DenyPublic(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "public"
|
||||
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
|
||||
require.NoError(t, err)
|
||||
@@ -471,7 +471,7 @@ func TestService_InterceptAddrDial_AllowConflict(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
var err error
|
||||
//test with private filter
|
||||
// test with private filter
|
||||
cidr := "public"
|
||||
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr, "192.168.0.0/16"}})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
@@ -16,7 +15,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
@@ -51,7 +49,7 @@ func (quicProtocol) ENRKey() string { return "quic" }
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
func (s *Service) RefreshENR() {
|
||||
// return early if discv5 isnt running
|
||||
// return early if discv5 isn't running
|
||||
if s.dv5Listener == nil || !s.isInitialized() {
|
||||
return
|
||||
}
|
||||
@@ -108,7 +106,7 @@ func (s *Service) RefreshENR() {
|
||||
|
||||
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
|
||||
func (s *Service) listenForNewNodes() {
|
||||
iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
defer iterator.Close()
|
||||
|
||||
for {
|
||||
@@ -124,41 +122,29 @@ func (s *Service) listenForNewNodes() {
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
wantedCount := s.wantedPeerDials()
|
||||
if wantedCount == 0 {
|
||||
log.Trace("Not looking for peers, at peer limit")
|
||||
time.Sleep(pollingPeriod)
|
||||
|
||||
if exists := iterator.Next(); !exists {
|
||||
break
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
peerInfo, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to peer info")
|
||||
continue
|
||||
}
|
||||
// Restrict dials if limit is applied.
|
||||
if flags.MaxDialIsActive() {
|
||||
wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
|
||||
}
|
||||
wantedNodes := enode.ReadNodes(iterator, wantedCount)
|
||||
wg := new(sync.WaitGroup)
|
||||
for i := 0; i < len(wantedNodes); i++ {
|
||||
node := wantedNodes[i]
|
||||
peerInfo, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to peer info")
|
||||
continue
|
||||
}
|
||||
|
||||
if peerInfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
|
||||
s.Peers().RandomizeBackOff(peerInfo.ID)
|
||||
wg.Add(1)
|
||||
go func(info *peer.AddrInfo) {
|
||||
if err := s.connectWithPeer(s.ctx, *info); err != nil {
|
||||
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
|
||||
}
|
||||
wg.Done()
|
||||
}(peerInfo)
|
||||
if peerInfo == nil {
|
||||
continue
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
|
||||
s.Peers().RandomizeBackOff(peerInfo.ID)
|
||||
go func(info *peer.AddrInfo) {
|
||||
if err := s.connectWithPeer(s.ctx, *info); err != nil {
|
||||
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
|
||||
}
|
||||
}(peerInfo)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -398,17 +384,6 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
|
||||
return activePeers >= maxPeers || numOfConns >= maxPeers
|
||||
}
|
||||
|
||||
func (s *Service) wantedPeerDials() int {
|
||||
maxPeers := int(s.cfg.MaxPeers)
|
||||
|
||||
activePeers := len(s.Peers().Active())
|
||||
wantedCount := 0
|
||||
if maxPeers > activePeers {
|
||||
wantedCount = maxPeers - activePeers
|
||||
}
|
||||
return wantedCount
|
||||
}
|
||||
|
||||
// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
|
||||
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
|
||||
var allAddrs []ma.Multiaddr
|
||||
|
||||
@@ -27,7 +27,8 @@ var gossipTopicMappings = map[string]proto.Message{
 // GossipTopicMappings is a function to return the assigned data type
 // versioned by epoch.
 func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
-	if topic == BlockSubnetTopicFormat {
+	switch topic {
+	case BlockSubnetTopicFormat:
 		if epoch >= params.BeaconConfig().ElectraForkEpoch {
 			return &ethpb.SignedBeaconBlockElectra{}
 		}
@@ -43,8 +44,25 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
 		if epoch >= params.BeaconConfig().AltairForkEpoch {
 			return &ethpb.SignedBeaconBlockAltair{}
 		}
 		return gossipTopicMappings[topic]
+	case AttestationSubnetTopicFormat:
+		if epoch >= params.BeaconConfig().ElectraForkEpoch {
+			return &ethpb.AttestationElectra{}
+		}
+		return gossipTopicMappings[topic]
+	case AttesterSlashingSubnetTopicFormat:
+		if epoch >= params.BeaconConfig().ElectraForkEpoch {
+			return &ethpb.AttesterSlashingElectra{}
+		}
+		return gossipTopicMappings[topic]
+	case AggregateAndProofSubnetTopicFormat:
+		if epoch >= params.BeaconConfig().ElectraForkEpoch {
+			return &ethpb.SignedAggregateAttestationAndProofElectra{}
+		}
+		return gossipTopicMappings[topic]
+	default:
+		return gossipTopicMappings[topic]
 	}
-	return gossipTopicMappings[topic]
 }

 // AllTopics returns all topics stored in our
@@ -75,4 +93,7 @@ func init() {
 	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
 	// Specially handle Electra objects.
 	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.AttestationElectra{})] = AttestationSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
 }
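A short sketch (not in the diff) of what the new switch buys callers: topic lookups become fork-aware for attestation-related topics as well as blocks. It assumes the helper sits in the same `p2p` package as `GossipTopicMappings`; the type assertion mirrors the updated test below.

```go
// Sketch only; assumed to live in the same p2p package as GossipTopicMappings.
package p2p

import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// resolvesToElectraAttestation reports whether the attestation subnet topic
// maps to the Electra attestation type at the given epoch, mirroring the
// assertions in the updated test below.
func resolvesToElectraAttestation(epoch primitives.Epoch) bool {
	msg := GossipTopicMappings(AttestationSubnetTopicFormat, epoch)
	_, ok := msg.(*ethpb.AttestationElectra)
	return ok
}
```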
@@ -22,20 +22,20 @@ func TestMappingHasNoDuplicates(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
altairForkEpoch := primitives.Epoch(100)
|
||||
BellatrixForkEpoch := primitives.Epoch(200)
|
||||
CapellaForkEpoch := primitives.Epoch(300)
|
||||
DenebForkEpoch := primitives.Epoch(400)
|
||||
ElectraForkEpoch := primitives.Epoch(500)
|
||||
bellatrixForkEpoch := primitives.Epoch(200)
|
||||
capellaForkEpoch := primitives.Epoch(300)
|
||||
denebForkEpoch := primitives.Epoch(400)
|
||||
electraForkEpoch := primitives.Epoch(500)
|
||||
|
||||
bCfg.AltairForkEpoch = altairForkEpoch
|
||||
bCfg.BellatrixForkEpoch = BellatrixForkEpoch
|
||||
bCfg.CapellaForkEpoch = CapellaForkEpoch
|
||||
bCfg.DenebForkEpoch = DenebForkEpoch
|
||||
bCfg.ElectraForkEpoch = ElectraForkEpoch
|
||||
bCfg.BellatrixForkEpoch = bellatrixForkEpoch
|
||||
bCfg.CapellaForkEpoch = capellaForkEpoch
|
||||
bCfg.DenebForkEpoch = denebForkEpoch
|
||||
bCfg.ElectraForkEpoch = electraForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = primitives.Epoch(100)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = primitives.Epoch(200)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = primitives.Epoch(300)
|
||||
@@ -47,29 +47,83 @@ func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
|
||||
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Altair Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Bellatrix Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, BellatrixForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockBellatrix)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Capella Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, CapellaForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockCapella)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Deneb Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, DenebForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockDeneb)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.Attestation)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Electra Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, ElectraForkEpoch)
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttestationElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
@@ -2,14 +2,10 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
const backOffCounter = 50
|
||||
|
||||
// filterNodes wraps an iterator such that Next only returns nodes for which
|
||||
// the 'check' function returns true. This custom implementation also
|
||||
// checks for context deadlines so that in the event the parent context has
|
||||
@@ -28,21 +24,13 @@ type filterIter struct {
|
||||
// Next looks up for the next valid node according to our
|
||||
// filter criteria.
|
||||
func (f *filterIter) Next() bool {
|
||||
lookupCounter := 0
|
||||
for f.Iterator.Next() {
|
||||
// Do not excessively perform lookups if we constantly receive non-viable peers.
|
||||
if lookupCounter > backOffCounter {
|
||||
lookupCounter = 0
|
||||
runtime.Gosched()
|
||||
time.Sleep(pollingPeriod)
|
||||
}
|
||||
if f.Context.Err() != nil {
|
||||
return false
|
||||
}
|
||||
if f.check(f.Node()) {
|
||||
return true
|
||||
}
|
||||
lookupCounter++
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -87,12 +87,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
|
||||
"only %d out of %d peers were able to be found", topic, currNum, threshold)
|
||||
}
|
||||
nodeCount := int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch)
|
||||
// Restrict dials if limit is applied.
|
||||
if flags.MaxDialIsActive() {
|
||||
nodeCount = min(nodeCount, flags.Get().MaxConcurrentDials)
|
||||
}
|
||||
nodes := enode.ReadNodes(iterator, nodeCount)
|
||||
nodes := enode.ReadNodes(iterator, int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch))
|
||||
for _, node := range nodes {
|
||||
info, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
|
||||
@@ -81,7 +81,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
bootNodeENR := bootListener.Self().String()
|
||||
|
||||
// Create 3 nodes, each subscribed to a different subnet.
|
||||
// Each node is connected to the boostrap node.
|
||||
// Each node is connected to the bootstrap node.
|
||||
services := make([]*Service, 0, 3)
|
||||
|
||||
for i := 1; i <= 3; i++ {
|
||||
|
||||
@@ -43,6 +43,7 @@ go_test(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -26,7 +26,13 @@ var (
|
||||
BlockMap map[[4]byte]func() (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
// MetaDataMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
MetaDataMap map[[4]byte]func() metadata.Metadata
|
||||
MetaDataMap map[[4]byte]func() (metadata.Metadata, error)
|
||||
// AttestationMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
AttestationMap map[[4]byte]func() (ethpb.Att, error)
|
||||
// AggregateAttestationMap maps the fork-version to the underlying data type for that
|
||||
// particular fork period.
|
||||
AggregateAttestationMap map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error)
|
||||
)
|
||||
|
||||
// InitializeDataMaps initializes all the relevant object maps. This function is called to
|
||||
@@ -67,24 +73,68 @@ func InitializeDataMaps() {
|
||||
}
|
||||
|
||||
// Reset our metadata map.
|
||||
MetaDataMap = map[[4]byte]func() metadata.Metadata{
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV0(ðpb.MetaDataV0{})
|
||||
MetaDataMap = map[[4]byte]func() (metadata.Metadata, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (metadata.Metadata, error) {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our attestation map.
|
||||
AttestationMap = map[[4]byte]func() (ethpb.Att, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.Attestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.AttestationElectra{}, nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our aggregate attestation map.
|
||||
AggregateAttestationMap = map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error){
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProof{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
|
||||
return ðpb.SignedAggregateAttestationAndProofElectra{}, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,9 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestInitializeDataMaps(t *testing.T) {
|
||||
@@ -44,8 +46,36 @@ func TestInitializeDataMaps(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.action()
|
||||
_, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
bFunc, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
if tt.exists {
|
||||
b, err := bFunc()
|
||||
require.NoError(t, err)
|
||||
generic, err := b.PbGenericBlock()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, generic.GetPhase0())
|
||||
}
|
||||
mdFunc, ok := MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if tt.exists {
|
||||
md, err := mdFunc()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, md.MetadataObjV0())
|
||||
}
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
attFunc, ok := AttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if tt.exists {
|
||||
att, err := attFunc()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, att.Version())
|
||||
}
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
aggFunc, ok := AggregateAttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
assert.Equal(t, tt.exists, ok)
|
||||
if tt.exists {
|
||||
agg, err := aggFunc()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, version.Phase0, agg.Version())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@ func (s *Service) endpoints(
|
||||
endpoints = append(endpoints, s.eventsEndpoints()...)
|
||||
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater)...)
|
||||
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
|
||||
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService, stater)...)
|
||||
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService)...)
|
||||
if enableDebug {
|
||||
endpoints = append(endpoints, s.debugEndpoints(stater)...)
|
||||
}
|
||||
@@ -143,7 +143,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
func (*Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
server := &blob.Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
@@ -777,7 +777,7 @@ func (s *Service) beaconEndpoints(
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) configEndpoints() []endpoint {
|
||||
func (*Service) configEndpoints() []endpoint {
|
||||
const namespace = "config"
|
||||
return []endpoint{
|
||||
{
|
||||
@@ -1045,7 +1045,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) prysmValidatorEndpoints(coreService *core.Service, stater lookup.Stater) []endpoint {
|
||||
func (*Service) prysmValidatorEndpoints(coreService *core.Service) []endpoint {
|
||||
server := &validatorprysm.Server{
|
||||
CoreService: coreService,
|
||||
}
|
||||
|
||||
@@ -149,7 +149,8 @@ func TestGetSpec(t *testing.T) {
|
||||
config.MaxAttestationsElectra = 89
|
||||
config.MaxWithdrawalRequestsPerPayload = 90
|
||||
config.MaxCellsInExtendedMatrix = 91
|
||||
config.UnsetDepositReceiptsStartIndex = 92
|
||||
config.UnsetDepositRequestsStartIndex = 92
|
||||
config.MaxDepositRequestsPerPayload = 93
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -192,7 +193,7 @@ func TestGetSpec(t *testing.T) {
|
||||
data, ok := resp.Data.(map[string]interface{})
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, 154, len(data))
|
||||
assert.Equal(t, 155, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -525,8 +526,10 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "90", v)
|
||||
case "MAX_CELLS_IN_EXTENDED_MATRIX":
|
||||
assert.Equal(t, "91", v)
|
||||
case "UNSET_DEPOSIT_RECEIPTS_START_INDEX":
|
||||
case "UNSET_DEPOSIT_REQUESTS_START_INDEX":
|
||||
assert.Equal(t, "92", v)
|
||||
case "MAX_DEPOSIT_REQUESTS_PER_PAYLOAD":
|
||||
assert.Equal(t, "93", v)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"handlers_block.go",
|
||||
"log.go",
|
||||
"server.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator",
|
||||
|
||||
@@ -31,7 +31,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/network/httputil"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -592,7 +592,7 @@ func (s *Server) PrepareBeaconProposer(w http.ResponseWriter, r *http.Request) {
|
||||
if len(validatorIndices) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(log.Fields{
|
||||
log.WithFields(logrus.Fields{
|
||||
"validatorIndices": validatorIndices,
|
||||
}).Info("Updated fee recipient addresses")
|
||||
}
|
||||
|
||||
@@ -220,8 +220,9 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
|
||||
|
||||
consensusBlockValue, httpError := getConsensusBlockValue(ctx, s.BlockRewardFetcher, v1alpha1resp.Block)
|
||||
if httpError != nil {
|
||||
httputil.WriteError(w, httpError)
|
||||
return
|
||||
log.WithError(httpError).Debug("Failed to get consensus block value")
|
||||
// Having the consensus block value is not critical to block production
|
||||
consensusBlockValue = ""
|
||||
}
|
||||
|
||||
w.Header().Set(api.ExecutionPayloadBlindedHeader, fmt.Sprintf("%v", v1alpha1resp.IsBlinded))
|
||||
@@ -297,7 +298,7 @@ func getConsensusBlockValue(ctx context.Context, blockRewardsFetcher rewards.Blo
|
||||
}
|
||||
}
|
||||
if bb.Version() == version.Phase0 {
|
||||
// ignore for phase 0
|
||||
// Getting the block value for Phase 0 is very hard, so we ignore it
|
||||
return "", nil
|
||||
}
|
||||
// Get consensus payload value which is the same as the total from the block rewards api.
|
||||
|
||||
beacon-chain/rpc/eth/validator/log.go (new file, 5 lines)
@@ -0,0 +1,5 @@
+package validator
+
+import "github.com/sirupsen/logrus"
+
+var log = logrus.WithField("prefix", "beacon-api")
@@ -630,14 +630,14 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
timeStamp, err := slots.ToTime(beaconState.GenesisTime(), electraSlot+1)
|
||||
require.NoError(t, err)
|
||||
dr := []*enginev1.DepositReceipt{{
|
||||
dr := []*enginev1.DepositRequest{{
|
||||
Pubkey: bytesutil.PadTo(privKeys[0].PublicKey().Marshal(), 48),
|
||||
WithdrawalCredentials: bytesutil.PadTo([]byte("wc"), 32),
|
||||
Amount: 123,
|
||||
Signature: bytesutil.PadTo([]byte("sig"), 96),
|
||||
Index: 456,
|
||||
}}
|
||||
wr := []*enginev1.ExecutionLayerWithdrawalRequest{
|
||||
wr := []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: bytesutil.PadTo([]byte("sa"), 20),
|
||||
ValidatorPubkey: bytesutil.PadTo(privKeys[1].PublicKey().Marshal(), 48),
|
||||
@@ -654,7 +654,7 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
DepositReceipts: dr,
|
||||
DepositRequests: dr,
|
||||
WithdrawalRequests: wr,
|
||||
}
|
||||
|
||||
@@ -680,7 +680,7 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
|
||||
got, err := proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
p := got.GetElectra().Block.Body.ExecutionPayload
|
||||
require.DeepEqual(t, dr, p.DepositReceipts)
|
||||
require.DeepEqual(t, dr, p.DepositRequests)
|
||||
require.DeepEqual(t, wr, p.WithdrawalRequests)
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@ package rpc

 import (
 	"context"
-	"fmt"
 	"net"
 	"sync"

@@ -154,7 +153,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
 		connectedRPCClients: make(map[net.Addr]bool),
 	}

-	address := fmt.Sprintf("%s:%s", s.cfg.Host, s.cfg.Port)
+	address := net.JoinHostPort(s.cfg.Host, s.cfg.Port)
 	lis, err := net.Listen("tcp", address)
 	if err != nil {
 		log.WithError(err).Errorf("Could not listen to port in Start() %s", address)
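A note on the `net.JoinHostPort` change above, with a minimal standard-library-only sketch: unlike `fmt.Sprintf("%s:%s", ...)`, it brackets IPv6 literals so the resulting address is unambiguous for `net.Listen`.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	host, port := "::1", "4000"
	legacy := fmt.Sprintf("%s:%s", host, port) // "::1:4000" — ambiguous for IPv6 literals
	joined := net.JoinHostPort(host, port)     // "[::1]:4000" — what net.Listen expects
	fmt.Println(legacy, joined)
}
```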
@@ -141,7 +141,7 @@ func (s *Service) processAttestations(

 	start := time.Now()

-	// Check for attestatinos slashings (double, sourrounding, surrounded votes).
+	// Check for attestations slashings (double, surrounding, surrounded votes).
 	slashings, err := s.checkSlashableAttestations(ctx, currentEpoch, validAttestations)
 	if err != nil {
 		log.WithError(err).Error(couldNotCheckSlashableAtt)
@@ -219,7 +219,7 @@ type ReadOnlySyncCommittee interface {
|
||||
|
||||
type ReadOnlyDeposits interface {
|
||||
DepositBalanceToConsume() (primitives.Gwei, error)
|
||||
DepositReceiptsStartIndex() (uint64, error)
|
||||
DepositRequestsStartIndex() (uint64, error)
|
||||
PendingBalanceDeposits() ([]*ethpb.PendingBalanceDeposit, error)
|
||||
}
|
||||
|
||||
@@ -327,7 +327,7 @@ type WriteOnlyConsolidations interface {
|
||||
|
||||
type WriteOnlyDeposits interface {
|
||||
AppendPendingBalanceDeposit(index primitives.ValidatorIndex, amount uint64) error
|
||||
SetDepositReceiptsStartIndex(index uint64) error
|
||||
SetDepositRequestsStartIndex(index uint64) error
|
||||
SetPendingBalanceDeposits(val []*ethpb.PendingBalanceDeposit) error
|
||||
SetDepositBalanceToConsume(primitives.Gwei) error
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ go_library(
|
||||
"getters_block.go",
|
||||
"getters_checkpoint.go",
|
||||
"getters_consolidation.go",
|
||||
"getters_deposit_receipts.go",
|
||||
"getters_deposit_requests.go",
|
||||
"getters_eth1.go",
|
||||
"getters_exit.go",
|
||||
"getters_misc.go",
|
||||
@@ -33,7 +33,7 @@ go_library(
|
||||
"setters_checkpoint.go",
|
||||
"setters_churn.go",
|
||||
"setters_consolidation.go",
|
||||
"setters_deposit_receipts.go",
|
||||
"setters_deposit_requests.go",
|
||||
"setters_eth1.go",
|
||||
"setters_misc.go",
|
||||
"setters_participation.go",
|
||||
@@ -94,7 +94,7 @@ go_test(
|
||||
"getters_block_test.go",
|
||||
"getters_checkpoint_test.go",
|
||||
"getters_consolidation_test.go",
|
||||
"getters_deposit_receipts_test.go",
|
||||
"getters_deposit_requests_test.go",
|
||||
"getters_exit_test.go",
|
||||
"getters_participation_test.go",
|
||||
"getters_test.go",
|
||||
@@ -109,7 +109,7 @@ go_test(
|
||||
"setters_balance_deposits_test.go",
|
||||
"setters_churn_test.go",
|
||||
"setters_consolidation_test.go",
|
||||
"setters_deposit_receipts_test.go",
|
||||
"setters_deposit_requests_test.go",
|
||||
"setters_eth1_test.go",
|
||||
"setters_misc_test.go",
|
||||
"setters_participation_test.go",
|
||||
|
||||
@@ -62,7 +62,7 @@ type BeaconState struct {
|
||||
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
|
||||
// Electra fields
|
||||
depositReceiptsStartIndex uint64
|
||||
depositRequestsStartIndex uint64
|
||||
depositBalanceToConsume primitives.Gwei
|
||||
exitBalanceToConsume primitives.Gwei
|
||||
earliestExitEpoch primitives.Epoch
|
||||
@@ -119,7 +119,7 @@ type beaconStateMarshalable struct {
|
||||
LatestExecutionPayloadHeaderElectra *enginev1.ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header_electra" yaml:"latest_execution_payload_header_electra"`
|
||||
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
|
||||
DepositReceiptsStartIndex uint64 `json:"deposit_receipts_start_index" yaml:"deposit_receipts_start_index"`
|
||||
DepositRequestsStartIndex uint64 `json:"deposit_requests_start_index" yaml:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume primitives.Gwei `json:"deposit_balance_to_consume" yaml:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume primitives.Gwei `json:"exit_balance_to_consume" yaml:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch primitives.Epoch `json:"earliest_exit_epoch" yaml:"earliest_exit_epoch"`
|
||||
@@ -189,7 +189,7 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
LatestExecutionPayloadHeaderElectra: b.latestExecutionPayloadHeaderElectra,
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
|
||||
@@ -62,7 +62,7 @@ type BeaconState struct {
|
||||
nextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
|
||||
// Electra fields
|
||||
depositReceiptsStartIndex uint64
|
||||
depositRequestsStartIndex uint64
|
||||
depositBalanceToConsume primitives.Gwei
|
||||
exitBalanceToConsume primitives.Gwei
|
||||
earliestExitEpoch primitives.Epoch
|
||||
@@ -119,7 +119,7 @@ type beaconStateMarshalable struct {
|
||||
LatestExecutionPayloadHeaderElectra *enginev1.ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header_electra" yaml:"latest_execution_payload_header_electra"`
|
||||
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
|
||||
DepositReceiptsStartIndex uint64 `json:"deposit_receipts_start_index" yaml:"deposit_receipts_start_index"`
|
||||
DepositRequestsStartIndex uint64 `json:"deposit_requests_start_index" yaml:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume primitives.Gwei `json:"deposit_balance_to_consume" yaml:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume primitives.Gwei `json:"exit_balance_to_consume" yaml:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch primitives.Epoch `json:"earliest_exit_epoch" yaml:"earliest_exit_epoch"`
|
||||
@@ -189,7 +189,7 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
LatestExecutionPayloadHeaderElectra: b.latestExecutionPayloadHeaderElectra,
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
|
||||
@@ -1,16 +0,0 @@
package state_native

import (
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// DepositReceiptsStartIndex is used for returning the deposit receipts start index which is used for eip6110
func (b *BeaconState) DepositReceiptsStartIndex() (uint64, error) {
if b.version < version.Electra {
return 0, errNotSupported("DepositReceiptsStartIndex", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()

return b.depositReceiptsStartIndex, nil
}

16
beacon-chain/state/state-native/getters_deposit_requests.go
Normal file
@@ -0,0 +1,16 @@
package state_native

import (
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// DepositRequestsStartIndex is used for returning the deposit receipts start index which is used for eip6110
func (b *BeaconState) DepositRequestsStartIndex() (uint64, error) {
if b.version < version.Electra {
return 0, errNotSupported("DepositRequestsStartIndex", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()

return b.depositRequestsStartIndex, nil
}
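
The new getter follows the package's usual shape: a fork-version guard, then a read lock around the field access. A self-contained sketch of that pattern with toy types (the constant and error text are illustrative, not Prysm's):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// electra is an assumed numeric fork version, for illustration only.
const electra = 6

type beaconState struct {
	lock                      sync.RWMutex
	version                   int
	depositRequestsStartIndex uint64
}

// DepositRequestsStartIndex mirrors the guard-then-read-lock shape of the getter above.
func (b *beaconState) DepositRequestsStartIndex() (uint64, error) {
	if b.version < electra {
		return 0, errors.New("DepositRequestsStartIndex is not supported before Electra")
	}
	b.lock.RLock()
	defer b.lock.RUnlock()
	return b.depositRequestsStartIndex, nil
}

func main() {
	st := &beaconState{version: electra, depositRequestsStartIndex: 7}
	idx, err := st.DepositRequestsStartIndex()
	fmt.Println(idx, err) // 7 <nil>
}
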
@@ -9,17 +9,17 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestDepositReceiptsStartIndex(t *testing.T) {
|
||||
func TestDepositRequestsStartIndex(t *testing.T) {
|
||||
t.Run("previous fork returns expected error", func(t *testing.T) {
|
||||
dState, _ := util.DeterministicGenesisState(t, 1)
|
||||
_, err := dState.DepositReceiptsStartIndex()
|
||||
_, err := dState.DepositRequestsStartIndex()
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
t.Run("electra returns expected value", func(t *testing.T) {
|
||||
want := uint64(2)
|
||||
dState, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{DepositReceiptsStartIndex: want})
|
||||
dState, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{DepositRequestsStartIndex: want})
|
||||
require.NoError(t, err)
|
||||
got, err := dState.DepositReceiptsStartIndex()
|
||||
got, err := dState.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want, got)
|
||||
})
|
||||
@@ -202,7 +202,7 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
@@ -408,7 +408,7 @@ func (b *BeaconState) ToProto() interface{} {
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummariesVal(),
|
||||
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
|
||||
@@ -281,9 +281,9 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
}

if state.version >= version.Electra {
// DepositReceiptsStartIndex root.
drsiRoot := ssz.Uint64Root(state.depositReceiptsStartIndex)
fieldRoots[types.DepositReceiptsStartIndex.RealPosition()] = drsiRoot[:]
// DepositRequestsStartIndex root.
drsiRoot := ssz.Uint64Root(state.depositRequestsStartIndex)
fieldRoots[types.DepositRequestsStartIndex.RealPosition()] = drsiRoot[:]

// DepositBalanceToConsume root.
dbtcRoot := ssz.Uint64Root(uint64(state.depositBalanceToConsume))
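
For intuition on the root being stored here: an SSZ root of a lone uint64 is just the value serialized little-endian and right-padded to a 32-byte chunk. The sketch below re-implements that idea independently; it is not the ssz package's code.

package main

import (
	"encoding/binary"
	"fmt"
)

// uint64Root pads a little-endian uint64 to a 32-byte chunk, which is its SSZ root
// when it is the only chunk. Illustrative re-implementation, not ssz.Uint64Root itself.
func uint64Root(v uint64) [32]byte {
	var root [32]byte
	binary.LittleEndian.PutUint64(root[:8], v)
	return root
}

func main() {
	r := uint64Root(33)
	fmt.Printf("%x\n", r) // 21 followed by 31 zero bytes
}
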
@@ -1,21 +0,0 @@
package state_native

import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// SetDepositReceiptsStartIndex for the beacon state. Updates the DepositReceiptsStartIndex
func (b *BeaconState) SetDepositReceiptsStartIndex(index uint64) error {
if b.version < version.Electra {
return errNotSupported("SetDepositReceiptsStartIndex", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

b.depositReceiptsStartIndex = index
b.markFieldAsDirty(types.DepositReceiptsStartIndex)
b.rebuildTrie[types.DepositReceiptsStartIndex] = true
return nil
}

21
beacon-chain/state/state-native/setters_deposit_requests.go
Normal file
@@ -0,0 +1,21 @@
package state_native

import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// SetDepositRequestsStartIndex for the beacon state. Updates the DepositRequestsStartIndex
func (b *BeaconState) SetDepositRequestsStartIndex(index uint64) error {
if b.version < version.Electra {
return errNotSupported("SetDepositRequestsStartIndex", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

b.depositRequestsStartIndex = index
b.markFieldAsDirty(types.DepositRequestsStartIndex)
b.rebuildTrie[types.DepositRequestsStartIndex] = true
return nil
}
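
The setter pairs the write lock with dirty-field tracking so the state's Merkle trie can be rebuilt lazily for just the changed field. A toy sketch of that bookkeeping (the maps and field index are stand-ins, not the real state internals):

package main

import (
	"fmt"
	"sync"
)

type fieldIndex int

const depositRequestsStartIndexField fieldIndex = iota

// beaconState is a toy stand-in that only models the dirty-field bookkeeping.
type beaconState struct {
	lock                      sync.Mutex
	depositRequestsStartIndex uint64
	dirtyFields               map[fieldIndex]bool
	rebuildTrie               map[fieldIndex]bool
}

// SetDepositRequestsStartIndex mirrors the setter's shape: take the write lock,
// update the value, then mark the field dirty and schedule a trie rebuild.
func (b *beaconState) SetDepositRequestsStartIndex(index uint64) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	b.depositRequestsStartIndex = index
	b.dirtyFields[depositRequestsStartIndexField] = true
	b.rebuildTrie[depositRequestsStartIndexField] = true
	return nil
}

func main() {
	st := &beaconState{
		dirtyFields: make(map[fieldIndex]bool),
		rebuildTrie: make(map[fieldIndex]bool),
	}
	_ = st.SetDepositRequestsStartIndex(3)
	fmt.Println(st.depositRequestsStartIndex, st.dirtyFields[depositRequestsStartIndexField]) // 3 true
}
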
@@ -9,18 +9,18 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestSetDepositReceiptsStartIndex(t *testing.T) {
|
||||
func TestSetDepositRequestsStartIndex(t *testing.T) {
|
||||
t.Run("previous fork returns expected error", func(t *testing.T) {
|
||||
dState, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.ErrorContains(t, "is not supported", dState.SetDepositReceiptsStartIndex(1))
|
||||
require.ErrorContains(t, "is not supported", dState.SetDepositRequestsStartIndex(1))
|
||||
})
|
||||
t.Run("electra sets expected value", func(t *testing.T) {
|
||||
old := uint64(2)
|
||||
dState, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{DepositReceiptsStartIndex: old})
|
||||
dState, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{DepositRequestsStartIndex: old})
|
||||
require.NoError(t, err)
|
||||
want := uint64(3)
|
||||
require.NoError(t, dState.SetDepositReceiptsStartIndex(want))
|
||||
got, err := dState.DepositReceiptsStartIndex()
|
||||
require.NoError(t, dState.SetDepositRequestsStartIndex(want))
|
||||
got, err := dState.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want, got)
|
||||
})
|
||||
@@ -100,7 +100,7 @@ var electraFields = append(
|
||||
types.NextWithdrawalValidatorIndex,
|
||||
types.HistoricalSummaries,
|
||||
types.LatestExecutionPayloadHeaderElectra,
|
||||
types.DepositReceiptsStartIndex,
|
||||
types.DepositRequestsStartIndex,
|
||||
types.DepositBalanceToConsume,
|
||||
types.ExitBalanceToConsume,
|
||||
types.EarliestExitEpoch,
|
||||
@@ -744,7 +744,7 @@ func InitializeFromProtoUnsafeElectra(st *ethpb.BeaconStateElectra) (state.Beaco
|
||||
nextWithdrawalIndex: st.NextWithdrawalIndex,
|
||||
nextWithdrawalValidatorIndex: st.NextWithdrawalValidatorIndex,
|
||||
historicalSummaries: st.HistoricalSummaries,
|
||||
depositReceiptsStartIndex: st.DepositReceiptsStartIndex,
|
||||
depositRequestsStartIndex: st.DepositRequestsStartIndex,
|
||||
depositBalanceToConsume: st.DepositBalanceToConsume,
|
||||
exitBalanceToConsume: st.ExitBalanceToConsume,
|
||||
earliestExitEpoch: st.EarliestExitEpoch,
|
||||
@@ -862,7 +862,7 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
eth1DepositIndex: b.eth1DepositIndex,
|
||||
nextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
nextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
depositReceiptsStartIndex: b.depositReceiptsStartIndex,
|
||||
depositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
depositBalanceToConsume: b.depositBalanceToConsume,
|
||||
exitBalanceToConsume: b.exitBalanceToConsume,
|
||||
earliestExitEpoch: b.earliestExitEpoch,
|
||||
@@ -1286,8 +1286,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
return ssz.Uint64Root(uint64(b.nextWithdrawalValidatorIndex)), nil
|
||||
case types.HistoricalSummaries:
|
||||
return stateutil.HistoricalSummariesRoot(b.historicalSummaries)
|
||||
case types.DepositReceiptsStartIndex:
|
||||
return ssz.Uint64Root(b.depositReceiptsStartIndex), nil
|
||||
case types.DepositRequestsStartIndex:
|
||||
return ssz.Uint64Root(b.depositRequestsStartIndex), nil
|
||||
case types.DepositBalanceToConsume:
|
||||
return ssz.Uint64Root(uint64(b.depositBalanceToConsume)), nil
|
||||
case types.ExitBalanceToConsume:
|
||||
|
||||
@@ -96,8 +96,8 @@ func (f FieldIndex) String() string {
|
||||
return "nextWithdrawalValidatorIndex"
|
||||
case HistoricalSummaries:
|
||||
return "historicalSummaries"
|
||||
case DepositReceiptsStartIndex:
|
||||
return "depositReceiptsStartIndex"
|
||||
case DepositRequestsStartIndex:
|
||||
return "depositRequestsStartIndex"
|
||||
case DepositBalanceToConsume:
|
||||
return "depositBalanceToConsume"
|
||||
case ExitBalanceToConsume:
|
||||
@@ -179,7 +179,7 @@ func (f FieldIndex) RealPosition() int {
return 26
case HistoricalSummaries:
return 27
case DepositReceiptsStartIndex:
case DepositRequestsStartIndex:
return 28
case DepositBalanceToConsume:
return 29

@@ -253,7 +253,7 @@ const (
NextWithdrawalIndex
NextWithdrawalValidatorIndex
HistoricalSummaries
DepositReceiptsStartIndex // Electra: EIP-6110
DepositRequestsStartIndex // Electra: EIP-6110
DepositBalanceToConsume // Electra: EIP-7251
ExitBalanceToConsume // Electra: EIP-7251
EarliestExitEpoch // Electra: EIP-7251
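
The FieldIndex constants are an iota enum, and RealPosition maps each logical field to its slot in the serialized state. A toy version of the same idea (positions here are made up for illustration):

package main

import "fmt"

type fieldIndex int

// A trimmed-down enum in the same style as the state-native FieldIndex constants.
const (
	historicalSummaries fieldIndex = iota
	depositRequestsStartIndex
	depositBalanceToConsume
)

// realPosition mirrors how RealPosition above maps the enum to an SSZ field slot.
func (f fieldIndex) realPosition() int {
	switch f {
	case historicalSummaries:
		return 27
	case depositRequestsStartIndex:
		return 28
	case depositBalanceToConsume:
		return 29
	default:
		return -1
	}
}

func main() {
	fmt.Println(depositRequestsStartIndex.realPosition()) // 28
}
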
@@ -16,7 +16,6 @@ go_library(
|
||||
"pending_consolidations_root.go",
|
||||
"pending_partial_withdrawals_root.go",
|
||||
"reference.go",
|
||||
"slice_root.go",
|
||||
"sync_committee.root.go",
|
||||
"trie_helpers.go",
|
||||
"unrealized_justification.go",
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
|
||||
return SliceRoot(summaries, fieldparams.HistoricalRootsLength)
|
||||
return ssz.SliceRoot(summaries, fieldparams.HistoricalRootsLength)
|
||||
}
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingBalanceDepositsRoot(slice []*ethpb.PendingBalanceDeposit) ([32]byte, error) {
|
||||
return SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
|
||||
}
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingConsolidationsRoot(slice []*ethpb.PendingConsolidation) ([32]byte, error) {
|
||||
return SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
|
||||
}
|
||||
|
||||
@@ -2,9 +2,10 @@ package stateutil
|
||||
|
||||
import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func PendingPartialWithdrawalsRoot(slice []*ethpb.PendingPartialWithdrawal) ([32]byte, error) {
|
||||
return SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
|
||||
return ssz.SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
|
||||
}
|
||||
|
||||
@@ -44,6 +44,7 @@ go_library(
|
||||
"validate_aggregate_proof.go",
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
"validate_beacon_attestation_electra.go",
|
||||
"validate_beacon_blocks.go",
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
@@ -160,7 +161,6 @@ go_test(
|
||||
"rpc_beacon_blocks_by_root_test.go",
|
||||
"rpc_blob_sidecars_by_range_test.go",
|
||||
"rpc_blob_sidecars_by_root_test.go",
|
||||
"rpc_chunked_response_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_handler_test.go",
|
||||
"rpc_metadata_test.go",
|
||||
@@ -177,6 +177,7 @@ go_test(
|
||||
"sync_test.go",
|
||||
"validate_aggregate_proof_test.go",
|
||||
"validate_attester_slashing_test.go",
|
||||
"validate_beacon_attestation_electra_test.go",
|
||||
"validate_beacon_attestation_test.go",
|
||||
"validate_beacon_blocks_test.go",
|
||||
"validate_blob_test.go",
|
||||
|
||||
@@ -1,13 +1,20 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
@@ -50,11 +57,12 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
return nil, errors.Errorf("message of %T does not support marshaller interface", base)
|
||||
}
|
||||
// Handle different message types across forks.
|
||||
if topic == p2p.BlockSubnetTopicFormat {
|
||||
m, err = extractBlockDataType(fDigest[:], s.cfg.clock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dt, err := extractValidDataTypeFromTopic(topic, fDigest[:], s.cfg.clock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dt != nil {
|
||||
m = dt
|
||||
}
|
||||
if err := s.cfg.p2p.Encoding().DecodeGossip(msg.Data, m); err != nil {
|
||||
return nil, err
|
||||
@@ -63,7 +71,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
}
|
||||
|
||||
// Replaces our fork digest with the formatter.
|
||||
func (_ *Service) replaceForkDigest(topic string) (string, error) {
|
||||
func (*Service) replaceForkDigest(topic string) (string, error) {
|
||||
subStrings := strings.Split(topic, "/")
|
||||
if len(subStrings) != 4 {
|
||||
return "", errInvalidTopic
|
||||
@@ -71,3 +79,48 @@ func (_ *Service) replaceForkDigest(topic string) (string, error) {
|
||||
subStrings[2] = "%x"
|
||||
return strings.Join(subStrings, "/"), nil
|
||||
}
|
||||
|
||||
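
replaceForkDigest, shown just above, turns a concrete gossip topic into a format string by swapping the digest segment for %x. A standalone sketch with a made-up topic (real topics also carry an encoding suffix):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// replaceForkDigest mirrors the helper above: split the topic on "/", require the
// expected shape, and replace the digest segment with a %x placeholder.
func replaceForkDigest(topic string) (string, error) {
	parts := strings.Split(topic, "/")
	if len(parts) != 4 {
		return "", errors.New("invalid topic")
	}
	parts[2] = "%x"
	return strings.Join(parts, "/"), nil
}

func main() {
	out, err := replaceForkDigest("/eth2/01020304/beacon_block")
	fmt.Println(out, err) // /eth2/%x/beacon_block <nil>
}
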
func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.Clock) (ssz.Unmarshaler, error) {
|
||||
switch topic {
|
||||
case p2p.BlockSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.BlockMap, digest, clock)
|
||||
case p2p.AttestationSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
|
||||
case p2p.AggregateAndProofSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), digest []byte, tor blockchain.TemporalOracle) (T, error) {
|
||||
var zero T
|
||||
|
||||
if len(digest) == 0 {
|
||||
f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if !ok {
|
||||
return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
|
||||
}
|
||||
return f()
|
||||
}
|
||||
if len(digest) != forkDigestLength {
|
||||
return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
|
||||
}
|
||||
vRoot := tor.GenesisValidatorsRoot()
|
||||
for k, f := range typeMap {
|
||||
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
|
||||
if err != nil {
|
||||
return zero, err
|
||||
}
|
||||
if rDigest == bytesutil.ToBytes4(digest) {
|
||||
return f()
|
||||
}
|
||||
}
|
||||
return zero, errors.Wrapf(
|
||||
ErrNoValidDigest,
|
||||
"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
|
||||
zero,
|
||||
digest,
|
||||
tor.GenesisTime(),
|
||||
tor.GenesisValidatorsRoot(),
|
||||
)
|
||||
}
|
||||
|
||||
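
extractDataTypeFromTypeMap collapses the old per-type helpers (blocks, attestations, aggregates, metadata) into one generic lookup keyed by fork digest. The sketch below shows the same dispatch idea in isolation, using a toy digest function in place of signing.ComputeForkDigest:

package main

import (
	"crypto/sha256"
	"fmt"
)

// toyDigest stands in for signing.ComputeForkDigest: it derives a 4-byte digest
// from a fork version and a genesis validators root. It is not the real function.
func toyDigest(forkVersion [4]byte, genesisRoot [32]byte) [4]byte {
	sum := sha256.Sum256(append(forkVersion[:], genesisRoot[:]...))
	var d [4]byte
	copy(d[:], sum[:4])
	return d
}

// extractFromTypeMap mirrors the generic lookup: walk the map keyed by fork version,
// recompute each digest, and construct the value whose digest matches.
func extractFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), digest [4]byte, genesisRoot [32]byte) (T, error) {
	var zero T
	for version, f := range typeMap {
		if toyDigest(version, genesisRoot) == digest {
			return f()
		}
	}
	return zero, fmt.Errorf("no type registered for digest %#x", digest)
}

func main() {
	var root [32]byte
	phase0 := [4]byte{0, 0, 0, 0}
	altair := [4]byte{1, 0, 0, 0}

	blockMap := map[[4]byte]func() (string, error){
		phase0: func() (string, error) { return "phase0 block", nil },
		altair: func() (string, error) { return "altair block", nil },
	}

	got, err := extractFromTypeMap(blockMap, toyDigest(altair, root), root)
	fmt.Println(got, err) // altair block <nil>
}
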
@@ -11,15 +11,20 @@ import (
|
||||
"github.com/d4l3k/messagediff"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
@@ -109,3 +114,197 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantBlock interfaces.ReadOnlySignedBeaconBlock
|
||||
wantMd metadata.Metadata
|
||||
wantAtt ethpb.Att
|
||||
wantAggregate ethpb.SignedAggregateAttAndProof
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{Body: ðpb.BeaconBlockBodyAltair{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork version",
|
||||
args: args{
|
||||
digest: bellatrixDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{Block: ðpb.BeaconBlockBellatrix{Body: ðpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "capella fork version",
|
||||
args: args{
|
||||
digest: capellaDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{ExecutionPayload: &enginev1.ExecutionPayloadCapella{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "deneb fork version",
|
||||
args: args{
|
||||
digest: denebDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "electra fork version",
|
||||
args: args{
|
||||
digest: electraDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockElectra{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadElectra{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV1(ðpb.MetaDataV1{}),
|
||||
wantAtt: ðpb.AttestationElectra{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProofElectra{},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
|
||||
t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
|
||||
}
|
||||
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
|
||||
t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
|
||||
}
|
||||
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
|
||||
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ func NewRegularSyncFuzz(opts ...Option) *Service {
|
||||
cancel: cancel,
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
r.rateLimiter = newRateLimiter(r.cfg.p2p)
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -87,12 +88,13 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
|
||||
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
|
||||
}
|
||||
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb.SignedAggregateAttestationAndProof) {
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.SignedAggregateAttAndProof) {
|
||||
for _, signedAtt := range attestations {
|
||||
att := signedAtt.Message
|
||||
aggregate := signedAtt.AggregateAttestationAndProof().AggregateVal()
|
||||
data := aggregate.GetData()
|
||||
// The pending attestations can arrive in both aggregated and unaggregated forms,
|
||||
// each from has distinct validation steps.
|
||||
if helpers.IsAggregated(att.Aggregate) {
|
||||
if helpers.IsAggregated(aggregate) {
|
||||
// Save the pending aggregated attestation to the pool if it passes the aggregated
|
||||
// validation steps.
|
||||
valRes, err := s.validateAggregatedAtt(ctx, signedAtt)
|
||||
@@ -101,11 +103,11 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
|
||||
}
|
||||
aggValid := pubsub.ValidationAccept == valRes
|
||||
if s.validateBlockInAttestation(ctx, signedAtt) && aggValid {
|
||||
if err := s.cfg.attPool.SaveAggregatedAttestation(att.Aggregate); err != nil {
|
||||
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not save aggregate attestation")
|
||||
continue
|
||||
}
|
||||
s.setAggregatorIndexEpochSeen(att.Aggregate.Data.Target.Epoch, att.AggregatorIndex)
|
||||
s.setAggregatorIndexEpochSeen(data.Target.Epoch, signedAtt.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.cfg.p2p.Broadcast(ctx, signedAtt); err != nil {
|
||||
@@ -116,39 +118,39 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
|
||||
// This is an important validation before retrieving attestation pre state to defend against
|
||||
// attestation's target intentionally reference checkpoint that's long ago.
|
||||
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Aggregate.Data.BeaconBlockRoot)) {
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
|
||||
continue
|
||||
}
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att.Aggregate); err != nil {
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not verify FFG consistency")
|
||||
continue
|
||||
}
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Aggregate.Data.Target)
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve attestation prestate")
|
||||
continue
|
||||
}
|
||||
|
||||
valid, err := s.validateUnaggregatedAttWithState(ctx, att.Aggregate, preState)
|
||||
valid, err := s.validateUnaggregatedAttWithState(ctx, aggregate, preState)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
|
||||
continue
|
||||
}
|
||||
if valid == pubsub.ValidationAccept {
|
||||
if err := s.cfg.attPool.SaveUnaggregatedAttestation(att.Aggregate); err != nil {
|
||||
if err := s.cfg.attPool.SaveUnaggregatedAttestation(aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not save unaggregated attestation")
|
||||
continue
|
||||
}
|
||||
s.setSeenCommitteeIndicesSlot(att.Aggregate.Data.Slot, att.Aggregate.Data.CommitteeIndex, att.Aggregate.AggregationBits)
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, aggregate.GetAggregationBits())
|
||||
|
||||
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(att.Aggregate.Data.Slot))
|
||||
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve active validator count")
|
||||
continue
|
||||
}
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate); err != nil {
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, aggregate), aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast")
|
||||
}
|
||||
}
|
||||
@@ -160,8 +162,8 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
|
||||
// root of the missing block. The value is the list of pending attestations
|
||||
// that voted for that block root. The caller of this function is responsible
|
||||
// for not sending repeated attestations to the pending queue.
|
||||
func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof) {
|
||||
root := bytesutil.ToBytes32(att.Message.Aggregate.Data.BeaconBlockRoot)
|
||||
func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
|
||||
root := bytesutil.ToBytes32(att.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
|
||||
|
||||
s.pendingAttsLock.Lock()
|
||||
defer s.pendingAttsLock.Unlock()
|
||||
@@ -178,7 +180,7 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
|
||||
_, ok := s.blkRootToPendingAtts[root]
|
||||
if !ok {
|
||||
pendingAttCount.Inc()
|
||||
s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att}
|
||||
s.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{att}
|
||||
return
|
||||
}
|
||||
// Skip if the attestation from the same aggregator already exists in
|
||||
@@ -192,20 +194,32 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
|
||||
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
|
||||
}
|
||||
|
||||
func attsAreEqual(a, b *ethpb.SignedAggregateAttestationAndProof) bool {
|
||||
if a.Signature != nil {
|
||||
return b.Signature != nil && a.Message.AggregatorIndex == b.Message.AggregatorIndex
|
||||
func attsAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
|
||||
if a.GetSignature() != nil {
|
||||
return b.GetSignature() != nil && a.AggregateAttestationAndProof().GetAggregatorIndex() == b.AggregateAttestationAndProof().GetAggregatorIndex()
|
||||
}
|
||||
if b.Signature != nil {
|
||||
if b.GetSignature() != nil {
|
||||
return false
|
||||
}
|
||||
if a.Message.Aggregate.Data.Slot != b.Message.Aggregate.Data.Slot {
|
||||
|
||||
aAggregate := a.AggregateAttestationAndProof().AggregateVal()
|
||||
bAggregate := b.AggregateAttestationAndProof().AggregateVal()
|
||||
aData := aAggregate.GetData()
|
||||
bData := bAggregate.GetData()
|
||||
|
||||
if aData.Slot != bData.Slot {
|
||||
return false
|
||||
}
|
||||
if a.Message.Aggregate.Data.CommitteeIndex != b.Message.Aggregate.Data.CommitteeIndex {
|
||||
|
||||
if a.Version() >= version.Electra {
|
||||
if !bytes.Equal(aAggregate.CommitteeBitsVal().Bytes(), bAggregate.CommitteeBitsVal().Bytes()) {
|
||||
return false
|
||||
}
|
||||
} else if aData.CommitteeIndex != bData.CommitteeIndex {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(a.Message.Aggregate.AggregationBits, b.Message.Aggregate.AggregationBits)
|
||||
|
||||
return bytes.Equal(aAggregate.GetAggregationBits(), bAggregate.GetAggregationBits())
|
||||
}
|
||||
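
The reworked attsAreEqual above dedupes pending aggregates by signature/aggregator index first, then by slot, committee identity (committee bits on Electra, committee index before), and finally aggregation bits. A self-contained sketch of that decision order, with plain byte slices standing in for the bitfield types:

package main

import (
	"bytes"
	"fmt"
)

// pendingAtt is a toy stand-in for a signed aggregate; only the fields the
// comparison touches are modeled.
type pendingAtt struct {
	signature       []byte
	aggregatorIndex uint64
	slot            uint64
	committeeBits   []byte // Electra-style committee bitvector
	aggregationBits []byte
}

// attsAreEqual sketches the dedup check: signed aggregates are keyed by
// aggregator index, unsigned ones by slot + committee bits + aggregation bits.
func attsAreEqual(a, b pendingAtt) bool {
	if a.signature != nil {
		return b.signature != nil && a.aggregatorIndex == b.aggregatorIndex
	}
	if b.signature != nil {
		return false
	}
	if a.slot != b.slot {
		return false
	}
	if !bytes.Equal(a.committeeBits, b.committeeBits) {
		return false
	}
	return bytes.Equal(a.aggregationBits, b.aggregationBits)
}

func main() {
	x := pendingAtt{slot: 5, committeeBits: []byte{0x01}, aggregationBits: []byte{0x0f}}
	y := pendingAtt{slot: 5, committeeBits: []byte{0x01}, aggregationBits: []byte{0x0f}}
	fmt.Println(attsAreEqual(x, y)) // true
}
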
|
||||
// This validates the pending attestations in the queue are still valid.
|
||||
@@ -221,7 +235,7 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)
|
||||
|
||||
for bRoot, atts := range s.blkRootToPendingAtts {
|
||||
for i := len(atts) - 1; i >= 0; i-- {
|
||||
if slot >= atts[i].Message.Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch {
|
||||
if slot >= atts[i].AggregateAttestationAndProof().AggregateVal().GetData().Slot+params.BeaconConfig().SlotsPerEpoch {
|
||||
// Remove the pending attestation from the list in place.
|
||||
atts = append(atts[:i], atts[i+1:]...)
|
||||
}
|
||||
|
||||
@@ -46,12 +46,12 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
r := &Service{
|
||||
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
chainStarted: abool.New(),
|
||||
}
|
||||
|
||||
a := ðpb.AggregateAttestationAndProof{Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: make([]byte, 32)}}}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: a}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
}
|
||||
@@ -124,7 +124,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -134,7 +134,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
atts, err := r.cfg.attPool.UnaggregatedAttestations()
|
||||
@@ -162,7 +162,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
@@ -182,7 +182,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, r32))
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
|
||||
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, false, p1.BroadcastCalled.Load(), "Broadcasted bad aggregate")
|
||||
@@ -245,13 +245,13 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
go r.verifierRoutine()
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, true, p1.BroadcastCalled.Load(), "Could not broadcast the good aggregate")
|
||||
@@ -330,7 +330,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -339,7 +339,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
|
||||
@@ -353,7 +353,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
|
||||
func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
// 100 Attestations per block root.
|
||||
@@ -401,7 +401,7 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
|
||||
func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
r1 := [32]byte{'A'}
|
||||
@@ -428,7 +428,7 @@ func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
|
||||
|
||||
func TestSavePendingAtts_BeyondLimit(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
|
||||
for i := 0; i < pendingAttsLimit; i++ {
|
||||
@@ -457,5 +457,71 @@ func TestSavePendingAtts_BeyondLimit(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Saved pending atts")
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Saved pending atts")
|
||||
|
||||
}
|
||||
|
||||
func Test_attsAreEqual_Committee(t *testing.T) {
|
||||
t.Run("Phase 0 equal", func(t *testing.T) {
|
||||
att1 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
att2 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
assert.Equal(t, true, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Phase 0 not equal", func(t *testing.T) {
|
||||
att1 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 123}}}}
|
||||
att2 := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 456}}}}
|
||||
assert.Equal(t, false, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Electra equal", func(t *testing.T) {
|
||||
cb1 := primitives.NewAttestationCommitteeBits()
|
||||
cb1.SetBitAt(0, true)
|
||||
att1 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb1,
|
||||
}}}
|
||||
cb2 := primitives.NewAttestationCommitteeBits()
|
||||
cb2.SetBitAt(0, true)
|
||||
att2 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb2,
|
||||
}}}
|
||||
assert.Equal(t, true, attsAreEqual(att1, att2))
|
||||
})
|
||||
t.Run("Electra not equal", func(t *testing.T) {
|
||||
cb1 := primitives.NewAttestationCommitteeBits()
|
||||
cb1.SetBitAt(0, true)
|
||||
att1 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb1,
|
||||
}}}
|
||||
cb2 := primitives.NewAttestationCommitteeBits()
|
||||
cb2.SetBitAt(1, true)
|
||||
att2 := ðpb.SignedAggregateAttestationAndProofElectra{
|
||||
Message: ðpb.AggregateAttestationAndProofElectra{
|
||||
Aggregate: ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{},
|
||||
CommitteeBits: cb2,
|
||||
}}}
|
||||
assert.Equal(t, false, attsAreEqual(att1, att2))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,14 +4,12 @@ import (
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -107,7 +105,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOrac
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blk, err := extractBlockDataType(rpcCtx, tor)
|
||||
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -131,7 +129,7 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blk, err := extractBlockDataType(rpcCtx, tor)
|
||||
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -139,30 +137,6 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
|
||||
return blk, err
|
||||
}
|
||||
|
||||
func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if len(digest) == 0 {
|
||||
bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if !ok {
|
||||
return nil, errors.New("no block type exists for the genesis fork version.")
|
||||
}
|
||||
return bFunc()
|
||||
}
|
||||
if len(digest) != forkDigestLength {
|
||||
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
|
||||
}
|
||||
vRoot := tor.GenesisValidatorsRoot()
|
||||
for k, blkFunc := range types.BlockMap {
|
||||
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rDigest == bytesutil.ToBytes4(digest) {
|
||||
return blkFunc()
|
||||
}
|
||||
}
|
||||
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract block data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// WriteBlobSidecarChunk writes blob chunk object to stream.
|
||||
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
|
||||
func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar blocks.VerifiedROBlob) error {
|
||||
|
||||
@@ -1,121 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestExtractBlockDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want interfaces.ReadOnlySignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{Body: ðpb.BeaconBlockBodyAltair{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork version",
|
||||
args: args{
|
||||
digest: bellatrixDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{Block: ðpb.BeaconBlockBellatrix{Body: ðpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := extractBlockDataType(tt.args.digest, tt.args.chain)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("extractBlockDataType() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("extractBlockDataType() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -7,13 +7,9 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
@@ -112,7 +108,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.clock)
|
||||
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -133,27 +129,3 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
|
||||
}
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func extractMetaDataType(digest []byte, tor blockchain.TemporalOracle) (metadata.Metadata, error) {
|
||||
if len(digest) == 0 {
|
||||
mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
|
||||
if !ok {
|
||||
return nil, errors.New("no metadata type exists for the genesis fork version.")
|
||||
}
|
||||
return mdFunc(), nil
|
||||
}
|
||||
if len(digest) != forkDigestLength {
|
||||
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
|
||||
}
|
||||
vRoot := tor.GenesisValidatorsRoot()
|
||||
for k, mdFunc := range types.MetaDataMap {
|
||||
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rDigest == bytesutil.ToBytes4(digest) {
|
||||
return mdFunc(), nil
|
||||
}
|
||||
}
|
||||
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract metadata type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
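The deleted extractMetaDataType above is superseded by a call to extractDataTypeFromTypeMap, whose implementation is not part of this compare view. As a rough, non-authoritative sketch of what a type-map driven extractor of this kind can look like, assuming the same sync-package imports and identifiers (forkDigestLength, ErrNoValidDigest) and a Go generic over the constructor's return type; the name and signature here are illustrative, not Prysm's actual API:

// Sketch only: resolve a constructor from a fork digest using a
// fork-version-keyed type map, mirroring the deleted extractMetaDataType.
func extractDataType[T any](typeMap map[[4]byte]func() T, digest []byte, tor blockchain.TemporalOracle) (T, error) {
	var zero T
	if len(digest) == 0 {
		newFn, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
		if !ok {
			return zero, errors.New("no type exists for the genesis fork version")
		}
		return newFn(), nil
	}
	if len(digest) != forkDigestLength {
		return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
	}
	vRoot := tor.GenesisValidatorsRoot()
	for version, newFn := range typeMap {
		rDigest, err := signing.ComputeForkDigest(version[:], vRoot[:])
		if err != nil {
			return zero, err
		}
		if rDigest == bytesutil.ToBytes4(digest) {
			return newFn(), nil
		}
	}
	return zero, errors.Wrapf(ErrNoValidDigest, "could not extract data type, saw digest=%#x", digest)
}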
@@ -2,16 +2,13 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
db "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
@@ -21,7 +18,6 @@ import (
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/equality"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -233,80 +229,3 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {
|
||||
t.Error("Peer is disconnected despite receiving a valid ping")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractMetaDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
clock blockchain.TemporalOracle
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want metadata.Metadata
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "genesis fork version",
|
||||
args: args{
|
||||
digest: genDigest[:],
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "altair fork version",
|
||||
args: args{
|
||||
digest: altairDigest[:],
|
||||
clock: startup.NewClock(time.Now(), [32]byte{}),
|
||||
},
|
||||
want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := extractMetaDataType(tt.args.digest, tt.args.clock)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("extractMetaDataType() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ type Service struct {
|
||||
cancel context.CancelFunc
|
||||
slotToPendingBlocks *gcache.Cache
|
||||
seenPendingBlocks map[[32]byte]bool
|
||||
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
|
||||
blkRootToPendingAtts map[[32]byte][]ethpb.SignedAggregateAttAndProof
|
||||
subHandler *subTopicHandler
|
||||
pendingAttsLock sync.RWMutex
|
||||
pendingQueueLock sync.RWMutex
|
||||
@@ -171,7 +171,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
|
||||
slotToPendingBlocks: c,
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
|
||||
@@ -13,19 +13,21 @@ import (
|
||||
// beaconAggregateProofSubscriber forwards the incoming validated aggregated attestation and proof to the
|
||||
// attestation pool for processing.
|
||||
func (s *Service) beaconAggregateProofSubscriber(_ context.Context, msg proto.Message) error {
|
||||
a, ok := msg.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
a, ok := msg.(ethpb.SignedAggregateAttAndProof)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type *ethpb.SignedAggregateAttestationAndProof, type=%T", msg)
|
||||
return fmt.Errorf("message was not type ethpb.SignedAggregateAttAndProof, type=%T", msg)
|
||||
}
|
||||
|
||||
if a.Message.Aggregate == nil || a.Message.Aggregate.Data == nil {
|
||||
aggregate := a.AggregateAttestationAndProof().AggregateVal()
|
||||
|
||||
if aggregate == nil || aggregate.GetData() == nil {
|
||||
return errors.New("nil aggregate")
|
||||
}
|
||||
|
||||
// An unaggregated attestation can make it here. It's valid, the aggregator is just itself, although it means poor performance for the subnet.
|
||||
if !helpers.IsAggregated(a.Message.Aggregate) {
|
||||
return s.cfg.attPool.SaveUnaggregatedAttestation(a.Message.Aggregate)
|
||||
if !helpers.IsAggregated(aggregate) {
|
||||
return s.cfg.attPool.SaveUnaggregatedAttestation(aggregate)
|
||||
}
|
||||
|
||||
return s.cfg.attPool.SaveAggregatedAttestation(a.Message.Aggregate)
|
||||
return s.cfg.attPool.SaveAggregatedAttestation(aggregate)
|
||||
}
|
||||
|
||||
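For orientation, a minimal sketch of reading an aggregate through the new version-agnostic ethpb.SignedAggregateAttAndProof interface, mirroring the accessors used in the hunk above; describeAggregate is hypothetical and the imports are assumed from this package:

// Sketch: inspect an aggregate through the version-agnostic interface.
// Assumes the ethpb, helpers, primitives, and errors imports used elsewhere
// in this package; describeAggregate itself is not part of the diff.
func describeAggregate(a ethpb.SignedAggregateAttAndProof) (primitives.Slot, bool, error) {
	proof := a.AggregateAttestationAndProof()
	if proof == nil {
		return 0, false, errors.New("nil aggregate and proof")
	}
	aggregate := proof.AggregateVal()
	if aggregate == nil || aggregate.GetData() == nil {
		return 0, false, errors.New("nil aggregate")
	}
	// IsAggregated reports whether more than one aggregation bit is set,
	// which is what decides the aggregated vs. unaggregated pool above.
	return aggregate.GetData().Slot, helpers.IsAggregated(aggregate), nil
}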
@@ -15,19 +15,21 @@ import (
|
||||
)
|
||||
|
||||
func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, msg proto.Message) error {
|
||||
a, ok := msg.(*eth.Attestation)
|
||||
a, ok := msg.(eth.Att)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg)
|
||||
return fmt.Errorf("message was not type eth.Att, type=%T", msg)
|
||||
}
|
||||
|
||||
if a.Data == nil {
|
||||
data := a.GetData()
|
||||
|
||||
if data == nil {
|
||||
return errors.New("nil attestation")
|
||||
}
|
||||
s.setSeenCommitteeIndicesSlot(a.Data.Slot, a.Data.CommitteeIndex, a.AggregationBits)
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, a.GetAggregationBits())
|
||||
|
||||
exists, err := s.cfg.attPool.HasAggregatedAttestation(a)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Could not determine if attestation pool has this atttestation")
|
||||
return errors.Wrap(err, "could not determine if attestation pool has this attestation")
|
||||
}
|
||||
if exists {
|
||||
return nil
|
||||
@@ -36,11 +38,11 @@ func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, m
|
||||
return s.cfg.attPool.SaveUnaggregatedAttestation(a)
|
||||
}
|
||||
|
||||
func (_ *Service) persistentSubnetIndices() []uint64 {
|
||||
func (*Service) persistentSubnetIndices() []uint64 {
|
||||
return cache.SubnetIDs.GetAllSubnets()
|
||||
}
|
||||
|
||||
func (_ *Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
func (*Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
endEpoch := slots.ToEpoch(currentSlot) + 1
|
||||
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
|
||||
var commIds []uint64
|
||||
@@ -50,7 +52,7 @@ func (_ *Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64
|
||||
return slice.SetUint64(commIds)
|
||||
}
|
||||
|
||||
func (_ *Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
func (*Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
|
||||
endEpoch := slots.ToEpoch(currentSlot) + 1
|
||||
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
|
||||
var commIds []uint64
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -47,38 +48,48 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
m, ok := raw.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
m, ok := raw.(ethpb.SignedAggregateAttAndProof)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errors.Errorf("invalid message type: %T", raw)
|
||||
}
|
||||
if m.Message == nil {
|
||||
if m.AggregateAttestationAndProof() == nil {
|
||||
return pubsub.ValidationReject, errNilMessage
|
||||
}
|
||||
if err := helpers.ValidateNilAttestation(m.Message.Aggregate); err != nil {
|
||||
|
||||
aggregate := m.AggregateAttestationAndProof().AggregateVal()
|
||||
data := aggregate.GetData()
|
||||
|
||||
if err := helpers.ValidateNilAttestation(aggregate); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
// Do not process slot 0 aggregates.
|
||||
if m.Message.Aggregate.Data.Slot == 0 {
|
||||
if data.Slot == 0 {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// Broadcast the aggregated attestation on a feed to notify other services in the beacon node
|
||||
// of a received aggregated attestation.
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.AggregatedAttReceived,
|
||||
Data: &operation.AggregatedAttReceivedData{
|
||||
Attestation: m.Message,
|
||||
},
|
||||
})
|
||||
// TODO: this will be extended to Electra in a later PR
|
||||
if m.Version() == version.Phase0 {
|
||||
phase0Att, ok := m.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
if ok {
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.AggregatedAttReceived,
|
||||
Data: &operation.AggregatedAttReceivedData{
|
||||
Attestation: phase0Att.Message,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if err := helpers.ValidateSlotTargetEpoch(m.Message.Aggregate.Data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
|
||||
// processing tolerance.
|
||||
if err := helpers.ValidateAttestationTime(
|
||||
m.Message.Aggregate.Data.Slot,
|
||||
data.Slot,
|
||||
s.cfg.clock.GenesisTime(),
|
||||
earlyAttestationProcessingTolerance,
|
||||
); err != nil {
|
||||
@@ -87,19 +98,19 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
}
|
||||
|
||||
// Verify this is the first aggregate received from the aggregator with index and slot.
|
||||
if s.hasSeenAggregatorIndexEpoch(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex) {
|
||||
if s.hasSeenAggregatorIndexEpoch(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex()) {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
// Check that the block being voted on isn't invalid.
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
|
||||
attBadBlockCount.Inc()
|
||||
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
|
||||
}
|
||||
|
||||
// Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally.
|
||||
seen, err := s.cfg.attPool.HasAggregatedAttestation(m.Message.Aggregate)
|
||||
seen, err := s.cfg.attPool.HasAggregatedAttestation(aggregate)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
@@ -116,7 +127,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
return validationRes, err
|
||||
}
|
||||
|
||||
s.setAggregatorIndexEpochSeen(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex)
|
||||
s.setAggregatorIndexEpochSeen(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
|
||||
msg.ValidatorData = m
|
||||
|
||||
@@ -125,44 +136,75 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.SignedAggregateAttestationAndProof) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.SignedAggregateAttAndProof) (pubsub.ValidationResult, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateAggregatedAtt")
|
||||
defer span.End()
|
||||
|
||||
aggregateAndProof := signed.AggregateAttestationAndProof()
|
||||
aggregatorIndex := aggregateAndProof.GetAggregatorIndex()
|
||||
aggregate := aggregateAndProof.AggregateVal()
|
||||
data := aggregate.GetData()
|
||||
|
||||
// Verify attestation target root is consistent with the head root.
|
||||
// This verification is not in the spec, however we guard against it as it opens us up
|
||||
// to weird edge cases during verification. The attestation technically could be used to add value to a block,
|
||||
// but it's invalid in the spirit of the protocol. Here we choose safety over profit.
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, signed.Message.Aggregate); err != nil {
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
attBadLmdConsistencyCount.Inc()
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(signed.Message.Aggregate.Data.BeaconBlockRoot)) {
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
|
||||
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
|
||||
}
|
||||
|
||||
bs, err := s.cfg.chain.AttestationTargetState(ctx, signed.Message.Aggregate.Data.Target)
|
||||
bs, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// Verify validator index is within the beacon committee.
|
||||
result, err := s.validateIndexInCommittee(ctx, bs, signed.Message.Aggregate, signed.Message.AggregatorIndex)
|
||||
result, err := s.validateIndexInCommittee(ctx, bs, aggregate, aggregatorIndex)
|
||||
if result != pubsub.ValidationAccept {
|
||||
wrappedErr := errors.Wrapf(err, "Could not validate index in committee")
|
||||
wrappedErr := errors.Wrapf(err, "could not validate index in committee")
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return result, wrappedErr
|
||||
}
|
||||
|
||||
var committeeIndex primitives.CommitteeIndex
|
||||
if signed.Version() >= version.Electra {
|
||||
a, ok := aggregate.(*ethpb.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
err := fmt.Errorf("aggregate attestation has wrong type (expected %T, got %T)", ðpb.AttestationElectra{}, aggregate)
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
committeeIndex, result, err = validateCommitteeIndexElectra(ctx, a)
|
||||
if result != pubsub.ValidationAccept {
|
||||
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return result, wrappedErr
|
||||
}
|
||||
} else {
|
||||
committeeIndex = data.CommitteeIndex
|
||||
}
|
||||
|
||||
// Verify selection proof reflects to the right validator.
|
||||
selectionSigSet, err := validateSelectionIndex(ctx, bs, signed.Message.Aggregate.Data, signed.Message.AggregatorIndex, signed.Message.SelectionProof)
|
||||
selectionSigSet, err := validateSelectionIndex(
|
||||
ctx,
|
||||
bs,
|
||||
data.Slot,
|
||||
committeeIndex,
|
||||
aggregatorIndex,
|
||||
aggregateAndProof.GetSelectionProof(),
|
||||
)
|
||||
if err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)
|
||||
wrappedErr := errors.Wrapf(err, "could not validate selection for validator %d", aggregateAndProof.GetAggregatorIndex())
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
attBadSelectionProofCount.Inc()
|
||||
return pubsub.ValidationReject, wrappedErr
|
||||
@@ -172,13 +214,13 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
|
||||
// We use batch verify here to save compute.
|
||||
aggregatorSigSet, err := aggSigSet(bs, signed)
|
||||
if err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "Could not get aggregator sig set %d", signed.Message.AggregatorIndex)
|
||||
wrappedErr := errors.Wrapf(err, "could not get aggregator sig set %d", aggregatorIndex)
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return pubsub.ValidationIgnore, wrappedErr
|
||||
}
|
||||
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{signed.Message.Aggregate})
|
||||
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{aggregate})
|
||||
if err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "Could not verify aggregator signature %d", signed.Message.AggregatorIndex)
|
||||
wrappedErr := errors.Wrapf(err, "could not verify aggregator signature %d", aggregatorIndex)
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return pubsub.ValidationIgnore, wrappedErr
|
||||
}
|
||||
@@ -188,10 +230,9 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
|
||||
return s.validateWithBatchVerifier(ctx, "aggregate", set)
|
||||
}
|
||||
|
||||
func (s *Service) validateBlockInAttestation(ctx context.Context, satt *ethpb.SignedAggregateAttestationAndProof) bool {
|
||||
a := satt.Message
|
||||
func (s *Service) validateBlockInAttestation(ctx context.Context, satt ethpb.SignedAggregateAttAndProof) bool {
|
||||
// Verify the block being voted and the processed state is in beaconDB. The block should have passed validation if it's in the beaconDB.
|
||||
blockRoot := bytesutil.ToBytes32(a.Aggregate.Data.BeaconBlockRoot)
|
||||
blockRoot := bytesutil.ToBytes32(satt.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
|
||||
if !s.hasBlockAndState(ctx, blockRoot) {
|
||||
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
|
||||
s.savePendingAtt(satt)
|
||||
@@ -234,7 +275,7 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
|
||||
return result, err
|
||||
}
|
||||
|
||||
committee, result, err := s.validateBitLength(ctx, a, bs)
|
||||
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
|
||||
if result != pubsub.ValidationAccept {
|
||||
return result, err
|
||||
}
|
||||
@@ -262,14 +303,15 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
|
||||
func validateSelectionIndex(
|
||||
ctx context.Context,
|
||||
bs state.ReadOnlyBeaconState,
|
||||
data *ethpb.AttestationData,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
validatorIndex primitives.ValidatorIndex,
|
||||
proof []byte,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateSelectionIndex")
|
||||
defer span.End()
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, data.Slot, data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -278,11 +320,11 @@ func validateSelectionIndex(
|
||||
return nil, err
|
||||
}
|
||||
if !aggregator {
|
||||
return nil, fmt.Errorf("validator is not an aggregator for slot %d", data.Slot)
|
||||
return nil, fmt.Errorf("validator is not an aggregator for slot %d", slot)
|
||||
}
|
||||
|
||||
domain := params.BeaconConfig().DomainSelectionProof
|
||||
epoch := slots.ToEpoch(data.Slot)
|
||||
epoch := slots.ToEpoch(slot)
|
||||
|
||||
v, err := bs.ValidatorAtIndex(validatorIndex)
|
||||
if err != nil {
|
||||
@@ -297,7 +339,7 @@ func validateSelectionIndex(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sszUint := primitives.SSZUint64(data.Slot)
|
||||
sszUint := primitives.SSZUint64(slot)
|
||||
root, err := signing.ComputeSigningRoot(&sszUint, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -311,8 +353,10 @@ func validateSelectionIndex(
|
||||
}
|
||||
|
||||
// This returns aggregator signature set which can be used to batch verify.
|
||||
func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationAndProof) (*bls.SignatureBatch, error) {
|
||||
v, err := s.ValidatorAtIndex(a.Message.AggregatorIndex)
|
||||
func aggSigSet(s state.ReadOnlyBeaconState, a ethpb.SignedAggregateAttAndProof) (*bls.SignatureBatch, error) {
|
||||
aggregateAndProof := a.AggregateAttestationAndProof()
|
||||
|
||||
v, err := s.ValidatorAtIndex(aggregateAndProof.GetAggregatorIndex())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -321,17 +365,17 @@ func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationA
|
||||
return nil, err
|
||||
}
|
||||
|
||||
epoch := slots.ToEpoch(a.Message.Aggregate.Data.Slot)
|
||||
epoch := slots.ToEpoch(aggregateAndProof.AggregateVal().GetData().Slot)
|
||||
d, err := signing.Domain(s.Fork(), epoch, params.BeaconConfig().DomainAggregateAndProof, s.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root, err := signing.ComputeSigningRoot(a.Message, d)
|
||||
root, err := signing.ComputeSigningRoot(aggregateAndProof, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &bls.SignatureBatch{
|
||||
Signatures: [][]byte{a.Signature},
|
||||
Signatures: [][]byte{a.GetSignature()},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{root},
|
||||
Descriptions: []string{signing.AggregatorSignature},
|
||||
|
||||
@@ -117,7 +117,7 @@ func TestVerifySelection_NotAnAggregator(t *testing.T) {
|
||||
sig := privKeys[0].Sign([]byte{'A'})
|
||||
data := util.HydrateAttestationData(&ethpb.AttestationData{})
|
||||
|
||||
_, err := validateSelectionIndex(ctx, beaconState, data, 0, sig.Marshal())
|
||||
_, err := validateSelectionIndex(ctx, beaconState, data.Slot, data.CommitteeIndex, 0, sig.Marshal())
|
||||
wanted := "validator is not an aggregator for slot"
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
@@ -149,7 +149,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
|
||||
attPool: attestations.NewPool(),
|
||||
chain: &mock.ChainService{},
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenAggregatedAttestationCache: c,
|
||||
}
|
||||
r.initCaches()
|
||||
@@ -302,7 +302,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
|
||||
attestationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
},
|
||||
seenAggregatedAttestationCache: lruwrpr.New(10),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
}
|
||||
r.initCaches()
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -55,16 +57,18 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
att, ok := m.(*eth.Attestation)
|
||||
att, ok := m.(eth.Att)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
data := att.GetData()
|
||||
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
// Do not process slot 0 attestations.
|
||||
if att.Data.Slot == 0 {
|
||||
if data.Slot == 0 {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
|
||||
@@ -78,15 +82,36 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
|
||||
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
|
||||
// processing tolerance.
|
||||
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.clock.GenesisTime(),
|
||||
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(),
|
||||
earlyAttestationProcessingTolerance); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
|
||||
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
var validationRes pubsub.ValidationResult
|
||||
|
||||
var committeeIndex primitives.CommitteeIndex
|
||||
if att.Version() >= version.Electra {
|
||||
a, ok := att.(*eth.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
err := fmt.Errorf("attestation has wrong type (expected %T, got %T)", ð.AttestationElectra{}, att)
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
committeeIndex, validationRes, err = validateCommitteeIndexElectra(ctx, a)
|
||||
if validationRes != pubsub.ValidationAccept {
|
||||
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
return validationRes, wrappedErr
|
||||
}
|
||||
} else {
|
||||
committeeIndex = data.CommitteeIndex
|
||||
}
|
||||
|
||||
if features.Get().EnableSlasher {
|
||||
// Feed the indexed attestation to slasher if enabled. This action
|
||||
// is done in the background to avoid adding more load to this critical code path.
|
||||
@@ -94,13 +119,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
// Using a different context to prevent timeouts as this operation can be expensive
|
||||
// and we want to avoid affecting the critical code path.
|
||||
ctx := context.TODO()
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve pre state")
|
||||
tracing.AnnotateError(span, err)
|
||||
return
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, committeeIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attestation committee")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -117,27 +142,41 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
}
|
||||
|
||||
// Verify this is the first attestation received for the participating validator for the slot.
|
||||
if s.hasSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits) {
|
||||
if s.hasSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits()) {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// Reject an attestation if it references an invalid block.
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Source.Root)) {
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
|
||||
attBadBlockCount.Inc()
|
||||
return pubsub.ValidationReject, errors.New("attestation data references bad block root")
|
||||
}
|
||||
|
||||
// Verify the block being voted and the processed state is in beaconDB and the block has passed validation if it's in the beaconDB.
|
||||
blockRoot := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
|
||||
blockRoot := bytesutil.ToBytes32(data.BeaconBlockRoot)
|
||||
if !s.hasBlockAndState(ctx, blockRoot) {
|
||||
// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
|
||||
s.savePendingAtt(&eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: att}})
|
||||
if att.Version() >= version.Electra {
|
||||
a, ok := att.(*eth.AttestationElectra)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", ð.AttestationElectra{}, att)
|
||||
}
|
||||
s.savePendingAtt(&eth.SignedAggregateAttestationAndProofElectra{Message: &eth.AggregateAttestationAndProofElectra{Aggregate: a}})
|
||||
} else {
|
||||
a, ok := att.(*eth.Attestation)
|
||||
// This will never fail in practice because we asserted the version
|
||||
if !ok {
|
||||
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", ð.Attestation{}, att)
|
||||
}
|
||||
s.savePendingAtt(&eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: a}})
|
||||
}
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) {
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
|
||||
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
|
||||
}
|
||||
@@ -147,13 +186,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
validationRes, err := s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
|
||||
validationRes, err = s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
|
||||
if validationRes != pubsub.ValidationAccept {
|
||||
return validationRes, err
|
||||
}
|
||||
@@ -163,7 +202,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
return validationRes, err
|
||||
}
|
||||
|
||||
s.setSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits)
|
||||
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits())
|
||||
|
||||
msg.ValidatorData = att
|
||||
|
||||
@@ -211,7 +250,7 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateUnaggregatedAttWithState")
|
||||
defer span.End()
|
||||
|
||||
committee, result, err := s.validateBitLength(ctx, a, bs)
|
||||
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
|
||||
if result != pubsub.ValidationAccept {
|
||||
return result, err
|
||||
}
|
||||
@@ -232,14 +271,20 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
|
||||
return s.validateWithBatchVerifier(ctx, "attestation", set)
|
||||
}
|
||||
|
||||
func (s *Service) validateBitLength(ctx context.Context, a eth.Att, bs state.ReadOnlyBeaconState) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
func (s *Service) validateBitLength(
|
||||
ctx context.Context,
|
||||
bs state.ReadOnlyBeaconState,
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
aggregationBits bitfield.Bitlist,
|
||||
) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
|
||||
if err != nil {
|
||||
return nil, pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// Verify number of aggregation bits matches the committee size.
|
||||
if err := helpers.VerifyBitfieldLength(a.GetAggregationBits(), uint64(len(committee))); err != nil {
|
||||
if err := helpers.VerifyBitfieldLength(aggregationBits, uint64(len(committee))); err != nil {
|
||||
return nil, pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
|
||||
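The hunks above repeatedly recover the concrete attestation container from the generic eth.Att by switching on the fork version before wrapping it for the pending-attestation queue. A compact, hedged sketch of that pattern, assuming the same imports as the surrounding file; the helper name is illustrative:

// Sketch: wrap a generic attestation into the matching signed aggregate
// container, switching on the fork version as the hunks above do.
func wrapForPendingQueue(att eth.Att) (eth.SignedAggregateAttAndProof, error) {
	if att.Version() >= version.Electra {
		a, ok := att.(*eth.AttestationElectra)
		if !ok {
			return nil, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.AttestationElectra{}, att)
		}
		return &eth.SignedAggregateAttestationAndProofElectra{Message: &eth.AggregateAttestationAndProofElectra{Aggregate: a}}, nil
	}
	a, ok := att.(*eth.Attestation)
	if !ok {
		return nil, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.Attestation{}, att)
	}
	return &eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: a}}, nil
}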
27
beacon-chain/sync/validate_beacon_attestation_electra.go
Normal file
@@ -0,0 +1,27 @@
package sync

import (
	"context"
	"fmt"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"go.opencensus.io/trace"
)

func validateCommitteeIndexElectra(ctx context.Context, a *ethpb.AttestationElectra) (primitives.CommitteeIndex, pubsub.ValidationResult, error) {
	_, span := trace.StartSpan(ctx, "sync.validateCommitteeIndexElectra")
	defer span.End()

	ci := a.Data.CommitteeIndex
	if ci != 0 {
		return 0, pubsub.ValidationReject, fmt.Errorf("committee index must be 0 but was %d", ci)
	}
	committeeIndices := helpers.CommitteeIndices(a.CommitteeBits)
	if len(committeeIndices) != 1 {
		return 0, pubsub.ValidationReject, fmt.Errorf("exactly 1 committee index must be set but %d were set", len(committeeIndices))
	}
	return committeeIndices[0], pubsub.ValidationAccept, nil
}
@@ -0,0 +1,46 @@
package sync

import (
	"context"
	"testing"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func Test_validateCommitteeIndexElectra(t *testing.T) {
	ctx := context.Background()

	t.Run("valid", func(t *testing.T) {
		cb := primitives.NewAttestationCommitteeBits()
		cb.SetBitAt(1, true)
		ci, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
		require.NoError(t, err)
		assert.Equal(t, pubsub.ValidationAccept, res)
		assert.Equal(t, primitives.CommitteeIndex(1), ci)
	})
	t.Run("non-zero data committee index", func(t *testing.T) {
		cb := primitives.NewAttestationCommitteeBits()
		cb.SetBitAt(1, true)
		_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{CommitteeIndex: 1}, CommitteeBits: cb})
		assert.NotNil(t, err)
		assert.Equal(t, pubsub.ValidationReject, res)
	})
	t.Run("no committee bits set", func(t *testing.T) {
		cb := primitives.NewAttestationCommitteeBits()
		_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
		assert.NotNil(t, err)
		assert.Equal(t, pubsub.ValidationReject, res)
	})
	t.Run("more than 1 committee bit set", func(t *testing.T) {
		cb := primitives.NewAttestationCommitteeBits()
		cb.SetBitAt(0, true)
		cb.SetBitAt(1, true)
		_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
		assert.NotNil(t, err)
		assert.Equal(t, pubsub.ValidationReject, res)
	})
}
@@ -49,7 +49,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
}
|
||||
@@ -290,7 +290,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
|
||||
m.Message.Topic = nil
|
||||
}
|
||||
|
||||
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "" /*peerID*/, m)
|
||||
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "", m)
|
||||
received := res == pubsub.ValidationAccept
|
||||
if received != tt.want {
|
||||
t.Fatalf("Did not received wanted validation. Got %v, wanted %v", !tt.want, tt.want)
|
||||
|
||||
@@ -227,12 +227,6 @@ var (
|
||||
Usage: "Sets the minimum number of peers that a node will attempt to peer with that are subscribed to a subnet.",
|
||||
Value: 6,
|
||||
}
|
||||
// MaxConcurrentDials defines a flag to set the maximum number of peers that a node will attempt to dial with from discovery.
|
||||
MaxConcurrentDials = &cli.Uint64Flag{
|
||||
Name: "max-concurrent-dials",
|
||||
Usage: "Sets the maximum number of peers that a node will attempt to dial with from discovery. By default we will dials as " +
|
||||
"many peers as possible.",
|
||||
}
|
||||
// SuggestedFeeRecipient specifies the fee recipient for the transaction fees.
|
||||
SuggestedFeeRecipient = &cli.StringFlag{
|
||||
Name: "suggested-fee-recipient",
|
||||
|
||||
@@ -11,7 +11,6 @@ type GlobalFlags struct {
|
||||
SubscribeToAllSubnets bool
|
||||
MinimumSyncPeers int
|
||||
MinimumPeersPerSubnet int
|
||||
MaxConcurrentDials int
|
||||
BlockBatchLimit int
|
||||
BlockBatchLimitBurstFactor int
|
||||
BlobBatchLimit int
|
||||
@@ -46,17 +45,11 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
|
||||
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
|
||||
cfg.BlobBatchLimitBurstFactor = ctx.Int(BlobBatchLimitBurstFactor.Name)
|
||||
cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
|
||||
cfg.MaxConcurrentDials = ctx.Int(MaxConcurrentDials.Name)
|
||||
configureMinimumPeers(ctx, cfg)
|
||||
|
||||
Init(cfg)
|
||||
}
|
||||
|
||||
// MaxDialIsActive checks if the user has enabled the max dial flag.
|
||||
func MaxDialIsActive() bool {
|
||||
return Get().MaxConcurrentDials > 0
|
||||
}
|
||||
|
||||
func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
|
||||
cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
|
||||
maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
|
||||
|
||||
@@ -72,7 +72,6 @@ var appFlags = []cli.Flag{
|
||||
flags.WeakSubjectivityCheckpoint,
|
||||
flags.Eth1HeaderReqLimit,
|
||||
flags.MinPeersPerSubnet,
|
||||
flags.MaxConcurrentDials,
|
||||
flags.SuggestedFeeRecipient,
|
||||
flags.TerminalTotalDifficultyOverride,
|
||||
flags.TerminalBlockHashOverride,
|
||||
|
||||
@@ -124,7 +124,6 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.WeakSubjectivityCheckpoint,
|
||||
flags.Eth1HeaderReqLimit,
|
||||
flags.MinPeersPerSubnet,
|
||||
flags.MaxConcurrentDials,
|
||||
flags.MevRelayEndpoint,
|
||||
flags.MaxBuilderEpochMissedSlots,
|
||||
flags.MaxBuilderConsecutiveMissedSlots,
|
||||
|
||||
0
cmd/prysmctl/tosaccepted
Normal file
@@ -36,6 +36,6 @@ const (
|
||||
PendingBalanceDepositsLimit = 134217728 // Maximum number of pending balance deposits in the beacon state.
|
||||
PendingPartialWithdrawalsLimit = 134217728 // Maximum number of pending partial withdrawals in the beacon state.
|
||||
PendingConsolidationsLimit = 262144 // Maximum number of pending consolidations in the beacon state.
|
||||
MaxDepositReceiptsPerPayload = 8192 // Maximum number of deposit receipts in an execution payload.
|
||||
MaxDepositRequestsPerPayload = 8192 // Maximum number of deposit requests in an execution payload.
|
||||
MaxWithdrawalRequestsPerPayload = 16 // Maximum number of execution layer withdrawal requests in an execution payload.
|
||||
)
|
||||
|
||||
@@ -36,6 +36,6 @@ const (
|
||||
PendingBalanceDepositsLimit = 134217728 // Maximum number of pending balance deposits in the beacon state.
|
||||
PendingPartialWithdrawalsLimit = 64 // Maximum number of pending partial withdrawals in the beacon state.
|
||||
PendingConsolidationsLimit = 64 // Maximum number of pending consolidations in the beacon state.
|
||||
MaxDepositReceiptsPerPayload = 4 // Maximum number of deposit receipts in an execution payload.
|
||||
MaxDepositRequestsPerPayload = 4 // Maximum number of deposit requests in an execution payload.
|
||||
MaxWithdrawalRequestsPerPayload = 2 // Maximum number of execution layer withdrawal requests in an execution payload.
|
||||
)
|
||||
|
||||
@@ -250,7 +250,8 @@ type BeaconChainConfig struct {
|
||||
MaxPendingPartialsPerWithdrawalsSweep uint64 `yaml:"MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxPendingPartialsPerWithdrawalsSweep is the maximum number of pending partial withdrawals to process per payload.
|
||||
FullExitRequestAmount uint64 `yaml:"FULL_EXIT_REQUEST_AMOUNT" spec:"true"` // FullExitRequestAmount is the amount of Gwei required to request a full exit.
|
||||
MaxWithdrawalRequestsPerPayload uint64 `yaml:"MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalRequestsPerPayload is the maximum number of execution layer withdrawal requests in each payload.
|
||||
UnsetDepositReceiptsStartIndex uint64 `yaml:"UNSET_DEPOSIT_RECEIPTS_START_INDEX" spec:"true"` // UnsetDepositReceiptsStartIndex is used to check the start index for eip6110
|
||||
MaxDepositRequestsPerPayload uint64 `yaml:"MAX_DEPOSIT_REQUESTS_PER_PAYLOAD" spec:"true"` // MaxDepositRequestsPerPayload is the maximum number of execution layer deposits in each payload
|
||||
UnsetDepositRequestsStartIndex uint64 `yaml:"UNSET_DEPOSIT_REQUESTS_START_INDEX" spec:"true"` // UnsetDepositRequestsStartIndex is used to check the start index for eip6110
|
||||
|
||||
// Networking Specific Parameters
|
||||
GossipMaxSize uint64 `yaml:"GOSSIP_MAX_SIZE" spec:"true"` // GossipMaxSize is the maximum allowed size of uncompressed gossip messages.
|
||||
|
||||
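The renamed UnsetDepositRequestsStartIndex sentinel signals that EIP-6110 deposit requests have not yet begun, so deposits still flow through the legacy eth1 path. A minimal sketch of the kind of check it enables; the helper is hypothetical, only the config field comes from this diff:

// Sketch: deposit requests are considered active once the recorded start
// index is no longer the unset sentinel from the beacon chain config.
func depositRequestsActive(startIndex uint64) bool {
	return startIndex != params.BeaconConfig().UnsetDepositRequestsStartIndex
}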
@@ -36,7 +36,7 @@ var placeholderFields = []string{
|
||||
"MAX_BLOBS_PER_BLOCK",
|
||||
"MAX_BLOB_COMMITMENTS_PER_BLOCK", // Compile time constant on BeaconBlockBodyDeneb.blob_kzg_commitments.
|
||||
"MAX_BYTES_PER_TRANSACTION", // Used for ssz of EL transactions. Unused in Prysm.
|
||||
"MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.deposit_receipts.
|
||||
"MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.deposit_receipts. TODO: rename when updating spec configs
|
||||
"MAX_EXTRA_DATA_BYTES", // Compile time constant on ExecutionPayload.extra_data.
|
||||
"MAX_TRANSACTIONS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.transactions.
|
||||
"REORG_HEAD_WEIGHT_THRESHOLD",
|
||||
|
||||
@@ -290,7 +290,8 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
MaxPendingPartialsPerWithdrawalsSweep: 8,
|
||||
FullExitRequestAmount: 0,
|
||||
MaxWithdrawalRequestsPerPayload: 16,
|
||||
UnsetDepositReceiptsStartIndex: math.MaxUint64,
|
||||
MaxDepositRequestsPerPayload: 8192, // 2**13 (= 8192)
|
||||
UnsetDepositRequestsStartIndex: math.MaxUint64,
|
||||
|
||||
// Values related to networking parameters.
|
||||
GossipMaxSize: 10 * 1 << 20, // 10 MiB
|
||||
|
||||
@@ -107,6 +107,7 @@ func MinimalSpecConfig() *BeaconChainConfig {
|
||||
minimalConfig.PendingConsolidationsLimit = 64
|
||||
minimalConfig.MaxPartialWithdrawalsPerPayload = 1
|
||||
minimalConfig.MaxWithdrawalRequestsPerPayload = 2
|
||||
minimalConfig.MaxDepositRequestsPerPayload = 4
|
||||
minimalConfig.PendingPartialWithdrawalsLimit = 64
|
||||
minimalConfig.MaxPendingPartialsPerWithdrawalsSweep = 1
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package loader
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
@@ -128,12 +129,23 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
|
||||
return nil, err
|
||||
}
|
||||
loadConfig = dbps.ToConsensus()
|
||||
log.Debugf("DB loaded proposer settings: %s", func() string {
|
||||
b, err := json.Marshal(loadConfig)
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return string(b)
|
||||
}())
|
||||
}
|
||||
|
||||
// start to process based on load method
|
||||
for _, method := range psl.loadMethods {
|
||||
switch method {
|
||||
case defaultFlag:
|
||||
if psl.existsInDB && len(psl.loadMethods) == 1 {
|
||||
// only log the below if default flag is the only load method
|
||||
log.Warn("Previously saved proposer settings were loaded from the DB, only default settings will be updated. Please provide new proposer settings or clear DB to reset proposer settings.")
|
||||
}
|
||||
suggestedFeeRecipient := cliCtx.String(flags.SuggestedFeeRecipientFlag.Name)
|
||||
if !common.IsHexAddress(suggestedFeeRecipient) {
|
||||
return nil, errors.Errorf("--%s is not a valid Ethereum address", flags.SuggestedFeeRecipientFlag.Name)
|
||||
@@ -157,6 +169,7 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
|
||||
return nil, errors.Errorf("proposer settings is empty after unmarshalling from file specified by %s flag", flags.ProposerSettingsFlag.Name)
|
||||
}
|
||||
loadConfig = psl.processProposerSettings(settingFromFile, loadConfig)
|
||||
log.WithField(flags.ProposerSettingsFlag.Name, cliCtx.String(flags.ProposerSettingsFlag.Name)).Info("Proposer settings loaded from file")
|
||||
case urlFlag:
|
||||
var settingFromURL *validatorpb.ProposerSettingsPayload
|
||||
if err := config.UnmarshalFromURL(cliCtx.Context, cliCtx.String(flags.ProposerSettingsURLFlag.Name), &settingFromURL); err != nil {
|
||||
@@ -166,9 +179,14 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
|
||||
return nil, errors.New("proposer settings is empty after unmarshalling from url")
|
||||
}
|
||||
loadConfig = psl.processProposerSettings(settingFromURL, loadConfig)
|
||||
log.WithField(flags.ProposerSettingsURLFlag.Name, cliCtx.String(flags.ProposerSettingsURLFlag.Name)).Infof("Proposer settings loaded from URL")
|
||||
case onlyDB:
|
||||
loadConfig = psl.processProposerSettings(nil, loadConfig)
|
||||
log.Info("Proposer settings loaded from the DB")
|
||||
case none:
|
||||
if psl.existsInDB {
|
||||
log.Info("Proposer settings loaded from the DB")
|
||||
}
|
||||
if psl.options.builderConfig != nil {
|
||||
// if there are no proposer settings provided, create a default where fee recipient is not populated, this will be skipped for validator registration on validators that don't have a fee recipient set.
|
||||
// skip saving to DB if only builder settings are provided until a trigger like keymanager API updates with fee recipient values
|
||||
|
||||
@@ -812,14 +812,14 @@ func PayloadToHeaderElectra(payload interfaces.ExecutionDataElectra) (*enginev1.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
depositReceipts := payload.DepositReceipts()
|
||||
depositReceiptsRoot, err := ssz.DepositReceiptSliceRoot(depositReceipts, fieldparams.MaxDepositReceiptsPerPayload)
|
||||
depositRequests := payload.DepositRequests()
|
||||
depositRequestsRoot, err := ssz.DepositRequestsSliceRoot(depositRequests, fieldparams.MaxDepositRequestsPerPayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
withdrawalRequests := payload.WithdrawalRequests()
|
||||
withdrawalRequestsRoot, err := ssz.WithdrawalRequestSliceRoot(withdrawalRequests, fieldparams.MaxWithdrawalRequestsPerPayload)
|
||||
withdrawalRequestsRoot, err := ssz.WithdrawalRequestsSliceRoot(withdrawalRequests, fieldparams.MaxWithdrawalRequestsPerPayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -842,7 +842,7 @@ func PayloadToHeaderElectra(payload interfaces.ExecutionDataElectra) (*enginev1.
|
||||
WithdrawalsRoot: withdrawalsRoot[:],
|
||||
BlobGasUsed: blobGasUsed,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
DepositReceiptsRoot: depositReceiptsRoot[:],
|
||||
DepositRequestsRoot: depositRequestsRoot[:],
|
||||
WithdrawalRequestsRoot: withdrawalRequestsRoot[:],
|
||||
}, nil
|
||||
}
|
||||
@@ -907,7 +907,7 @@ func IsEmptyExecutionData(data interfaces.ExecutionData) (bool, error) {
|
||||
|
||||
epe, postElectra := data.(interfaces.ExecutionDataElectra)
|
||||
if postElectra {
|
||||
drs := epe.DepositReceipts()
|
||||
drs := epe.DepositRequests()
|
||||
if len(drs) != 0 {
|
||||
return false, nil
|
||||
}
|
||||
@@ -1389,13 +1389,13 @@ func (e executionPayloadHeaderElectra) ExcessBlobGas() (uint64, error) {
|
||||
return e.p.ExcessBlobGas, nil
|
||||
}
|
||||
|
||||
// DepositReceipts --
|
||||
func (e executionPayloadHeaderElectra) DepositReceipts() ([]*enginev1.DepositReceipt, error) {
|
||||
// DepositRequests --
|
||||
func (e executionPayloadHeaderElectra) DepositRequests() ([]*enginev1.DepositRequest, error) {
|
||||
return nil, consensus_types.ErrUnsupportedField
|
||||
}
|
||||
|
||||
// WithdrawalRequests --
|
||||
func (e executionPayloadHeaderElectra) WithdrawalRequests() ([]*enginev1.ExecutionLayerWithdrawalRequest, error) {
|
||||
func (e executionPayloadHeaderElectra) WithdrawalRequests() ([]*enginev1.WithdrawalRequest, error) {
|
||||
return nil, consensus_types.ErrUnsupportedField
|
||||
}
|
||||
|
||||
@@ -1556,13 +1556,13 @@ func (e executionPayloadElectra) ExcessBlobGas() (uint64, error) {
|
||||
return e.p.ExcessBlobGas, nil
|
||||
}
|
||||
|
||||
// DepositReceipts --
|
||||
func (e executionPayloadElectra) DepositReceipts() []*enginev1.DepositReceipt {
|
||||
return e.p.DepositReceipts
|
||||
// DepositRequests --
|
||||
func (e executionPayloadElectra) DepositRequests() []*enginev1.DepositRequest {
|
||||
return e.p.DepositRequests
|
||||
}
|
||||
|
||||
// WithdrawalRequests --
|
||||
func (e executionPayloadElectra) WithdrawalRequests() []*enginev1.ExecutionLayerWithdrawalRequest {
|
||||
func (e executionPayloadElectra) WithdrawalRequests() []*enginev1.WithdrawalRequest {
|
||||
return e.p.WithdrawalRequests
|
||||
}
|
||||
|
||||
|
||||
@@ -130,6 +130,6 @@ type ExecutionData interface {
|
||||
|
||||
type ExecutionDataElectra interface {
|
||||
ExecutionData
|
||||
DepositReceipts() []*enginev1.DepositReceipt
|
||||
WithdrawalRequests() []*enginev1.ExecutionLayerWithdrawalRequest
|
||||
DepositRequests() []*enginev1.DepositRequest
|
||||
WithdrawalRequests() []*enginev1.WithdrawalRequest
|
||||
}
|
||||
|
||||
@@ -133,6 +133,11 @@ func AggregateCompressedSignatures(multiSigs [][]byte) (common.Signature, error)
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// VerifySignature -- stub
|
||||
func VerifySignature(_ []byte, _ [32]byte, _ common.PublicKey) (bool, error) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// VerifyMultipleSignatures -- stub
|
||||
func VerifyMultipleSignatures(_ [][]byte, _ [][32]byte, _ []common.PublicKey) (bool, error) {
|
||||
panic(err)
|
||||
|
||||
8
deps.bzl
@@ -2973,14 +2973,14 @@ def prysm_deps():
    go_repository(
        name = "com_github_prysmaticlabs_fastssz",
        importpath = "github.com/prysmaticlabs/fastssz",
        sum = "h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=",
        version = "v0.0.0-20221107182844-78142813af44",
        sum = "h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=",
        version = "v0.0.0-20240620202422-a981b8ef89d3",
    )
    go_repository(
        name = "com_github_prysmaticlabs_go_bitfield",
        importpath = "github.com/prysmaticlabs/go-bitfield",
        sum = "h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw=",
        version = "v0.0.0-20210809151128-385d8c5e3fb7",
        sum = "h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4=",
        version = "v0.0.0-20240328144219-a1caa50c3a1e",
    )
    go_repository(
        name = "com_github_prysmaticlabs_gohashtree",

@@ -7,6 +7,7 @@ go_library(
        "helpers.go",
        "htrutils.go",
        "merkleize.go",
        "slice_root.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/encoding/ssz",
    visibility = ["//visibility:public"],

@@ -141,54 +141,16 @@ func WithdrawalSliceRoot(withdrawals []*enginev1.Withdrawal, limit uint64) ([32]
    return MixInLength(bytesRoot, bytesRootBufRoot), nil
}

// DepositReceiptSliceRoot computes the HTR of a slice of deposit receipts.
// DepositRequestsSliceRoot computes the HTR of a slice of deposit requests.
// The limit parameter is used as input to the bitwise merkleization algorithm.
func DepositReceiptSliceRoot(depositReceipts []*enginev1.DepositReceipt, limit uint64) ([32]byte, error) {
    roots := make([][32]byte, len(depositReceipts))
    for i := 0; i < len(depositReceipts); i++ {
        r, err := depositReceipts[i].HashTreeRoot()
        if err != nil {
            return [32]byte{}, err
        }
        roots[i] = r
    }

    bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
    }
    bytesRootBuf := new(bytes.Buffer)
    if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(depositReceipts))); err != nil {
        return [32]byte{}, errors.Wrap(err, "could not marshal length")
    }
    bytesRootBufRoot := make([]byte, 32)
    copy(bytesRootBufRoot, bytesRootBuf.Bytes())
    return MixInLength(bytesRoot, bytesRootBufRoot), nil
func DepositRequestsSliceRoot(depositRequests []*enginev1.DepositRequest, limit uint64) ([32]byte, error) {
    return SliceRoot(depositRequests, limit)
}

// WithdrawalRequestSliceRoot computes the HTR of a slice of withdrawal requests from the EL.
// WithdrawalRequestsSliceRoot computes the HTR of a slice of withdrawal requests from the EL.
// The limit parameter is used as input to the bitwise merkleization algorithm.
func WithdrawalRequestSliceRoot(withdrawalRequests []*enginev1.ExecutionLayerWithdrawalRequest, limit uint64) ([32]byte, error) {
    roots := make([][32]byte, len(withdrawalRequests))
    for i := 0; i < len(withdrawalRequests); i++ {
        r, err := withdrawalRequests[i].HashTreeRoot()
        if err != nil {
            return [32]byte{}, err
        }
        roots[i] = r
    }

    bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
    }
    bytesRootBuf := new(bytes.Buffer)
    if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(withdrawalRequests))); err != nil {
        return [32]byte{}, errors.Wrap(err, "could not marshal length")
    }
    bytesRootBufRoot := make([]byte, 32)
    copy(bytesRootBufRoot, bytesRootBuf.Bytes())
    return MixInLength(bytesRoot, bytesRootBufRoot), nil
func WithdrawalRequestsSliceRoot(withdrawalRequests []*enginev1.WithdrawalRequest, limit uint64) ([32]byte, error) {
    return SliceRoot(withdrawalRequests, limit)
}

// ByteSliceRoot is a helper func to merkleize an arbitrary List[Byte, N]

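Both helpers are now thin wrappers over the generic SliceRoot in the same package, so the refactor should be behavior-preserving. A hedged sketch of that equivalence, assuming both functions remain exported from encoding/ssz; the helper name is hypothetical and not a test from this change set.

package example

import (
    "github.com/prysmaticlabs/prysm/v5/encoding/ssz"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// rootsMatch is an illustrative check, not part of this PR: the wrapper and a
// direct call to the generic SliceRoot should produce the same root.
func rootsMatch(reqs []*enginev1.DepositRequest, limit uint64) (bool, error) {
    viaWrapper, err := ssz.DepositRequestsSliceRoot(reqs, limit)
    if err != nil {
        return false, err
    }
    direct, err := ssz.SliceRoot(reqs, limit)
    if err != nil {
        return false, err
    }
    return viaWrapper == direct, nil
}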
@@ -281,21 +281,21 @@ func TestWithrawalSliceRoot(t *testing.T) {
    }
}

func TestDepositReceiptSliceRoot(t *testing.T) {
func TestDepositRequestsSliceRoot(t *testing.T) {
    tests := []struct {
        name  string
        input []*enginev1.DepositReceipt
        input []*enginev1.DepositRequest
        limit uint64
        want  [32]byte
    }{
        {
            name:  "empty",
            input: make([]*enginev1.DepositReceipt, 0),
            input: make([]*enginev1.DepositRequest, 0),
            want:  [32]byte{0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30, 0x27, 0x98, 0xef, 0x6e, 0xd3, 0x9, 0x97, 0x9b, 0x43, 0x0, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8, 0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b},
        },
        {
            name: "non-empty",
            input: []*enginev1.DepositReceipt{
            input: []*enginev1.DepositRequest{
                {
                    Pubkey:                bytesutil.PadTo([]byte{0x01, 0x02}, 48),
                    WithdrawalCredentials: bytesutil.PadTo([]byte{0x03, 0x04}, 32),
@@ -311,7 +311,7 @@ func TestDepositReceiptSliceRoot(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := ssz.DepositReceiptSliceRoot(tt.input, tt.limit)
            got, err := ssz.DepositRequestsSliceRoot(tt.input, tt.limit)
            require.NoError(t, err)
            require.DeepSSZEqual(t, tt.want, got)
        })
@@ -321,18 +321,18 @@ func TestDepositReceiptSliceRoot(t *testing.T) {
func TestWithdrawalRequestSliceRoot(t *testing.T) {
    tests := []struct {
        name  string
        input []*enginev1.ExecutionLayerWithdrawalRequest
        input []*enginev1.WithdrawalRequest
        limit uint64
        want  [32]byte
    }{
        {
            name:  "empty",
            input: make([]*enginev1.ExecutionLayerWithdrawalRequest, 0),
            input: make([]*enginev1.WithdrawalRequest, 0),
            want:  [32]byte{0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30, 0x27, 0x98, 0xef, 0x6e, 0xd3, 0x9, 0x97, 0x9b, 0x43, 0x0, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8, 0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b},
        },
        {
            name: "non-empty",
            input: []*enginev1.ExecutionLayerWithdrawalRequest{
            input: []*enginev1.WithdrawalRequest{
                {
                    SourceAddress:   bytesutil.PadTo([]byte{0x01, 0x02}, 20),
                    ValidatorPubkey: bytesutil.PadTo([]byte{0x03, 0x04}, 48),
@@ -346,7 +346,7 @@ func TestWithdrawalRequestSliceRoot(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := ssz.WithdrawalRequestSliceRoot(tt.input, tt.limit)
            got, err := ssz.WithdrawalRequestsSliceRoot(tt.input, tt.limit)
            require.NoError(t, err)
            require.DeepSSZEqual(t, tt.want, got)
        })
@@ -1,4 +1,4 @@
package stateutil
package ssz

import (
    "bytes"
@@ -6,11 +6,10 @@ import (
    "fmt"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/encoding/ssz"
)

// SliceRoot computes the root of a slice of hashable objects.
func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
    max := limit
    if uint64(len(slice)) > max {
        return [32]byte{}, fmt.Errorf("slice exceeds max length %d", max)
@@ -25,7 +24,7 @@ func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
        roots[i] = r
    }

    sliceRoot, err := ssz.BitwiseMerkleize(roots, uint64(len(roots)), limit)
    sliceRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not slice merkleization")
    }
@@ -36,6 +35,5 @@ func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
    // We need to mix in the length of the slice.
    sliceLenRoot := make([]byte, 32)
    copy(sliceLenRoot, sliceLenBuf.Bytes())
    res := ssz.MixInLength(sliceRoot, sliceLenRoot)
    return res, nil
    return MixInLength(sliceRoot, sliceLenRoot), nil
}
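With the generic moved into encoding/ssz and constrained by the package-local Hashable, any type exposing HashTreeRoot() ([32]byte, error) can be merkleized directly. A minimal sketch under that assumption; the toy type and function below are purely illustrative.

package example

import "github.com/prysmaticlabs/prysm/v5/encoding/ssz"

// leaf is a toy Hashable used only for illustration; its "root" is itself.
type leaf [32]byte

func (l leaf) HashTreeRoot() ([32]byte, error) { return l, nil }

// exampleRoot merkleizes two toy leaves against a limit of 4 and mixes in the
// slice length, following the same code path as SliceRoot above. Exceeding
// the limit would instead return the "slice exceeds max length" error.
func exampleRoot() ([32]byte, error) {
    return ssz.SliceRoot([]leaf{{0x01}, {0x02}}, 4)
}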
go.mod
@@ -65,8 +65,8 @@ require (
    github.com/prometheus/client_golang v1.19.1
    github.com/prometheus/client_model v0.6.1
    github.com/prometheus/prom2json v1.3.0
    github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44
    github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7
    github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3
    github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e
    github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c
    github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294
    github.com/rs/cors v1.7.0

go.sum
@@ -966,11 +966,11 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44/go.mod h1:MA5zShstUwCQaE9faGHgCGvEWUbG87p4SAXINhmCkvg=
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3 h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3/go.mod h1:h2OlIZD/M6wFvV3YMZbW16lFgh3Rsye00G44J2cwLyU=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=
github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4=
github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs=
github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20230315201114-09284ba20446 h1:4wctORg/1TkgLgXejv9yOSAm3cDBJxoTzl/RNuZmX28=

@@ -152,7 +152,7 @@
            "external/.*": "Third party code",
            "rules_go_work-.*": "Third party code",
            ".*\\.pb.*.go": "Generated code is ok",
            ".*generated\\.ssz\\.go": "Generated code is ok"
            ".*\\.ssz\\.go": "Generated code is ok"
        }
    },
    "properpermissions": {
@@ -180,7 +180,7 @@
            "external/.*": "Third party code",
            "rules_go_work-.*": "Third party code",
            ".*\\.pb.*.go": "Generated code is ok",
            ".*generated\\.ssz\\.go": "Generated code is ok",
            ".*\\.ssz\\.go": "Generated code is ok",
            ".*_test\\.go": "Tests are ok (for now)",
            "tools/analyzers/ineffassign/ineffassign\\.go": "3rd party code with a massive switch statement"
        }

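The analyzer exclusions now use the broader ".*\\.ssz\\.go" pattern. A standalone, hypothetical check (not part of this change) of what the old and new regexes match; the file path is illustrative only.

package main

import (
    "fmt"
    "regexp"
)

// The broadened nogo exclusion matches any *.ssz.go file, while the old
// pattern only matched names containing "generated.ssz.go".
func main() {
    oldPattern := regexp.MustCompile(`.*generated\.ssz\.go`)
    newPattern := regexp.MustCompile(`.*\.ssz\.go`)
    path := "proto/engine/v1/electra.ssz.go" // illustrative path
    fmt.Println(oldPattern.MatchString(path)) // false
    fmt.Println(newPattern.MatchString(path)) // true
}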
Some files were not shown because too many files have changed in this diff.