Compare commits


1 Commit

Author: terence tsao
SHA1: ab41335404
Message: Capture time when >50% of beacon attestation arrive
Date: 2024-06-04 20:00:58 -07:00
226 changed files with 23058 additions and 23122 deletions

View File

@@ -1,4 +1,3 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
@@ -287,14 +286,3 @@ sh_binary(
srcs = ["prysm.sh"],
visibility = ["//visibility:public"],
)
go_library(
name = "go_default_library",
srcs = ["derp.go"],
importpath = "github.com/prysmaticlabs/prysm/v5",
visibility = ["//visibility:public"],
deps = [
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -48,13 +48,8 @@ func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
if isStatusChanged {
// Update the health status
n.isHealthy = &newStatus
// Send the new status to the health channel, potentially overwriting the existing value
select {
case <-n.healthChan:
n.healthChan <- newStatus
default:
n.healthChan <- newStatus
}
// Send the new status to the health channel
n.healthChan <- newStatus
}
return newStatus
}
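
The hunk above swaps between a non-blocking publish pattern and a plain channel send. As an illustration only, here is a minimal sketch (not Prysm code, assuming a status channel with a buffer of one and a single writer): the select-based variant drains any stale, unread value before sending, so the writer never blocks when no reader is active, whereas the bare send blocks once the buffer is full.

package main

import "fmt"

// publishNonBlocking drops any stale value before sending. With a single
// writer and a buffer of one, the send after the drain always has room.
func publishNonBlocking(ch chan bool, status bool) {
	select {
	case <-ch: // discard the previous, unread status
	default:
	}
	ch <- status
}

// publishBlocking sends directly and blocks if the buffer is already full.
func publishBlocking(ch chan bool, status bool) {
	ch <- status
}

func main() {
	ch := make(chan bool, 1)
	publishNonBlocking(ch, true)
	publishNonBlocking(ch, false) // overwrites the unread value instead of blocking
	fmt.Println(<-ch)             // false

	publishBlocking(ch, true) // safe here only because the buffer is empty again
	fmt.Println(<-ch)         // true
}

The consumer goroutine added in the concurrency test below serves the same purpose for the plain send: something must keep reading HealthUpdates() so senders are not stuck.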

View File

@@ -87,6 +87,12 @@ func TestNodeHealth_Concurrency(t *testing.T) {
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
go func() {
for range n.HealthUpdates() {
// Consume values to avoid blocking on channel send.
}
}()
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status

View File

@@ -3,7 +3,6 @@ package testing
import (
"context"
"reflect"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
"go.uber.org/mock/gomock"
@@ -17,7 +16,6 @@ var (
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
sync.Mutex
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
@@ -27,8 +25,6 @@ type MockHealthClientMockRecorder struct {
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.Lock()
defer m.Unlock()
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
@@ -45,8 +41,6 @@ func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.Lock()
defer mr.mock.Unlock()
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}
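
The diff above toggles an embedded sync.Mutex in the hand-tuned gomock stub so IsHealthy and its recorder can be hit from multiple goroutines. A minimal sketch of that pattern (simplified types, not the generated mock):

package main

import (
	"fmt"
	"sync"
)

// mockHealthClient is a toy stand-in for a generated mock; the embedded
// mutex serializes access so the race detector stays quiet in parallel tests.
type mockHealthClient struct {
	sync.Mutex
	healthy bool
	calls   int
}

func (m *mockHealthClient) IsHealthy() bool {
	m.Lock()
	defer m.Unlock()
	m.calls++ // shared state is only touched while the lock is held
	return m.healthy
}

func main() {
	m := &mockHealthClient{healthy: true}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = m.IsHealthy()
		}()
	}
	wg.Wait()
	fmt.Println(m.calls) // 4
}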

View File

@@ -50,7 +50,7 @@ func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
accepted := false
for _, acceptedType := range acceptedMediaTypes {
if strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(acceptedType)) {
if strings.TrimSpace(contentType) == strings.TrimSpace(acceptedType) {
accepted = true
break
}
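
The two checks in the hunk above behave differently for Content-Type headers that carry parameters. A minimal standalone sketch (not the middleware itself) of how each treats a charset-qualified JSON header:

package main

import (
	"fmt"
	"strings"
)

func main() {
	accepted := "application/json"
	contentType := "application/json; charset=utf-8"

	// Substring match: a media type with extra parameters is still accepted.
	fmt.Println(strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(accepted))) // true

	// Exact match: the charset parameter makes the comparison fail.
	fmt.Println(strings.TrimSpace(contentType) == strings.TrimSpace(accepted)) // false
}

The "Content type contains charset is ok" case in the handler test further down exercises exactly this header.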

View File

@@ -31,7 +31,7 @@ func TestNormalizeQueryValuesHandler(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, http.NoBody)
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
if err != nil {
t.Fatal(err)
}
@@ -96,11 +96,6 @@ func TestContentTypeHandler(t *testing.T) {
expectedStatusCode: http.StatusOK,
isGet: true,
},
{
name: "Content type contains charset is ok",
contentType: "application/json; charset=utf-8",
expectedStatusCode: http.StatusOK,
},
}
for _, tt := range tests {

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"attestation.go",
"churn.go",
"consolidations.go",
"deposits.go",

View File

@@ -1,7 +0,0 @@
package electra
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
var (
ProcessAttestationsNoVerifySignature = altair.ProcessAttestationsNoVerifySignature
)

View File

@@ -77,11 +77,11 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
}
}
// ProcessDepositRequests is a function as part of electra to process execution layer deposits
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
// ProcessDepositReceipts is a function as part of electra to process execution layer deposits
func ProcessDepositReceipts(ctx context.Context, beaconState state.BeaconState, receipts []*enginev1.DepositReceipt) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "electra.ProcessDepositReceipts")
defer span.End()
// TODO: replace with 6110 logic
// return b.ProcessDepositRequests(beaconState, requests)
// return b.ProcessDepositReceipts(beaconState, receipts)
return beaconState, nil
}

View File

@@ -38,7 +38,7 @@ import (
// withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
// blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
// excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
// deposit_requests_root=Root(), # [New in Electra:EIP6110]
// deposit_receipts_root=Root(), # [New in Electra:EIP6110]
// withdrawal_requests_root=Root(), # [New in Electra:EIP7002],
// )
//
@@ -94,7 +94,7 @@ import (
// # Deep history valid from Capella onwards
// historical_summaries=pre.historical_summaries,
// # [New in Electra:EIP6110]
// deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX,
// deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,
// # [New in Electra:EIP7251]
// deposit_balance_to_consume=0,
// exit_balance_to_consume=0,
@@ -261,14 +261,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
WithdrawalsRoot: wdRoot,
ExcessBlobGas: excessBlobGas,
BlobGasUsed: blobGasUsed,
DepositRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
DepositReceiptsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
DepositReceiptsStartIndex: params.BeaconConfig().UnsetDepositReceiptsStartIndex,
DepositBalanceToConsume: 0,
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
EarliestExitEpoch: earliestExitEpoch,

View File

@@ -128,7 +128,7 @@ func TestUpgradeToElectra(t *testing.T) {
BlockHash: prevHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
DepositRequestsRoot: bytesutil.Bytes32(0),
DepositReceiptsRoot: bytesutil.Bytes32(0),
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
}
require.DeepEqual(t, wanted, protoHeader)
@@ -145,9 +145,9 @@ func TestUpgradeToElectra(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, len(summaries))
startIndex, err := mSt.DepositRequestsStartIndex()
startIndex, err := mSt.DepositReceiptsStartIndex()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().UnsetDepositRequestsStartIndex, startIndex)
require.Equal(t, params.BeaconConfig().UnsetDepositReceiptsStartIndex, startIndex)
balance, err := mSt.DepositBalanceToConsume()
require.NoError(t, err)

View File

@@ -19,15 +19,15 @@ import (
"go.opencensus.io/trace"
)
// ProcessWithdrawalRequests processes the validator withdrawals from the provided execution payload
// ProcessExecutionLayerWithdrawalRequests processes the validator withdrawals from the provided execution payload
// into the beacon state triggered by the execution layer.
//
// Spec pseudocode definition:
//
// def process_withdrawal_request(
// def process_execution_layer_withdrawal_request(
//
// state: BeaconState,
// withdrawal_request: WithdrawalRequest
// execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest
//
// ) -> None:
// amount = execution_layer_withdrawal_request.amount
@@ -86,8 +86,8 @@ import (
// amount=to_withdraw,
// withdrawable_epoch=withdrawable_epoch,
// ))
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
func ProcessExecutionLayerWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.ExecutionLayerWithdrawalRequest) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "electra.ProcessExecutionLayerWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
for _, wr := range wrs {

View File

@@ -19,7 +19,7 @@ import (
"github.com/sirupsen/logrus/hooks/test"
)
func TestProcessWithdrawRequests(t *testing.T) {
func TestProcessExecutionLayerWithdrawRequests(t *testing.T) {
logHook := test.NewGlobal()
source, err := hexutil.Decode("0xb20a608c624Ca5003905aA834De7156C68b2E1d0")
require.NoError(t, err)
@@ -30,7 +30,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
require.NoError(t, err)
type args struct {
st state.BeaconState
wrs []*enginev1.WithdrawalRequest
wrs []*enginev1.ExecutionLayerWithdrawalRequest
}
tests := []struct {
name string
@@ -56,7 +56,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
return preSt
}(),
wrs: []*enginev1.WithdrawalRequest{
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
{
SourceAddress: source,
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
@@ -121,7 +121,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
}))
return preSt
}(),
wrs: []*enginev1.WithdrawalRequest{
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
{
SourceAddress: source,
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
@@ -190,7 +190,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
return preSt
}(),
wrs: []*enginev1.WithdrawalRequest{
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
{
SourceAddress: source,
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
@@ -227,7 +227,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
return preSt
}(),
wrs: []*enginev1.WithdrawalRequest{
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
{
SourceAddress: source,
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
@@ -266,7 +266,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
}))
return preSt
}(),
wrs: []*enginev1.WithdrawalRequest{
wrs: []*enginev1.ExecutionLayerWithdrawalRequest{
{
SourceAddress: source,
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
@@ -284,9 +284,9 @@ func TestProcessWithdrawRequests(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := electra.ProcessWithdrawalRequests(context.Background(), tt.args.st, tt.args.wrs)
got, err := electra.ProcessExecutionLayerWithdrawalRequests(context.Background(), tt.args.st, tt.args.wrs)
if (err != nil) != tt.wantErr {
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
t.Errorf("ProcessExecutionLayerWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
return
}
tt.wantFn(t, got)

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -92,14 +91,6 @@ func IsAggregated(attestation ethpb.Att) bool {
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetForAttestation(activeValCount uint64, att ethpb.Att) uint64 {
if att.Version() >= version.Electra {
committeeIndex := 0
committeeIndices := att.CommitteeBitsVal().BitIndices()
if len(committeeIndices) > 0 {
committeeIndex = committeeIndices[0]
}
return ComputeSubnetFromCommitteeAndSlot(activeValCount, primitives.CommitteeIndex(committeeIndex), att.GetData().Slot)
}
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
}
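
The comment above quotes the spec formula return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT). As a worked illustration only, a minimal sketch follows, with assumed mainnet-style constants, one committee per slot, and the committee index taken from the first set bit of an Electra committee bitfield (mirroring the branch in the hunk); the numbers from the test in the next file come out to subnet 6.

package main

import "fmt"

const (
	slotsPerEpoch          = 32 // assumed mainnet value
	attestationSubnetCount = 64 // assumed mainnet value
)

// computeSubnet follows the quoted spec formula, with
// committees_since_epoch_start = committees_per_slot * (slot % SLOTS_PER_EPOCH).
func computeSubnet(committeesPerSlot, slot, committeeIndex uint64) uint64 {
	committeesSinceEpochStart := committeesPerSlot * (slot % slotsPerEpoch)
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}

// firstSetBit stands in for CommitteeBitsVal().BitIndices()[0] on a toy bitfield.
func firstSetBit(bits []bool) uint64 {
	for i, set := range bits {
		if set {
			return uint64(i)
		}
	}
	return 0
}

func main() {
	committeeBits := make([]bool, 8)
	committeeBits[4] = true // committee 4, as in the Electra test case

	// Slot 34 is two slots into its epoch; with one committee per slot,
	// (2 + 4) % 64 = 6, matching the subnet asserted in the tests.
	fmt.Println(computeSubnet(1, 34, firstSetBit(committeeBits))) // 6
}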

View File

@@ -73,37 +73,21 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(34))
att := &ethpb.Attestation{
AggregationBits: []byte{'A'},
Data: &ethpb.AttestationData{
Slot: 34,
CommitteeIndex: 4,
BeaconBlockRoot: []byte{'C'},
Source: nil,
Target: nil,
},
Signature: []byte{'B'},
}
valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(att.Data.Slot))
require.NoError(t, err)
t.Run("Phase 0", func(t *testing.T) {
att := &ethpb.Attestation{
AggregationBits: []byte{'A'},
Data: &ethpb.AttestationData{
Slot: 34,
CommitteeIndex: 4,
BeaconBlockRoot: []byte{'C'},
},
Signature: []byte{'B'},
}
sub := helpers.ComputeSubnetForAttestation(valCount, att)
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
})
t.Run("Electra", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(4, true)
att := &ethpb.AttestationElectra{
AggregationBits: []byte{'A'},
CommitteeBits: cb,
Data: &ethpb.AttestationData{
Slot: 34,
BeaconBlockRoot: []byte{'C'},
},
Signature: []byte{'B'},
}
sub := helpers.ComputeSubnetForAttestation(valCount, att)
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
})
sub := helpers.ComputeSubnetForAttestation(valCount, att)
assert.Equal(t, uint64(6), sub, "Did not get correct subnet for attestation")
}
func Test_ValidateAttestationTime(t *testing.T) {

View File

@@ -227,7 +227,7 @@ func ProcessBlockNoVerifyAnySig(
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
// # [Modified in Electra:EIP6110]
// # Disable former deposit mechanism once all prior deposits are processed
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
// if state.eth1_deposit_index < eth1_deposit_index_limit:
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
// else:
@@ -245,7 +245,7 @@ func ProcessBlockNoVerifyAnySig(
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
// # [New in Electra:EIP7002:EIP7251]
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
// for_ops(body.execution_payload.deposit_requests, process_deposit_requests) # [New in Electra:EIP6110]
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
func ProcessOperationsNoVerifyAttsSigs(
ctx context.Context,
@@ -401,7 +401,7 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
// # [Modified in Electra:EIP6110]
// # Disable former deposit mechanism once all prior deposits are processed
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
// if state.eth1_deposit_index < eth1_deposit_index_limit:
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
// else:
@@ -419,7 +419,7 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
// # [New in Electra:EIP7002:EIP7251]
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
// for_ops(body.execution_payload.deposit_requests, process_deposit_requests) # [New in Electra:EIP6110]
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
func electraOperations(
ctx context.Context,
@@ -445,12 +445,12 @@ func electraOperations(
if !ok {
return nil, errors.New("could not cast execution data to electra execution data")
}
st, err = electra.ProcessWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
st, err = electra.ProcessExecutionLayerWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
if err != nil {
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
}
st, err = electra.ProcessDepositRequests(ctx, st, exe.DepositRequests()) // TODO: EIP-6110 deposit changes.
st, err = electra.ProcessDepositReceipts(ctx, st, exe.DepositReceipts()) // TODO: EIP-6110 deposit changes.
if err != nil {
return nil, errors.Wrap(err, "could not process deposit receipts")
}

View File

@@ -149,7 +149,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
WithdrawalsRoot: make([]byte, 32),
DepositRequestsRoot: make([]byte, 32),
DepositReceiptsRoot: make([]byte, 32),
WithdrawalRequestsRoot: make([]byte, 32),
})
require.NoError(t, err)

View File

@@ -631,7 +631,7 @@ func fullPayloadFromPayloadBody(
Withdrawals: body.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
DepositRequests: dr,
DepositReceipts: dr,
WithdrawalRequests: wr,
}) // We can't get the block value and don't care about the block value for this instance
default:
@@ -780,8 +780,8 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) {
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
Withdrawals: make([]*pb.Withdrawal, 0),
WithdrawalRequests: make([]*pb.WithdrawalRequest, 0),
DepositRequests: make([]*pb.DepositRequest, 0),
WithdrawalRequests: make([]*pb.ExecutionLayerWithdrawalRequest, 0),
DepositReceipts: make([]*pb.DepositReceipt, 0),
}, nil
default:
return nil, errors.Wrapf(ErrUnsupportedVersion, "version=%s", version.String(v))

View File

@@ -1559,7 +1559,7 @@ func fixturesStruct() *payloadFixtures {
Withdrawals: []*pb.Withdrawal{},
BlobGasUsed: 2,
ExcessBlobGas: 3,
DepositRequests: dr,
DepositReceipts: dr,
WithdrawalRequests: wr,
}
hexUint := hexutil.Uint64(1)

View File

@@ -66,7 +66,7 @@ func payloadToBody(t *testing.T, ed interfaces.ExecutionData) *pb.ExecutionPaylo
}
eed, isElectra := ed.(interfaces.ExecutionDataElectra)
if isElectra {
body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositRequests())
body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositReceipts())
body.WithdrawalRequests = pb.ProtoWithdrawalRequestsToJson(eed.WithdrawalRequests())
}
return body

View File

@@ -69,6 +69,7 @@ go_library(
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -196,3 +197,7 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
return params.SetActive(c)
}
func configureFastSSZHashingAlgorithm() {
fastssz.EnableVectorizedHTR = true
}

View File

@@ -120,6 +120,7 @@ type BeaconNode struct {
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
blobRetentionEpochs primitives.Epoch
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
}
@@ -277,6 +278,8 @@ func configureBeacon(cliCtx *cli.Context) error {
return errors.Wrap(err, "could not configure execution setting")
}
configureFastSSZHashingAlgorithm()
return nil
}
@@ -962,8 +965,9 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
cert := b.cliCtx.String(flags.CertFlag.Name)
key := b.cliCtx.String(flags.KeyFlag.Name)
mockEth1DataVotes := b.cliCtx.Bool(flags.InteropMockEth1DataVotesFlag.Name)
maxMsgSize := b.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
enableDebugRPCEndpoints := !b.cliCtx.Bool(flags.DisableDebugRPCEndpoints.Name)
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
p2pService := b.fetchP2P()
rpcService := rpc.NewService(b.ctx, &rpc.Config{
@@ -1052,10 +1056,11 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
rpcHost := b.cliCtx.String(flags.RPCHost.Name)
rpcPort := b.cliCtx.Int(flags.RPCPort.Name)
enableDebugRPCEndpoints := !b.cliCtx.Bool(flags.DisableDebugRPCEndpoints.Name)
selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
selfCert := b.cliCtx.String(flags.CertFlag.Name)
maxCallSize := b.cliCtx.Uint64(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
httpModules := b.cliCtx.String(flags.HTTPModules.Name)

View File

@@ -5,6 +5,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// Option for beacon node configuration.
@@ -50,3 +51,11 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
return nil
}
}
// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
return func(bn *BeaconNode) error {
bn.blobRetentionEpochs = e
return nil
}
}

View File

@@ -336,7 +336,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
}),
}
var err error
// test with public filter
//test with public filter
cidr := "public"
ip := "212.67.10.122"
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
@@ -348,7 +348,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
t.Errorf("Expected multiaddress with ip %s to not be rejected since we allow public addresses", ip)
}
ip = "192.168.1.0" // this is private and should fail
ip = "192.168.1.0" //this is private and should fail
multiAddress, err = ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
require.NoError(t, err)
valid = s.InterceptAddrDial("", multiAddress)
@@ -356,7 +356,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) {
t.Errorf("Expected multiaddress with ip %s to be rejected since we are only allowing public addresses", ip)
}
// test with public allow filter, with a public address added to the deny list
//test with public allow filter, with a public address added to the deny list
invalidPublicIp := "212.67.10.122"
validPublicIp := "91.65.69.69"
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: "public", DenyListCIDR: []string{"212.67.89.112/16"}})
@@ -384,7 +384,7 @@ func TestService_InterceptAddrDial_Private(t *testing.T) {
}),
}
var err error
// test with private filter
//test with private filter
cidr := "private"
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
require.NoError(t, err)
@@ -413,7 +413,7 @@ func TestService_InterceptAddrDial_AllowPrivate(t *testing.T) {
}),
}
var err error
// test with private filter
//test with private filter
cidr := "private"
s.addrFilter, err = configureFilter(&Config{AllowListCIDR: cidr})
require.NoError(t, err)
@@ -442,7 +442,7 @@ func TestService_InterceptAddrDial_DenyPublic(t *testing.T) {
}),
}
var err error
// test with private filter
//test with private filter
cidr := "public"
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr}})
require.NoError(t, err)
@@ -471,7 +471,7 @@ func TestService_InterceptAddrDial_AllowConflict(t *testing.T) {
}),
}
var err error
// test with private filter
//test with private filter
cidr := "public"
s.addrFilter, err = configureFilter(&Config{DenyListCIDR: []string{cidr, "192.168.0.0/16"}})
require.NoError(t, err)

View File

@@ -51,7 +51,7 @@ func (quicProtocol) ENRKey() string { return "quic" }
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
// return early if discv5 isn't running
// return early if discv5 isnt running
if s.dv5Listener == nil || !s.isInitialized() {
return
}

View File

@@ -27,8 +27,7 @@ var gossipTopicMappings = map[string]proto.Message{
// GossipTopicMappings is a function to return the assigned data type
// versioned by epoch.
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
switch topic {
case BlockSubnetTopicFormat:
if topic == BlockSubnetTopicFormat {
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.SignedBeaconBlockElectra{}
}
@@ -44,25 +43,8 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
if epoch >= params.BeaconConfig().AltairForkEpoch {
return &ethpb.SignedBeaconBlockAltair{}
}
return gossipTopicMappings[topic]
case AttestationSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.AttestationElectra{}
}
return gossipTopicMappings[topic]
case AttesterSlashingSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.AttesterSlashingElectra{}
}
return gossipTopicMappings[topic]
case AggregateAndProofSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.SignedAggregateAttestationAndProofElectra{}
}
return gossipTopicMappings[topic]
default:
return gossipTopicMappings[topic]
}
return gossipTopicMappings[topic]
}
// AllTopics returns all topics stored in our
@@ -93,7 +75,4 @@ func init() {
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
// Specially handle Electra objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttestationElectra{})] = AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
}

View File

@@ -22,20 +22,20 @@ func TestMappingHasNoDuplicates(t *testing.T) {
}
}
func TestGossipTopicMappings_CorrectType(t *testing.T) {
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig().Copy()
altairForkEpoch := primitives.Epoch(100)
bellatrixForkEpoch := primitives.Epoch(200)
capellaForkEpoch := primitives.Epoch(300)
denebForkEpoch := primitives.Epoch(400)
electraForkEpoch := primitives.Epoch(500)
BellatrixForkEpoch := primitives.Epoch(200)
CapellaForkEpoch := primitives.Epoch(300)
DenebForkEpoch := primitives.Epoch(400)
ElectraForkEpoch := primitives.Epoch(500)
bCfg.AltairForkEpoch = altairForkEpoch
bCfg.BellatrixForkEpoch = bellatrixForkEpoch
bCfg.CapellaForkEpoch = capellaForkEpoch
bCfg.DenebForkEpoch = denebForkEpoch
bCfg.ElectraForkEpoch = electraForkEpoch
bCfg.BellatrixForkEpoch = BellatrixForkEpoch
bCfg.CapellaForkEpoch = CapellaForkEpoch
bCfg.DenebForkEpoch = DenebForkEpoch
bCfg.ElectraForkEpoch = ElectraForkEpoch
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = primitives.Epoch(100)
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = primitives.Epoch(200)
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = primitives.Epoch(300)
@@ -47,83 +47,29 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, 0)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, 0)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, 0)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Altair Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Bellatrix Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, BellatrixForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockBellatrix)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Capella Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, CapellaForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockCapella)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Deneb Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, DenebForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockDeneb)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.Attestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashing)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
assert.Equal(t, true, ok)
// Electra Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, ElectraForkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.AttestationElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
assert.Equal(t, true, ok)
}

View File

@@ -81,7 +81,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
bootNodeENR := bootListener.Self().String()
// Create 3 nodes, each subscribed to a different subnet.
// Each node is connected to the bootstrap node.
// Each node is connected to the boostrap node.
services := make([]*Service, 0, 3)
for i := 1; i <= 3; i++ {

View File

@@ -43,7 +43,6 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

View File

@@ -26,13 +26,7 @@ var (
BlockMap map[[4]byte]func() (interfaces.ReadOnlySignedBeaconBlock, error)
// MetaDataMap maps the fork-version to the underlying data type for that
// particular fork period.
MetaDataMap map[[4]byte]func() (metadata.Metadata, error)
// AttestationMap maps the fork-version to the underlying data type for that
// particular fork period.
AttestationMap map[[4]byte]func() (ethpb.Att, error)
// AggregateAttestationMap maps the fork-version to the underlying data type for that
// particular fork period.
AggregateAttestationMap map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error)
MetaDataMap map[[4]byte]func() metadata.Metadata
)
// InitializeDataMaps initializes all the relevant object maps. This function is called to
@@ -73,68 +67,24 @@ func InitializeDataMaps() {
}
// Reset our metadata map.
MetaDataMap = map[[4]byte]func() (metadata.Metadata, error){
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}), nil
MetaDataMap = map[[4]byte]func() metadata.Metadata{
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{})
},
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (metadata.Metadata, error) {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}), nil
},
}
// Reset our attestation map.
AttestationMap = map[[4]byte]func() (ethpb.Att, error){
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.Att, error) {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
return &ethpb.AttestationElectra{}, nil
},
}
// Reset our aggregate attestation map.
AggregateAttestationMap = map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error){
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProof{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.SignedAggregateAttAndProof, error) {
return &ethpb.SignedAggregateAttestationAndProofElectra{}, nil
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
}
}

View File

@@ -5,9 +5,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestInitializeDataMaps(t *testing.T) {
@@ -46,36 +44,8 @@ func TestInitializeDataMaps(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.action()
bFunc, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
_, ok := BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
assert.Equal(t, tt.exists, ok)
if tt.exists {
b, err := bFunc()
require.NoError(t, err)
generic, err := b.PbGenericBlock()
require.NoError(t, err)
assert.NotNil(t, generic.GetPhase0())
}
mdFunc, ok := MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if tt.exists {
md, err := mdFunc()
require.NoError(t, err)
assert.NotNil(t, md.MetadataObjV0())
}
assert.Equal(t, tt.exists, ok)
attFunc, ok := AttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if tt.exists {
att, err := attFunc()
require.NoError(t, err)
assert.Equal(t, version.Phase0, att.Version())
}
assert.Equal(t, tt.exists, ok)
aggFunc, ok := AggregateAttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
assert.Equal(t, tt.exists, ok)
if tt.exists {
agg, err := aggFunc()
require.NoError(t, err)
assert.Equal(t, version.Phase0, agg.Version())
}
})
}
}

View File

@@ -70,7 +70,7 @@ func (s *Service) endpoints(
endpoints = append(endpoints, s.eventsEndpoints()...)
endpoints = append(endpoints, s.prysmBeaconEndpoints(ch, stater)...)
endpoints = append(endpoints, s.prysmNodeEndpoints()...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService)...)
endpoints = append(endpoints, s.prysmValidatorEndpoints(coreService, stater)...)
if enableDebug {
endpoints = append(endpoints, s.debugEndpoints(stater)...)
}
@@ -143,7 +143,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
}
}
func (*Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
server := &blob.Server{
Blocker: blocker,
}
@@ -777,7 +777,7 @@ func (s *Service) beaconEndpoints(
}
}
func (*Service) configEndpoints() []endpoint {
func (s *Service) configEndpoints() []endpoint {
const namespace = "config"
return []endpoint{
{
@@ -1045,7 +1045,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
}
}
func (*Service) prysmValidatorEndpoints(coreService *core.Service) []endpoint {
func (s *Service) prysmValidatorEndpoints(coreService *core.Service, stater lookup.Stater) []endpoint {
server := &validatorprysm.Server{
CoreService: coreService,
}

View File

@@ -149,8 +149,7 @@ func TestGetSpec(t *testing.T) {
config.MaxAttestationsElectra = 89
config.MaxWithdrawalRequestsPerPayload = 90
config.MaxCellsInExtendedMatrix = 91
config.UnsetDepositRequestsStartIndex = 92
config.MaxDepositRequestsPerPayload = 93
config.UnsetDepositReceiptsStartIndex = 92
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -193,7 +192,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 155, len(data))
assert.Equal(t, 154, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -526,10 +525,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "90", v)
case "MAX_CELLS_IN_EXTENDED_MATRIX":
assert.Equal(t, "91", v)
case "UNSET_DEPOSIT_REQUESTS_START_INDEX":
case "UNSET_DEPOSIT_RECEIPTS_START_INDEX":
assert.Equal(t, "92", v)
case "MAX_DEPOSIT_REQUESTS_PER_PAYLOAD":
assert.Equal(t, "93", v)
default:
t.Errorf("Incorrect key: %s", k)
}

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"handlers.go",
"handlers_block.go",
"log.go",
"server.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/validator",

View File

@@ -31,7 +31,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/network/httputil"
ethpbalpha "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -592,7 +592,7 @@ func (s *Server) PrepareBeaconProposer(w http.ResponseWriter, r *http.Request) {
if len(validatorIndices) == 0 {
return
}
log.WithFields(logrus.Fields{
log.WithFields(log.Fields{
"validatorIndices": validatorIndices,
}).Info("Updated fee recipient addresses")
}

View File

@@ -220,9 +220,8 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
consensusBlockValue, httpError := getConsensusBlockValue(ctx, s.BlockRewardFetcher, v1alpha1resp.Block)
if httpError != nil {
log.WithError(httpError).Debug("Failed to get consensus block value")
// Having the consensus block value is not critical to block production
consensusBlockValue = ""
httputil.WriteError(w, httpError)
return
}
w.Header().Set(api.ExecutionPayloadBlindedHeader, fmt.Sprintf("%v", v1alpha1resp.IsBlinded))
@@ -298,7 +297,7 @@ func getConsensusBlockValue(ctx context.Context, blockRewardsFetcher rewards.Blo
}
}
if bb.Version() == version.Phase0 {
// Getting the block value for Phase 0 is very hard, so we ignore it
// ignore for phase 0
return "", nil
}
// Get consensus payload value which is the same as the total from the block rewards api.

View File

@@ -1,5 +0,0 @@
package validator
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "beacon-api")

View File

@@ -630,14 +630,14 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
require.NoError(t, err)
timeStamp, err := slots.ToTime(beaconState.GenesisTime(), electraSlot+1)
require.NoError(t, err)
dr := []*enginev1.DepositRequest{{
dr := []*enginev1.DepositReceipt{{
Pubkey: bytesutil.PadTo(privKeys[0].PublicKey().Marshal(), 48),
WithdrawalCredentials: bytesutil.PadTo([]byte("wc"), 32),
Amount: 123,
Signature: bytesutil.PadTo([]byte("sig"), 96),
Index: 456,
}}
wr := []*enginev1.WithdrawalRequest{
wr := []*enginev1.ExecutionLayerWithdrawalRequest{
{
SourceAddress: bytesutil.PadTo([]byte("sa"), 20),
ValidatorPubkey: bytesutil.PadTo(privKeys[1].PublicKey().Marshal(), 48),
@@ -654,7 +654,7 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
PrevRandao: random,
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
DepositRequests: dr,
DepositReceipts: dr,
WithdrawalRequests: wr,
}
@@ -680,7 +680,7 @@ func TestServer_GetBeaconBlock_Electra(t *testing.T) {
got, err := proposerServer.GetBeaconBlock(ctx, req)
require.NoError(t, err)
p := got.GetElectra().Block.Body.ExecutionPayload
require.DeepEqual(t, dr, p.DepositRequests)
require.DeepEqual(t, dr, p.DepositReceipts)
require.DeepEqual(t, wr, p.WithdrawalRequests)
}

View File

@@ -4,6 +4,7 @@ package rpc
import (
"context"
"fmt"
"net"
"sync"
@@ -112,8 +113,8 @@ type Config struct {
ExecutionChainInfoFetcher execution.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
GenesisFetcher blockchain.GenesisFetcher
MockEth1Votes bool
EnableDebugRPCEndpoints bool
MockEth1Votes bool
AttestationsPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingsPool slashings.PoolManager
@@ -153,7 +154,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
connectedRPCClients: make(map[net.Addr]bool),
}
address := net.JoinHostPort(s.cfg.Host, s.cfg.Port)
address := fmt.Sprintf("%s:%s", s.cfg.Host, s.cfg.Port)
lis, err := net.Listen("tcp", address)
if err != nil {
log.WithError(err).Errorf("Could not listen to port in Start() %s", address)
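
One behavioral note on the address construction shown in this hunk: net.JoinHostPort brackets IPv6 literals, while plain %s:%s formatting does not, which matters when the RPC host is configured as an IPv6 address. A minimal standalone sketch (not the service code):

package main

import (
	"fmt"
	"net"
)

func main() {
	host, port := "::1", "4000"

	fmt.Println(net.JoinHostPort(host, port)) // "[::1]:4000", a form net.Listen accepts
	fmt.Printf("%s:%s\n", host, port)         // "::1:4000", which SplitHostPort rejects ("too many colons")
}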
@@ -317,6 +318,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
if s.cfg.EnableDebugRPCEndpoints {
log.Info("Enabled debug gRPC endpoints")
debugServer := &debugv1alpha1.Server{
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BeaconDB: s.cfg.BeaconDB,

View File

@@ -141,7 +141,7 @@ func (s *Service) processAttestations(
start := time.Now()
// Check for attestations slashings (double, surrounding, surrounded votes).
// Check for attestatinos slashings (double, sourrounding, surrounded votes).
slashings, err := s.checkSlashableAttestations(ctx, currentEpoch, validAttestations)
if err != nil {
log.WithError(err).Error(couldNotCheckSlashableAtt)

View File

@@ -219,7 +219,7 @@ type ReadOnlySyncCommittee interface {
type ReadOnlyDeposits interface {
DepositBalanceToConsume() (primitives.Gwei, error)
DepositRequestsStartIndex() (uint64, error)
DepositReceiptsStartIndex() (uint64, error)
PendingBalanceDeposits() ([]*ethpb.PendingBalanceDeposit, error)
}
@@ -327,7 +327,7 @@ type WriteOnlyConsolidations interface {
type WriteOnlyDeposits interface {
AppendPendingBalanceDeposit(index primitives.ValidatorIndex, amount uint64) error
SetDepositRequestsStartIndex(index uint64) error
SetDepositReceiptsStartIndex(index uint64) error
SetPendingBalanceDeposits(val []*ethpb.PendingBalanceDeposit) error
SetDepositBalanceToConsume(primitives.Gwei) error
}

View File

@@ -12,7 +12,7 @@ go_library(
"getters_block.go",
"getters_checkpoint.go",
"getters_consolidation.go",
"getters_deposit_requests.go",
"getters_deposit_receipts.go",
"getters_eth1.go",
"getters_exit.go",
"getters_misc.go",
@@ -33,7 +33,7 @@ go_library(
"setters_checkpoint.go",
"setters_churn.go",
"setters_consolidation.go",
"setters_deposit_requests.go",
"setters_deposit_receipts.go",
"setters_eth1.go",
"setters_misc.go",
"setters_participation.go",
@@ -94,7 +94,7 @@ go_test(
"getters_block_test.go",
"getters_checkpoint_test.go",
"getters_consolidation_test.go",
"getters_deposit_requests_test.go",
"getters_deposit_receipts_test.go",
"getters_exit_test.go",
"getters_participation_test.go",
"getters_test.go",
@@ -109,7 +109,7 @@ go_test(
"setters_balance_deposits_test.go",
"setters_churn_test.go",
"setters_consolidation_test.go",
"setters_deposit_requests_test.go",
"setters_deposit_receipts_test.go",
"setters_eth1_test.go",
"setters_misc_test.go",
"setters_participation_test.go",

View File

@@ -62,7 +62,7 @@ type BeaconState struct {
nextWithdrawalValidatorIndex primitives.ValidatorIndex
// Electra fields
depositRequestsStartIndex uint64
depositReceiptsStartIndex uint64
depositBalanceToConsume primitives.Gwei
exitBalanceToConsume primitives.Gwei
earliestExitEpoch primitives.Epoch
@@ -119,7 +119,7 @@ type beaconStateMarshalable struct {
LatestExecutionPayloadHeaderElectra *enginev1.ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header_electra" yaml:"latest_execution_payload_header_electra"`
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
DepositRequestsStartIndex uint64 `json:"deposit_requests_start_index" yaml:"deposit_requests_start_index"`
DepositReceiptsStartIndex uint64 `json:"deposit_receipts_start_index" yaml:"deposit_receipts_start_index"`
DepositBalanceToConsume primitives.Gwei `json:"deposit_balance_to_consume" yaml:"deposit_balance_to_consume"`
ExitBalanceToConsume primitives.Gwei `json:"exit_balance_to_consume" yaml:"exit_balance_to_consume"`
EarliestExitEpoch primitives.Epoch `json:"earliest_exit_epoch" yaml:"earliest_exit_epoch"`
@@ -189,7 +189,7 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
LatestExecutionPayloadHeaderElectra: b.latestExecutionPayloadHeaderElectra,
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
DepositRequestsStartIndex: b.depositRequestsStartIndex,
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
DepositBalanceToConsume: b.depositBalanceToConsume,
ExitBalanceToConsume: b.exitBalanceToConsume,
EarliestExitEpoch: b.earliestExitEpoch,

View File

@@ -62,7 +62,7 @@ type BeaconState struct {
nextWithdrawalValidatorIndex primitives.ValidatorIndex
// Electra fields
depositRequestsStartIndex uint64
depositReceiptsStartIndex uint64
depositBalanceToConsume primitives.Gwei
exitBalanceToConsume primitives.Gwei
earliestExitEpoch primitives.Epoch
@@ -119,7 +119,7 @@ type beaconStateMarshalable struct {
LatestExecutionPayloadHeaderElectra *enginev1.ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header_electra" yaml:"latest_execution_payload_header_electra"`
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
DepositRequestsStartIndex uint64 `json:"deposit_requests_start_index" yaml:"deposit_requests_start_index"`
DepositReceiptsStartIndex uint64 `json:"deposit_receipts_start_index" yaml:"deposit_receipts_start_index"`
DepositBalanceToConsume primitives.Gwei `json:"deposit_balance_to_consume" yaml:"deposit_balance_to_consume"`
ExitBalanceToConsume primitives.Gwei `json:"exit_balance_to_consume" yaml:"exit_balance_to_consume"`
EarliestExitEpoch primitives.Epoch `json:"earliest_exit_epoch" yaml:"earliest_exit_epoch"`
@@ -189,7 +189,7 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
LatestExecutionPayloadHeaderElectra: b.latestExecutionPayloadHeaderElectra,
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
DepositRequestsStartIndex: b.depositRequestsStartIndex,
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
DepositBalanceToConsume: b.depositBalanceToConsume,
ExitBalanceToConsume: b.exitBalanceToConsume,
EarliestExitEpoch: b.earliestExitEpoch,

View File

@@ -0,0 +1,16 @@
package state_native
import (
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// DepositReceiptsStartIndex is used for returning the deposit receipts start index which is used for eip6110
func (b *BeaconState) DepositReceiptsStartIndex() (uint64, error) {
if b.version < version.Electra {
return 0, errNotSupported("DepositReceiptsStartIndex", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
return b.depositReceiptsStartIndex, nil
}

View File

@@ -9,17 +9,17 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestDepositRequestsStartIndex(t *testing.T) {
func TestDepositReceiptsStartIndex(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
dState, _ := util.DeterministicGenesisState(t, 1)
_, err := dState.DepositRequestsStartIndex()
_, err := dState.DepositReceiptsStartIndex()
require.ErrorContains(t, "is not supported", err)
})
t.Run("electra returns expected value", func(t *testing.T) {
want := uint64(2)
dState, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{DepositRequestsStartIndex: want})
dState, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{DepositReceiptsStartIndex: want})
require.NoError(t, err)
got, err := dState.DepositRequestsStartIndex()
got, err := dState.DepositReceiptsStartIndex()
require.NoError(t, err)
require.Equal(t, want, got)
})

View File

@@ -1,16 +0,0 @@
package state_native
import (
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// DepositRequestsStartIndex is used for returning the deposit receipts start index which is used for eip6110
func (b *BeaconState) DepositRequestsStartIndex() (uint64, error) {
if b.version < version.Electra {
return 0, errNotSupported("DepositRequestsStartIndex", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
return b.depositRequestsStartIndex, nil
}

View File

@@ -202,7 +202,7 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
HistoricalSummaries: b.historicalSummaries,
DepositRequestsStartIndex: b.depositRequestsStartIndex,
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
DepositBalanceToConsume: b.depositBalanceToConsume,
ExitBalanceToConsume: b.exitBalanceToConsume,
EarliestExitEpoch: b.earliestExitEpoch,
@@ -408,7 +408,7 @@ func (b *BeaconState) ToProto() interface{} {
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
HistoricalSummaries: b.historicalSummariesVal(),
DepositRequestsStartIndex: b.depositRequestsStartIndex,
DepositReceiptsStartIndex: b.depositReceiptsStartIndex,
DepositBalanceToConsume: b.depositBalanceToConsume,
ExitBalanceToConsume: b.exitBalanceToConsume,
EarliestExitEpoch: b.earliestExitEpoch,

View File

@@ -281,9 +281,9 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
}
if state.version >= version.Electra {
// DepositRequestsStartIndex root.
drsiRoot := ssz.Uint64Root(state.depositRequestsStartIndex)
fieldRoots[types.DepositRequestsStartIndex.RealPosition()] = drsiRoot[:]
// DepositReceiptsStartIndex root.
drsiRoot := ssz.Uint64Root(state.depositReceiptsStartIndex)
fieldRoots[types.DepositReceiptsStartIndex.RealPosition()] = drsiRoot[:]
// DepositBalanceToConsume root.
dbtcRoot := ssz.Uint64Root(uint64(state.depositBalanceToConsume))

View File

@@ -0,0 +1,21 @@
package state_native
import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// SetDepositReceiptsStartIndex for the beacon state. Updates the DepositReceiptsStartIndex
func (b *BeaconState) SetDepositReceiptsStartIndex(index uint64) error {
if b.version < version.Electra {
return errNotSupported("SetDepositReceiptsStartIndex", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
b.depositReceiptsStartIndex = index
b.markFieldAsDirty(types.DepositReceiptsStartIndex)
b.rebuildTrie[types.DepositReceiptsStartIndex] = true
return nil
}

View File

@@ -9,18 +9,18 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestSetDepositRequestsStartIndex(t *testing.T) {
func TestSetDepositReceiptsStartIndex(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
dState, _ := util.DeterministicGenesisState(t, 1)
require.ErrorContains(t, "is not supported", dState.SetDepositRequestsStartIndex(1))
require.ErrorContains(t, "is not supported", dState.SetDepositReceiptsStartIndex(1))
})
t.Run("electra sets expected value", func(t *testing.T) {
old := uint64(2)
dState, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{DepositRequestsStartIndex: old})
dState, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{DepositReceiptsStartIndex: old})
require.NoError(t, err)
want := uint64(3)
require.NoError(t, dState.SetDepositRequestsStartIndex(want))
got, err := dState.DepositRequestsStartIndex()
require.NoError(t, dState.SetDepositReceiptsStartIndex(want))
got, err := dState.DepositReceiptsStartIndex()
require.NoError(t, err)
require.Equal(t, want, got)
})

View File

@@ -1,21 +0,0 @@
package state_native
import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// SetDepositRequestsStartIndex for the beacon state. Updates the DepositRequestsStartIndex
func (b *BeaconState) SetDepositRequestsStartIndex(index uint64) error {
if b.version < version.Electra {
return errNotSupported("SetDepositRequestsStartIndex", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
b.depositRequestsStartIndex = index
b.markFieldAsDirty(types.DepositRequestsStartIndex)
b.rebuildTrie[types.DepositRequestsStartIndex] = true
return nil
}

View File

@@ -100,7 +100,7 @@ var electraFields = append(
types.NextWithdrawalValidatorIndex,
types.HistoricalSummaries,
types.LatestExecutionPayloadHeaderElectra,
types.DepositRequestsStartIndex,
types.DepositReceiptsStartIndex,
types.DepositBalanceToConsume,
types.ExitBalanceToConsume,
types.EarliestExitEpoch,
@@ -744,7 +744,7 @@ func InitializeFromProtoUnsafeElectra(st *ethpb.BeaconStateElectra) (state.Beaco
nextWithdrawalIndex: st.NextWithdrawalIndex,
nextWithdrawalValidatorIndex: st.NextWithdrawalValidatorIndex,
historicalSummaries: st.HistoricalSummaries,
depositRequestsStartIndex: st.DepositRequestsStartIndex,
depositReceiptsStartIndex: st.DepositReceiptsStartIndex,
depositBalanceToConsume: st.DepositBalanceToConsume,
exitBalanceToConsume: st.ExitBalanceToConsume,
earliestExitEpoch: st.EarliestExitEpoch,
@@ -862,7 +862,7 @@ func (b *BeaconState) Copy() state.BeaconState {
eth1DepositIndex: b.eth1DepositIndex,
nextWithdrawalIndex: b.nextWithdrawalIndex,
nextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
depositRequestsStartIndex: b.depositRequestsStartIndex,
depositReceiptsStartIndex: b.depositReceiptsStartIndex,
depositBalanceToConsume: b.depositBalanceToConsume,
exitBalanceToConsume: b.exitBalanceToConsume,
earliestExitEpoch: b.earliestExitEpoch,
@@ -1286,8 +1286,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
return ssz.Uint64Root(uint64(b.nextWithdrawalValidatorIndex)), nil
case types.HistoricalSummaries:
return stateutil.HistoricalSummariesRoot(b.historicalSummaries)
case types.DepositRequestsStartIndex:
return ssz.Uint64Root(b.depositRequestsStartIndex), nil
case types.DepositReceiptsStartIndex:
return ssz.Uint64Root(b.depositReceiptsStartIndex), nil
case types.DepositBalanceToConsume:
return ssz.Uint64Root(uint64(b.depositBalanceToConsume)), nil
case types.ExitBalanceToConsume:

View File

@@ -96,8 +96,8 @@ func (f FieldIndex) String() string {
return "nextWithdrawalValidatorIndex"
case HistoricalSummaries:
return "historicalSummaries"
case DepositRequestsStartIndex:
return "depositRequestsStartIndex"
case DepositReceiptsStartIndex:
return "depositReceiptsStartIndex"
case DepositBalanceToConsume:
return "depositBalanceToConsume"
case ExitBalanceToConsume:
@@ -179,7 +179,7 @@ func (f FieldIndex) RealPosition() int {
return 26
case HistoricalSummaries:
return 27
case DepositRequestsStartIndex:
case DepositReceiptsStartIndex:
return 28
case DepositBalanceToConsume:
return 29
@@ -253,7 +253,7 @@ const (
NextWithdrawalIndex
NextWithdrawalValidatorIndex
HistoricalSummaries
DepositRequestsStartIndex // Electra: EIP-6110
DepositReceiptsStartIndex // Electra: EIP-6110
DepositBalanceToConsume // Electra: EIP-7251
ExitBalanceToConsume // Electra: EIP-7251
EarliestExitEpoch // Electra: EIP-7251

View File

@@ -16,6 +16,7 @@ go_library(
"pending_consolidations_root.go",
"pending_partial_withdrawals_root.go",
"reference.go",
"slice_root.go",
"sync_committee.root.go",
"trie_helpers.go",
"unrealized_justification.go",

View File

@@ -2,10 +2,9 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
return ssz.SliceRoot(summaries, fieldparams.HistoricalRootsLength)
return SliceRoot(summaries, fieldparams.HistoricalRootsLength)
}

View File

@@ -2,10 +2,9 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingBalanceDepositsRoot(slice []*ethpb.PendingBalanceDeposit) ([32]byte, error) {
return ssz.SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
return SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
}

View File

@@ -2,10 +2,9 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingConsolidationsRoot(slice []*ethpb.PendingConsolidation) ([32]byte, error) {
return ssz.SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
return SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
}

View File

@@ -2,10 +2,9 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingPartialWithdrawalsRoot(slice []*ethpb.PendingPartialWithdrawal) ([32]byte, error) {
return ssz.SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
return SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
}

View File

@@ -1,4 +1,4 @@
package ssz
package stateutil
import (
"bytes"
@@ -6,10 +6,11 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
)
// SliceRoot computes the root of a slice of hashable objects.
func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
max := limit
if uint64(len(slice)) > max {
return [32]byte{}, fmt.Errorf("slice exceeds max length %d", max)
@@ -24,7 +25,7 @@ func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
roots[i] = r
}
sliceRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
sliceRoot, err := ssz.BitwiseMerkleize(roots, uint64(len(roots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not slice merkleization")
}
@@ -35,5 +36,6 @@ func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
// We need to mix in the length of the slice.
sliceLenRoot := make([]byte, 32)
copy(sliceLenRoot, sliceLenBuf.Bytes())
return MixInLength(sliceRoot, sliceLenRoot), nil
res := ssz.MixInLength(sliceRoot, sliceLenRoot)
return res, nil
}
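For context, the relocated SliceRoot keeps the usual SSZ pattern: hash each element, merkleize the roots against the field limit, then mix in the slice length. A minimal usage sketch, assuming the helper is importable from the stateutil package exactly as shown above:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	// Same call shape as HistoricalSummariesRoot above: per-element
	// HashTreeRoot, merkleize to the HistoricalRootsLength limit, mix in length.
	summaries := []*ethpb.HistoricalSummary{
		{BlockSummaryRoot: make([]byte, 32), StateSummaryRoot: make([]byte, 32)},
	}
	root, err := stateutil.SliceRoot(summaries, fieldparams.HistoricalRootsLength)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", root[:])
}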

View File

@@ -44,7 +44,6 @@ go_library(
"validate_aggregate_proof.go",
"validate_attester_slashing.go",
"validate_beacon_attestation.go",
"validate_beacon_attestation_electra.go",
"validate_beacon_blocks.go",
"validate_blob.go",
"validate_bls_to_execution_change.go",
@@ -161,6 +160,7 @@ go_test(
"rpc_beacon_blocks_by_root_test.go",
"rpc_blob_sidecars_by_range_test.go",
"rpc_blob_sidecars_by_root_test.go",
"rpc_chunked_response_test.go",
"rpc_goodbye_test.go",
"rpc_handler_test.go",
"rpc_metadata_test.go",
@@ -177,7 +177,6 @@ go_test(
"sync_test.go",
"validate_aggregate_proof_test.go",
"validate_attester_slashing_test.go",
"validate_beacon_attestation_electra_test.go",
"validate_beacon_attestation_test.go",
"validate_beacon_blocks_test.go",
"validate_blob_test.go",

View File

@@ -1,20 +1,13 @@
package sync
import (
"fmt"
"reflect"
"strings"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
@@ -57,12 +50,11 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
return nil, errors.Errorf("message of %T does not support marshaller interface", base)
}
// Handle different message types across forks.
dt, err := extractValidDataTypeFromTopic(topic, fDigest[:], s.cfg.clock)
if err != nil {
return nil, err
}
if dt != nil {
m = dt
if topic == p2p.BlockSubnetTopicFormat {
m, err = extractBlockDataType(fDigest[:], s.cfg.clock)
if err != nil {
return nil, err
}
}
if err := s.cfg.p2p.Encoding().DecodeGossip(msg.Data, m); err != nil {
return nil, err
@@ -71,7 +63,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
}
// Replaces our fork digest with the formatter.
func (*Service) replaceForkDigest(topic string) (string, error) {
func (_ *Service) replaceForkDigest(topic string) (string, error) {
subStrings := strings.Split(topic, "/")
if len(subStrings) != 4 {
return "", errInvalidTopic
@@ -79,48 +71,3 @@ func (*Service) replaceForkDigest(topic string) (string, error) {
subStrings[2] = "%x"
return strings.Join(subStrings, "/"), nil
}
func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.Clock) (ssz.Unmarshaler, error) {
switch topic {
case p2p.BlockSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.BlockMap, digest, clock)
case p2p.AttestationSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
case p2p.AggregateAndProofSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
}
return nil, nil
}
func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), digest []byte, tor blockchain.TemporalOracle) (T, error) {
var zero T
if len(digest) == 0 {
f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
}
return f()
}
if len(digest) != forkDigestLength {
return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, f := range typeMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return zero, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return f()
}
}
return zero, errors.Wrapf(
ErrNoValidDigest,
"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
zero,
digest,
tor.GenesisTime(),
tor.GenesisValidatorsRoot(),
)
}

View File

@@ -11,20 +11,15 @@ import (
"github.com/d4l3k/messagediff"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2ptesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
@@ -114,197 +109,3 @@ func TestService_decodePubsubMessage(t *testing.T) {
})
}
}
func TestExtractDataType(t *testing.T) {
// Precompute digests
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
type args struct {
digest []byte
chain blockchain.ChainInfoFetcher
}
tests := []struct {
name string
args args
wantBlock interfaces.ReadOnlySignedBeaconBlock
wantMd metadata.Metadata
wantAtt ethpb.Att
wantAggregate ethpb.SignedAggregateAttAndProof
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantErr: true,
},
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "bellatrix fork version",
args: args{
digest: bellatrixDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{Body: &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "capella fork version",
args: args{
digest: capellaDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{ExecutionPayload: &enginev1.ExecutionPayloadCapella{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "deneb fork version",
args: args{
digest: denebDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "electra fork version",
args: args{
digest: electraDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{Block: &ethpb.BeaconBlockElectra{Body: &ethpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadElectra{}}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.AttestationElectra{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProofElectra{},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
}
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
}
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
}
})
}
}

View File

@@ -22,7 +22,7 @@ func NewRegularSyncFuzz(opts ...Option) *Service {
cancel: cancel,
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
r.rateLimiter = newRateLimiter(r.cfg.p2p)

View File

@@ -150,6 +150,12 @@ var (
Help: "Time to verify gossiped blob sidecars",
},
)
beaconAttestationReachHalfSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "attestation_reach_half_milliseconds",
Help: "Time for attestations to reach half",
},
)
pendingAttCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_pending_attestations_total",
Help: "increased when receiving a new pending attestation",

View File

@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -88,13 +87,12 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}
func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.SignedAggregateAttAndProof) {
func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb.SignedAggregateAttestationAndProof) {
for _, signedAtt := range attestations {
aggregate := signedAtt.AggregateAttestationAndProof().AggregateVal()
data := aggregate.GetData()
att := signedAtt.Message
// The pending attestations can arrive in both aggregated and unaggregated forms,
// each form has distinct validation steps.
if helpers.IsAggregated(aggregate) {
if helpers.IsAggregated(att.Aggregate) {
// Save the pending aggregated attestation to the pool if it passes the aggregated
// validation steps.
valRes, err := s.validateAggregatedAtt(ctx, signedAtt)
@@ -103,11 +101,11 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
}
aggValid := pubsub.ValidationAccept == valRes
if s.validateBlockInAttestation(ctx, signedAtt) && aggValid {
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
if err := s.cfg.attPool.SaveAggregatedAttestation(att.Aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
continue
}
s.setAggregatorIndexEpochSeen(data.Target.Epoch, signedAtt.AggregateAttestationAndProof().GetAggregatorIndex())
s.setAggregatorIndexEpochSeen(att.Aggregate.Data.Target.Epoch, att.AggregatorIndex)
// Broadcasting the signed attestation again once a node is able to process it.
if err := s.cfg.p2p.Broadcast(ctx, signedAtt); err != nil {
@@ -118,39 +116,39 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
// This is an important validation before retrieving attestation pre state to defend against
// attestation's target intentionally reference checkpoint that's long ago.
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Aggregate.Data.BeaconBlockRoot)) {
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
continue
}
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att.Aggregate); err != nil {
log.WithError(err).Debug("Could not verify FFG consistency")
continue
}
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Aggregate.Data.Target)
if err != nil {
log.WithError(err).Debug("Could not retrieve attestation prestate")
continue
}
valid, err := s.validateUnaggregatedAttWithState(ctx, aggregate, preState)
valid, err := s.validateUnaggregatedAttWithState(ctx, att.Aggregate, preState)
if err != nil {
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
continue
}
if valid == pubsub.ValidationAccept {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(aggregate); err != nil {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(att.Aggregate); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
continue
}
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, aggregate.GetAggregationBits())
s.setSeenCommitteeIndicesSlot(att.Aggregate.Data.Slot, att.Aggregate.Data.CommitteeIndex, att.Aggregate.AggregationBits)
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(att.Aggregate.Data.Slot))
if err != nil {
log.WithError(err).Debug("Could not retrieve active validator count")
continue
}
// Broadcasting the signed attestation again once a node is able to process it.
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, aggregate), aggregate); err != nil {
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate); err != nil {
log.WithError(err).Debug("Could not broadcast")
}
}
@@ -162,8 +160,8 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
// root of the missing block. The value is the list of pending attestations
// that voted for that block root. The caller of this function is responsible
// for not sending repeated attestations to the pending queue.
func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
root := bytesutil.ToBytes32(att.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof) {
root := bytesutil.ToBytes32(att.Message.Aggregate.Data.BeaconBlockRoot)
s.pendingAttsLock.Lock()
defer s.pendingAttsLock.Unlock()
@@ -180,7 +178,7 @@ func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
_, ok := s.blkRootToPendingAtts[root]
if !ok {
pendingAttCount.Inc()
s.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{att}
s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att}
return
}
// Skip if the attestation from the same aggregator already exists in
@@ -194,32 +192,20 @@ func (s *Service) savePendingAtt(att ethpb.SignedAggregateAttAndProof) {
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
}
func attsAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
if a.GetSignature() != nil {
return b.GetSignature() != nil && a.AggregateAttestationAndProof().GetAggregatorIndex() == b.AggregateAttestationAndProof().GetAggregatorIndex()
func attsAreEqual(a, b *ethpb.SignedAggregateAttestationAndProof) bool {
if a.Signature != nil {
return b.Signature != nil && a.Message.AggregatorIndex == b.Message.AggregatorIndex
}
if b.GetSignature() != nil {
if b.Signature != nil {
return false
}
aAggregate := a.AggregateAttestationAndProof().AggregateVal()
bAggregate := b.AggregateAttestationAndProof().AggregateVal()
aData := aAggregate.GetData()
bData := bAggregate.GetData()
if aData.Slot != bData.Slot {
if a.Message.Aggregate.Data.Slot != b.Message.Aggregate.Data.Slot {
return false
}
if a.Version() >= version.Electra {
if !bytes.Equal(aAggregate.CommitteeBitsVal().Bytes(), bAggregate.CommitteeBitsVal().Bytes()) {
return false
}
} else if aData.CommitteeIndex != bData.CommitteeIndex {
if a.Message.Aggregate.Data.CommitteeIndex != b.Message.Aggregate.Data.CommitteeIndex {
return false
}
return bytes.Equal(aAggregate.GetAggregationBits(), bAggregate.GetAggregationBits())
return bytes.Equal(a.Message.Aggregate.AggregationBits, b.Message.Aggregate.AggregationBits)
}
// This validates the pending attestations in the queue are still valid.
@@ -235,7 +221,7 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)
for bRoot, atts := range s.blkRootToPendingAtts {
for i := len(atts) - 1; i >= 0; i-- {
if slot >= atts[i].AggregateAttestationAndProof().AggregateVal().GetData().Slot+params.BeaconConfig().SlotsPerEpoch {
if slot >= atts[i].Message.Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch {
// Remove the pending attestation from the list in place.
atts = append(atts[:i], atts[i+1:]...)
}

View File

@@ -46,12 +46,12 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{}}
r := &Service{
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
chainStarted: abool.New(),
}
a := &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: a}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}}
require.NoError(t, r.processPendingAtts(context.Background()))
require.LogsContain(t, hook, "Requesting block by root")
}
@@ -124,7 +124,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -134,7 +134,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(context.Background()))
atts, err := r.cfg.attPool.UnaggregatedAttestations()
@@ -162,7 +162,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
priv, err := bls.RandKey()
@@ -182,7 +182,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b)
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, r32))
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
require.NoError(t, r.processPendingAtts(context.Background()))
assert.Equal(t, false, p1.BroadcastCalled.Load(), "Broadcasted bad aggregate")
@@ -245,13 +245,13 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
go r.verifierRoutine()
r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(context.Background()))
assert.Equal(t, true, p1.BroadcastCalled.Load(), "Could not broadcast the good aggregate")
@@ -330,7 +330,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
seenAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -339,7 +339,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root))
r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
r.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(context.Background()))
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
@@ -353,7 +353,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
// 100 Attestations per block root.
@@ -401,7 +401,7 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
r1 := [32]byte{'A'}
@@ -428,7 +428,7 @@ func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
func TestSavePendingAtts_BeyondLimit(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
for i := 0; i < pendingAttsLimit; i++ {
@@ -457,71 +457,5 @@ func TestSavePendingAtts_BeyondLimit(t *testing.T) {
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Saved pending atts")
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Saved pending atts")
}
func Test_attsAreEqual_Committee(t *testing.T) {
t.Run("Phase 0 equal", func(t *testing.T) {
att1 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 123}}}}
att2 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 123}}}}
assert.Equal(t, true, attsAreEqual(att1, att2))
})
t.Run("Phase 0 not equal", func(t *testing.T) {
att1 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 123}}}}
att2 := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 456}}}}
assert.Equal(t, false, attsAreEqual(att1, att2))
})
t.Run("Electra equal", func(t *testing.T) {
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(0, true)
att1 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb1,
}}}
cb2 := primitives.NewAttestationCommitteeBits()
cb2.SetBitAt(0, true)
att2 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb2,
}}}
assert.Equal(t, true, attsAreEqual(att1, att2))
})
t.Run("Electra not equal", func(t *testing.T) {
cb1 := primitives.NewAttestationCommitteeBits()
cb1.SetBitAt(0, true)
att1 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb1,
}}}
cb2 := primitives.NewAttestationCommitteeBits()
cb2.SetBitAt(1, true)
att2 := &ethpb.SignedAggregateAttestationAndProofElectra{
Message: &ethpb.AggregateAttestationAndProofElectra{
Aggregate: &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{},
CommitteeBits: cb2,
}}}
assert.Equal(t, false, attsAreEqual(att1, att2))
})
}

View File

@@ -4,12 +4,14 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -105,7 +107,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOrac
if err != nil {
return nil, err
}
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
blk, err := extractBlockDataType(rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -129,7 +131,7 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
if err != nil {
return nil, err
}
blk, err := extractDataTypeFromTypeMap(types.BlockMap, rpcCtx, tor)
blk, err := extractBlockDataType(rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -137,6 +139,30 @@ func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle,
return blk, err
}
func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfaces.ReadOnlySignedBeaconBlock, error) {
if len(digest) == 0 {
bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return nil, errors.New("no block type exists for the genesis fork version.")
}
return bFunc()
}
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, blkFunc := range types.BlockMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return nil, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return blkFunc()
}
}
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract block data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}
// WriteBlobSidecarChunk writes blob chunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar blocks.VerifiedROBlob) error {
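The reinstated extractBlockDataType (and its metadata counterpart further down) resolves a container by recomputing each fork's digest and comparing it with the 4-byte context read from the stream. A standalone sketch of just that matching step, using the Altair fork version and a zero validators root as the tests do:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
)

func main() {
	vRoot := params.BeaconConfig().ZeroHash
	digest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, vRoot[:])
	if err != nil {
		panic(err)
	}
	rpcCtx := digest[:] // context bytes as they would arrive on the wire
	// A match on this comparison is what selects the Altair block container
	// from types.BlockMap inside the helper above.
	fmt.Println(digest == bytesutil.ToBytes4(rpcCtx)) // true
}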

View File

@@ -0,0 +1,121 @@
package sync
import (
"reflect"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestExtractBlockDataType(t *testing.T) {
// Precompute digests
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
type args struct {
digest []byte
chain blockchain.ChainInfoFetcher
}
tests := []struct {
name string
args args
want interfaces.ReadOnlySignedBeaconBlock
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: nil,
wantErr: true,
},
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
{
name: "bellatrix fork version",
args: args{
digest: bellatrixDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{Body: &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}})
require.NoError(t, err)
return wsb
}(),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := extractBlockDataType(tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("extractBlockDataType() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("extractBlockDataType() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -7,9 +7,13 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
@@ -108,7 +112,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
if err != nil {
return nil, err
}
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.clock)
if err != nil {
return nil, err
}
@@ -129,3 +133,27 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
}
return msg, nil
}
func extractMetaDataType(digest []byte, tor blockchain.TemporalOracle) (metadata.Metadata, error) {
if len(digest) == 0 {
mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return nil, errors.New("no metadata type exists for the genesis fork version.")
}
return mdFunc(), nil
}
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, mdFunc := range types.MetaDataMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return nil, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return mdFunc(), nil
}
}
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract metadata type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}

View File

@@ -2,13 +2,16 @@ package sync
import (
"context"
"reflect"
"sync"
"testing"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
db "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
@@ -18,6 +21,7 @@ import (
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/equality"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -229,3 +233,80 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {
t.Error("Peer is disconnected despite receiving a valid ping")
}
}
func TestExtractMetaDataType(t *testing.T) {
// Precompute digests
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
type args struct {
digest []byte
clock blockchain.TemporalOracle
}
tests := []struct {
name string
args args
want metadata.Metadata
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: nil,
wantErr: true,
},
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
},
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := extractMetaDataType(tt.args.digest, tt.args.clock)
if (err != nil) != tt.wantErr {
t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("extractMetaDataType() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -122,7 +122,7 @@ type Service struct {
cancel context.CancelFunc
slotToPendingBlocks *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]ethpb.SignedAggregateAttAndProof
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
subHandler *subTopicHandler
pendingAttsLock sync.RWMutex
pendingQueueLock sync.RWMutex
@@ -158,6 +158,7 @@ type Service struct {
newBlobVerifier verification.NewBlobVerifier
availableBlocker coverage.AvailableBlocker
ctxMap ContextByteVersions
attReceived chan struct{}
}
// NewService initializes new regular sync service.
@@ -171,8 +172,9 @@ func NewService(ctx context.Context, opts ...Option) *Service {
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: c,
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
signatureChan: make(chan *signatureVerifier, verifierLimit),
attReceived: make(chan struct{}),
}
for _, opt := range opts {
if err := opt(r); err != nil {
@@ -237,6 +239,8 @@ func (s *Service) Start() {
s.maintainPeerStatuses()
s.resyncIfBehind()
go s.beaconAttestationWatcher()
// Update sync metrics.
async.RunEvery(s.ctx, syncMetricsInterval, s.updateMetrics)
}

View File

@@ -13,21 +13,19 @@ import (
// beaconAggregateProofSubscriber forwards the incoming validated aggregated attestation and proof to the
// attestation pool for processing.
func (s *Service) beaconAggregateProofSubscriber(_ context.Context, msg proto.Message) error {
a, ok := msg.(ethpb.SignedAggregateAttAndProof)
a, ok := msg.(*ethpb.SignedAggregateAttestationAndProof)
if !ok {
return fmt.Errorf("message was not type ethpb.SignedAggregateAttAndProof, type=%T", msg)
return fmt.Errorf("message was not type *ethpb.SignedAggregateAttestationAndProof, type=%T", msg)
}
aggregate := a.AggregateAttestationAndProof().AggregateVal()
if aggregate == nil || aggregate.GetData() == nil {
if a.Message.Aggregate == nil || a.Message.Aggregate.Data == nil {
return errors.New("nil aggregate")
}
// An unaggregated attestation can make it here. It's valid, and the aggregator is just itself, although it means poor performance for the subnet.
if !helpers.IsAggregated(aggregate) {
return s.cfg.attPool.SaveUnaggregatedAttestation(aggregate)
if !helpers.IsAggregated(a.Message.Aggregate) {
return s.cfg.attPool.SaveUnaggregatedAttestation(a.Message.Aggregate)
}
return s.cfg.attPool.SaveAggregatedAttestation(aggregate)
return s.cfg.attPool.SaveAggregatedAttestation(a.Message.Aggregate)
}

View File

@@ -15,34 +15,34 @@ import (
)
func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, msg proto.Message) error {
a, ok := msg.(eth.Att)
a, ok := msg.(*eth.Attestation)
if !ok {
return fmt.Errorf("message was not type eth.Att, type=%T", msg)
return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg)
}
data := a.GetData()
if data == nil {
if a.Data == nil {
return errors.New("nil attestation")
}
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, a.GetAggregationBits())
s.setSeenCommitteeIndicesSlot(a.Data.Slot, a.Data.CommitteeIndex, a.AggregationBits)
exists, err := s.cfg.attPool.HasAggregatedAttestation(a)
if err != nil {
return errors.Wrap(err, "could not determine if attestation pool has this attestation")
return errors.Wrap(err, "Could not determine if attestation pool has this atttestation")
}
if exists {
return nil
}
s.attReceived <- struct{}{}
return s.cfg.attPool.SaveUnaggregatedAttestation(a)
}
func (*Service) persistentSubnetIndices() []uint64 {
func (_ *Service) persistentSubnetIndices() []uint64 {
return cache.SubnetIDs.GetAllSubnets()
}
func (*Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
func (_ *Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
endEpoch := slots.ToEpoch(currentSlot) + 1
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
var commIds []uint64
@@ -52,7 +52,7 @@ func (*Service) aggregatorSubnetIndices(currentSlot primitives.Slot) []uint64 {
return slice.SetUint64(commIds)
}
func (*Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
func (_ *Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
endEpoch := slots.ToEpoch(currentSlot) + 1
endSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(endEpoch))
var commIds []uint64
@@ -61,3 +61,29 @@ func (*Service) attesterSubnetIndices(currentSlot primitives.Slot) []uint64 {
}
return slice.SetUint64(commIds)
}
func (s *Service) beaconAttestationWatcher() {
slotTicker := slots.NewSlotTicker(s.cfg.chain.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
count := 0
for {
select {
case currSlot := <-slotTicker.C():
log.Infof("Beacon Attestation Watcher: Clearing Slot: %d, Total received: %d", currSlot, count)
count = 0
case <-s.attReceived:
count++
// 1,014,657 (roughly the expected attestations per epoch) / 32 slots / 2 = 15,854, i.e. just over half of a slot's attestations.
if count == 15854 {
t := uint64(s.cfg.chain.GenesisTime().Unix())
d := slots.TimeIntoSlot(t)
currentSlot := slots.CurrentSlot(t)
duration := d.Milliseconds() - 4000
beaconAttestationReachHalfSummary.Observe(float64(duration))
log.Infof("Beacon Attestation Watcher: Receive enough attestation for slot: %d and it took time %d", currentSlot, duration)
}
case <-s.ctx.Done():
slotTicker.Done()
return
}
}
}
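A quick back-of-envelope check of the hard-coded threshold in the watcher above. The per-epoch count is taken from the inline comment, and the 4000 ms offset presumably anchors the measurement to the 4-second attestation deadline (one third of a 12-second mainnet slot), though the code does not say so explicitly.

package main

import "fmt"

func main() {
	// Roughly 1,014,657 attestations per epoch, spread over 32 slots and
	// halved to detect the ">50% arrived" point for a single slot.
	const perEpoch = 1_014_657
	const slotsPerEpoch = 32
	threshold := perEpoch / slotsPerEpoch / 2
	fmt.Println(threshold) // 15854 (integer division)
}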

View File

@@ -20,7 +20,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
@@ -48,48 +47,38 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
tracing.AnnotateError(span, err)
return pubsub.ValidationReject, err
}
m, ok := raw.(ethpb.SignedAggregateAttAndProof)
m, ok := raw.(*ethpb.SignedAggregateAttestationAndProof)
if !ok {
return pubsub.ValidationReject, errors.Errorf("invalid message type: %T", raw)
}
if m.AggregateAttestationAndProof() == nil {
if m.Message == nil {
return pubsub.ValidationReject, errNilMessage
}
aggregate := m.AggregateAttestationAndProof().AggregateVal()
data := aggregate.GetData()
if err := helpers.ValidateNilAttestation(aggregate); err != nil {
if err := helpers.ValidateNilAttestation(m.Message.Aggregate); err != nil {
return pubsub.ValidationReject, err
}
// Do not process slot 0 aggregates.
if data.Slot == 0 {
if m.Message.Aggregate.Data.Slot == 0 {
return pubsub.ValidationIgnore, nil
}
// Broadcast the aggregated attestation on a feed to notify other services in the beacon node
// of a received aggregated attestation.
// TODO: this will be extended to Electra in a later PR
if m.Version() == version.Phase0 {
phase0Att, ok := m.(*ethpb.SignedAggregateAttestationAndProof)
if ok {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.AggregatedAttReceived,
Data: &operation.AggregatedAttReceivedData{
Attestation: phase0Att.Message,
},
})
}
}
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.AggregatedAttReceived,
Data: &operation.AggregatedAttReceivedData{
Attestation: m.Message,
},
})
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
if err := helpers.ValidateSlotTargetEpoch(m.Message.Aggregate.Data); err != nil {
return pubsub.ValidationReject, err
}
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(
data.Slot,
m.Message.Aggregate.Data.Slot,
s.cfg.clock.GenesisTime(),
earlyAttestationProcessingTolerance,
); err != nil {
@@ -98,19 +87,19 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
}
// Verify this is the first aggregate received from the aggregator with index and slot.
if s.hasSeenAggregatorIndexEpoch(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex()) {
if s.hasSeenAggregatorIndexEpoch(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex) {
return pubsub.ValidationIgnore, nil
}
// Check that the block being voted on isn't invalid.
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
attBadBlockCount.Inc()
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
}
// Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally.
seen, err := s.cfg.attPool.HasAggregatedAttestation(aggregate)
seen, err := s.cfg.attPool.HasAggregatedAttestation(m.Message.Aggregate)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
@@ -127,7 +116,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return validationRes, err
}
s.setAggregatorIndexEpochSeen(data.Target.Epoch, m.AggregateAttestationAndProof().GetAggregatorIndex())
s.setAggregatorIndexEpochSeen(m.Message.Aggregate.Data.Target.Epoch, m.Message.AggregatorIndex)
msg.ValidatorData = m
@@ -136,75 +125,44 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationAccept, nil
}
func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.SignedAggregateAttAndProof) (pubsub.ValidationResult, error) {
func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.SignedAggregateAttestationAndProof) (pubsub.ValidationResult, error) {
ctx, span := trace.StartSpan(ctx, "sync.validateAggregatedAtt")
defer span.End()
aggregateAndProof := signed.AggregateAttestationAndProof()
aggregatorIndex := aggregateAndProof.GetAggregatorIndex()
aggregate := aggregateAndProof.AggregateVal()
data := aggregate.GetData()
// Verify attestation target root is consistent with the head root.
// This verification is not in the spec; however, we guard against it because it opens us up
// to weird edge cases during verification. The attestation technically could be used to add value to a block,
// but it's invalid in the spirit of the protocol. Here we choose safety over profit.
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, aggregate); err != nil {
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, signed.Message.Aggregate); err != nil {
tracing.AnnotateError(span, err)
attBadLmdConsistencyCount.Inc()
return pubsub.ValidationReject, err
}
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(signed.Message.Aggregate.Data.BeaconBlockRoot)) {
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
}
bs, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
bs, err := s.cfg.chain.AttestationTargetState(ctx, signed.Message.Aggregate.Data.Target)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
// Verify validator index is within the beacon committee.
result, err := s.validateIndexInCommittee(ctx, bs, aggregate, aggregatorIndex)
result, err := s.validateIndexInCommittee(ctx, bs, signed.Message.Aggregate, signed.Message.AggregatorIndex)
if result != pubsub.ValidationAccept {
wrappedErr := errors.Wrapf(err, "could not validate index in committee")
wrappedErr := errors.Wrapf(err, "Could not validate index in committee")
tracing.AnnotateError(span, wrappedErr)
return result, wrappedErr
}
var committeeIndex primitives.CommitteeIndex
if signed.Version() >= version.Electra {
a, ok := aggregate.(*ethpb.AttestationElectra)
// This will never fail in practice because we asserted the version
if !ok {
err := fmt.Errorf("aggregate attestation has wrong type (expected %T, got %T)", &ethpb.AttestationElectra{}, aggregate)
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
committeeIndex, result, err = validateCommitteeIndexElectra(ctx, a)
if result != pubsub.ValidationAccept {
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
tracing.AnnotateError(span, wrappedErr)
return result, wrappedErr
}
} else {
committeeIndex = data.CommitteeIndex
}
// Verify selection proof reflects to the right validator.
selectionSigSet, err := validateSelectionIndex(
ctx,
bs,
data.Slot,
committeeIndex,
aggregatorIndex,
aggregateAndProof.GetSelectionProof(),
)
selectionSigSet, err := validateSelectionIndex(ctx, bs, signed.Message.Aggregate.Data, signed.Message.AggregatorIndex, signed.Message.SelectionProof)
if err != nil {
wrappedErr := errors.Wrapf(err, "could not validate selection for validator %d", aggregateAndProof.GetAggregatorIndex())
wrappedErr := errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)
tracing.AnnotateError(span, wrappedErr)
attBadSelectionProofCount.Inc()
return pubsub.ValidationReject, wrappedErr
@@ -214,13 +172,13 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.Signed
// We use batch verify here to save compute.
aggregatorSigSet, err := aggSigSet(bs, signed)
if err != nil {
wrappedErr := errors.Wrapf(err, "could not get aggregator sig set %d", aggregatorIndex)
wrappedErr := errors.Wrapf(err, "Could not get aggregator sig set %d", signed.Message.AggregatorIndex)
tracing.AnnotateError(span, wrappedErr)
return pubsub.ValidationIgnore, wrappedErr
}
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{aggregate})
attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []ethpb.Att{signed.Message.Aggregate})
if err != nil {
wrappedErr := errors.Wrapf(err, "could not verify aggregator signature %d", aggregatorIndex)
wrappedErr := errors.Wrapf(err, "Could not verify aggregator signature %d", signed.Message.AggregatorIndex)
tracing.AnnotateError(span, wrappedErr)
return pubsub.ValidationIgnore, wrappedErr
}
@@ -230,9 +188,10 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.Signed
return s.validateWithBatchVerifier(ctx, "aggregate", set)
}
func (s *Service) validateBlockInAttestation(ctx context.Context, satt ethpb.SignedAggregateAttAndProof) bool {
func (s *Service) validateBlockInAttestation(ctx context.Context, satt *ethpb.SignedAggregateAttestationAndProof) bool {
a := satt.Message
// Verify the block being voted on and the processed state are in the beaconDB. The block should have passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(satt.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
blockRoot := bytesutil.ToBytes32(a.Aggregate.Data.BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
// The node doesn't have the block; it'll request it from a peer while saving the pending attestation to a queue.
s.savePendingAtt(satt)
@@ -275,7 +234,7 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
return result, err
}
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
committee, result, err := s.validateBitLength(ctx, a, bs)
if result != pubsub.ValidationAccept {
return result, err
}
@@ -303,15 +262,14 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnl
func validateSelectionIndex(
ctx context.Context,
bs state.ReadOnlyBeaconState,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
data *ethpb.AttestationData,
validatorIndex primitives.ValidatorIndex,
proof []byte,
) (*bls.SignatureBatch, error) {
ctx, span := trace.StartSpan(ctx, "sync.validateSelectionIndex")
defer span.End()
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, data.Slot, data.CommitteeIndex)
if err != nil {
return nil, err
}
@@ -320,11 +278,11 @@ func validateSelectionIndex(
return nil, err
}
if !aggregator {
return nil, fmt.Errorf("validator is not an aggregator for slot %d", slot)
return nil, fmt.Errorf("validator is not an aggregator for slot %d", data.Slot)
}
domain := params.BeaconConfig().DomainSelectionProof
epoch := slots.ToEpoch(slot)
epoch := slots.ToEpoch(data.Slot)
v, err := bs.ValidatorAtIndex(validatorIndex)
if err != nil {
@@ -339,7 +297,7 @@ func validateSelectionIndex(
if err != nil {
return nil, err
}
sszUint := primitives.SSZUint64(slot)
sszUint := primitives.SSZUint64(data.Slot)
root, err := signing.ComputeSigningRoot(&sszUint, d)
if err != nil {
return nil, err
@@ -353,10 +311,8 @@ func validateSelectionIndex(
}
// This returns the aggregator signature set, which can be used for batch verification.
func aggSigSet(s state.ReadOnlyBeaconState, a ethpb.SignedAggregateAttAndProof) (*bls.SignatureBatch, error) {
aggregateAndProof := a.AggregateAttestationAndProof()
v, err := s.ValidatorAtIndex(aggregateAndProof.GetAggregatorIndex())
func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationAndProof) (*bls.SignatureBatch, error) {
v, err := s.ValidatorAtIndex(a.Message.AggregatorIndex)
if err != nil {
return nil, err
}
@@ -365,17 +321,17 @@ func aggSigSet(s state.ReadOnlyBeaconState, a ethpb.SignedAggregateAttAndProof)
return nil, err
}
epoch := slots.ToEpoch(aggregateAndProof.AggregateVal().GetData().Slot)
epoch := slots.ToEpoch(a.Message.Aggregate.Data.Slot)
d, err := signing.Domain(s.Fork(), epoch, params.BeaconConfig().DomainAggregateAndProof, s.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
root, err := signing.ComputeSigningRoot(aggregateAndProof, d)
root, err := signing.ComputeSigningRoot(a.Message, d)
if err != nil {
return nil, err
}
return &bls.SignatureBatch{
Signatures: [][]byte{a.GetSignature()},
Signatures: [][]byte{a.Signature},
PublicKeys: []bls.PublicKey{publicKey},
Messages: [][32]byte{root},
Descriptions: []string{signing.AggregatorSignature},

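The hunks above build two BLS signature sets (the aggregator's aggregate-and-proof signature and the aggregate attestation signature) and hand the joined set to validateWithBatchVerifier so a single batched pairing check covers both. Below is a minimal, self-contained sketch of that join-then-verify idea; signatureBatch and verifyMultiple are simplified stand-ins for Prysm's bls.SignatureBatch and bls.VerifyMultipleSignatures, and the byte values are placeholders.

package main

import "fmt"

// signatureBatch is a simplified stand-in for a BLS signature batch:
// parallel slices of signatures, public keys, and 32-byte message roots.
type signatureBatch struct {
	signatures [][]byte
	publicKeys [][]byte
	messages   [][32]byte
}

// join appends another batch so all entries can be verified in one pass.
func (b *signatureBatch) join(other *signatureBatch) *signatureBatch {
	b.signatures = append(b.signatures, other.signatures...)
	b.publicKeys = append(b.publicKeys, other.publicKeys...)
	b.messages = append(b.messages, other.messages...)
	return b
}

// verifyMultiple is a placeholder for a real multi-signature verifier;
// it only checks that the batch is well formed.
func verifyMultiple(sigs [][]byte, msgs [][32]byte, keys [][]byte) (bool, error) {
	if len(sigs) != len(msgs) || len(msgs) != len(keys) {
		return false, fmt.Errorf("mismatched batch lengths: %d/%d/%d", len(sigs), len(msgs), len(keys))
	}
	return true, nil // real code performs an aggregated pairing check here
}

func main() {
	aggregatorSet := &signatureBatch{signatures: [][]byte{{0x01}}, publicKeys: [][]byte{{0xaa}}, messages: [][32]byte{{}}}
	attSet := &signatureBatch{signatures: [][]byte{{0x02}}, publicKeys: [][]byte{{0xbb}}, messages: [][32]byte{{}}}
	set := aggregatorSet.join(attSet)
	ok, err := verifyMultiple(set.signatures, set.messages, set.publicKeys)
	fmt.Println(ok, err)
}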
View File

@@ -117,7 +117,7 @@ func TestVerifySelection_NotAnAggregator(t *testing.T) {
sig := privKeys[0].Sign([]byte{'A'})
data := util.HydrateAttestationData(&ethpb.AttestationData{})
_, err := validateSelectionIndex(ctx, beaconState, data.Slot, data.CommitteeIndex, 0, sig.Marshal())
_, err := validateSelectionIndex(ctx, beaconState, data, 0, sig.Marshal())
wanted := "validator is not an aggregator for slot"
assert.ErrorContains(t, wanted, err)
}
@@ -149,7 +149,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
attPool: attestations.NewPool(),
chain: &mock.ChainService{},
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
seenAggregatedAttestationCache: c,
}
r.initCaches()
@@ -302,7 +302,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
attestationNotifier: (&mock.ChainService{}).OperationNotifier(),
},
seenAggregatedAttestationCache: lruwrpr.New(10),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
r.initCaches()

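The blkRootToPendingAtts map used in these tests queues aggregates whose target block has not arrived yet, keyed by the 32-byte block root. A rough sketch of that queueing pattern follows, with heavily simplified types rather than Prysm's actual Service fields.

package main

import "fmt"

type pendingAtt struct{ slot uint64 }

// pendingQueue groups attestations by the block root they vote for until the
// block (and its state) shows up.
type pendingQueue map[[32]byte][]pendingAtt

// save appends an attestation to the queue for its block root.
func (q pendingQueue) save(root [32]byte, a pendingAtt) {
	q[root] = append(q[root], a)
}

// pop returns and clears everything queued for a root once the block arrives.
func (q pendingQueue) pop(root [32]byte) []pendingAtt {
	atts := q[root]
	delete(q, root)
	return atts
}

func main() {
	q := pendingQueue{}
	root := [32]byte{0x01}
	q.save(root, pendingAtt{slot: 10})
	q.save(root, pendingAtt{slot: 10})
	fmt.Println("queued:", len(q.pop(root)))
}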
View File

@@ -9,7 +9,6 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -23,7 +22,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
@@ -57,18 +55,16 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return pubsub.ValidationReject, err
}
att, ok := m.(eth.Att)
att, ok := m.(*eth.Attestation)
if !ok {
return pubsub.ValidationReject, errWrongMessage
}
data := att.GetData()
if err := helpers.ValidateNilAttestation(att); err != nil {
return pubsub.ValidationReject, err
}
// Do not process slot 0 attestations.
if data.Slot == 0 {
if att.Data.Slot == 0 {
return pubsub.ValidationIgnore, nil
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
@@ -82,36 +78,15 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(),
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.clock.GenesisTime(),
earlyAttestationProcessingTolerance); err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
return pubsub.ValidationReject, err
}
var validationRes pubsub.ValidationResult
var committeeIndex primitives.CommitteeIndex
if att.Version() >= version.Electra {
a, ok := att.(*eth.AttestationElectra)
// This will never fail in practice because we asserted the version
if !ok {
err := fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.AttestationElectra{}, att)
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
committeeIndex, validationRes, err = validateCommitteeIndexElectra(ctx, a)
if validationRes != pubsub.ValidationAccept {
wrappedErr := errors.Wrapf(err, "could not validate committee index for Electra version")
tracing.AnnotateError(span, wrappedErr)
return validationRes, wrappedErr
}
} else {
committeeIndex = data.CommitteeIndex
}
if features.Get().EnableSlasher {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
@@ -119,13 +94,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
// Using a different context to prevent timeouts as this operation can be expensive
// and we want to avoid affecting the critical code path.
ctx := context.TODO()
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
if err != nil {
log.WithError(err).Error("Could not retrieve pre state")
tracing.AnnotateError(span, err)
return
}
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, committeeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
tracing.AnnotateError(span, err)
@@ -142,41 +117,27 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
}
// Verify this is the first attestation received from the participating validator for the slot.
if s.hasSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits()) {
if s.hasSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits) {
return pubsub.ValidationIgnore, nil
}
// Reject an attestation if it references an invalid block.
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Source.Root)) {
if s.hasBadBlock(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Source.Root)) {
attBadBlockCount.Inc()
return pubsub.ValidationReject, errors.New("attestation data references bad block root")
}
// Verify the block being voted on and the processed state are in the beaconDB, and that the block has passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(data.BeaconBlockRoot)
blockRoot := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
// The node doesn't have the block; it'll request it from a peer while saving the pending attestation to a queue.
if att.Version() >= version.Electra {
a, ok := att.(*eth.AttestationElectra)
// This will never fail in practice because we asserted the version
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.AttestationElectra{}, att)
}
s.savePendingAtt(&eth.SignedAggregateAttestationAndProofElectra{Message: &eth.AggregateAttestationAndProofElectra{Aggregate: a}})
} else {
a, ok := att.(*eth.Attestation)
// This will never fail in practice because we asserted the version
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.Attestation{}, att)
}
s.savePendingAtt(&eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: a}})
}
s.savePendingAtt(&eth.SignedAggregateAttestationAndProof{Message: &eth.AggregateAttestationAndProof{Aggregate: att}})
return pubsub.ValidationIgnore, nil
}
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) {
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
}
@@ -186,13 +147,13 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return pubsub.ValidationReject, err
}
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
preState, err := s.cfg.chain.AttestationTargetState(ctx, att.Data.Target)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
validationRes, err = s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
validationRes, err := s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
if validationRes != pubsub.ValidationAccept {
return validationRes, err
}
@@ -202,7 +163,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
return validationRes, err
}
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits())
s.setSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits)
msg.ValidatorData = att
@@ -250,7 +211,7 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
ctx, span := trace.StartSpan(ctx, "sync.validateUnaggregatedAttWithState")
defer span.End()
committee, result, err := s.validateBitLength(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex, a.GetAggregationBits())
committee, result, err := s.validateBitLength(ctx, a, bs)
if result != pubsub.ValidationAccept {
return result, err
}
@@ -271,20 +232,14 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a eth.At
return s.validateWithBatchVerifier(ctx, "attestation", set)
}
func (s *Service) validateBitLength(
ctx context.Context,
bs state.ReadOnlyBeaconState,
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
aggregationBits bitfield.Bitlist,
) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, slot, committeeIndex)
func (s *Service) validateBitLength(ctx context.Context, a eth.Att, bs state.ReadOnlyBeaconState) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, a.GetData().Slot, a.GetData().CommitteeIndex)
if err != nil {
return nil, pubsub.ValidationIgnore, err
}
// Verify the number of aggregation bits matches the committee size.
if err := helpers.VerifyBitfieldLength(aggregationBits, uint64(len(committee))); err != nil {
if err := helpers.VerifyBitfieldLength(a.GetAggregationBits(), uint64(len(committee))); err != nil {
return nil, pubsub.ValidationReject, err
}

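validateBitLength above rejects attestations whose aggregation bitfield does not match the size of the committee returned by BeaconCommitteeFromState. The sketch below illustrates the same check with the bitfield modeled as a plain boolean slice instead of the go-bitfield type (an assumption for illustration only).

package main

import (
	"errors"
	"fmt"
)

// verifyBitfieldLength is a simplified analogue of helpers.VerifyBitfieldLength:
// the attestation must carry exactly one bit per committee member.
func verifyBitfieldLength(aggregationBits []bool, committeeSize int) error {
	if len(aggregationBits) != committeeSize {
		return errors.New("wanted participants bitfield length equal to committee size")
	}
	return nil
}

func main() {
	committee := []uint64{10, 42, 7, 99} // validator indices returned by the committee lookup
	bits := []bool{true, false, false, true}
	if err := verifyBitfieldLength(bits, len(committee)); err != nil {
		fmt.Println("reject:", err)
		return
	}
	fmt.Println("aggregation bits match committee size")
}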
View File

@@ -1,27 +0,0 @@
package sync
import (
"context"
"fmt"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
func validateCommitteeIndexElectra(ctx context.Context, a *ethpb.AttestationElectra) (primitives.CommitteeIndex, pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateCommitteeIndexElectra")
defer span.End()
ci := a.Data.CommitteeIndex
if ci != 0 {
return 0, pubsub.ValidationReject, fmt.Errorf("committee index must be 0 but was %d", ci)
}
committeeIndices := helpers.CommitteeIndices(a.CommitteeBits)
if len(committeeIndices) != 1 {
return 0, pubsub.ValidationReject, fmt.Errorf("exactly 1 committee index must be set but %d were set", len(committeeIndices))
}
return committeeIndices[0], pubsub.ValidationAccept, nil
}

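The deleted helper above encoded the Electra rule that attestation data must carry a zero committee index while the actual committee is signalled through committee bits, of which exactly one may be set. Below is a minimal sketch of that rule, with the committee bits modeled as a boolean slice rather than Prysm's bitvector type.

package main

import (
	"errors"
	"fmt"
)

// committeeIndexFromBits returns the single committee index signalled by the
// committee bits, enforcing the simplified Electra rules described above.
func committeeIndexFromBits(dataCommitteeIndex uint64, committeeBits []bool) (uint64, error) {
	if dataCommitteeIndex != 0 {
		return 0, fmt.Errorf("committee index must be 0 but was %d", dataCommitteeIndex)
	}
	var indices []uint64
	for i, set := range committeeBits {
		if set {
			indices = append(indices, uint64(i))
		}
	}
	if len(indices) != 1 {
		return 0, errors.New("exactly 1 committee index must be set")
	}
	return indices[0], nil
}

func main() {
	bits := make([]bool, 64)
	bits[5] = true
	idx, err := committeeIndexFromBits(0, bits)
	fmt.Println(idx, err) // 5 <nil>
}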
View File

@@ -1,46 +0,0 @@
package sync
import (
"context"
"testing"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func Test_validateCommitteeIndexElectra(t *testing.T) {
ctx := context.Background()
t.Run("valid", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(1, true)
ci, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
require.NoError(t, err)
assert.Equal(t, pubsub.ValidationAccept, res)
assert.Equal(t, primitives.CommitteeIndex(1), ci)
})
t.Run("non-zero data committee index", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(1, true)
_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{CommitteeIndex: 1}, CommitteeBits: cb})
assert.NotNil(t, err)
assert.Equal(t, pubsub.ValidationReject, res)
})
t.Run("no committee bits set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
assert.NotNil(t, err)
assert.Equal(t, pubsub.ValidationReject, res)
})
t.Run("more than 1 committee bit set", func(t *testing.T) {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
cb.SetBitAt(1, true)
_, res, err := validateCommitteeIndexElectra(ctx, &ethpb.AttestationElectra{Data: &ethpb.AttestationData{}, CommitteeBits: cb})
assert.NotNil(t, err)
assert.Equal(t, pubsub.ValidationReject, res)
})
}

View File

@@ -49,7 +49,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(),
},
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
@@ -290,7 +290,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
m.Message.Topic = nil
}
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "", m)
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "" /*peerID*/, m)
received := res == pubsub.ValidationAccept
if received != tt.want {
t.Fatalf("Did not receive wanted validation. Got %v, wanted %v", !tt.want, tt.want)

View File

@@ -174,10 +174,10 @@ var (
Usage: "The factor by which blob batch limit may increase on burst.",
Value: 2,
}
// DisableDebugRPCEndpoints disables the debug Beacon API namespace.
DisableDebugRPCEndpoints = &cli.BoolFlag{
Name: "disable-debug-rpc-endpoints",
Usage: "Disables the debug Beacon API namespace.",
// EnableDebugRPCEndpoints enables the debug RPC service, which exposes utility endpoints such as /eth/v1alpha1/beacon/state.
EnableDebugRPCEndpoints = &cli.BoolFlag{
Name: "enable-debug-rpc-endpoints",
Usage: "Enables the debug rpc service, containing utility endpoints such as /eth/v1alpha1/beacon/state.",
}
// SubscribeToAllSubnets defines a flag to specify whether to subscribe to all possible attestation/sync subnets or not.
SubscribeToAllSubnets = &cli.BoolFlag{

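For context on how a flag like EnableDebugRPCEndpoints is wired up, the sketch below registers and reads a boolean flag with urfave/cli/v2; the app name and wiring are illustrative assumptions, not Prysm's actual startup code.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	// enableDebugRPC mirrors the shape of EnableDebugRPCEndpoints above.
	enableDebugRPC := &cli.BoolFlag{
		Name:  "enable-debug-rpc-endpoints",
		Usage: "Enables the debug rpc service, containing utility endpoints such as /eth/v1alpha1/beacon/state.",
	}
	app := &cli.App{
		Name:  "example-node", // hypothetical binary name
		Flags: []cli.Flag{enableDebugRPC},
		Action: func(c *cli.Context) error {
			fmt.Println("debug RPC enabled:", c.Bool(enableDebugRPC.Name))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}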
View File

@@ -64,7 +64,7 @@ var appFlags = []cli.Flag{
flags.InteropNumValidatorsFlag,
flags.InteropGenesisTimeFlag,
flags.SlotsPerArchivedPoint,
flags.DisableDebugRPCEndpoints,
flags.EnableDebugRPCEndpoints,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,

View File

@@ -116,7 +116,7 @@ var appHelpFlagGroups = []flagGroup{
flags.BlockBatchLimitBurstFactor,
flags.BlobBatchLimit,
flags.BlobBatchLimitBurstFactor,
flags.DisableDebugRPCEndpoints,
flags.EnableDebugRPCEndpoints,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,

View File

@@ -9,7 +9,6 @@ go_library(
visibility = ["//visibility:private"],
deps = [
"//cmd/prysmctl/checkpointsync:go_default_library",
"//cmd/prysmctl/codegen:go_default_library",
"//cmd/prysmctl/db:go_default_library",
"//cmd/prysmctl/p2p:go_default_library",
"//cmd/prysmctl/testnet:go_default_library",

View File

@@ -1,12 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["cmd.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/codegen",
visibility = ["//visibility:public"],
deps = [
"@com_github_offchainlabs_methodical_ssz//cmd/ssz/commands:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -1,14 +0,0 @@
package codegen
import (
"github.com/OffchainLabs/methodical-ssz/cmd/ssz/commands"
"github.com/urfave/cli/v2"
)
var Commands = []*cli.Command{
{
Name: "ssz",
Usage: "ssz code generation utilities",
Subcommands: commands.All,
},
}

View File

@@ -4,7 +4,6 @@ import (
"os"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/checkpointsync"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/codegen"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/db"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/p2p"
"github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/testnet"
@@ -33,5 +32,4 @@ func init() {
prysmctlCommands = append(prysmctlCommands, testnet.Commands...)
prysmctlCommands = append(prysmctlCommands, weaksubjectivity.Commands...)
prysmctlCommands = append(prysmctlCommands, validator.Commands...)
prysmctlCommands = append(prysmctlCommands, codegen.Commands...)
}

View File

@@ -52,11 +52,6 @@ var (
Usage: deprecatedUsage,
Hidden: true,
}
deprecatedEnableDebugRPCEndpoints = &cli.BoolFlag{
Name: "enable-debug-rpc-endpoints",
Usage: deprecatedUsage,
Hidden: true,
}
)
// Deprecated flags for both the beacon node and validator client.
@@ -70,7 +65,6 @@ var deprecatedFlags = []cli.Flag{
deprecatedEnableEIP4881,
deprecatedDisableEIP4881,
deprecatedVerboseSigVerification,
deprecatedEnableDebugRPCEndpoints,
}
// deprecatedBeaconFlags contains flags that are still used by other components

View File

@@ -36,6 +36,6 @@ const (
PendingBalanceDepositsLimit = 134217728 // Maximum number of pending balance deposits in the beacon state.
PendingPartialWithdrawalsLimit = 134217728 // Maximum number of pending partial withdrawals in the beacon state.
PendingConsolidationsLimit = 262144 // Maximum number of pending consolidations in the beacon state.
MaxDepositRequestsPerPayload = 8192 // Maximum number of deposit requests in an execution payload.
MaxDepositReceiptsPerPayload = 8192 // Maximum number of deposit receipts in an execution payload.
MaxWithdrawalRequestsPerPayload = 16 // Maximum number of execution layer withdrawal requests in an execution payload.
)

View File

@@ -36,6 +36,6 @@ const (
PendingBalanceDepositsLimit = 134217728 // Maximum number of pending balance deposits in the beacon state.
PendingPartialWithdrawalsLimit = 64 // Maximum number of pending partial withdrawals in the beacon state.
PendingConsolidationsLimit = 64 // Maximum number of pending consolidations in the beacon state.
MaxDepositRequestsPerPayload = 4 // Maximum number of deposit requests in an execution payload.
MaxDepositReceiptsPerPayload = 4 // Maximum number of deposit receipts in an execution payload.
MaxWithdrawalRequestsPerPayload = 2 // Maximum number of execution layer withdrawal requests in an execution payload.
)

View File

@@ -250,8 +250,7 @@ type BeaconChainConfig struct {
MaxPendingPartialsPerWithdrawalsSweep uint64 `yaml:"MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxPendingPartialsPerWithdrawalsSweep is the maximum number of pending partial withdrawals to process per payload.
FullExitRequestAmount uint64 `yaml:"FULL_EXIT_REQUEST_AMOUNT" spec:"true"` // FullExitRequestAmount is the amount of Gwei required to request a full exit.
MaxWithdrawalRequestsPerPayload uint64 `yaml:"MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalRequestsPerPayload is the maximum number of execution layer withdrawal requests in each payload.
MaxDepositRequestsPerPayload uint64 `yaml:"MAX_DEPOSIT_REQUESTS_PER_PAYLOAD" spec:"true"` // MaxDepositRequestsPerPayload is the maximum number of execution layer deposits in each payload
UnsetDepositRequestsStartIndex uint64 `yaml:"UNSET_DEPOSIT_REQUESTS_START_INDEX" spec:"true"` // UnsetDepositRequestsStartIndex is used to check the start index for eip6110
UnsetDepositReceiptsStartIndex uint64 `yaml:"UNSET_DEPOSIT_RECEIPTS_START_INDEX" spec:"true"` // UnsetDepositReceiptsStartIndex is used to check the start index for eip6110
// Networking Specific Parameters
GossipMaxSize uint64 `yaml:"GOSSIP_MAX_SIZE" spec:"true"` // GossipMaxSize is the maximum allowed size of uncompressed gossip messages.

View File
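Fields in BeaconChainConfig are populated from spec YAML via their yaml tags. Below is a cut-down, self-contained sketch of that mapping using gopkg.in/yaml.v2 and a two-field struct, both assumptions chosen only to illustrate the tag-to-key relationship.

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// miniConfig holds just two of the fields from the hunk above to show the tag mapping.
type miniConfig struct {
	MaxWithdrawalRequestsPerPayload uint64 `yaml:"MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD"`
	GossipMaxSize                   uint64 `yaml:"GOSSIP_MAX_SIZE"`
}

func main() {
	spec := []byte("MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16\nGOSSIP_MAX_SIZE: 10485760\n")
	var cfg miniConfig
	if err := yaml.Unmarshal(spec, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}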

@@ -36,7 +36,7 @@ var placeholderFields = []string{
"MAX_BLOBS_PER_BLOCK",
"MAX_BLOB_COMMITMENTS_PER_BLOCK", // Compile time constant on BeaconBlockBodyDeneb.blob_kzg_commitments.
"MAX_BYTES_PER_TRANSACTION", // Used for ssz of EL transactions. Unused in Prysm.
"MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.deposit_receipts. TODO: rename when updating spec configs
"MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.deposit_receipts.
"MAX_EXTRA_DATA_BYTES", // Compile time constant on ExecutionPayload.extra_data.
"MAX_TRANSACTIONS_PER_PAYLOAD", // Compile time constant on ExecutionPayload.transactions.
"REORG_HEAD_WEIGHT_THRESHOLD",

View File

@@ -290,8 +290,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxPendingPartialsPerWithdrawalsSweep: 8,
FullExitRequestAmount: 0,
MaxWithdrawalRequestsPerPayload: 16,
MaxDepositRequestsPerPayload: 8192, // 2**13 (= 8192)
UnsetDepositRequestsStartIndex: math.MaxUint64,
UnsetDepositReceiptsStartIndex: math.MaxUint64,
// Values related to networking parameters.
GossipMaxSize: 10 * 1 << 20, // 10 MiB

View File

@@ -107,7 +107,6 @@ func MinimalSpecConfig() *BeaconChainConfig {
minimalConfig.PendingConsolidationsLimit = 64
minimalConfig.MaxPartialWithdrawalsPerPayload = 1
minimalConfig.MaxWithdrawalRequestsPerPayload = 2
minimalConfig.MaxDepositRequestsPerPayload = 4
minimalConfig.PendingPartialWithdrawalsLimit = 64
minimalConfig.MaxPendingPartialsPerWithdrawalsSweep = 1

View File

@@ -1,7 +1,6 @@
package loader
import (
"encoding/json"
"fmt"
"strconv"
@@ -129,23 +128,12 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
return nil, err
}
loadConfig = dbps.ToConsensus()
log.Debugf("DB loaded proposer settings: %s", func() string {
b, err := json.Marshal(loadConfig)
if err != nil {
return err.Error()
}
return string(b)
}())
}
// start to process based on load method
for _, method := range psl.loadMethods {
switch method {
case defaultFlag:
if psl.existsInDB && len(psl.loadMethods) == 1 {
// Only log the warning below if the default flag is the only load method.
log.Warn("Previously saved proposer settings were loaded from the DB, only default settings will be updated. Please provide new proposer settings or clear DB to reset proposer settings.")
}
suggestedFeeRecipient := cliCtx.String(flags.SuggestedFeeRecipientFlag.Name)
if !common.IsHexAddress(suggestedFeeRecipient) {
return nil, errors.Errorf("--%s is not a valid Ethereum address", flags.SuggestedFeeRecipientFlag.Name)
@@ -169,7 +157,6 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
return nil, errors.Errorf("proposer settings is empty after unmarshalling from file specified by %s flag", flags.ProposerSettingsFlag.Name)
}
loadConfig = psl.processProposerSettings(settingFromFile, loadConfig)
log.WithField(flags.ProposerSettingsFlag.Name, cliCtx.String(flags.ProposerSettingsFlag.Name)).Info("Proposer settings loaded from file")
case urlFlag:
var settingFromURL *validatorpb.ProposerSettingsPayload
if err := config.UnmarshalFromURL(cliCtx.Context, cliCtx.String(flags.ProposerSettingsURLFlag.Name), &settingFromURL); err != nil {
@@ -179,14 +166,9 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
return nil, errors.New("proposer settings is empty after unmarshalling from url")
}
loadConfig = psl.processProposerSettings(settingFromURL, loadConfig)
log.WithField(flags.ProposerSettingsURLFlag.Name, cliCtx.String(flags.ProposerSettingsURLFlag.Name)).Infof("Proposer settings loaded from URL")
case onlyDB:
loadConfig = psl.processProposerSettings(nil, loadConfig)
log.Info("Proposer settings loaded from the DB")
case none:
if psl.existsInDB {
log.Info("Proposer settings loaded from the DB")
}
if psl.options.builderConfig != nil {
// If no proposer settings are provided, create a default where the fee recipient is not populated; validator registration will be skipped for validators that don't have a fee recipient set.
// Skip saving to the DB if only builder settings are provided, until a trigger such as a keymanager API update supplies fee recipient values.

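As a rough mental model of the precedence handled by the load methods above, the sketch below merges proposer settings from successive sources (for example DB first, then a file or URL) so the later, more specific source wins. The types and merge rule are simplified assumptions, not Prysm's proposer.Settings.

package main

import "fmt"

// proposerSettings is a simplified stand-in for the loaded settings:
// a default fee recipient plus optional per-validator overrides.
type proposerSettings struct {
	DefaultFeeRecipient string
	PerValidator        map[string]string // pubkey -> fee recipient
}

// merge applies src on top of dst: non-empty fields in src win.
func merge(dst, src *proposerSettings) *proposerSettings {
	if dst == nil {
		return src
	}
	if src == nil {
		return dst
	}
	if src.DefaultFeeRecipient != "" {
		dst.DefaultFeeRecipient = src.DefaultFeeRecipient
	}
	for k, v := range src.PerValidator {
		if dst.PerValidator == nil {
			dst.PerValidator = map[string]string{}
		}
		dst.PerValidator[k] = v
	}
	return dst
}

func main() {
	fromDB := &proposerSettings{DefaultFeeRecipient: "0xdefault"}           // placeholder address
	fromFile := &proposerSettings{PerValidator: map[string]string{"0xkey": "0xoverride"}} // placeholder values
	final := merge(fromDB, fromFile)
	fmt.Printf("%+v\n", final)
}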
View File

@@ -812,14 +812,14 @@ func PayloadToHeaderElectra(payload interfaces.ExecutionDataElectra) (*enginev1.
return nil, err
}
depositRequests := payload.DepositRequests()
depositRequestsRoot, err := ssz.DepositRequestsSliceRoot(depositRequests, fieldparams.MaxDepositRequestsPerPayload)
depositReceipts := payload.DepositReceipts()
depositReceiptsRoot, err := ssz.DepositReceiptSliceRoot(depositReceipts, fieldparams.MaxDepositReceiptsPerPayload)
if err != nil {
return nil, err
}
withdrawalRequests := payload.WithdrawalRequests()
withdrawalRequestsRoot, err := ssz.WithdrawalRequestsSliceRoot(withdrawalRequests, fieldparams.MaxWithdrawalRequestsPerPayload)
withdrawalRequestsRoot, err := ssz.WithdrawalRequestSliceRoot(withdrawalRequests, fieldparams.MaxWithdrawalRequestsPerPayload)
if err != nil {
return nil, err
}
@@ -842,7 +842,7 @@ func PayloadToHeaderElectra(payload interfaces.ExecutionDataElectra) (*enginev1.
WithdrawalsRoot: withdrawalsRoot[:],
BlobGasUsed: blobGasUsed,
ExcessBlobGas: excessBlobGas,
DepositRequestsRoot: depositRequestsRoot[:],
DepositReceiptsRoot: depositReceiptsRoot[:],
WithdrawalRequestsRoot: withdrawalRequestsRoot[:],
}, nil
}
@@ -907,7 +907,7 @@ func IsEmptyExecutionData(data interfaces.ExecutionData) (bool, error) {
epe, postElectra := data.(interfaces.ExecutionDataElectra)
if postElectra {
drs := epe.DepositRequests()
drs := epe.DepositReceipts()
if len(drs) != 0 {
return false, nil
}
@@ -1389,13 +1389,13 @@ func (e executionPayloadHeaderElectra) ExcessBlobGas() (uint64, error) {
return e.p.ExcessBlobGas, nil
}
// DepositRequests --
func (e executionPayloadHeaderElectra) DepositRequests() ([]*enginev1.DepositRequest, error) {
// DepositReceipts --
func (e executionPayloadHeaderElectra) DepositReceipts() ([]*enginev1.DepositReceipt, error) {
return nil, consensus_types.ErrUnsupportedField
}
// WithdrawalRequests --
func (e executionPayloadHeaderElectra) WithdrawalRequests() ([]*enginev1.WithdrawalRequest, error) {
func (e executionPayloadHeaderElectra) WithdrawalRequests() ([]*enginev1.ExecutionLayerWithdrawalRequest, error) {
return nil, consensus_types.ErrUnsupportedField
}
@@ -1556,13 +1556,13 @@ func (e executionPayloadElectra) ExcessBlobGas() (uint64, error) {
return e.p.ExcessBlobGas, nil
}
// DepositRequests --
func (e executionPayloadElectra) DepositRequests() []*enginev1.DepositRequest {
return e.p.DepositRequests
// DepositReceipts --
func (e executionPayloadElectra) DepositReceipts() []*enginev1.DepositReceipt {
return e.p.DepositReceipts
}
// WithdrawalRequests --
func (e executionPayloadElectra) WithdrawalRequests() []*enginev1.WithdrawalRequest {
func (e executionPayloadElectra) WithdrawalRequests() []*enginev1.ExecutionLayerWithdrawalRequest {
return e.p.WithdrawalRequests
}

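PayloadToHeaderElectra above replaces each variable-length list in the payload (transactions, withdrawals, deposit receipts, withdrawal requests) with a fixed-size root so the header stays small. The sketch below mimics that shape using a plain SHA-256 over concatenated items as a stand-in for the real SSZ list roots computed by the ssz helpers (an assumption for illustration only).

package main

import (
	"crypto/sha256"
	"fmt"
)

// listRootPlaceholder stands in for an SSZ list root: it reduces a variable-length
// list of encoded items to a single 32-byte commitment.
func listRootPlaceholder(items [][]byte) [32]byte {
	h := sha256.New()
	for _, it := range items {
		h.Write(it)
	}
	var root [32]byte
	copy(root[:], h.Sum(nil))
	return root
}

func main() {
	depositReceipts := [][]byte{[]byte("receipt-0"), []byte("receipt-1")} // placeholder encodings
	withdrawalRequests := [][]byte{[]byte("request-0")}

	header := struct {
		DepositReceiptsRoot    [32]byte
		WithdrawalRequestsRoot [32]byte
	}{
		DepositReceiptsRoot:    listRootPlaceholder(depositReceipts),
		WithdrawalRequestsRoot: listRootPlaceholder(withdrawalRequests),
	}
	fmt.Printf("%x\n%x\n", header.DepositReceiptsRoot, header.WithdrawalRequestsRoot)
}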
View File

@@ -130,6 +130,6 @@ type ExecutionData interface {
type ExecutionDataElectra interface {
ExecutionData
DepositRequests() []*enginev1.DepositRequest
WithdrawalRequests() []*enginev1.WithdrawalRequest
DepositReceipts() []*enginev1.DepositReceipt
WithdrawalRequests() []*enginev1.ExecutionLayerWithdrawalRequest
}

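The Electra interface above is usually reached by type-asserting the base execution data, as the IsEmptyExecutionData hunk earlier in this diff shows. Below is a minimal sketch of that pattern with cut-down stand-in interfaces, not Prysm's actual types.

package main

import "fmt"

// executionData and executionDataElectra are simplified stand-ins for the
// interfaces above, used only to show the type-assertion pattern.
type executionData interface {
	BlockHash() []byte
}

type executionDataElectra interface {
	executionData
	DepositReceipts() [][]byte
}

type electraPayload struct{ receipts [][]byte }

func (p electraPayload) BlockHash() []byte         { return []byte{0x01} }
func (p electraPayload) DepositReceipts() [][]byte { return p.receipts }

func main() {
	var data executionData = electraPayload{receipts: [][]byte{{0xaa}}}
	if epe, postElectra := data.(executionDataElectra); postElectra {
		fmt.Println("deposit receipts:", len(epe.DepositReceipts()))
	}
}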
View File

@@ -133,11 +133,6 @@ func AggregateCompressedSignatures(multiSigs [][]byte) (common.Signature, error)
panic(err)
}
// VerifySignature -- stub
func VerifySignature(_ []byte, _ [32]byte, _ common.PublicKey) (bool, error) {
panic(err)
}
// VerifyMultipleSignatures -- stub
func VerifyMultipleSignatures(_ [][]byte, _ [][32]byte, _ []common.PublicKey) (bool, error) {
panic(err)

Some files were not shown because too many files have changed in this diff.