Refactor rpc proposer into smaller files (#9796)

* Refactor rpc proposer into smaller files

* Update BUILD.bazel

* Fix test

* Update BUILD.bazel
Author: terence tsao
Date: 2021-10-18 09:35:32 -07:00
Committed by: GitHub
Parent: 1f51e59bfd
Commit: a80b1c252a
10 changed files with 895 additions and 765 deletions

View File

@@ -42,45 +42,60 @@ func ProcessAttesterSlashings(
slashings []*ethpb.AttesterSlashing,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
- for idx, slashing := range slashings {
- if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
- return nil, errors.Wrapf(err, "could not verify attester slashing %d", idx)
+ var err error
+ for _, slashing := range slashings {
+ beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, slashFunc)
+ if err != nil {
+ return nil, err
}
- slashableIndices := slashableAttesterIndices(slashing)
- sort.SliceStable(slashableIndices, func(i, j int) bool {
- return slashableIndices[i] < slashableIndices[j]
- })
- currentEpoch := slots.ToEpoch(beaconState.Slot())
- var err error
- var slashedAny bool
- var val state.ReadOnlyValidator
- for _, validatorIndex := range slashableIndices {
- val, err = beaconState.ValidatorAtIndexReadOnly(types.ValidatorIndex(validatorIndex))
+ }
+ return beaconState, nil
+ }
+ // ProcessAttesterSlashing processes individual attester slashing.
+ func ProcessAttesterSlashing(
+ ctx context.Context,
+ beaconState state.BeaconState,
+ slashing *ethpb.AttesterSlashing,
+ slashFunc slashValidatorFunc,
+ ) (state.BeaconState, error) {
+ if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
+ return nil, errors.Wrap(err, "could not verify attester slashing")
+ }
+ slashableIndices := slashableAttesterIndices(slashing)
+ sort.SliceStable(slashableIndices, func(i, j int) bool {
+ return slashableIndices[i] < slashableIndices[j]
+ })
+ currentEpoch := slots.ToEpoch(beaconState.Slot())
+ var err error
+ var slashedAny bool
+ var val state.ReadOnlyValidator
+ for _, validatorIndex := range slashableIndices {
+ val, err = beaconState.ValidatorAtIndexReadOnly(types.ValidatorIndex(validatorIndex))
+ if err != nil {
+ return nil, err
+ }
+ if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
+ cfg := params.BeaconConfig()
+ var slashingQuotient uint64
+ switch {
+ case beaconState.Version() == version.Phase0:
+ slashingQuotient = cfg.MinSlashingPenaltyQuotient
+ case beaconState.Version() == version.Altair:
+ slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
+ default:
+ return nil, errors.New("unknown state version")
+ }
+ beaconState, err = slashFunc(ctx, beaconState, types.ValidatorIndex(validatorIndex), slashingQuotient, cfg.ProposerRewardQuotient)
if err != nil {
- return nil, err
- }
- if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
- cfg := params.BeaconConfig()
- var slashingQuotient uint64
- switch {
- case beaconState.Version() == version.Phase0:
- slashingQuotient = cfg.MinSlashingPenaltyQuotient
- case beaconState.Version() == version.Altair:
- slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
- default:
- return nil, errors.New("unknown state version")
- }
- beaconState, err = slashFunc(ctx, beaconState, types.ValidatorIndex(validatorIndex), slashingQuotient, cfg.ProposerRewardQuotient)
- if err != nil {
- return nil, errors.Wrapf(err, "could not slash validator index %d",
- validatorIndex)
- }
- slashedAny = true
+ return nil, errors.Wrapf(err, "could not slash validator index %d",
+ validatorIndex)
}
+ slashedAny = true
}
- if !slashedAny {
- return nil, errors.New("unable to slash any validator despite confirmed attester slashing")
- }
}
+ if !slashedAny {
+ return nil, errors.New("unable to slash any validator despite confirmed attester slashing")
+ }
return beaconState, nil
}
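
A note on slashableAttesterIndices, which ProcessAttesterSlashing relies on but which sits outside this hunk: per the consensus spec, the slashable set is the intersection of the attesting indices of the slashing's two attestations, sorted ascending. A minimal, hypothetical sketch of that intersection using plain uint64 slices (the package name and helper name are assumptions for illustration, not the actual Prysm implementation):

package blocks // package name assumed for illustration

import "sort"

// slashableIndicesSketch returns, in ascending order, the validator indices
// that appear in both attestations of an attester slashing. It stands in for
// slashableAttesterIndices, which is defined outside the lines shown here.
func slashableIndicesSketch(indices1, indices2 []uint64) []uint64 {
	seen := make(map[uint64]bool, len(indices1))
	for _, idx := range indices1 {
		seen[idx] = true
	}
	out := make([]uint64, 0)
	for _, idx := range indices2 {
		if seen[idx] {
			out = append(out, idx)
			seen[idx] = false // avoid emitting duplicates
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}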

View File

@@ -51,31 +51,46 @@ func ProcessProposerSlashings(
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
- for idx, slashing := range slashings {
- if slashing == nil {
- return nil, errors.New("nil proposer slashings in block body")
- }
- if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
- return nil, errors.Wrapf(err, "could not verify proposer slashing %d", idx)
- }
- cfg := params.BeaconConfig()
- var slashingQuotient uint64
- switch {
- case beaconState.Version() == version.Phase0:
- slashingQuotient = cfg.MinSlashingPenaltyQuotient
- case beaconState.Version() == version.Altair:
- slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
- default:
- return nil, errors.New("unknown state version")
- }
- beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, slashingQuotient, cfg.ProposerRewardQuotient)
+ for _, slashing := range slashings {
+ beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, slashFunc)
if err != nil {
- return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
+ return nil, err
}
}
return beaconState, nil
}
+ // ProcessProposerSlashing processes individual proposer slashing.
+ func ProcessProposerSlashing(
+ ctx context.Context,
+ beaconState state.BeaconState,
+ slashing *ethpb.ProposerSlashing,
+ slashFunc slashValidatorFunc,
+ ) (state.BeaconState, error) {
+ var err error
+ if slashing == nil {
+ return nil, errors.New("nil proposer slashings in block body")
+ }
+ if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
+ return nil, errors.Wrap(err, "could not verify proposer slashing")
+ }
+ cfg := params.BeaconConfig()
+ var slashingQuotient uint64
+ switch {
+ case beaconState.Version() == version.Phase0:
+ slashingQuotient = cfg.MinSlashingPenaltyQuotient
+ case beaconState.Version() == version.Altair:
+ slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
+ default:
+ return nil, errors.New("unknown state version")
+ }
+ beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, slashingQuotient, cfg.ProposerRewardQuotient)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
+ }
+ return beaconState, nil
+ }
// VerifyProposerSlashing verifies that the data provided from slashing is valid.
func VerifyProposerSlashing(
beaconState state.ReadOnlyBeaconState,
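
Both slashing paths take the same slashValidatorFunc callback; its definition is not part of this hunk. A sketch of the shape implied by the call sites above (context, state, index to slash, slashing penalty quotient, proposer reward quotient), stated here as an assumption rather than the actual type:

// Assumed signature of slashValidatorFunc, inferred from the slashFunc call
// sites in this diff; the real definition lives outside the lines shown.
type slashValidatorFunc func(
	ctx context.Context,
	st state.BeaconState,
	idx types.ValidatorIndex,
	penaltyQuotient uint64,
	proposerRewardQuotient uint64,
) (state.BeaconState, error)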

View File

@@ -10,7 +10,11 @@ go_library(
"exit.go",
"log.go",
"proposer.go",
"proposer_altair.go",
"proposer_attestations.go",
"proposer_deposits.go",
"proposer_eth1data.go",
"proposer_phase0.go",
"proposer_sync_aggregate.go",
"server.go",
"status.go",
@@ -34,6 +38,7 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",

View File

@@ -1,39 +1,23 @@
package validator
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"math/big"
"time"
fastssz "github.com/ferranbt/fastssz"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation/aggregation/attestations"
synccontribution "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation/aggregation/sync_contribution"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -43,19 +27,6 @@ var eth1DataNotification bool
const eth1dataTimeout = 2 * time.Second
// blockData is the data required to create a beacon block.
type blockData struct {
ParentRoot []byte
Graffiti [32]byte
ProposerIdx types.ValidatorIndex
Eth1Data *ethpb.Eth1Data
Deposits []*ethpb.Deposit
Attestations []*ethpb.Attestation
ProposerSlashings []*ethpb.ProposerSlashing
AttesterSlashings []*ethpb.AttesterSlashing
VoluntaryExits []*ethpb.SignedVoluntaryExit
}
// GetBeaconBlock is called by a proposer during its assigned slot to request a block to sign
// by passing in the slot and the signed randao reveal of the slot. Returns phase0 beacon blocks
// before the Altair fork epoch and Altair blocks post-fork epoch.
@@ -89,162 +60,6 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
return vs.getPhase0BeaconBlock(ctx, req)
}
func (vs *Server) getAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getAltairBeaconBlock")
defer span.End()
blkData, err := vs.buildPhase0BlockData(ctx, req)
if err != nil {
return nil, fmt.Errorf("could not build block data: %v", err)
}
// Use zero hash as stub for state root to compute later.
stateRoot := params.BeaconConfig().ZeroHash[:]
// No need for safe sub as req.Slot cannot be 0 if requesting Altair blocks. If 0, we will be throwing
// an error in the first validity check of this endpoint.
syncAggregate, err := vs.getSyncAggregate(ctx, req.Slot-1, bytesutil.ToBytes32(blkData.ParentRoot))
if err != nil {
return nil, err
}
blk := &ethpb.BeaconBlockAltair{
Slot: req.Slot,
ParentRoot: blkData.ParentRoot,
StateRoot: stateRoot,
ProposerIndex: blkData.ProposerIdx,
Body: &ethpb.BeaconBlockBodyAltair{
Eth1Data: blkData.Eth1Data,
Deposits: blkData.Deposits,
Attestations: blkData.Attestations,
RandaoReveal: req.RandaoReveal,
ProposerSlashings: blkData.ProposerSlashings,
AttesterSlashings: blkData.AttesterSlashings,
VoluntaryExits: blkData.VoluntaryExits,
Graffiti: blkData.Graffiti[:],
SyncAggregate: syncAggregate,
},
}
// Compute state root with the newly constructed block.
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(
&ethpb.SignedBeaconBlockAltair{Block: blk, Signature: make([]byte, 96)},
)
if err != nil {
return nil, err
}
stateRoot, err = vs.ComputeStateRoot(ctx, wsb)
if err != nil {
interop.WriteBlockToDisk(wsb, true /*failed*/)
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return blk, nil
}
func (vs *Server) getPhase0BeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getPhase0BeaconBlock")
defer span.End()
blkData, err := vs.buildPhase0BlockData(ctx, req)
if err != nil {
return nil, fmt.Errorf("could not build block data: %v", err)
}
// Use zero hash as stub for state root to compute later.
stateRoot := params.BeaconConfig().ZeroHash[:]
blk := &ethpb.BeaconBlock{
Slot: req.Slot,
ParentRoot: blkData.ParentRoot,
StateRoot: stateRoot,
ProposerIndex: blkData.ProposerIdx,
Body: &ethpb.BeaconBlockBody{
Eth1Data: blkData.Eth1Data,
Deposits: blkData.Deposits,
Attestations: blkData.Attestations,
RandaoReveal: req.RandaoReveal,
ProposerSlashings: blkData.ProposerSlashings,
AttesterSlashings: blkData.AttesterSlashings,
VoluntaryExits: blkData.VoluntaryExits,
Graffiti: blkData.Graffiti[:],
},
}
// Compute state root with the newly constructed block.
stateRoot, err = vs.ComputeStateRoot(
ctx, wrapper.WrappedPhase0SignedBeaconBlock(
&ethpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)},
),
)
if err != nil {
interop.WriteBlockToDisk(wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: blk}), true /*failed*/)
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return blk, nil
}
// Build data required for creating a new beacon block, so this method can be shared across forks.
func (vs *Server) buildPhase0BlockData(ctx context.Context, req *ethpb.BlockRequest) (*blockData, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.buildPhase0BlockData")
defer span.End()
if vs.SyncChecker.Syncing() {
return nil, fmt.Errorf("syncing to latest head, not ready to respond")
}
// Retrieve the parent block as the current head of the canonical chain.
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, fmt.Errorf("could not retrieve head root: %v", err)
}
head, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, fmt.Errorf("could not get head state %v", err)
}
if features.Get().EnableNextSlotStateCache {
head, err = transition.ProcessSlotsUsingNextSlotCache(ctx, head, parentRoot, req.Slot)
if err != nil {
return nil, fmt.Errorf("could not advance slots to calculate proposer index: %v", err)
}
} else {
head, err = transition.ProcessSlots(ctx, head, req.Slot)
if err != nil {
return nil, fmt.Errorf("could not advance slot to calculate proposer index: %v", err)
}
}
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
if err != nil {
return nil, fmt.Errorf("could not get ETH1 data: %v", err)
}
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data)
if err != nil {
return nil, err
}
graffiti := bytesutil.ToBytes32(req.Graffiti)
// Calculate new proposer index.
idx, err := helpers.BeaconProposerIndex(ctx, head)
if err != nil {
return nil, fmt.Errorf("could not calculate proposer index %v", err)
}
return &blockData{
ParentRoot: parentRoot,
Graffiti: graffiti,
ProposerIdx: idx,
Eth1Data: eth1Data,
Deposits: deposits,
Attestations: atts,
ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/),
AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/),
VoluntaryExits: vs.ExitPool.PendingExits(head, req.Slot, false /*noLimit*/),
}, nil
}
// ProposeBeaconBlock is called by a proposer during its assigned slot to create a block in an attempt
// to get it processed by the beacon node as the canonical head.
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
@@ -266,70 +81,6 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return vs.proposeGenericBeaconBlock(ctx, blk)
}
func (vs *Server) packDepositsAndAttestations(ctx context.Context, head state.BeaconState, eth1Data *ethpb.Eth1Data) ([]*ethpb.Deposit, []*ethpb.Attestation, error) {
if features.Get().EnableGetBlockOptimizations {
deposits, atts, err := vs.optimizedPackDepositsAndAttestations(ctx, head, eth1Data)
if err != nil {
return nil, nil, err
}
return deposits, atts, nil
}
// Pack ETH1 deposits which have not been included in the beacon chain.
deposits, err := vs.deposits(ctx, head, eth1Data)
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
}
// Pack aggregated attestations which have not been included in the beacon chain.
atts, err := vs.packAttestations(ctx, head)
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not get attestations to pack into block: %v", err)
}
return deposits, atts, nil
}
func (vs *Server) optimizedPackDepositsAndAttestations(ctx context.Context, head state.BeaconState, eth1Data *ethpb.Eth1Data) ([]*ethpb.Deposit, []*ethpb.Attestation, error) {
eg, egctx := errgroup.WithContext(ctx)
var deposits []*ethpb.Deposit
var atts []*ethpb.Attestation
eg.Go(func() error {
// Pack ETH1 deposits which have not been included in the beacon chain.
localDeposits, err := vs.deposits(egctx, head, eth1Data)
if err != nil {
return status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
}
// if the original context is cancelled, then cancel this routine too
select {
case <-egctx.Done():
return egctx.Err()
default:
}
deposits = localDeposits
return nil
})
eg.Go(func() error {
// Pack aggregated attestations which have not been included in the beacon chain.
localAtts, err := vs.packAttestations(egctx, head)
if err != nil {
return status.Errorf(codes.Internal, "Could not get attestations to pack into block: %v", err)
}
// if the original context is cancelled, then cancel this routine too
select {
case <-egctx.Done():
return egctx.Err()
default:
}
atts = localAtts
return nil
})
return deposits, atts, eg.Wait()
}
// ProposeBlock is called by a proposer during its assigned slot to create a block in an attempt
// to get it processed by the beacon node as the canonical head.
//
@@ -376,197 +127,9 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk block.Signe
}, nil
}
// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
// The contributions are filtered based on matching of the input root and slot then profitability.
func (vs *Server) getSyncAggregate(ctx context.Context, slot types.Slot, root [32]byte) (*ethpb.SyncAggregate, error) {
_, span := trace.StartSpan(ctx, "ProposerServer.GetSyncAggregate")
defer span.End()
// Contributions have to match the input root
contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
if err != nil {
return nil, err
}
proposerContributions := proposerSyncContributions(contributions).filterByBlockRoot(root)
// Each sync subcommittee is 128 bits and the sync committee is 512 bits for mainnet.
var bitsHolder [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
bitsHolder = append(bitsHolder, ethpb.NewSyncCommitteeAggregationBits())
}
sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount)
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
cs := proposerContributions.filterBySubIndex(i)
aggregates, err := synccontribution.Aggregate(cs)
if err != nil {
return nil, err
}
// Retrieve the most profitable contribution
deduped, err := proposerSyncContributions(aggregates).dedup()
if err != nil {
return nil, err
}
c := deduped.mostProfitable()
if c == nil {
continue
}
bitsHolder[i] = c.AggregationBits
sig, err := bls.SignatureFromBytes(c.Signature)
if err != nil {
return nil, err
}
sigsHolder = append(sigsHolder, sig)
}
// Aggregate all the contribution bits and signatures.
var syncBits []byte
for _, b := range bitsHolder {
syncBits = append(syncBits, b...)
}
syncSig := bls.AggregateSignatures(sigsHolder)
var syncSigBytes [96]byte
if syncSig == nil {
syncSigBytes = [96]byte{0xC0} // Use the infinity signature when syncSig is nil.
} else {
syncSigBytes = bytesutil.ToBytes96(syncSig.Marshal())
}
return &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: syncSigBytes[:],
}, nil
}
// eth1DataMajorityVote determines the appropriate eth1data for a block proposal using
// an algorithm called Voting with the Majority. The algorithm works as follows:
// - Determine the timestamp for the start slot for the eth1 voting period.
// - Determine the earliest and latest timestamps that a valid block can have.
// - Determine the first block not before the earliest timestamp. This block is the lower bound.
// - Determine the last block not after the latest timestamp. This block is the upper bound.
// - If the last block is too early, use current eth1data from the beacon state.
// - Filter out votes on unknown blocks and blocks which are outside of the range determined by the lower and upper bounds.
// - If no blocks are left after filtering votes, use eth1data from the latest valid block.
// - Otherwise:
// - Determine the vote with the highest count. Prefer the vote with the highest eth1 block height in the event of a tie.
// - This vote's block is the eth1 block to use for the block proposal.
func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState state.BeaconState) (*ethpb.Eth1Data, error) {
ctx, cancel := context.WithTimeout(ctx, eth1dataTimeout)
defer cancel()
slot := beaconState.Slot()
votingPeriodStartTime := vs.slotStartTime(slot)
if vs.MockEth1Votes {
return vs.mockETH1DataVote(ctx, slot)
}
if !vs.Eth1InfoFetcher.IsConnectedToETH1() {
return vs.randomETH1DataVote(ctx)
}
eth1DataNotification = false
eth1FollowDistance := params.BeaconConfig().Eth1FollowDistance
earliestValidTime := votingPeriodStartTime - 2*params.BeaconConfig().SecondsPerETH1Block*eth1FollowDistance
latestValidTime := votingPeriodStartTime - params.BeaconConfig().SecondsPerETH1Block*eth1FollowDistance
if !features.Get().EnableGetBlockOptimizations {
_, err := vs.Eth1BlockFetcher.BlockByTimestamp(ctx, earliestValidTime)
if err != nil {
log.WithError(err).Error("Could not get last block by earliest valid time")
return vs.randomETH1DataVote(ctx)
}
}
lastBlockByLatestValidTime, err := vs.Eth1BlockFetcher.BlockByTimestamp(ctx, latestValidTime)
if err != nil {
log.WithError(err).Error("Could not get last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
if lastBlockByLatestValidTime.Time < earliestValidTime {
return vs.HeadFetcher.HeadETH1Data(), nil
}
lastBlockDepositCount, lastBlockDepositRoot := vs.DepositFetcher.DepositsNumberAndRootAtHeight(ctx, lastBlockByLatestValidTime.Number)
if lastBlockDepositCount == 0 {
return vs.ChainStartFetcher.ChainStartEth1Data(), nil
}
if lastBlockDepositCount >= vs.HeadFetcher.HeadETH1Data().DepositCount {
hash, err := vs.Eth1BlockFetcher.BlockHashByHeight(ctx, lastBlockByLatestValidTime.Number)
if err != nil {
log.WithError(err).Error("Could not get hash of last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
return &ethpb.Eth1Data{
BlockHash: hash.Bytes(),
DepositCount: lastBlockDepositCount,
DepositRoot: lastBlockDepositRoot[:],
}, nil
}
return vs.HeadFetcher.HeadETH1Data(), nil
}
func (vs *Server) slotStartTime(slot types.Slot) uint64 {
startTime, _ := vs.Eth1InfoFetcher.Eth2GenesisPowchainInfo()
return slots.VotingPeriodStartTime(startTime, slot)
}
func (vs *Server) mockETH1DataVote(ctx context.Context, slot types.Slot) (*ethpb.Eth1Data, error) {
if !eth1DataNotification {
log.Warn("Beacon Node is no longer connected to an ETH1 chain, so ETH1 data votes are now mocked.")
eth1DataNotification = true
}
// If mock eth1 data votes are specified, we use the following for the
// eth1data we provide to every proposer based on https://github.com/ethereum/eth2.0-pm/issues/62:
//
// slot_in_voting_period = current_slot % SLOTS_PER_ETH1_VOTING_PERIOD
// Eth1Data(
// DepositRoot = hash(current_epoch + slot_in_voting_period),
// DepositCount = state.eth1_deposit_index,
// BlockHash = hash(hash(current_epoch + slot_in_voting_period)),
// )
slotInVotingPeriod := slot.ModSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
headState, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
}
var enc []byte
enc = fastssz.MarshalUint64(enc, uint64(slots.ToEpoch(slot))+uint64(slotInVotingPeriod))
depRoot := hash.Hash(enc)
blockHash := hash.Hash(depRoot[:])
return &ethpb.Eth1Data{
DepositRoot: depRoot[:],
DepositCount: headState.Eth1DepositIndex(),
BlockHash: blockHash[:],
}, nil
}
func (vs *Server) randomETH1DataVote(ctx context.Context) (*ethpb.Eth1Data, error) {
if !eth1DataNotification {
log.Warn("Beacon Node is no longer connected to an ETH1 chain, so ETH1 data votes are now random.")
eth1DataNotification = true
}
headState, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
}
// set random roots and block hashes to prevent a majority from being
// built if the eth1 node is offline
randGen := rand.NewGenerator()
depRoot := hash.Hash(bytesutil.Bytes32(randGen.Uint64()))
blockHash := hash.Hash(bytesutil.Bytes32(randGen.Uint64()))
return &ethpb.Eth1Data{
DepositRoot: depRoot[:],
DepositCount: headState.Eth1DepositIndex(),
BlockHash: blockHash[:],
}, nil
}
- // ComputeStateRoot computes the state root after a block has been processed through a state transition and
+ // computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
- func (vs *Server) ComputeStateRoot(ctx context.Context, block block.SignedBeaconBlock) ([]byte, error) {
+ func (vs *Server) computeStateRoot(ctx context.Context, block block.SignedBeaconBlock) ([]byte, error) {
beaconState, err := vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(block.Block().ParentRoot()))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
@@ -583,274 +146,3 @@ func (vs *Server) ComputeStateRoot(ctx context.Context, block block.SignedBeacon
log.WithField("beaconStateRoot", fmt.Sprintf("%#x", root)).Debugf("Computed state root")
return root[:], nil
}
// deposits returns a list of pending deposits that are ready for inclusion in the next beacon
// block. Determining deposits depends on the current eth1data vote for the block and whether or not
// this eth1data has enough support to be considered for deposits inclusion. If current vote has
// enough support, then use that vote for basis of determining deposits, otherwise use current state
// eth1data.
func (vs *Server) deposits(
ctx context.Context,
beaconState state.BeaconState,
currentVote *ethpb.Eth1Data,
) ([]*ethpb.Deposit, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
defer span.End()
if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
return []*ethpb.Deposit{}, nil
}
// Need to fetch if the deposits up to the state's latest eth1 data matches
// the number of all deposits in this RPC call. If not, then we return nil.
canonicalEth1Data, canonicalEth1DataHeight, err := vs.canonicalEth1Data(ctx, beaconState, currentVote)
if err != nil {
return nil, err
}
_, genesisEth1Block := vs.Eth1InfoFetcher.Eth2GenesisPowchainInfo()
if genesisEth1Block.Cmp(canonicalEth1DataHeight) == 0 {
return []*ethpb.Deposit{}, nil
}
// If there are no pending deposits, exit early.
allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
if len(allPendingContainers) == 0 {
return []*ethpb.Deposit{}, nil
}
depositTrie, err := vs.depositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve deposit trie")
}
// Deposits need to be received in order of merkle index root, so this has to make sure
// deposits are sorted from lowest to highest.
var pendingDeps []*ethpb.DepositContainer
for _, dep := range allPendingContainers {
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
pendingDeps = append(pendingDeps, dep)
}
}
for i := range pendingDeps {
// Don't construct merkle proof if the number of deposits is more than max allowed in block.
if uint64(i) == params.BeaconConfig().MaxDeposits {
break
}
pendingDeps[i].Deposit, err = constructMerkleProof(depositTrie, int(pendingDeps[i].Index), pendingDeps[i].Deposit)
if err != nil {
return nil, err
}
}
// Limit the return of pending deposits to not be more than max deposits allowed in block.
var pendingDeposits []*ethpb.Deposit
for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
}
return pendingDeposits, nil
}
// canonicalEth1Data determines the canonical eth1data and eth1 block height to use for determining deposits.
func (vs *Server) canonicalEth1Data(
ctx context.Context,
beaconState state.BeaconState,
currentVote *ethpb.Eth1Data) (*ethpb.Eth1Data, *big.Int, error) {
var eth1BlockHash [32]byte
// Add in current vote, to get accurate vote tally
if err := beaconState.AppendEth1DataVotes(currentVote); err != nil {
return nil, nil, errors.Wrap(err, "could not append eth1 data votes to state")
}
hasSupport, err := blocks.Eth1DataHasEnoughSupport(beaconState, currentVote)
if err != nil {
return nil, nil, errors.Wrap(err, "could not determine if current eth1data vote has enough support")
}
var canonicalEth1Data *ethpb.Eth1Data
if hasSupport {
canonicalEth1Data = currentVote
eth1BlockHash = bytesutil.ToBytes32(currentVote.BlockHash)
} else {
canonicalEth1Data = beaconState.Eth1Data()
eth1BlockHash = bytesutil.ToBytes32(beaconState.Eth1Data().BlockHash)
}
_, canonicalEth1DataHeight, err := vs.Eth1BlockFetcher.BlockExists(ctx, eth1BlockHash)
if err != nil {
return nil, nil, errors.Wrap(err, "could not fetch eth1data height")
}
return canonicalEth1Data, canonicalEth1DataHeight, nil
}
func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1Data, canonicalEth1DataHeight *big.Int) (*trie.SparseMerkleTrie, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.depositTrie")
defer span.End()
var depositTrie *trie.SparseMerkleTrie
finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
depositTrie = finalizedDeposits.Deposits
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
insertIndex := finalizedDeposits.MerkleTrieIndex + 1
for _, dep := range upToEth1DataDeposits {
depHash, err := dep.Data.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not hash deposit data")
}
if err = depositTrie.Insert(depHash[:], int(insertIndex)); err != nil {
return nil, err
}
insertIndex++
}
valid, err := vs.validateDepositTrie(depositTrie, canonicalEth1Data)
// Log a warning here, as the cached trie is invalid.
if !valid {
log.Warnf("Cached deposit trie is invalid, rebuilding it now: %v", err)
return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
}
return depositTrie, nil
}
// rebuilds our deposit trie by recreating it from all processed deposits till
// specified eth1 block height.
func (vs *Server) rebuildDepositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1Data, canonicalEth1DataHeight *big.Int) (*trie.SparseMerkleTrie, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.rebuildDepositTrie")
defer span.End()
deposits := vs.DepositFetcher.AllDeposits(ctx, canonicalEth1DataHeight)
trieItems := make([][]byte, 0, len(deposits))
for _, dep := range deposits {
depHash, err := dep.Data.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not hash deposit data")
}
trieItems = append(trieItems, depHash[:])
}
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
valid, err := vs.validateDepositTrie(depositTrie, canonicalEth1Data)
// Log an error here, as even with rebuilding the trie, it is still invalid.
if !valid {
log.Errorf("Rebuilt deposit trie is invalid: %v", err)
}
return depositTrie, nil
}
// validate that the provided deposit trie matches up with the canonical eth1 data provided.
func (vs *Server) validateDepositTrie(trie *trie.SparseMerkleTrie, canonicalEth1Data *ethpb.Eth1Data) (bool, error) {
if trie.NumOfItems() != int(canonicalEth1Data.DepositCount) {
return false, errors.Errorf("wanted the canonical count of %d but received %d", canonicalEth1Data.DepositCount, trie.NumOfItems())
}
rt := trie.HashTreeRoot()
if !bytes.Equal(rt[:], canonicalEth1Data.DepositRoot) {
return false, errors.Errorf("wanted the canonical deposit root of %#x but received %#x", canonicalEth1Data.DepositRoot, rt)
}
return true, nil
}
// This filters the input attestations to return a list of valid attestations to be packaged inside a beacon block.
func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.BeaconState, atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
defer span.End()
validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
return nil, err
}
return validAtts, nil
}
// The input attestations have already been processed and seen by the node; this deletes them from the pool
// so proposers don't include them in future blocks.
func (vs *Server) deleteAttsInPool(ctx context.Context, atts []*ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "ProposerServer.deleteAttsInPool")
defer span.End()
for _, att := range atts {
if ctx.Err() != nil {
return ctx.Err()
}
if helpers.IsAggregated(att) {
if err := vs.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := vs.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}
}
return nil
}
func constructMerkleProof(trie *trie.SparseMerkleTrie, index int, deposit *ethpb.Deposit) (*ethpb.Deposit, error) {
proof, err := trie.MerkleProof(index)
if err != nil {
return nil, errors.Wrapf(err, "could not generate merkle proof for deposit at index %d", index)
}
// For every deposit, we construct a Merkle proof using the powchain service's
// in-memory deposits trie, which is updated only once the state's LatestETH1Data
// property changes during a state transition after a voting period.
deposit.Proof = proof
return deposit, nil
}
func (vs *Server) packAttestations(ctx context.Context, latestState state.BeaconState) ([]*ethpb.Attestation, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.packAttestations")
defer span.End()
atts := vs.AttPool.AggregatedAttestations()
atts, err := vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
uAtts, err := vs.AttPool.UnaggregatedAttestations()
if err != nil {
return nil, errors.Wrap(err, "could not get unaggregated attestations")
}
uAtts, err = vs.validateAndDeleteAttsInPool(ctx, latestState, uAtts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
atts = append(atts, uAtts...)
// Remove duplicates from both aggregated/unaggregated attestations. This
// prevents inefficient aggregates being created.
atts, err = proposerAtts(atts).dedup()
if err != nil {
return nil, err
}
attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(atts))
for _, att := range atts {
attDataRoot, err := att.Data.HashTreeRoot()
if err != nil {
return nil, err
}
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
}
attsForInclusion := proposerAtts(make([]*ethpb.Attestation, 0))
for _, as := range attsByDataRoot {
as, err := attaggregation.Aggregate(as)
if err != nil {
return nil, err
}
attsForInclusion = append(attsForInclusion, as...)
}
deduped, err := attsForInclusion.dedup()
if err != nil {
return nil, err
}
sorted, err := deduped.sortByProfitability()
if err != nil {
return nil, err
}
atts = sorted.limitToMaxAttestations()
return atts, nil
}
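
The GetBeaconBlock comment above says the server returns phase0 blocks before the Altair fork epoch and Altair blocks afterwards; the switch itself falls outside the displayed lines. A minimal sketch of the fork selection that comment implies, assuming the request slot is converted to an epoch and compared against params.BeaconConfig().AltairForkEpoch (field assumed, not shown in this diff):

// Sketch of the fork check implied by GetBeaconBlock's doc comment; the actual
// implementation is not shown in this hunk, and the generic return type is a
// simplification.
func (vs *Server) getBlockSketch(ctx context.Context, req *ethpb.BlockRequest) (interface{}, error) {
	if slots.ToEpoch(req.Slot) < params.BeaconConfig().AltairForkEpoch {
		return vs.getPhase0BeaconBlock(ctx, req)
	}
	return vs.getAltairBeaconBlock(ctx, req)
}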

View File

@@ -0,0 +1,130 @@
package validator
import (
"context"
"fmt"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
synccontribution "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation/aggregation/sync_contribution"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"go.opencensus.io/trace"
)
func (vs *Server) getAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getAltairBeaconBlock")
defer span.End()
blkData, err := vs.buildPhase0BlockData(ctx, req)
if err != nil {
return nil, fmt.Errorf("could not build block data: %v", err)
}
// Use zero hash as stub for state root to compute later.
stateRoot := params.BeaconConfig().ZeroHash[:]
// No need for safe sub as req.Slot cannot be 0 if requesting Altair blocks. If 0, we will be throwing
// an error in the first validity check of this endpoint.
syncAggregate, err := vs.getSyncAggregate(ctx, req.Slot-1, bytesutil.ToBytes32(blkData.ParentRoot))
if err != nil {
return nil, err
}
blk := &ethpb.BeaconBlockAltair{
Slot: req.Slot,
ParentRoot: blkData.ParentRoot,
StateRoot: stateRoot,
ProposerIndex: blkData.ProposerIdx,
Body: &ethpb.BeaconBlockBodyAltair{
Eth1Data: blkData.Eth1Data,
Deposits: blkData.Deposits,
Attestations: blkData.Attestations,
RandaoReveal: req.RandaoReveal,
ProposerSlashings: blkData.ProposerSlashings,
AttesterSlashings: blkData.AttesterSlashings,
VoluntaryExits: blkData.VoluntaryExits,
Graffiti: blkData.Graffiti[:],
SyncAggregate: syncAggregate,
},
}
// Compute state root with the newly constructed block.
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(
&ethpb.SignedBeaconBlockAltair{Block: blk, Signature: make([]byte, 96)},
)
if err != nil {
return nil, err
}
stateRoot, err = vs.computeStateRoot(ctx, wsb)
if err != nil {
interop.WriteBlockToDisk(wsb, true /*failed*/)
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return blk, nil
}
// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
// The contributions are filtered based on matching of the input root and slot then profitability.
func (vs *Server) getSyncAggregate(ctx context.Context, slot types.Slot, root [32]byte) (*ethpb.SyncAggregate, error) {
_, span := trace.StartSpan(ctx, "ProposerServer.getSyncAggregate")
defer span.End()
// Contributions have to match the input root
contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
if err != nil {
return nil, err
}
proposerContributions := proposerSyncContributions(contributions).filterByBlockRoot(root)
// Each sync subcommittee is 128 bits and the sync committee is 512 bits for mainnet.
var bitsHolder [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
bitsHolder = append(bitsHolder, ethpb.NewSyncCommitteeAggregationBits())
}
sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount)
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
cs := proposerContributions.filterBySubIndex(i)
aggregates, err := synccontribution.Aggregate(cs)
if err != nil {
return nil, err
}
// Retrieve the most profitable contribution
deduped, err := proposerSyncContributions(aggregates).dedup()
if err != nil {
return nil, err
}
c := deduped.mostProfitable()
if c == nil {
continue
}
bitsHolder[i] = c.AggregationBits
sig, err := bls.SignatureFromBytes(c.Signature)
if err != nil {
return nil, err
}
sigsHolder = append(sigsHolder, sig)
}
// Aggregate all the contribution bits and signatures.
var syncBits []byte
for _, b := range bitsHolder {
syncBits = append(syncBits, b...)
}
syncSig := bls.AggregateSignatures(sigsHolder)
var syncSigBytes [96]byte
if syncSig == nil {
syncSigBytes = [96]byte{0xC0} // Use the infinity signature when syncSig is nil.
} else {
syncSigBytes = bytesutil.ToBytes96(syncSig.Marshal())
}
return &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: syncSigBytes[:],
}, nil
}
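
The comment in getSyncAggregate notes that each sync subcommittee is 128 bits and the full committee 512 bits on mainnet, which is where the four bitsHolder entries come from. A small sketch of that arithmetic; the field names come from the code above, while the concrete numbers assume mainnet-style values (SyncCommitteeSize = 512, SyncCommitteeSubnetCount = 4):

// Bit layout behind bitsHolder and syncBits in getSyncAggregate. With the
// assumed mainnet-style values, each of the 4 subnets contributes 128 bits,
// so the final SyncCommitteeBits field is 512 bits (64 bytes) long.
func syncAggregateSizes() (bitsPerSubnet, totalBytes uint64) {
	cfg := params.BeaconConfig()
	bitsPerSubnet = cfg.SyncCommitteeSize / cfg.SyncCommitteeSubnetCount // 512 / 4 = 128
	totalBytes = cfg.SyncCommitteeSize / 8                               // 512 bits = 64 bytes
	return bitsPerSubnet, totalBytes
}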

View File

@@ -4,6 +4,7 @@ import (
"context"
"sort"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
@@ -14,11 +15,69 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation/aggregation"
attaggregation "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation/aggregation/attestations"
"github.com/prysmaticlabs/prysm/runtime/version"
"go.opencensus.io/trace"
)
type proposerAtts []*ethpb.Attestation
func (vs *Server) packAttestations(ctx context.Context, latestState state.BeaconState) ([]*ethpb.Attestation, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.packAttestations")
defer span.End()
atts := vs.AttPool.AggregatedAttestations()
atts, err := vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
uAtts, err := vs.AttPool.UnaggregatedAttestations()
if err != nil {
return nil, errors.Wrap(err, "could not get unaggregated attestations")
}
uAtts, err = vs.validateAndDeleteAttsInPool(ctx, latestState, uAtts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
atts = append(atts, uAtts...)
// Remove duplicates from both aggregated/unaggregated attestations. This
// prevents inefficient aggregates being created.
atts, err = proposerAtts(atts).dedup()
if err != nil {
return nil, err
}
attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(atts))
for _, att := range atts {
attDataRoot, err := att.Data.HashTreeRoot()
if err != nil {
return nil, err
}
attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att)
}
attsForInclusion := proposerAtts(make([]*ethpb.Attestation, 0))
for _, as := range attsByDataRoot {
as, err := attaggregation.Aggregate(as)
if err != nil {
return nil, err
}
attsForInclusion = append(attsForInclusion, as...)
}
deduped, err := attsForInclusion.dedup()
if err != nil {
return nil, err
}
sorted, err := deduped.sortByProfitability()
if err != nil {
return nil, err
}
atts = sorted.limitToMaxAttestations()
return atts, nil
}
// filter separates the attestation list into two groups: valid and invalid attestations.
// The first group passes all the required checks for an attestation to be considered for proposing.
// Attestations from the second group should be deleted.
@@ -192,3 +251,38 @@ func (a proposerAtts) dedup() (proposerAtts, error) {
return uniqAtts, nil
}
// This filters the input attestations to return a list of valid attestations to be packaged inside a beacon block.
func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.BeaconState, atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
defer span.End()
validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
return nil, err
}
return validAtts, nil
}
// The input attestations have already been processed and seen by the node; this deletes them from the pool
// so proposers don't include them in future blocks.
func (vs *Server) deleteAttsInPool(ctx context.Context, atts []*ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "ProposerServer.deleteAttsInPool")
defer span.End()
for _, att := range atts {
if ctx.Err() != nil {
return ctx.Err()
}
if helpers.IsAggregated(att) {
if err := vs.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := vs.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}
}
return nil
}
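
packAttestations groups attestations by the hash tree root of their AttestationData before aggregating, so only attestations voting for identical data are merged, then flattens the per-group results for inclusion. A simplified, standalone sketch of that group-then-aggregate step (the helper name is hypothetical; attaggregation.Aggregate is the same aggregator used above):

// aggregateByDataRoot mirrors the grouping loop in packAttestations: bucket
// attestations by their data root, aggregate each bucket, and flatten.
func aggregateByDataRoot(atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
	byRoot := make(map[[32]byte][]*ethpb.Attestation, len(atts))
	for _, att := range atts {
		root, err := att.Data.HashTreeRoot()
		if err != nil {
			return nil, err
		}
		byRoot[root] = append(byRoot[root], att)
	}
	out := make([]*ethpb.Attestation, 0, len(atts))
	for _, group := range byRoot {
		aggregated, err := attaggregation.Aggregate(group)
		if err != nil {
			return nil, err
		}
		out = append(out, aggregated...)
	}
	return out, nil
}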

View File

@@ -0,0 +1,231 @@
package validator
import (
"bytes"
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func (vs *Server) packDepositsAndAttestations(ctx context.Context, head state.BeaconState, eth1Data *ethpb.Eth1Data) ([]*ethpb.Deposit, []*ethpb.Attestation, error) {
if features.Get().EnableGetBlockOptimizations {
deposits, atts, err := vs.optimizedPackDepositsAndAttestations(ctx, head, eth1Data)
if err != nil {
return nil, nil, err
}
return deposits, atts, nil
}
// Pack ETH1 deposits which have not been included in the beacon chain.
deposits, err := vs.deposits(ctx, head, eth1Data)
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
}
// Pack aggregated attestations which have not been included in the beacon chain.
atts, err := vs.packAttestations(ctx, head)
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not get attestations to pack into block: %v", err)
}
return deposits, atts, nil
}
func (vs *Server) optimizedPackDepositsAndAttestations(ctx context.Context, head state.BeaconState, eth1Data *ethpb.Eth1Data) ([]*ethpb.Deposit, []*ethpb.Attestation, error) {
eg, egctx := errgroup.WithContext(ctx)
var deposits []*ethpb.Deposit
var atts []*ethpb.Attestation
eg.Go(func() error {
// Pack ETH1 deposits which have not been included in the beacon chain.
localDeposits, err := vs.deposits(egctx, head, eth1Data)
if err != nil {
return status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
}
// if the original context is cancelled, then cancel this routine too
select {
case <-egctx.Done():
return egctx.Err()
default:
}
deposits = localDeposits
return nil
})
eg.Go(func() error {
// Pack aggregated attestations which have not been included in the beacon chain.
localAtts, err := vs.packAttestations(egctx, head)
if err != nil {
return status.Errorf(codes.Internal, "Could not get attestations to pack into block: %v", err)
}
// if the original context is cancelled, then cancel this routine too
select {
case <-egctx.Done():
return egctx.Err()
default:
}
atts = localAtts
return nil
})
return deposits, atts, eg.Wait()
}
// deposits returns a list of pending deposits that are ready for inclusion in the next beacon
// block. Determining deposits depends on the current eth1data vote for the block and whether or not
// this eth1data has enough support to be considered for deposits inclusion. If current vote has
// enough support, then use that vote for basis of determining deposits, otherwise use current state
// eth1data.
func (vs *Server) deposits(
ctx context.Context,
beaconState state.BeaconState,
currentVote *ethpb.Eth1Data,
) ([]*ethpb.Deposit, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
defer span.End()
if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
return []*ethpb.Deposit{}, nil
}
// Need to fetch if the deposits up to the state's latest eth1 data matches
// the number of all deposits in this RPC call. If not, then we return nil.
canonicalEth1Data, canonicalEth1DataHeight, err := vs.canonicalEth1Data(ctx, beaconState, currentVote)
if err != nil {
return nil, err
}
_, genesisEth1Block := vs.Eth1InfoFetcher.Eth2GenesisPowchainInfo()
if genesisEth1Block.Cmp(canonicalEth1DataHeight) == 0 {
return []*ethpb.Deposit{}, nil
}
// If there are no pending deposits, exit early.
allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
if len(allPendingContainers) == 0 {
return []*ethpb.Deposit{}, nil
}
depositTrie, err := vs.depositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve deposit trie")
}
// Deposits need to be received in order of merkle index root, so this has to make sure
// deposits are sorted from lowest to highest.
var pendingDeps []*ethpb.DepositContainer
for _, dep := range allPendingContainers {
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
pendingDeps = append(pendingDeps, dep)
}
}
for i := range pendingDeps {
// Don't construct merkle proof if the number of deposits is more than max allowed in block.
if uint64(i) == params.BeaconConfig().MaxDeposits {
break
}
pendingDeps[i].Deposit, err = constructMerkleProof(depositTrie, int(pendingDeps[i].Index), pendingDeps[i].Deposit)
if err != nil {
return nil, err
}
}
// Limit the return of pending deposits to not be more than max deposits allowed in block.
var pendingDeposits []*ethpb.Deposit
for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
}
return pendingDeposits, nil
}
func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1Data, canonicalEth1DataHeight *big.Int) (*trie.SparseMerkleTrie, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.depositTrie")
defer span.End()
var depositTrie *trie.SparseMerkleTrie
finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
depositTrie = finalizedDeposits.Deposits
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
insertIndex := finalizedDeposits.MerkleTrieIndex + 1
for _, dep := range upToEth1DataDeposits {
depHash, err := dep.Data.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not hash deposit data")
}
if err = depositTrie.Insert(depHash[:], int(insertIndex)); err != nil {
return nil, err
}
insertIndex++
}
valid, err := vs.validateDepositTrie(depositTrie, canonicalEth1Data)
// Log a warning here, as the cached trie is invalid.
if !valid {
log.Warnf("Cached deposit trie is invalid, rebuilding it now: %v", err)
return vs.rebuildDepositTrie(ctx, canonicalEth1Data, canonicalEth1DataHeight)
}
return depositTrie, nil
}
// rebuilds our deposit trie by recreating it from all processed deposits till
// specified eth1 block height.
func (vs *Server) rebuildDepositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1Data, canonicalEth1DataHeight *big.Int) (*trie.SparseMerkleTrie, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.rebuildDepositTrie")
defer span.End()
deposits := vs.DepositFetcher.AllDeposits(ctx, canonicalEth1DataHeight)
trieItems := make([][]byte, 0, len(deposits))
for _, dep := range deposits {
depHash, err := dep.Data.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not hash deposit data")
}
trieItems = append(trieItems, depHash[:])
}
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
valid, err := vs.validateDepositTrie(depositTrie, canonicalEth1Data)
// Log an error here, as even with rebuilding the trie, it is still invalid.
if !valid {
log.Errorf("Rebuilt deposit trie is invalid: %v", err)
}
return depositTrie, nil
}
// validate that the provided deposit trie matches up with the canonical eth1 data provided.
func (vs *Server) validateDepositTrie(trie *trie.SparseMerkleTrie, canonicalEth1Data *ethpb.Eth1Data) (bool, error) {
if trie.NumOfItems() != int(canonicalEth1Data.DepositCount) {
return false, errors.Errorf("wanted the canonical count of %d but received %d", canonicalEth1Data.DepositCount, trie.NumOfItems())
}
rt := trie.HashTreeRoot()
if !bytes.Equal(rt[:], canonicalEth1Data.DepositRoot) {
return false, errors.Errorf("wanted the canonical deposit root of %#x but received %#x", canonicalEth1Data.DepositRoot, rt)
}
return true, nil
}
func constructMerkleProof(trie *trie.SparseMerkleTrie, index int, deposit *ethpb.Deposit) (*ethpb.Deposit, error) {
proof, err := trie.MerkleProof(index)
if err != nil {
return nil, errors.Wrapf(err, "could not generate merkle proof for deposit at index %d", index)
}
// For every deposit, we construct a Merkle proof using the powchain service's
// in-memory deposits trie, which is updated only once the state's LatestETH1Data
// property changes during a state transition after a voting period.
deposit.Proof = proof
return deposit, nil
}
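
optimizedPackDepositsAndAttestations runs the deposit and attestation packing concurrently on an errgroup, so a failure in either goroutine cancels the shared context and surfaces through Wait. A generic sketch of that fan-out pattern (the fetch functions are hypothetical placeholders; the errgroup API is the same golang.org/x/sync/errgroup used above):

// fetchBoth runs two fetches concurrently and returns the first error,
// mirroring the structure of optimizedPackDepositsAndAttestations.
func fetchBoth(ctx context.Context, fetchDeposits, fetchAtts func(context.Context) error) error {
	eg, egctx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		if err := fetchDeposits(egctx); err != nil {
			return err
		}
		// Same pattern as above: if the shared context was cancelled while this
		// goroutine was working, report that instead of success.
		select {
		case <-egctx.Done():
			return egctx.Err()
		default:
			return nil
		}
	})
	eg.Go(func() error {
		if err := fetchAtts(egctx); err != nil {
			return err
		}
		select {
		case <-egctx.Done():
			return egctx.Err()
		default:
			return nil
		}
	})
	return eg.Wait()
}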

View File

@@ -0,0 +1,175 @@
package validator
import (
"context"
"math/big"
fastssz "github.com/ferranbt/fastssz"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)
// eth1DataMajorityVote determines the appropriate eth1data for a block proposal using
// an algorithm called Voting with the Majority. The algorithm works as follows:
// - Determine the timestamp for the start slot for the eth1 voting period.
// - Determine the earliest and latest timestamps that a valid block can have.
// - Determine the first block not before the earliest timestamp. This block is the lower bound.
// - Determine the last block not after the latest timestamp. This block is the upper bound.
// - If the last block is too early, use current eth1data from the beacon state.
// - Filter out votes on unknown blocks and blocks which are outside of the range determined by the lower and upper bounds.
// - If no blocks are left after filtering votes, use eth1data from the latest valid block.
// - Otherwise:
// - Determine the vote with the highest count. Prefer the vote with the highest eth1 block height in the event of a tie.
// - This vote's block is the eth1 block to use for the block proposal.
func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState state.BeaconState) (*ethpb.Eth1Data, error) {
ctx, cancel := context.WithTimeout(ctx, eth1dataTimeout)
defer cancel()
slot := beaconState.Slot()
votingPeriodStartTime := vs.slotStartTime(slot)
if vs.MockEth1Votes {
return vs.mockETH1DataVote(ctx, slot)
}
if !vs.Eth1InfoFetcher.IsConnectedToETH1() {
return vs.randomETH1DataVote(ctx)
}
eth1DataNotification = false
eth1FollowDistance := params.BeaconConfig().Eth1FollowDistance
earliestValidTime := votingPeriodStartTime - 2*params.BeaconConfig().SecondsPerETH1Block*eth1FollowDistance
latestValidTime := votingPeriodStartTime - params.BeaconConfig().SecondsPerETH1Block*eth1FollowDistance
if !features.Get().EnableGetBlockOptimizations {
_, err := vs.Eth1BlockFetcher.BlockByTimestamp(ctx, earliestValidTime)
if err != nil {
log.WithError(err).Error("Could not get last block by earliest valid time")
return vs.randomETH1DataVote(ctx)
}
}
lastBlockByLatestValidTime, err := vs.Eth1BlockFetcher.BlockByTimestamp(ctx, latestValidTime)
if err != nil {
log.WithError(err).Error("Could not get last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
if lastBlockByLatestValidTime.Time < earliestValidTime {
return vs.HeadFetcher.HeadETH1Data(), nil
}
lastBlockDepositCount, lastBlockDepositRoot := vs.DepositFetcher.DepositsNumberAndRootAtHeight(ctx, lastBlockByLatestValidTime.Number)
if lastBlockDepositCount == 0 {
return vs.ChainStartFetcher.ChainStartEth1Data(), nil
}
if lastBlockDepositCount >= vs.HeadFetcher.HeadETH1Data().DepositCount {
hash, err := vs.Eth1BlockFetcher.BlockHashByHeight(ctx, lastBlockByLatestValidTime.Number)
if err != nil {
log.WithError(err).Error("Could not get hash of last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
return &ethpb.Eth1Data{
BlockHash: hash.Bytes(),
DepositCount: lastBlockDepositCount,
DepositRoot: lastBlockDepositRoot[:],
}, nil
}
return vs.HeadFetcher.HeadETH1Data(), nil
}
func (vs *Server) slotStartTime(slot types.Slot) uint64 {
startTime, _ := vs.Eth1InfoFetcher.Eth2GenesisPowchainInfo()
return slots.VotingPeriodStartTime(startTime, slot)
}
// canonicalEth1Data determines the canonical eth1data and eth1 block height to use for determining deposits.
func (vs *Server) canonicalEth1Data(
ctx context.Context,
beaconState state.BeaconState,
currentVote *ethpb.Eth1Data) (*ethpb.Eth1Data, *big.Int, error) {
var eth1BlockHash [32]byte
// Add in current vote, to get accurate vote tally
if err := beaconState.AppendEth1DataVotes(currentVote); err != nil {
return nil, nil, errors.Wrap(err, "could not append eth1 data votes to state")
}
hasSupport, err := blocks.Eth1DataHasEnoughSupport(beaconState, currentVote)
if err != nil {
return nil, nil, errors.Wrap(err, "could not determine if current eth1data vote has enough support")
}
var canonicalEth1Data *ethpb.Eth1Data
if hasSupport {
canonicalEth1Data = currentVote
eth1BlockHash = bytesutil.ToBytes32(currentVote.BlockHash)
} else {
canonicalEth1Data = beaconState.Eth1Data()
eth1BlockHash = bytesutil.ToBytes32(beaconState.Eth1Data().BlockHash)
}
_, canonicalEth1DataHeight, err := vs.Eth1BlockFetcher.BlockExists(ctx, eth1BlockHash)
if err != nil {
return nil, nil, errors.Wrap(err, "could not fetch eth1data height")
}
return canonicalEth1Data, canonicalEth1DataHeight, nil
}
func (vs *Server) mockETH1DataVote(ctx context.Context, slot types.Slot) (*ethpb.Eth1Data, error) {
if !eth1DataNotification {
log.Warn("Beacon Node is no longer connected to an ETH1 chain, so ETH1 data votes are now mocked.")
eth1DataNotification = true
}
// If mock eth1 data votes are specified, we use the following for the
// eth1data we provide to every proposer based on https://github.com/ethereum/eth2.0-pm/issues/62:
//
// slot_in_voting_period = current_slot % SLOTS_PER_ETH1_VOTING_PERIOD
// Eth1Data(
// DepositRoot = hash(current_epoch + slot_in_voting_period),
// DepositCount = state.eth1_deposit_index,
// BlockHash = hash(hash(current_epoch + slot_in_voting_period)),
// )
slotInVotingPeriod := slot.ModSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
headState, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
}
var enc []byte
enc = fastssz.MarshalUint64(enc, uint64(slots.ToEpoch(slot))+uint64(slotInVotingPeriod))
depRoot := hash.Hash(enc)
blockHash := hash.Hash(depRoot[:])
return &ethpb.Eth1Data{
DepositRoot: depRoot[:],
DepositCount: headState.Eth1DepositIndex(),
BlockHash: blockHash[:],
}, nil
}
// randomETH1DataVote returns an eth1 data vote with a randomized deposit root and block hash.
func (vs *Server) randomETH1DataVote(ctx context.Context) (*ethpb.Eth1Data, error) {
if !eth1DataNotification {
log.Warn("Beacon Node is no longer connected to an ETH1 chain, so ETH1 data votes are now random.")
eth1DataNotification = true
}
headState, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
}
// Set random roots and block hashes to prevent a majority from being
// built while the eth1 node is offline.
randGen := rand.NewGenerator()
depRoot := hash.Hash(bytesutil.Bytes32(randGen.Uint64()))
blockHash := hash.Hash(bytesutil.Bytes32(randGen.Uint64()))
return &ethpb.Eth1Data{
DepositRoot: depRoot[:],
DepositCount: headState.Eth1DepositIndex(),
BlockHash: blockHash[:],
}, nil
}

View File

@@ -0,0 +1,173 @@
package validator
import (
"context"
"fmt"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"go.opencensus.io/trace"
)
// blockData is the data required to create a beacon block.
type blockData struct {
ParentRoot []byte
Graffiti [32]byte
ProposerIdx types.ValidatorIndex
Eth1Data *ethpb.Eth1Data
Deposits []*ethpb.Deposit
Attestations []*ethpb.Attestation
ProposerSlashings []*ethpb.ProposerSlashing
AttesterSlashings []*ethpb.AttesterSlashing
VoluntaryExits []*ethpb.SignedVoluntaryExit
}
func (vs *Server) getPhase0BeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getPhase0BeaconBlock")
defer span.End()
blkData, err := vs.buildPhase0BlockData(ctx, req)
if err != nil {
return nil, fmt.Errorf("could not build block data: %v", err)
}
// Use zero hash as stub for state root to compute later.
stateRoot := params.BeaconConfig().ZeroHash[:]
blk := &ethpb.BeaconBlock{
Slot: req.Slot,
ParentRoot: blkData.ParentRoot,
StateRoot: stateRoot,
ProposerIndex: blkData.ProposerIdx,
Body: &ethpb.BeaconBlockBody{
Eth1Data: blkData.Eth1Data,
Deposits: blkData.Deposits,
Attestations: blkData.Attestations,
RandaoReveal: req.RandaoReveal,
ProposerSlashings: blkData.ProposerSlashings,
AttesterSlashings: blkData.AttesterSlashings,
VoluntaryExits: blkData.VoluntaryExits,
Graffiti: blkData.Graffiti[:],
},
}
// Compute state root with the newly constructed block.
stateRoot, err = vs.computeStateRoot(
ctx, wrapper.WrappedPhase0SignedBeaconBlock(
&ethpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)},
),
)
if err != nil {
interop.WriteBlockToDisk(wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: blk}), true /*failed*/)
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return blk, nil
}
// buildPhase0BlockData builds the data required to create a new beacon block; it is kept as a separate method so it can be shared across forks.
func (vs *Server) buildPhase0BlockData(ctx context.Context, req *ethpb.BlockRequest) (*blockData, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.buildPhase0BlockData")
defer span.End()
if vs.SyncChecker.Syncing() {
return nil, fmt.Errorf("syncing to latest head, not ready to respond")
}
// Retrieve the parent block as the current head of the canonical chain.
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, fmt.Errorf("could not retrieve head root: %v", err)
}
head, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, fmt.Errorf("could not get head state %v", err)
}
if features.Get().EnableNextSlotStateCache {
head, err = transition.ProcessSlotsUsingNextSlotCache(ctx, head, parentRoot, req.Slot)
if err != nil {
return nil, fmt.Errorf("could not advance slots to calculate proposer index: %v", err)
}
} else {
head, err = transition.ProcessSlots(ctx, head, req.Slot)
if err != nil {
return nil, fmt.Errorf("could not advance slot to calculate proposer index: %v", err)
}
}
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
if err != nil {
return nil, fmt.Errorf("could not get ETH1 data: %v", err)
}
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data)
if err != nil {
return nil, err
}
graffiti := bytesutil.ToBytes32(req.Graffiti)
// Calculate new proposer index.
idx, err := helpers.BeaconProposerIndex(ctx, head)
if err != nil {
return nil, fmt.Errorf("could not calculate proposer index %v", err)
}
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
for _, slashing := range proposerSlashings {
_, err := blocks.ProcessProposerSlashing(ctx, head, slashing, v.SlashValidator)
if err != nil {
log.WithError(err).Warn("Proposer: invalid proposer slashing")
continue
}
validProposerSlashings = append(validProposerSlashings, slashing)
}
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validAttSlashings := make([]*ethpb.AttesterSlashing, 0, len(attSlashings))
for _, slashing := range attSlashings {
_, err := blocks.ProcessAttesterSlashing(ctx, head, slashing, v.SlashValidator)
if err != nil {
log.WithError(err).Warn("Proposer: invalid attester slashing")
continue
}
validAttSlashings = append(validAttSlashings, slashing)
}
exits := vs.ExitPool.PendingExits(head, req.Slot, false /*noLimit*/)
validExits := make([]*ethpb.SignedVoluntaryExit, 0, len(exits))
for _, exit := range exits {
val, err := head.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
log.WithError(err).Warn("Proposer: invalid exit")
continue
}
if err := blocks.VerifyExitAndSignature(val, head.Slot(), head.Fork(), exit, head.GenesisValidatorRoot()); err != nil {
log.WithError(err).Warn("Proposer: invalid exit")
continue
}
validExits = append(validExits, exit)
}
return &blockData{
ParentRoot: parentRoot,
Graffiti: graffiti,
ProposerIdx: idx,
Eth1Data: eth1Data,
Deposits: deposits,
Attestations: atts,
ProposerSlashings: validProposerSlashings,
AttesterSlashings: validAttSlashings,
VoluntaryExits: validExits,
}, nil
}
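// Minimal sketch (not part of this change) of how a later fork could reuse
// buildPhase0BlockData, which is the point of factoring it out of getPhase0BeaconBlock.
// The Altair type and field names come from the v1alpha1 protos, but the wiring below is
// an assumption rather than the actual Altair proposer path; the sync aggregate and the
// real state root are deliberately left out and would be filled in afterwards, mirroring
// how getPhase0BeaconBlock computes its state root.
func (vs *Server) getAltairBeaconBlockSketch(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
	blkData, err := vs.buildPhase0BlockData(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("could not build block data: %v", err)
	}
	return &ethpb.BeaconBlockAltair{
		Slot:          req.Slot,
		ParentRoot:    blkData.ParentRoot,
		StateRoot:     params.BeaconConfig().ZeroHash[:], // stub, to be replaced by the computed state root
		ProposerIndex: blkData.ProposerIdx,
		Body: &ethpb.BeaconBlockBodyAltair{
			Eth1Data:          blkData.Eth1Data,
			Deposits:          blkData.Deposits,
			Attestations:      blkData.Attestations,
			RandaoReveal:      req.RandaoReveal,
			ProposerSlashings: blkData.ProposerSlashings,
			AttesterSlashings: blkData.AttesterSlashings,
			VoluntaryExits:    blkData.VoluntaryExits,
			Graffiti:          blkData.Graffiti[:],
		},
	}, nil
}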

View File

@@ -323,7 +323,7 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
req.Signature, err = signing.ComputeDomainAndSign(beaconState, currentEpoch, req.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(t, err)
_, err = proposerServer.ComputeStateRoot(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(req))
_, err = proposerServer.computeStateRoot(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(req))
require.NoError(t, err)
}