Compare commits

...

17 Commits

Author SHA1 Message Date
james-prysm
6f4b85fb23 updating committee subnets argument 2026-03-02 13:40:46 -06:00
james-prysm
2f5f7ac671 fixing generated files after develop merged 2026-02-25 16:33:39 -06:00
james-prysm
1c75f7d025 Merge branch 'develop' into breakup-duties 2026-02-25 14:32:29 -08:00
james-prysm
4ad48d3e26 self review 2026-02-24 16:23:04 -06:00
james-prysm
e7dab1971b fixing edge case 2026-02-24 15:55:34 -06:00
james-prysm
3285872827 self review 2026-02-24 14:54:10 -06:00
james-prysm
111d7ccdd3 making sure dependent root is calculated correctly post fulu for get proposer duties endpoint 2026-02-24 14:32:52 -06:00
james-prysm
abd0fb563c fixing linting 2026-02-24 13:11:42 -06:00
james-prysm
2d829d9e9c wip tests 2026-02-24 11:49:57 -06:00
james-prysm
722b7871ea Merge branch 'develop' into breakup-duties 2026-02-24 09:49:31 -08:00
james-prysm
45b57d5309 implemented proposer lookahead usage for duties with proposer cache 2026-02-24 10:32:11 -06:00
james-prysm
84a11859fe splitting up duties cache 2026-02-24 09:04:03 -06:00
james-prysm
6949a0ba47 gaz 2026-02-20 16:07:25 -06:00
james-prysm
ea278e19ba updating the duties call to use the separated grpc endpoint post gloas and continue with duties v2 prior to gloas 2026-02-20 16:02:49 -06:00
james-prysm
b98eaf543f only trigger after gloas 2026-02-20 10:33:32 -06:00
james-prysm
fb24a083a7 breaking up duties gRPC 2026-02-20 10:04:34 -06:00
james-prysm
31623e5917 refactoring duties processing into core helpers 2026-02-19 16:06:19 -06:00
37 changed files with 4212 additions and 1074 deletions

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"beacon.go",
"duties.go",
"errors.go",
"log.go",
"service.go",
@@ -45,17 +46,23 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["validator_test.go"],
srcs = [
"duties_test.go",
"validator_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -63,6 +70,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],

View File

@@ -0,0 +1,266 @@
package core
import (
"bytes"
"context"
"sort"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/consensus-types/validator"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// AttesterDutyResult is a transport-agnostic representation of attester duty.
type AttesterDutyResult struct {
	// Pubkey is the BLS public key of the validator the duty belongs to.
	Pubkey [fieldparams.BLSPubkeyLength]byte
	// ValidatorIndex is the validator's index in the state registry.
	ValidatorIndex primitives.ValidatorIndex
	// CommitteeIndex identifies the committee the validator is assigned to at Slot.
	CommitteeIndex primitives.CommitteeIndex
	// CommitteeLength is the total number of members in the assigned committee.
	CommitteeLength uint64
	// CommitteesAtSlot is the number of committees active at the duty slot.
	CommitteesAtSlot uint64
	// ValidatorCommitteeIndex is the validator's position within its committee.
	ValidatorCommitteeIndex uint64
	// Slot is the slot at which the validator must attest.
	Slot primitives.Slot
}
// ProposerDutyResult is a transport-agnostic representation of proposer duty.
type ProposerDutyResult struct {
	// Pubkey is the BLS public key of the proposing validator.
	Pubkey [fieldparams.BLSPubkeyLength]byte
	// ValidatorIndex is the proposer's index in the state registry.
	ValidatorIndex primitives.ValidatorIndex
	// Slot is the slot at which the validator must propose a block.
	Slot primitives.Slot
}
// SyncCommitteeDutyResult is a transport-agnostic representation of sync committee duty.
type SyncCommitteeDutyResult struct {
	// Pubkey is the BLS public key of the validator the duty belongs to.
	Pubkey [fieldparams.BLSPubkeyLength]byte
	// ValidatorIndex is the validator's index in the state registry.
	ValidatorIndex primitives.ValidatorIndex
	// ValidatorSyncCommitteeIndices lists every position the validator occupies
	// in the sync committee (a pubkey may appear more than once).
	ValidatorSyncCommitteeIndices []uint64
}
// AttesterDuties computes attester duties for the requested validators at the given epoch.
// The caller is responsible for providing a state that is adequate for the requested epoch.
// It returns a BadRequest error when an index resolves to a zero pubkey (out of range),
// and silently skips indices without a committee assignment for the epoch.
func (s *Service) AttesterDuties(ctx context.Context, st state.BeaconState, epoch primitives.Epoch, indices []primitives.ValidatorIndex) ([]*AttesterDutyResult, *RpcError) {
	ctx, span := trace.StartSpan(ctx, "coreService.AttesterDuties")
	defer span.End()
	assignments, err := helpers.CommitteeAssignments(ctx, st, epoch, indices)
	if err != nil {
		return nil, &RpcError{Err: errors.Wrap(err, "could not compute committee assignments"), Reason: Internal}
	}
	activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, st, epoch)
	if err != nil {
		return nil, &RpcError{Err: errors.Wrap(err, "could not get active validator count"), Reason: Internal}
	}
	committeesAtSlot := helpers.SlotCommitteeCount(activeValidatorCount)
	// The zero-pubkey sentinel is loop-invariant; hoist it out of the loop
	// instead of re-declaring it on every iteration.
	var zeroPubkey [fieldparams.BLSPubkeyLength]byte
	duties := make([]*AttesterDutyResult, 0, len(indices))
	for _, index := range indices {
		pubkey := st.PubkeyAtIndex(index)
		// A zero pubkey means the index is not present in the registry.
		if bytes.Equal(pubkey[:], zeroPubkey[:]) {
			return nil, &RpcError{Err: errors.Errorf("Invalid validator index %d", index), Reason: BadRequest}
		}
		committee := assignments[index]
		if committee == nil {
			// Validator exists but has no committee assignment this epoch.
			continue
		}
		duties = append(duties, &AttesterDutyResult{
			Pubkey:                  pubkey,
			ValidatorIndex:          index,
			CommitteeIndex:          committee.CommitteeIndex,
			CommitteeLength:         uint64(len(committee.Committee)),
			CommitteesAtSlot:        committeesAtSlot,
			ValidatorCommitteeIndex: findValidatorIndexInCommittee(committee.Committee, index),
			Slot:                    committee.AttesterSlot,
		})
	}
	return duties, nil
}
// ProposerDuties computes proposer duties for the given epoch.
// Results are sorted by slot.
func (s *Service) ProposerDuties(ctx context.Context, st state.BeaconState, epoch primitives.Epoch) ([]*ProposerDutyResult, *RpcError) {
	ctx, span := trace.StartSpan(ctx, "coreService.ProposerDuties")
	defer span.End()
	assignments, err := helpers.ProposerAssignments(ctx, st, epoch)
	if err != nil {
		return nil, &RpcError{Err: errors.Wrap(err, "could not compute proposer assignments"), Reason: Internal}
	}
	results := make([]*ProposerDutyResult, 0)
	for valIdx, assignedSlots := range assignments {
		pk := st.PubkeyAtIndex(valIdx)
		for _, proposalSlot := range assignedSlots {
			results = append(results, &ProposerDutyResult{
				Pubkey:         pk,
				ValidatorIndex: valIdx,
				Slot:           proposalSlot,
			})
		}
	}
	// Map iteration order is random; sort so callers get a deterministic,
	// slot-ordered proposal schedule.
	sort.Slice(results, func(a, b int) bool {
		return results[a].Slot < results[b].Slot
	})
	return results, nil
}
// SyncCommitteeDuties computes sync committee duties for the requested validators.
// It also registers sync subnets for matched validators.
// The caller is responsible for providing a state that is adequate for the requested epoch.
func (s *Service) SyncCommitteeDuties(ctx context.Context, st state.BeaconState, requestedEpoch primitives.Epoch, currentEpoch primitives.Epoch, indices []primitives.ValidatorIndex) ([]*SyncCommitteeDutyResult, *RpcError) {
	_, span := trace.StartSpan(ctx, "coreService.SyncCommitteeDuties")
	defer span.End()
	// Anchor the sync committee period at the earlier of the two epochs, then
	// decide whether the requested epoch falls inside the current period.
	periodStartEpoch, err := slots.SyncCommitteePeriodStartEpoch(min(requestedEpoch, currentEpoch))
	if err != nil {
		return nil, &RpcError{Err: errors.Wrap(err, "could not get sync committee period start epoch"), Reason: Internal}
	}
	useCurrentCommittee := requestedEpoch < periodStartEpoch+params.BeaconConfig().EpochsPerSyncCommitteePeriod
	var memberPubkeys [][]byte
	if useCurrentCommittee {
		sc, err := st.CurrentSyncCommittee()
		if err != nil {
			return nil, &RpcError{Err: errors.Wrap(err, "could not get sync committee"), Reason: Internal}
		}
		memberPubkeys = sc.Pubkeys
	} else {
		sc, err := st.NextSyncCommittee()
		if err != nil {
			return nil, &RpcError{Err: errors.Wrap(err, "could not get sync committee"), Reason: Internal}
		}
		memberPubkeys = sc.Pubkeys
	}
	// Map each committee pubkey to every position it occupies (a pubkey may
	// legitimately appear multiple times in the committee).
	positionsByPubkey := make(map[[fieldparams.BLSPubkeyLength]byte][]uint64, len(memberPubkeys))
	for pos, pk := range memberPubkeys {
		var key [fieldparams.BLSPubkeyLength]byte
		copy(key[:], pk)
		positionsByPubkey[key] = append(positionsByPubkey[key], uint64(pos))
	}
	results := make([]*SyncCommitteeDutyResult, 0)
	var zeroPubkey [fieldparams.BLSPubkeyLength]byte
	for _, valIdx := range indices {
		pk := st.PubkeyAtIndex(valIdx)
		// A zero pubkey means the index is not present in the registry.
		if bytes.Equal(pk[:], zeroPubkey[:]) {
			return nil, &RpcError{Err: errors.Errorf("Invalid validator index %d", valIdx), Reason: BadRequest}
		}
		positions, found := positionsByPubkey[pk]
		if !found {
			continue
		}
		results = append(results, &SyncCommitteeDutyResult{
			Pubkey:                        pk,
			ValidatorIndex:                valIdx,
			ValidatorSyncCommitteeIndices: positions,
		})
		// Matched validators also get their sync subnets registered, against
		// whichever period the requested epoch resolved to.
		register := RegisterSyncSubnetNextPeriod
		if useCurrentCommittee {
			register = RegisterSyncSubnetCurrentPeriod
		}
		if err := register(st, requestedEpoch, pk[:], syncDutyStatus(st, valIdx)); err != nil {
			return nil, &RpcError{Err: errors.Wrapf(err, "could not register sync subnet for validator %d", valIdx), Reason: Internal}
		}
	}
	return results, nil
}
// SyncCommitteeDutiesLastValidEpoch returns the last epoch for which sync committee duties can be computed.
func SyncCommitteeDutiesLastValidEpoch(currentEpoch primitives.Epoch) primitives.Epoch {
	currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
	// Return the last epoch of the next sync committee period: go two periods
	// ahead to find the first invalid epoch, then subtract 1.
	return (currentSyncPeriodIndex+2)*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
}
// findValidatorIndexInCommittee finds the position of a validator in a committee.
// It returns 0 when the validator is not found. That is a potential false
// positive, but callers only invoke this for validators already known to be
// committee members, so the fallback should be unreachable in practice.
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
	for pos := range committee {
		if committee[pos] == validatorIndex {
			return uint64(pos)
		}
	}
	return 0
}
// syncDutyStatus returns a validator.Status suitable for sync subnet registration.
// It returns Active for any active validator and Pending otherwise.
func syncDutyStatus(st state.BeaconState, idx primitives.ValidatorIndex) validator.Status {
	v, err := st.ValidatorAtIndexReadOnly(idx)
	if err != nil || v.IsNil() {
		// Missing or nil validator: treat as not-yet-active.
		return validator.Pending
	}
	epoch := coreTime.CurrentEpoch(st)
	isActive := v.ActivationEpoch() <= epoch && epoch < v.ExitEpoch()
	if isActive {
		return validator.Active
	}
	return validator.Pending
}
// AttestationDependentRoot returns the block root at (epoch-1 start - 1),
// which is the dependent root for attester duties at the given epoch.
// Callers must handle epoch <= 1 separately (e.g. using the genesis block root from the DB).
func AttestationDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
	// Guard against slot underflow: the dependent slot is not computable here.
	if epoch <= 1 {
		return nil, errors.New("epoch <= 1 requires genesis block root from DB")
	}
	prevEpochStart, err := slots.EpochStart(epoch.Sub(1))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not obtain epoch's start slot: %v", err)
	}
	dependentSlot := prevEpochStart.Sub(1)
	root, err := helpers.BlockRootAtSlot(s, dependentSlot)
	if err != nil {
		return nil, errors.Wrap(err, "could not get block root")
	}
	return root, nil
}
// ProposalDependentRoot returns the block root at (epoch start - 1),
// which is the dependent root for proposer duties at the given epoch.
// This is the pre-Fulu (v1) calculation used by the REST /eth/v1 endpoint.
// Callers must handle epoch 0 separately (e.g. using the genesis block root from the DB).
func ProposalDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
	// Guard against slot underflow: the dependent slot is not computable here.
	if epoch == 0 {
		return nil, errors.New("epoch 0 requires genesis block root from DB")
	}
	startSlot, err := slots.EpochStart(epoch)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not obtain epoch's start slot: %v", err)
	}
	dependentSlot := startSlot.Sub(1)
	root, err := helpers.BlockRootAtSlot(s, dependentSlot)
	if err != nil {
		return nil, errors.Wrap(err, "could not get block root")
	}
	return root, nil
}
// ProposalDependentRootV2 returns the dependent root for proposer duties.
//
// Post-Fulu (EIP-7917) the proposer schedule is deterministic from the
// previous epoch's state, so the dependent root is (prev_epoch_start - 1),
// matching AttestationDependentRoot. Pre-Fulu it falls back to (epoch_start - 1).
// See https://github.com/ethereum/beacon-APIs/pull/563.
func ProposalDependentRootV2(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
	if s.Version() < version.Fulu {
		return ProposalDependentRoot(s, epoch)
	}
	return AttestationDependentRoot(s, epoch)
}

View File

@@ -0,0 +1,220 @@
package core
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
// TestAttesterDuties exercises Service.AttesterDuties against a deterministic
// genesis state at epoch 0.
func TestAttesterDuties(t *testing.T) {
	// Shared caches persist across tests; clear to keep assignments deterministic.
	helpers.ClearCache()
	// Build a genesis state from deterministic deposits with the minimum validator count.
	depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
	deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
	require.NoError(t, err)
	eth1Data, err := util.DeterministicEth1Data(len(deposits))
	require.NoError(t, err)
	bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
	require.NoError(t, err)
	s := &Service{}
	t.Run("single validator", func(t *testing.T) {
		duties, rpcErr := s.AttesterDuties(t.Context(), bs, 0, []primitives.ValidatorIndex{0})
		require.Equal(t, (*RpcError)(nil), rpcErr)
		require.Equal(t, 1, len(duties))
		duty := duties[0]
		assert.Equal(t, primitives.ValidatorIndex(0), duty.ValidatorIndex)
		// Committee metadata must be populated for an assigned validator.
		assert.NotEqual(t, uint64(0), duty.CommitteeLength)
		assert.NotEqual(t, uint64(0), duty.CommitteesAtSlot)
	})
	t.Run("multiple validators", func(t *testing.T) {
		// Every requested index is active at genesis, so one duty each.
		indices := []primitives.ValidatorIndex{0, 1, 2}
		duties, rpcErr := s.AttesterDuties(t.Context(), bs, 0, indices)
		require.Equal(t, (*RpcError)(nil), rpcErr)
		require.Equal(t, 3, len(duties))
	})
	t.Run("zero pubkey returns error", func(t *testing.T) {
		// Index far beyond the validator count should have a zero pubkey.
		badIndex := primitives.ValidatorIndex(depChainStart + 100)
		_, rpcErr := s.AttesterDuties(t.Context(), bs, 0, []primitives.ValidatorIndex{badIndex})
		require.NotNil(t, rpcErr)
		require.Equal(t, ErrorReason(BadRequest), rpcErr.Reason)
	})
}
// TestProposerDuties exercises Service.ProposerDuties against a deterministic
// genesis state at epoch 0 and checks slot ordering of the results.
func TestProposerDuties(t *testing.T) {
	// Shared caches persist across tests; clear to keep assignments deterministic.
	helpers.ClearCache()
	// Build a genesis state from deterministic deposits with the minimum validator count.
	depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
	deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
	require.NoError(t, err)
	eth1Data, err := util.DeterministicEth1Data(len(deposits))
	require.NoError(t, err)
	bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
	require.NoError(t, err)
	s := &Service{}
	t.Run("basic OK", func(t *testing.T) {
		duties, rpcErr := s.ProposerDuties(t.Context(), bs, 0)
		require.Equal(t, (*RpcError)(nil), rpcErr)
		// Epoch 0 has SlotsPerEpoch slots, but slot 0 is skipped for proposer, so expect SlotsPerEpoch-1 duties.
		require.Equal(t, int(params.BeaconConfig().SlotsPerEpoch-1), len(duties))
	})
	t.Run("sorted by slot", func(t *testing.T) {
		duties, rpcErr := s.ProposerDuties(t.Context(), bs, 0)
		require.Equal(t, (*RpcError)(nil), rpcErr)
		// Verify non-decreasing slot order across the whole schedule.
		for i := 1; i < len(duties); i++ {
			assert.Equal(t, true, duties[i-1].Slot <= duties[i].Slot, "duties should be sorted by slot")
		}
	})
}
// TestSyncCommitteeDuties exercises Service.SyncCommitteeDuties with hand-built
// current and next sync committees on an Altair genesis state.
func TestSyncCommitteeDuties(t *testing.T) {
	// Shared caches persist across tests; clear to keep results deterministic.
	helpers.ClearCache()
	// Altair must be active for sync committees to exist; restore config afterwards.
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 0
	params.OverrideBeaconConfig(cfg)
	numVals := uint64(11)
	st, _ := util.DeterministicGenesisStateAltair(t, numVals)
	vals := st.Validators()
	// Current committee: validators 0-4.
	currCommittee := &ethpb.SyncCommittee{AggregatePubkey: make([]byte, 48)}
	for i := range 5 {
		currCommittee.Pubkeys = append(currCommittee.Pubkeys, vals[i].PublicKey)
	}
	// Add one pubkey twice to test duplicate positions.
	currCommittee.Pubkeys = append(currCommittee.Pubkeys, vals[0].PublicKey)
	require.NoError(t, st.SetCurrentSyncCommittee(currCommittee))
	// Next committee: validators 5-9 (disjoint from the current committee).
	nextCommittee := &ethpb.SyncCommittee{AggregatePubkey: make([]byte, 48)}
	for i := 5; i < 10; i++ {
		nextCommittee.Pubkeys = append(nextCommittee.Pubkeys, vals[i].PublicKey)
	}
	require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
	s := &Service{}
	t.Run("current committee", func(t *testing.T) {
		duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{1})
		require.Equal(t, (*RpcError)(nil), rpcErr)
		require.Equal(t, 1, len(duties))
		assert.Equal(t, primitives.ValidatorIndex(1), duties[0].ValidatorIndex)
		// Validator 1 occupies exactly one position: index 1 of the current committee.
		require.Equal(t, 1, len(duties[0].ValidatorSyncCommitteeIndices))
		assert.Equal(t, uint64(1), duties[0].ValidatorSyncCommitteeIndices[0])
	})
	t.Run("validator with duplicate positions", func(t *testing.T) {
		duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{0})
		require.Equal(t, (*RpcError)(nil), rpcErr)
		require.Equal(t, 1, len(duties))
		// Validator 0 appears at index 0 and 5.
		require.Equal(t, 2, len(duties[0].ValidatorSyncCommitteeIndices))
	})
	t.Run("next committee", func(t *testing.T) {
		// Requesting one full period ahead selects the next committee.
		nextEpoch := params.BeaconConfig().EpochsPerSyncCommitteePeriod
		duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, nextEpoch, 0, []primitives.ValidatorIndex{5})
		require.Equal(t, (*RpcError)(nil), rpcErr)
		require.Equal(t, 1, len(duties))
		assert.Equal(t, primitives.ValidatorIndex(5), duties[0].ValidatorIndex)
	})
	t.Run("validator not in committee", func(t *testing.T) {
		// Validator 10 is not in either committee.
		duties, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{10})
		require.Equal(t, (*RpcError)(nil), rpcErr)
		require.Equal(t, 0, len(duties))
	})
	t.Run("zero pubkey returns error", func(t *testing.T) {
		// Index far beyond the validator count should have a zero pubkey.
		badIndex := primitives.ValidatorIndex(numVals + 100)
		_, rpcErr := s.SyncCommitteeDuties(t.Context(), st, 0, 0, []primitives.ValidatorIndex{badIndex})
		require.NotNil(t, rpcErr)
		require.Equal(t, ErrorReason(BadRequest), rpcErr.Reason)
	})
}
func TestSyncCommitteeDutiesLastValidEpoch(t *testing.T) {
t.Run("epoch 0", func(t *testing.T) {
result := SyncCommitteeDutiesLastValidEpoch(0)
expected := 2*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
assert.Equal(t, expected, result)
})
}
// TestProposalDependentRootV2 verifies that the fork-aware dependent root picks
// the post-Fulu (prev_epoch_start - 1) slot on Fulu states and the v1
// (epoch_start - 1) slot on earlier states.
func TestProposalDependentRootV2(t *testing.T) {
	helpers.ClearCache()
	// With SlotsPerEpoch=8 and epoch=2:
	// attestation dependent root slot = prev_epoch_start - 1 = 8 - 1 = 7
	// v1 proposer dependent root slot = epoch_start - 1 = 16 - 1 = 15
	// We set distinct roots at these slots so the test proves the fork
	// branch selects the right one.
	// Note: the closure needs no parameters (the original took an unused *testing.T).
	makeBlockRoots := func() [][]byte {
		shr := params.BeaconConfig().SlotsPerHistoricalRoot
		roots := make([][]byte, shr)
		for i := range roots {
			roots[i] = make([]byte, 32)
			// Tag each root's first byte with its (truncated) slot so the
			// selected slot is observable from the returned root.
			roots[i][0] = byte(i)
		}
		return roots
	}
	t.Run("post-Fulu uses prev_epoch_start minus 1", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.FuluForkEpoch = 0
		params.OverrideBeaconConfig(cfg)
		spe := params.BeaconConfig().SlotsPerEpoch
		st, _ := util.DeterministicGenesisStateFulu(t, 64)
		require.NoError(t, st.SetSlot(2*spe))
		require.NoError(t, st.SetBlockRoots(makeBlockRoots()))
		got, err := ProposalDependentRootV2(st, 2)
		require.NoError(t, err)
		// Post-Fulu: prev_epoch_start - 1 = SlotsPerEpoch - 1
		assert.Equal(t, byte(spe-1), got[0])
	})
	t.Run("pre-Fulu uses epoch_start minus 1", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.ElectraForkEpoch = 0
		cfg.FuluForkEpoch = 1000
		params.OverrideBeaconConfig(cfg)
		spe := params.BeaconConfig().SlotsPerEpoch
		st, _ := util.DeterministicGenesisStateElectra(t, 64)
		require.NoError(t, st.SetSlot(2*spe))
		require.NoError(t, st.SetBlockRoots(makeBlockRoots()))
		got, err := ProposalDependentRootV2(st, 2)
		require.NoError(t, err)
		// Pre-Fulu: epoch_start - 1 = 2*SlotsPerEpoch - 1
		assert.Equal(t, byte(2*spe-1), got[0])
	})
}
// TestFindValidatorIndexInCommittee checks position lookup for each member and
// the zero fallback for a validator absent from the committee.
func TestFindValidatorIndexInCommittee(t *testing.T) {
	members := []primitives.ValidatorIndex{10, 20, 30}
	for want, member := range members {
		assert.Equal(t, uint64(want), findValidatorIndexInCommittee(members, member))
	}
	// Not found returns 0.
	assert.Equal(t, uint64(0), findValidatorIndexInCommittee(members, 99))
}

View File

@@ -50,8 +50,6 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
],
)

View File

@@ -9,7 +9,6 @@ import (
"io"
"net/http"
"slices"
"sort"
"strconv"
"time"
@@ -30,7 +29,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
validator2 "github.com/OffchainLabs/prysm/v7/consensus-types/validator"
mvslice "github.com/OffchainLabs/prysm/v7/container/multi-value-slice"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
ethpbalpha "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -40,8 +38,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// GetAggregateAttestationV2 aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
@@ -910,47 +906,22 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
return
}
assignments, err := helpers.CommitteeAssignments(ctx, st, requestedEpoch, requestedValIndices)
if err != nil {
httputil.HandleError(w, "Could not compute committee assignments: "+err.Error(), http.StatusInternalServerError)
coreDuties, rpcErr := s.CoreService.AttesterDuties(ctx, st, requestedEpoch, requestedValIndices)
if rpcErr != nil {
httputil.HandleError(w, rpcErr.Err.Error(), core.ErrorReasonToHTTP(rpcErr.Reason))
return
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get active validator count: "+err.Error(), http.StatusInternalServerError)
return
}
committeesAtSlot := helpers.SlotCommitteeCount(activeValidatorCount)
duties := make([]*structs.AttesterDuty, 0, len(requestedValIndices))
for _, index := range requestedValIndices {
pubkey := st.PubkeyAtIndex(index)
var zeroPubkey [fieldparams.BLSPubkeyLength]byte
if bytes.Equal(pubkey[:], zeroPubkey[:]) {
httputil.HandleError(w, fmt.Sprintf("Invalid validator index %d", index), http.StatusBadRequest)
return
}
committee := assignments[index]
if committee == nil {
continue
}
var valIndexInCommittee int
// valIndexInCommittee will be 0 in case we don't get a match. This is a potential false positive,
// however it's an impossible condition because every validator must be assigned to a committee.
for cIndex, vIndex := range committee.Committee {
if vIndex == index {
valIndexInCommittee = cIndex
break
}
}
duties := make([]*structs.AttesterDuty, 0, len(coreDuties))
for _, d := range coreDuties {
duties = append(duties, &structs.AttesterDuty{
Pubkey: hexutil.Encode(pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(index), 10),
CommitteeIndex: strconv.FormatUint(uint64(committee.CommitteeIndex), 10),
CommitteeLength: strconv.Itoa(len(committee.Committee)),
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot, 10),
ValidatorCommitteeIndex: strconv.Itoa(valIndexInCommittee),
Slot: strconv.FormatUint(uint64(committee.AttesterSlot), 10),
Pubkey: hexutil.Encode(d.Pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(d.ValidatorIndex), 10),
CommitteeIndex: strconv.FormatUint(uint64(d.CommitteeIndex), 10),
CommitteeLength: strconv.FormatUint(d.CommitteeLength, 10),
CommitteesAtSlot: strconv.FormatUint(d.CommitteesAtSlot, 10),
ValidatorCommitteeIndex: strconv.FormatUint(d.ValidatorCommitteeIndex, 10),
Slot: strconv.FormatUint(uint64(d.Slot), 10),
})
}
@@ -963,7 +934,7 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
}
dependentRoot = r[:]
} else {
dependentRoot, err = attestationDependentRoot(st, requestedEpoch)
dependentRoot, err = core.AttestationDependentRoot(st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
@@ -1021,33 +992,23 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
return
}
var assignments map[primitives.ValidatorIndex][]primitives.Slot
dutyEpoch := requestedEpoch
if nextEpochLookahead {
assignments, err = helpers.ProposerAssignments(ctx, st, nextEpoch)
} else {
assignments, err = helpers.ProposerAssignments(ctx, st, requestedEpoch)
dutyEpoch = nextEpoch
}
if err != nil {
httputil.HandleError(w, "Could not compute committee assignments: "+err.Error(), http.StatusInternalServerError)
coreDuties, rpcErr := s.CoreService.ProposerDuties(ctx, st, dutyEpoch)
if rpcErr != nil {
httputil.HandleError(w, rpcErr.Err.Error(), core.ErrorReasonToHTTP(rpcErr.Reason))
return
}
duties := make([]*structs.ProposerDuty, 0)
for index, proposalSlots := range assignments {
val, err := st.ValidatorAtIndexReadOnly(index)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not get validator at index %d: %v", index, err), http.StatusInternalServerError)
return
}
pubkey48 := val.PublicKey()
pubkey := pubkey48[:]
for _, slot := range proposalSlots {
duties = append(duties, &structs.ProposerDuty{
Pubkey: hexutil.Encode(pubkey),
ValidatorIndex: strconv.FormatUint(uint64(index), 10),
Slot: strconv.FormatUint(uint64(slot), 10),
})
}
duties := make([]*structs.ProposerDuty, 0, len(coreDuties))
for _, d := range coreDuties {
duties = append(duties, &structs.ProposerDuty{
Pubkey: hexutil.Encode(d.Pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(d.ValidatorIndex), 10),
Slot: strconv.FormatUint(uint64(d.Slot), 10),
})
}
var dependentRoot []byte
@@ -1059,7 +1020,7 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
}
dependentRoot = r[:]
} else {
dependentRoot, err = proposalDependentRoot(st, requestedEpoch)
dependentRoot, err = core.ProposalDependentRoot(st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
@@ -1070,10 +1031,6 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
return
}
if err = sortProposerDuties(duties); err != nil {
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
return
}
resp := &structs.GetProposerDutiesResponse{
DependentRoot: hexutil.Encode(dependentRoot),
@@ -1135,7 +1092,7 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
}
currentEpoch := slots.ToEpoch(s.TimeFetcher.CurrentSlot())
lastValidEpoch := syncCommitteeDutiesLastValidEpoch(currentEpoch)
lastValidEpoch := core.SyncCommitteeDutiesLastValidEpoch(currentEpoch)
if requestedEpoch > lastValidEpoch {
httputil.HandleError(w, fmt.Sprintf("Epoch is too far in the future, maximum valid epoch is %d", lastValidEpoch), http.StatusBadRequest)
return
@@ -1149,55 +1106,23 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
return
}
currentSyncCommitteeFirstEpoch, err := slots.SyncCommitteePeriodStartEpoch(startingEpoch)
if err != nil {
httputil.HandleError(w, "Could not get sync committee period start epoch: "+err.Error(), http.StatusInternalServerError)
return
}
nextSyncCommitteeFirstEpoch := currentSyncCommitteeFirstEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod
isCurrentCommitteeRequested := requestedEpoch < nextSyncCommitteeFirstEpoch
var committee *ethpbalpha.SyncCommittee
if isCurrentCommitteeRequested {
committee, err = st.CurrentSyncCommittee()
if err != nil {
httputil.HandleError(w, "Could not get sync committee: "+err.Error(), http.StatusInternalServerError)
return
}
} else {
committee, err = st.NextSyncCommittee()
if err != nil {
httputil.HandleError(w, "Could not get sync committee: "+err.Error(), http.StatusInternalServerError)
return
}
}
committeePubkeys := make(map[[fieldparams.BLSPubkeyLength]byte][]string)
for j, pubkey := range committee.Pubkeys {
pubkey48 := bytesutil.ToBytes48(pubkey)
committeePubkeys[pubkey48] = append(committeePubkeys[pubkey48], strconv.FormatUint(uint64(j), 10))
}
duties, vals, err := syncCommitteeDutiesAndVals(st, requestedValIndices, committeePubkeys)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
coreDuties, rpcErr := s.CoreService.SyncCommitteeDuties(ctx, st, requestedEpoch, currentEpoch, requestedValIndices)
if rpcErr != nil {
httputil.HandleError(w, rpcErr.Err.Error(), core.ErrorReasonToHTTP(rpcErr.Reason))
return
}
var registerSyncSubnet func(state.BeaconState, primitives.Epoch, []byte, validator2.Status) error
if isCurrentCommitteeRequested {
registerSyncSubnet = core.RegisterSyncSubnetCurrentPeriod
} else {
registerSyncSubnet = core.RegisterSyncSubnetNextPeriod
}
for _, v := range vals {
pk := v.PublicKey()
valStatus, err := rpchelpers.ValidatorStatus(v, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get validator status: "+err.Error(), http.StatusInternalServerError)
return
}
if err := registerSyncSubnet(st, requestedEpoch, pk[:], valStatus); err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not register sync subnet for pubkey %#x", pk), http.StatusInternalServerError)
return
duties := make([]*structs.SyncCommitteeDuty, 0, len(coreDuties))
for _, d := range coreDuties {
syncIndices := make([]string, len(d.ValidatorSyncCommitteeIndices))
for i, idx := range d.ValidatorSyncCommitteeIndices {
syncIndices[i] = strconv.FormatUint(idx, 10)
}
duties = append(duties, &structs.SyncCommitteeDuty{
Pubkey: hexutil.Encode(d.Pubkey[:]),
ValidatorIndex: strconv.FormatUint(uint64(d.ValidatorIndex), 10),
ValidatorSyncCommitteeIndices: syncIndices,
})
}
isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
@@ -1388,7 +1313,7 @@ func (s *Server) GetPTCDuties(w http.ResponseWriter, r *http.Request) {
}
dependentRoot = r[:]
} else {
dependentRoot, err = attestationDependentRoot(st, requestedEpoch)
dependentRoot, err = core.AttestationDependentRoot(st, requestedEpoch)
if err != nil {
httputil.HandleError(w, "Could not get dependent root: "+err.Error(), http.StatusInternalServerError)
return
@@ -1525,102 +1450,3 @@ func (s *Server) BeaconCommitteeSelections(w http.ResponseWriter, _ *http.Reques
func (s *Server) SyncCommitteeSelections(w http.ResponseWriter, _ *http.Request) {
httputil.HandleError(w, "Endpoint not implemented", 501)
}
// attestationDependentRoot is get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch - 1) - 1)
// or the genesis block root in the case of underflow.
func attestationDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
	var dependentRootSlot primitives.Slot
	if epoch <= 1 {
		// Underflow guard: epochs 0 and 1 pin the dependent slot to genesis (slot 0).
		dependentRootSlot = 0
	} else {
		prevEpochStartSlot, err := slots.EpochStart(epoch.Sub(1))
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not obtain epoch's start slot: %v", err)
		}
		dependentRootSlot = prevEpochStartSlot.Sub(1)
	}
	root, err := helpers.BlockRootAtSlot(s, dependentRootSlot)
	if err != nil {
		return nil, errors.Wrap(err, "could not get block root")
	}
	return root, nil
}
// proposalDependentRoot is get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch) - 1)
// or the genesis block root in the case of underflow.
func proposalDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte, error) {
	// Epoch 0 would underflow the slot computation, so the dependent root
	// falls back to the block root at slot 0.
	var dependentRootSlot primitives.Slot
	if epoch == 0 {
		dependentRootSlot = 0
	} else {
		epochStartSlot, err := slots.EpochStart(epoch)
		if err != nil {
			// Wrap with errors like the rest of this HTTP path instead of
			// leaking a gRPC status error into an HTTP error message.
			return nil, errors.Wrap(err, "could not obtain epoch's start slot")
		}
		dependentRootSlot = epochStartSlot.Sub(1)
	}
	root, err := helpers.BlockRootAtSlot(s, dependentRootSlot)
	if err != nil {
		return nil, errors.Wrap(err, "could not get block root")
	}
	return root, nil
}
// syncCommitteeDutiesLastValidEpoch returns the last epoch for which sync
// committee duties can be served, given the current epoch. Duties are known
// through the end of the next sync committee period: the first epoch of the
// period after next, minus one.
func syncCommitteeDutiesLastValidEpoch(currentEpoch primitives.Epoch) primitives.Epoch {
	epochsPerPeriod := params.BeaconConfig().EpochsPerSyncCommitteePeriod
	currentPeriod := currentEpoch / epochsPerPeriod
	firstInvalidEpoch := (currentPeriod + 2) * epochsPerPeriod
	return firstInvalidEpoch - 1
}
// syncCommitteeDutiesAndVals takes a list of requested validator indices and the actual sync committee pubkeys.
// It returns duties for the validator indices that are part of the sync committee.
// Additionally, it returns read-only validator objects for these validator indices.
// It errors when any requested index is not present in the state's registry.
func syncCommitteeDutiesAndVals(
	st state.BeaconState,
	requestedValIndices []primitives.ValidatorIndex,
	committeePubkeys map[[fieldparams.BLSPubkeyLength]byte][]string,
) ([]*structs.SyncCommitteeDuty, []state.ReadOnlyValidator, error) {
	// Pre-size to the request length; at most one duty per requested index.
	duties := make([]*structs.SyncCommitteeDuty, 0, len(requestedValIndices))
	vals := make([]state.ReadOnlyValidator, 0, len(requestedValIndices))
	var zeroPubkey [fieldparams.BLSPubkeyLength]byte
	for _, index := range requestedValIndices {
		valPubkey := st.PubkeyAtIndex(index)
		// A zero pubkey means the index is not a known validator.
		if bytes.Equal(valPubkey[:], zeroPubkey[:]) {
			return nil, nil, errors.Errorf("Invalid validator index %d", index)
		}
		indices, ok := committeePubkeys[valPubkey]
		if !ok {
			// Not part of the sync committee: no duty to report for this index.
			continue
		}
		v, err := st.ValidatorAtIndexReadOnly(index)
		if err != nil {
			// Wrap the underlying error instead of discarding it.
			return nil, nil, fmt.Errorf("could not get validator at index %d: %w", index, err)
		}
		duties = append(duties, &structs.SyncCommitteeDuty{
			ValidatorIndex:                strconv.FormatUint(uint64(index), 10),
			Pubkey:                        hexutil.Encode(valPubkey[:]),
			ValidatorSyncCommitteeIndices: indices,
		})
		vals = append(vals, v)
	}
	return duties, vals, nil
}
// sortProposerDuties sorts duties in place by ascending slot. It returns an
// error if any duty's slot cannot be parsed as an unsigned integer.
func sortProposerDuties(duties []*structs.ProposerDuty) error {
	// Parse and validate every slot up front. The previous comparator kept
	// sorting with an inconsistent ordering after a parse failure, and
	// re-parsed each slot O(n log n) times.
	slotOf := make(map[*structs.ProposerDuty]uint64, len(duties))
	for _, d := range duties {
		s, err := strconv.ParseUint(d.Slot, 10, 64)
		if err != nil {
			return errors.Wrap(err, "could not parse slot")
		}
		slotOf[d] = s
	}
	sort.Slice(duties, func(i, j int) bool {
		return slotOf[duties[i]] < slotOf[duties[j]]
	})
	return nil
}

View File

@@ -2009,6 +2009,7 @@ func TestGetAttesterDuties(t *testing.T) {
OptimisticModeFetcher: chain,
HeadFetcher: chain,
BeaconDB: db,
CoreService: &core.Service{},
}
t.Run("single validator", func(t *testing.T) {
@@ -2327,6 +2328,7 @@ func TestGetProposerDuties(t *testing.T) {
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
CoreService: &core.Service{},
}
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -2369,6 +2371,7 @@ func TestGetProposerDuties(t *testing.T) {
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
CoreService: &core.Service{},
}
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -2412,6 +2415,7 @@ func TestGetProposerDuties(t *testing.T) {
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
CoreService: &core.Service{},
}
currentEpoch := slots.ToEpoch(bs.Slot())
@@ -2451,6 +2455,7 @@ func TestGetProposerDuties(t *testing.T) {
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
CoreService: &core.Service{},
}
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
@@ -2577,6 +2582,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
CoreService: &core.Service{},
}
t.Run("single validator", func(t *testing.T) {
@@ -2768,6 +2774,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
CoreService: &core.Service{},
}
var body bytes.Buffer
@@ -2863,6 +2870,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
OptimisticModeFetcher: mockChainService,
ChainInfoFetcher: mockChainService,
BeaconDB: db,
CoreService: &core.Service{},
}
var body bytes.Buffer

View File

@@ -11,6 +11,7 @@ go_library(
"construct_generic_block.go",
"duties.go",
"duties_v2.go",
"duties_v3.go",
"exit.go",
"log.go",
"proposer.go",
@@ -200,6 +201,7 @@ go_test(
"construct_generic_block_test.go",
"duties_test.go",
"duties_v2_test.go",
"duties_v3_test.go",
"exit_test.go",
"proposer_altair_test.go",
"proposer_attestations_electra_test.go",

View File

@@ -72,10 +72,24 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
}
}
meta, err := loadDutiesMetadata(ctx, s, req.Epoch, requestIndices)
if err != nil {
return nil, err
// Use core service for attester and proposer duties
currentAttesterDuties, rpcErr := vs.CoreService.AttesterDuties(ctx, s, req.Epoch, requestIndices)
if rpcErr != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
}
nextAttesterDuties, rpcErr := vs.CoreService.AttesterDuties(ctx, s, req.Epoch+1, requestIndices)
if rpcErr != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
}
proposerDuties, rpcErr := vs.CoreService.ProposerDuties(ctx, s, req.Epoch)
if rpcErr != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
}
// Build index maps for O(1) lookup
currentAttesterMap := buildAttesterMap(currentAttesterDuties)
nextAttesterMap := buildAttesterMap(nextAttesterDuties)
proposerMap := buildProposerMap(proposerDuties)
validatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
nextValidatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
@@ -97,13 +111,72 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
continue
}
currentAssignment := vs.getValidatorAssignment(meta.current, info.index)
nextAssignment := vs.getValidatorAssignment(meta.next, info.index)
statusEnum := assignmentStatus(s, info.index)
assignment, nextDuty, err := vs.buildValidatorDuty(pubKey, info.index, s, req.Epoch, meta, currentAssignment, nextAssignment)
if err != nil {
return nil, err
// Current epoch assignment
assignment := &ethpb.DutiesV2Response_Duty{
PublicKey: pubKey,
ValidatorIndex: info.index,
Status: statusEnum,
ProposerSlots: proposerMap[info.index],
}
if ad, ok := currentAttesterMap[info.index]; ok {
assignment.AttesterSlot = ad.Slot
assignment.CommitteeIndex = ad.CommitteeIndex
assignment.CommitteeLength = ad.CommitteeLength
assignment.CommitteesAtSlot = ad.CommitteesAtSlot
assignment.ValidatorCommitteeIndex = ad.ValidatorCommitteeIndex
}
// Next epoch assignment
nextDuty := &ethpb.DutiesV2Response_Duty{
PublicKey: pubKey,
ValidatorIndex: info.index,
Status: statusEnum,
}
if ad, ok := nextAttesterMap[info.index]; ok {
nextDuty.AttesterSlot = ad.Slot
nextDuty.CommitteeIndex = ad.CommitteeIndex
nextDuty.CommitteeLength = ad.CommitteeLength
nextDuty.CommitteesAtSlot = ad.CommitteesAtSlot
nextDuty.ValidatorCommitteeIndex = ad.ValidatorCommitteeIndex
}
// Sync committee flags
if coreTime.HigherEqualThanAltairVersionAndEpoch(s, req.Epoch) {
inSync, err := helpers.IsCurrentPeriodSyncCommittee(s, info.index)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
}
assignment.IsSyncCommittee = inSync
nextDuty.IsSyncCommittee = inSync
if inSync {
if err := core.RegisterSyncSubnetCurrentPeriodProto(s, req.Epoch, pubKey, statusEnum); err != nil {
return nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
}
}
// Next epoch sync committee duty is assigned with next period sync committee only during
// sync period epoch boundary (ie. EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1). Else wise
// next epoch sync committee duty is the same as current epoch.
nextEpoch := req.Epoch.Add(1)
stateEpoch := coreTime.CurrentEpoch(s)
n := slots.SyncCommitteePeriod(nextEpoch)
c := slots.SyncCommitteePeriod(stateEpoch)
if n > c {
nextInSync, err := helpers.IsNextPeriodSyncCommittee(s, info.index)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
}
nextDuty.IsSyncCommittee = nextInSync
if nextInSync {
if err := core.RegisterSyncSubnetNextPeriodProto(s, req.Epoch, pubKey, statusEnum); err != nil {
log.WithError(err).Warn("Could not register sync subnet next period")
}
}
}
}
validatorAssignments = append(validatorAssignments, assignment)
nextValidatorAssignments = append(nextValidatorAssignments, nextDuty)
}
@@ -150,155 +223,20 @@ func (vs *Server) stateForEpoch(ctx context.Context, s state.BeaconState, reqEpo
return s, nil
}
// dutiesMetadata bundles together related data needed for duty
// construction.
type dutiesMetadata struct {
current *metadata
next *metadata
// buildAttesterMap creates a map from validator index to attester duty for O(1) lookup.
func buildAttesterMap(duties []*core.AttesterDutyResult) map[primitives.ValidatorIndex]*core.AttesterDutyResult {
	byIndex := make(map[primitives.ValidatorIndex]*core.AttesterDutyResult, len(duties))
	for i := range duties {
		byIndex[duties[i].ValidatorIndex] = duties[i]
	}
	return byIndex
}
// metadata bundles per-epoch committee data used when building duties.
type metadata struct {
	// committeesAtSlot is the number of committees at each slot of the epoch.
	committeesAtSlot uint64
	// proposalSlots maps a validator index to the slots it proposes in.
	// Only populated for the current epoch (see loadDutiesMetadata).
	proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
	// committeeAssignments maps a requested validator index to its committee assignment.
	committeeAssignments map[primitives.ValidatorIndex]*helpers.CommitteeAssignment
}
// loadDutiesMetadata gathers committee metadata for the requested epoch and
// the one after it, resolving proposer slots for the requested epoch only.
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*dutiesMetadata, error) {
	current, err := loadMetadata(ctx, s, reqEpoch, requestIndices)
	if err != nil {
		return nil, err
	}
	// note: we only set the proposer slots for the current assignment and not the next epoch assignment
	proposalSlots, err := helpers.ProposerAssignments(ctx, s, reqEpoch)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
	}
	current.proposalSlots = proposalSlots
	next, err := loadMetadata(ctx, s, reqEpoch+1, requestIndices)
	if err != nil {
		return nil, err
	}
	return &dutiesMetadata{current: current, next: next}, nil
}
// loadMetadata computes committee metadata for a single epoch, restricted to
// the requested validator indices.
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*metadata, error) {
	if err := helpers.VerifyAssignmentEpoch(reqEpoch, s); err != nil {
		return nil, err
	}
	activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, reqEpoch)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not get active validator count: %v", err)
	}
	// Use CommitteeAssignments which only computes committees for requested validators.
	assignments, err := helpers.CommitteeAssignments(ctx, s, reqEpoch, requestIndices)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
	}
	return &metadata{
		committeesAtSlot:     helpers.SlotCommitteeCount(activeValidatorCount),
		committeeAssignments: assignments,
	}, nil
}
// findValidatorIndexInCommittee finds the position of a validator in a committee.
// It returns 0 when the validator is absent, which is indistinguishable from
// position 0 — callers must only use this for validators known to be members.
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
	for pos := range committee {
		if committee[pos] == validatorIndex {
			return uint64(pos)
		}
	}
	return 0
}
// getValidatorAssignment retrieves the assignment for a validator from CommitteeAssignments.
// A zero-valued LiteAssignment is returned when the validator has no assignment.
func (vs *Server) getValidatorAssignment(meta *metadata, validatorIndex primitives.ValidatorIndex) *helpers.LiteAssignment {
	assignment, exists := meta.committeeAssignments[validatorIndex]
	if !exists {
		return &helpers.LiteAssignment{}
	}
	return &helpers.LiteAssignment{
		AttesterSlot:            assignment.AttesterSlot,
		CommitteeIndex:          assignment.CommitteeIndex,
		CommitteeLength:         uint64(len(assignment.Committee)),
		ValidatorCommitteeIndex: findValidatorIndexInCommittee(assignment.Committee, validatorIndex),
	}
}
// buildValidatorDuty builds both currentepoch and nextepoch V2 duty objects
// for a single validator index.
// As a side effect it registers sync subnet subscriptions for validators that
// are in the current (and, at a period boundary, the next) sync committee.
func (vs *Server) buildValidatorDuty(
	pubKey []byte,
	idx primitives.ValidatorIndex,
	s state.BeaconState,
	reqEpoch primitives.Epoch,
	meta *dutiesMetadata,
	currentAssignment *helpers.LiteAssignment,
	nextAssignment *helpers.LiteAssignment,
) (*ethpb.DutiesV2Response_Duty, *ethpb.DutiesV2Response_Duty, error) {
	assignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
	nextDuty := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
	statusEnum := assignmentStatus(s, idx)
	// Current-epoch duty: committee fields plus proposer slots.
	assignment.ValidatorIndex = idx
	assignment.Status = statusEnum
	assignment.CommitteesAtSlot = meta.current.committeesAtSlot
	assignment.ProposerSlots = meta.current.proposalSlots[idx]
	populateCommitteeFields(assignment, currentAssignment)
	// Next-epoch duty: no proposer slots (only computed for the current epoch).
	nextDuty.ValidatorIndex = idx
	nextDuty.Status = statusEnum
	nextDuty.CommitteesAtSlot = meta.next.committeesAtSlot
	populateCommitteeFields(nextDuty, nextAssignment)
	// Sync committee flags
	if coreTime.HigherEqualThanAltairVersionAndEpoch(s, reqEpoch) {
		inSync, err := helpers.IsCurrentPeriodSyncCommittee(s, idx)
		if err != nil {
			return nil, nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
		}
		assignment.IsSyncCommittee = inSync
		nextDuty.IsSyncCommittee = inSync
		if inSync {
			// Current-period registration failure is treated as fatal.
			if err := core.RegisterSyncSubnetCurrentPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
				return nil, nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
			}
		}
		// Next epoch sync committee duty is assigned with next period sync committee only during
		// sync period epoch boundary (ie. EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1). Else wise
		// next epoch sync committee duty is the same as current epoch.
		nextEpoch := reqEpoch + 1
		currentEpoch := coreTime.CurrentEpoch(s)
		n := slots.SyncCommitteePeriod(nextEpoch)
		c := slots.SyncCommitteePeriod(currentEpoch)
		if n > c {
			nextInSync, err := helpers.IsNextPeriodSyncCommittee(s, idx)
			if err != nil {
				return nil, nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
			}
			nextDuty.IsSyncCommittee = nextInSync
			if nextInSync {
				// Next-period registration is best-effort: failure is logged, not returned.
				if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
					log.WithError(err).Warn("Could not register sync subnet next period")
				}
			}
		}
	}
	return assignment, nextDuty, nil
}
func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {
if duty == nil || la == nil {
// should never be the case as previous functions should set
return
}
duty.CommitteeLength = la.CommitteeLength
duty.CommitteeIndex = la.CommitteeIndex
duty.ValidatorCommitteeIndex = la.ValidatorCommitteeIndex
duty.AttesterSlot = la.AttesterSlot
// buildProposerMap creates a map from validator index to proposal slots for O(1) lookup.
func buildProposerMap(duties []*core.ProposerDutyResult) map[primitives.ValidatorIndex][]primitives.Slot {
	byIndex := make(map[primitives.ValidatorIndex][]primitives.Slot)
	for _, duty := range duties {
		byIndex[duty.ValidatorIndex] = append(byIndex[duty.ValidatorIndex], duty.Slot)
	}
	return byIndex
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -53,6 +54,7 @@ func TestGetDutiesV2_OK(t *testing.T) {
ForkchoiceFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
CoreService: &core.Service{},
}
// Test the first validator in registry.
@@ -140,6 +142,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) {
Eth1InfoFetcher: &mockExecution.Chain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
CoreService: &core.Service{},
}
// Test the first validator in registry.
@@ -247,6 +250,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) {
Eth1InfoFetcher: &mockExecution.Chain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
CoreService: &core.Service{},
}
// Test the first validator in registry.
@@ -341,6 +345,7 @@ func TestGetAltairDutiesV2_UnknownPubkey(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
PayloadIDCache: cache.NewPayloadIDCache(),
CoreService: &core.Service{},
}
unknownPubkey := bytesutil.PadTo([]byte{'u'}, 48)
@@ -387,6 +392,7 @@ func TestGetDutiesV2_StateAdvancement(t *testing.T) {
TimeFetcher: chain,
ForkchoiceFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{},
}
// Verify state processing occurs
@@ -442,6 +448,7 @@ func TestGetDutiesV2_CurrentEpoch_ShouldNotFail(t *testing.T) {
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
CoreService: &core.Service{},
}
// Test the first validator in registry.
@@ -482,6 +489,7 @@ func TestGetDutiesV2_MultipleKeys_OK(t *testing.T) {
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
CoreService: &core.Service{},
}
pubkey0 := deposits[0].Data.PublicKey
@@ -540,6 +548,7 @@ func TestGetDutiesV2_NextSyncCommitteePeriod(t *testing.T) {
TimeFetcher: chain,
ForkchoiceFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{},
}
res, err := vs.GetDutiesV2(t.Context(), req)
@@ -559,35 +568,3 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) {
_, err := vs.GetDutiesV2(t.Context(), &ethpb.DutiesRequest{})
assert.ErrorContains(t, "Syncing to latest head", err)
}
// TestGetValidatorAssignment checks that getValidatorAssignment returns the
// populated assignment for a known validator and a zero-valued assignment for
// an unknown one.
func TestGetValidatorAssignment(t *testing.T) {
	start := primitives.Slot(100)
	// Test using CommitteeAssignments
	committeeAssignments := map[primitives.ValidatorIndex]*helpers.CommitteeAssignment{
		5: {
			Committee:      []primitives.ValidatorIndex{4, 5, 6},
			AttesterSlot:   start + 1,
			CommitteeIndex: primitives.CommitteeIndex(0),
		},
	}
	meta := &metadata{
		committeeAssignments: committeeAssignments,
	}
	vs := &Server{}
	// Test existing validator
	assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(5))
	require.NotNil(t, assignment)
	assert.Equal(t, start+1, assignment.AttesterSlot)
	assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
	// Validator 5 sits at position 1 of the committee {4, 5, 6}.
	assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)
	// Test non-existent validator should return empty assignment
	assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
	require.NotNil(t, assignment)
	assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
	assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}

View File

@@ -0,0 +1,196 @@
package validator
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// GetAttesterDuties returns attester duties for the requested validators at the given epoch.
// The response includes the dependent root identifying the chain the duties
// were computed against, and whether the node is currently optimistic.
func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpb.AttesterDutiesRequest) (*ethpb.AttesterDutiesResponse, error) {
	ctx, span := trace.StartSpan(ctx, "validator.GetAttesterDuties")
	defer span.End()
	if vs.SyncChecker.Syncing() {
		return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
	}
	// Duties can be requested at most one epoch ahead of the current epoch.
	currentEpoch := slots.ToEpoch(vs.TimeFetcher.CurrentSlot())
	if req.Epoch > currentEpoch+1 {
		return nil, status.Errorf(codes.InvalidArgument, "Request epoch %d can not be greater than next epoch %d", req.Epoch, currentEpoch+1)
	}
	s, err := vs.HeadFetcher.HeadState(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
	}
	// Bring the head state to the requested epoch before computing duties.
	s, err = vs.stateForEpoch(ctx, s, req.Epoch)
	if err != nil {
		return nil, err
	}
	duties, rpcErr := vs.CoreService.AttesterDuties(ctx, s, req.Epoch, req.ValidatorIndices)
	if rpcErr != nil {
		return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
	}
	// For epochs 0 and 1 the dependent-root computation would underflow, so
	// the genesis block root is used instead.
	var dependentRoot []byte
	if req.Epoch <= 1 {
		r, err := vs.BeaconDB.GenesisBlockRoot(ctx)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get genesis block root: %v", err)
		}
		dependentRoot = r[:]
	} else {
		dependentRoot, err = core.AttestationDependentRoot(s, req.Epoch)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get dependent root: %v", err)
		}
	}
	optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not determine optimistic status: %v", err)
	}
	resp := &ethpb.AttesterDutiesResponse{
		DependentRoot:       dependentRoot,
		ExecutionOptimistic: optimistic,
		Duties:              make([]*ethpb.AttesterDuty, len(duties)),
	}
	// Convert core duty results into the protobuf response shape.
	for i, d := range duties {
		resp.Duties[i] = &ethpb.AttesterDuty{
			Pubkey:                  d.Pubkey[:],
			ValidatorIndex:          d.ValidatorIndex,
			CommitteeIndex:          d.CommitteeIndex,
			CommitteeLength:         d.CommitteeLength,
			CommitteesAtSlot:        d.CommitteesAtSlot,
			ValidatorCommitteeIndex: d.ValidatorCommitteeIndex,
			Slot:                    d.Slot,
		}
	}
	return resp, nil
}
// GetProposerDutiesV2 returns proposer duties for the given epoch.
// The response includes the dependent root identifying the chain the duties
// were computed against, and whether the node is currently optimistic.
func (vs *Server) GetProposerDutiesV2(ctx context.Context, req *ethpb.ProposerDutiesRequest) (*ethpb.ProposerDutiesResponse, error) {
	ctx, span := trace.StartSpan(ctx, "validator.GetProposerDutiesV2")
	defer span.End()
	if vs.SyncChecker.Syncing() {
		return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
	}
	// Duties can be requested at most one epoch ahead of the current epoch.
	currentEpoch := slots.ToEpoch(vs.TimeFetcher.CurrentSlot())
	if req.Epoch > currentEpoch+1 {
		return nil, status.Errorf(codes.InvalidArgument, "Request epoch %d can not be greater than next epoch %d", req.Epoch, currentEpoch+1)
	}
	s, err := vs.HeadFetcher.HeadState(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
	}
	// Bring the head state to the requested epoch before computing duties.
	s, err = vs.stateForEpoch(ctx, s, req.Epoch)
	if err != nil {
		return nil, err
	}
	duties, rpcErr := vs.CoreService.ProposerDuties(ctx, s, req.Epoch)
	if rpcErr != nil {
		return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
	}
	// Epoch 0 always needs genesis root from DB. Epoch 1 also needs it
	// post-Fulu because V2 uses AttestationDependentRoot which requires epoch > 1.
	// Pre-Fulu epoch 1 can be computed normally via ProposalDependentRoot.
	useGenesisRoot := req.Epoch == 0 || (req.Epoch == 1 && s.Version() >= version.Fulu)
	var dependentRoot []byte
	if useGenesisRoot {
		r, err := vs.BeaconDB.GenesisBlockRoot(ctx)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get genesis block root: %v", err)
		}
		dependentRoot = r[:]
	} else {
		dependentRoot, err = core.ProposalDependentRootV2(s, req.Epoch)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get dependent root: %v", err)
		}
	}
	optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not determine optimistic status: %v", err)
	}
	resp := &ethpb.ProposerDutiesResponse{
		DependentRoot:       dependentRoot,
		ExecutionOptimistic: optimistic,
		Duties:              make([]*ethpb.ProposerDutyV2, len(duties)),
	}
	// Convert core duty results into the protobuf response shape.
	for i, d := range duties {
		resp.Duties[i] = &ethpb.ProposerDutyV2{
			Pubkey:         d.Pubkey[:],
			ValidatorIndex: d.ValidatorIndex,
			Slot:           d.Slot,
		}
	}
	return resp, nil
}
// GetSyncCommitteeDuties returns sync committee duties for the requested validators at the given epoch.
// Requests are rejected beyond the last epoch of the next sync committee
// period, since membership past that point is not yet determined.
func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpb.SyncCommitteeDutiesRequest) (*ethpb.SyncCommitteeDutiesResponse, error) {
	ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
	defer span.End()
	if vs.SyncChecker.Syncing() {
		return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
	}
	currentEpoch := slots.ToEpoch(vs.TimeFetcher.CurrentSlot())
	// Sync duties are known through the end of the next sync committee period.
	lastValidEpoch := core.SyncCommitteeDutiesLastValidEpoch(currentEpoch)
	if req.Epoch > lastValidEpoch {
		return nil, status.Errorf(codes.InvalidArgument, "Request epoch %d can not be greater than last valid epoch %d for sync committee duties", req.Epoch, lastValidEpoch)
	}
	s, err := vs.HeadFetcher.HeadState(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
	}
	// Bring the head state to the requested epoch before computing duties.
	s, err = vs.stateForEpoch(ctx, s, req.Epoch)
	if err != nil {
		return nil, err
	}
	duties, rpcErr := vs.CoreService.SyncCommitteeDuties(ctx, s, req.Epoch, currentEpoch, req.ValidatorIndices)
	if rpcErr != nil {
		return nil, status.Errorf(core.ErrorReasonToGRPC(rpcErr.Reason), "%v", rpcErr.Err)
	}
	optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not determine optimistic status: %v", err)
	}
	resp := &ethpb.SyncCommitteeDutiesResponse{
		ExecutionOptimistic: optimistic,
		Duties:              make([]*ethpb.SyncCommitteeDuty, len(duties)),
	}
	// Convert core duty results into the protobuf response shape.
	for i, d := range duties {
		resp.Duties[i] = &ethpb.SyncCommitteeDuty{
			Pubkey:                        d.Pubkey[:],
			ValidatorIndex:                d.ValidatorIndex,
			ValidatorSyncCommitteeIndices: d.ValidatorSyncCommitteeIndices,
		}
	}
	return resp, nil
}

View File

@@ -0,0 +1,311 @@
package validator
import (
"testing"
"time"
mockChain "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
dbutil "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
// TestGetAttesterDuties_OK exercises the happy path: duties are returned for
// the requested indices at epoch 0, the node is non-optimistic, and the
// dependent root is populated from the saved genesis block root.
func TestGetAttesterDuties_OK(t *testing.T) {
	genesis := util.NewBeaconBlock()
	depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
	deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
	require.NoError(t, err)
	eth1Data, err := util.DeterministicEth1Data(len(deposits))
	require.NoError(t, err)
	bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
	require.NoError(t, err)
	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err)
	db := dbutil.SetupDB(t)
	// Epoch <= 1 requests read the genesis block root from the DB.
	require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
	chain := &mockChain.ChainService{
		State: bs, Root: genesisRoot[:], Genesis: time.Now(),
	}
	vs := &Server{
		HeadFetcher:           chain,
		TimeFetcher:           chain,
		OptimisticModeFetcher: chain,
		SyncChecker:           &mockSync.Sync{IsSyncing: false},
		BeaconDB:              db,
		CoreService:           &core.Service{},
	}
	req := &ethpb.AttesterDutiesRequest{
		Epoch:            0,
		ValidatorIndices: []primitives.ValidatorIndex{0, 1},
	}
	res, err := vs.GetAttesterDuties(t.Context(), req)
	require.NoError(t, err)
	assert.Equal(t, 2, len(res.Duties))
	assert.Equal(t, false, res.ExecutionOptimistic)
	assert.NotNil(t, res.DependentRoot)
	// Epoch-0 attester slots must fall within the first epoch.
	for _, d := range res.Duties {
		assert.NotNil(t, d.Pubkey)
		assert.Equal(t, true, d.Slot < params.BeaconConfig().SlotsPerEpoch)
	}
}
// TestGetAttesterDuties_Syncing verifies the endpoint is rejected while the
// node is still syncing.
func TestGetAttesterDuties_Syncing(t *testing.T) {
	vs := &Server{
		SyncChecker: &mockSync.Sync{IsSyncing: true},
	}
	_, err := vs.GetAttesterDuties(t.Context(), &ethpb.AttesterDutiesRequest{})
	assert.ErrorContains(t, "Syncing to latest head", err)
}
// TestGetAttesterDuties_EpochOutOfBound verifies a request more than one epoch
// ahead of the current epoch is rejected.
func TestGetAttesterDuties_EpochOutOfBound(t *testing.T) {
	chain := &mockChain.ChainService{Genesis: time.Now()}
	vs := &Server{
		TimeFetcher: chain,
		SyncChecker: &mockSync.Sync{IsSyncing: false},
	}
	currentEpoch := primitives.Epoch(chain.CurrentSlot() / params.BeaconConfig().SlotsPerEpoch)
	// currentEpoch+2 is the first epoch past the allowed lookahead.
	req := &ethpb.AttesterDutiesRequest{Epoch: currentEpoch + 2}
	_, err := vs.GetAttesterDuties(t.Context(), req)
	assert.ErrorContains(t, "can not be greater than next epoch", err)
}
// TestGetProposerDutiesV2_OK exercises the happy path: proposer duties are
// returned for epoch 0 with a populated dependent root.
func TestGetProposerDutiesV2_OK(t *testing.T) {
	genesis := util.NewBeaconBlock()
	depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
	deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
	require.NoError(t, err)
	eth1Data, err := util.DeterministicEth1Data(len(deposits))
	require.NoError(t, err)
	bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
	require.NoError(t, err)
	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err)
	db := dbutil.SetupDB(t)
	// Epoch 0 requests read the genesis block root from the DB.
	require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
	chain := &mockChain.ChainService{
		State: bs, Root: genesisRoot[:], Genesis: time.Now(),
	}
	vs := &Server{
		HeadFetcher:           chain,
		TimeFetcher:           chain,
		OptimisticModeFetcher: chain,
		SyncChecker:           &mockSync.Sync{IsSyncing: false},
		BeaconDB:              db,
		CoreService:           &core.Service{},
	}
	req := &ethpb.ProposerDutiesRequest{Epoch: 0}
	res, err := vs.GetProposerDutiesV2(t.Context(), req)
	require.NoError(t, err)
	assert.Equal(t, true, len(res.Duties) > 0)
	assert.Equal(t, false, res.ExecutionOptimistic)
	assert.NotNil(t, res.DependentRoot)
	// Epoch-0 proposal slots must fall within the first epoch.
	for _, d := range res.Duties {
		assert.NotNil(t, d.Pubkey)
		assert.Equal(t, true, d.Slot < params.BeaconConfig().SlotsPerEpoch)
	}
}
// TestGetProposerDutiesV2_DependentRoot checks the epoch-1 dependent-root
// selection: pre-Fulu it is computed from the state's block roots, post-Fulu
// the genesis block root from the DB is used instead.
func TestGetProposerDutiesV2_DependentRoot(t *testing.T) {
	helpers.ClearCache()
	spe := params.BeaconConfig().SlotsPerEpoch
	genesisRoot := [32]byte{0xff}
	t.Run("pre-Fulu epoch 1 computes dependent root", func(t *testing.T) {
		helpers.ClearCache()
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.ElectraForkEpoch = 0
		cfg.FuluForkEpoch = 1000
		params.OverrideBeaconConfig(cfg)
		bs, _ := util.DeterministicGenesisStateElectra(t, 64)
		// Tag each block root's first byte with its slot so the chosen
		// dependent-root slot can be identified in the assertion below.
		roots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
		for i := range roots {
			roots[i] = make([]byte, 32)
			roots[i][0] = byte(i)
		}
		require.NoError(t, bs.SetBlockRoots(roots))
		require.NoError(t, bs.SetSlot(spe)) // epoch 1 start
		db := dbutil.SetupDB(t)
		require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
		secondsPerSlot := params.BeaconConfig().SecondsPerSlot
		// Back-date genesis by one epoch so the current epoch is 1.
		chain := &mockChain.ChainService{
			State:   bs,
			Root:    genesisRoot[:],
			Genesis: time.Now().Add(-time.Duration(uint64(spe)*secondsPerSlot) * time.Second),
		}
		vs := &Server{
			HeadFetcher:           chain,
			TimeFetcher:           chain,
			OptimisticModeFetcher: chain,
			SyncChecker:           &mockSync.Sync{IsSyncing: false},
			BeaconDB:              db,
			CoreService:           &core.Service{},
		}
		res, err := vs.GetProposerDutiesV2(t.Context(), &ethpb.ProposerDutiesRequest{Epoch: 1})
		require.NoError(t, err)
		// Pre-Fulu: ProposalDependentRoot uses epoch_start-1 = spe-1.
		assert.Equal(t, byte(spe-1), res.DependentRoot[0])
	})
	t.Run("post-Fulu epoch 1 uses genesis root", func(t *testing.T) {
		helpers.ClearCache()
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.FuluForkEpoch = 0
		params.OverrideBeaconConfig(cfg)
		bs, _ := util.DeterministicGenesisStateFulu(t, 64)
		// Tag each block root's first byte with its slot; these must NOT be
		// returned post-Fulu at epoch 1.
		roots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
		for i := range roots {
			roots[i] = make([]byte, 32)
			roots[i][0] = byte(i)
		}
		require.NoError(t, bs.SetBlockRoots(roots))
		require.NoError(t, bs.SetSlot(spe)) // epoch 1 start
		db := dbutil.SetupDB(t)
		require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
		secondsPerSlot := params.BeaconConfig().SecondsPerSlot
		// Back-date genesis by one epoch so the current epoch is 1.
		chain := &mockChain.ChainService{
			State:   bs,
			Root:    genesisRoot[:],
			Genesis: time.Now().Add(-time.Duration(uint64(spe)*secondsPerSlot) * time.Second),
		}
		vs := &Server{
			HeadFetcher:           chain,
			TimeFetcher:           chain,
			OptimisticModeFetcher: chain,
			SyncChecker:           &mockSync.Sync{IsSyncing: false},
			BeaconDB:              db,
			CoreService:           &core.Service{},
		}
		res, err := vs.GetProposerDutiesV2(t.Context(), &ethpb.ProposerDutiesRequest{Epoch: 1})
		require.NoError(t, err)
		// Post-Fulu: epoch 1 uses genesis root from DB.
		assert.Equal(t, byte(0xff), res.DependentRoot[0])
	})
}
// TestGetProposerDutiesV2_Syncing verifies the endpoint refuses to serve
// proposer duties while the beacon node is still syncing.
func TestGetProposerDutiesV2_Syncing(t *testing.T) {
	server := &Server{SyncChecker: &mockSync.Sync{IsSyncing: true}}

	_, err := server.GetProposerDutiesV2(t.Context(), &ethpb.ProposerDutiesRequest{})
	assert.ErrorContains(t, "Syncing to latest head", err)
}
// TestGetProposerDutiesV2_EpochOutOfBound verifies that requesting proposer
// duties for an epoch beyond the next epoch is rejected.
func TestGetProposerDutiesV2_EpochOutOfBound(t *testing.T) {
	mc := &mockChain.ChainService{Genesis: time.Now()}
	server := &Server{
		TimeFetcher: mc,
		SyncChecker: &mockSync.Sync{IsSyncing: false},
	}

	// Anything past current+1 is out of range for duty computation.
	epoch := primitives.Epoch(mc.CurrentSlot()/params.BeaconConfig().SlotsPerEpoch) + 2
	_, err := server.GetProposerDutiesV2(t.Context(), &ethpb.ProposerDutiesRequest{Epoch: epoch})
	assert.ErrorContains(t, "can not be greater than next epoch", err)
}
// TestGetSyncCommitteeDuties_OK covers the happy path: a synced node serving
// sync committee duties for two validators at the last epoch of the first
// sync committee period.
func TestGetSyncCommitteeDuties_OK(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = primitives.Epoch(0) // sync committees exist from genesis
	params.OverrideBeaconConfig(cfg)
	// One deposit per sync-committee seat keeps committee selection deterministic.
	deposits, _, err := util.DeterministicDepositsAndKeys(params.BeaconConfig().SyncCommitteeSize)
	require.NoError(t, err)
	eth1Data, err := util.DeterministicEth1Data(len(deposits))
	require.NoError(t, err)
	bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
	require.NoError(t, err)
	h := &ethpb.BeaconBlockHeader{
		StateRoot:  bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
		ParentRoot: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
		BodyRoot:   bytesutil.PadTo([]byte{'c'}, fieldparams.RootLength),
	}
	require.NoError(t, bs.SetLatestBlockHeader(h))
	syncCommittee, err := altair.NextSyncCommittee(t.Context(), bs)
	require.NoError(t, err)
	require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee))
	// Park the state on the last slot of the first sync committee period,
	// then refresh the cache from that state before serving duties.
	require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)-1))
	require.NoError(t, helpers.UpdateSyncCommitteeCache(bs))
	// Backdate genesis so wall-clock time agrees with the state's slot.
	slot := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * params.BeaconConfig().SecondsPerSlot
	genesisRoot := [32]byte{}
	chain := &mockChain.ChainService{
		State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
	}
	vs := &Server{
		HeadFetcher:           chain,
		TimeFetcher:           chain,
		OptimisticModeFetcher: chain,
		SyncChecker:           &mockSync.Sync{IsSyncing: false},
		CoreService:           &core.Service{},
	}
	currentEpoch := primitives.Epoch(params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1)
	req := &ethpb.SyncCommitteeDutiesRequest{
		Epoch:            currentEpoch,
		ValidatorIndices: []primitives.ValidatorIndex{0, 1},
	}
	res, err := vs.GetSyncCommitteeDuties(t.Context(), req)
	require.NoError(t, err)
	// One duty per requested validator, served non-optimistically, and each
	// duty must carry the validator's position(s) within the committee.
	assert.Equal(t, 2, len(res.Duties))
	assert.Equal(t, false, res.ExecutionOptimistic)
	for _, d := range res.Duties {
		assert.NotNil(t, d.Pubkey)
		assert.Equal(t, true, len(d.ValidatorSyncCommitteeIndices) > 0)
	}
}
// TestGetSyncCommitteeDuties_Syncing verifies the endpoint refuses to serve
// sync committee duties while the beacon node is still syncing.
func TestGetSyncCommitteeDuties_Syncing(t *testing.T) {
	server := &Server{SyncChecker: &mockSync.Sync{IsSyncing: true}}

	_, err := server.GetSyncCommitteeDuties(t.Context(), &ethpb.SyncCommitteeDutiesRequest{})
	assert.ErrorContains(t, "Syncing to latest head", err)
}
// TestGetSyncCommitteeDuties_EpochOutOfBound verifies that requesting sync
// committee duties past the last valid epoch of the current period window is
// rejected.
func TestGetSyncCommitteeDuties_EpochOutOfBound(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	config := params.BeaconConfig().Copy()
	config.AltairForkEpoch = primitives.Epoch(0)
	config.EpochsPerSyncCommitteePeriod = 1
	params.OverrideBeaconConfig(config)

	mc := &mockChain.ChainService{Genesis: time.Now()}
	server := &Server{
		TimeFetcher: mc,
		SyncChecker: &mockSync.Sync{IsSyncing: false},
	}

	// One epoch past the last valid sync-committee epoch must be refused.
	current := primitives.Epoch(mc.CurrentSlot() / params.BeaconConfig().SlotsPerEpoch)
	req := &ethpb.SyncCommitteeDutiesRequest{Epoch: core.SyncCommitteeDutiesLastValidEpoch(current) + 1}
	_, err := server.GetSyncCommitteeDuties(t.Context(), req)
	assert.ErrorContains(t, "can not be greater than last valid epoch", err)
}

View File

@@ -108,6 +108,7 @@ type SignRequest struct {
// *SignRequest_BlindedBlockFulu
// *SignRequest_BlockGloas
// *SignRequest_ExecutionPayloadEnvelope
// *SignRequest_PayloadAttestationData
Object isSignRequest_Object `protobuf_oneof:"object"`
SigningSlot github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot `protobuf:"varint,6,opt,name=signing_slot,json=signingSlot,proto3" json:"signing_slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
unknownFields protoimpl.UnknownFields
@@ -388,6 +389,15 @@ func (x *SignRequest) GetExecutionPayloadEnvelope() *v1alpha1.ExecutionPayloadEn
return nil
}
// GetPayloadAttestationData returns the PayloadAttestationData variant of the
// request's Object oneof, or nil when the receiver is nil or a different
// variant (or none) is set. Generated accessor; follows protoc-gen-go's
// nil-safe getter convention.
func (x *SignRequest) GetPayloadAttestationData() *v1alpha1.PayloadAttestationData {
	if x != nil {
		// The inner x intentionally shadows the outer: it is the narrowed
		// oneof wrapper, valid only inside this branch.
		if x, ok := x.Object.(*SignRequest_PayloadAttestationData); ok {
			return x.PayloadAttestationData
		}
	}
	return nil
}
func (x *SignRequest) GetSigningSlot() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot {
if x != nil {
return x.SigningSlot
@@ -495,6 +505,10 @@ type SignRequest_ExecutionPayloadEnvelope struct {
ExecutionPayloadEnvelope *v1alpha1.ExecutionPayloadEnvelope `protobuf:"bytes,124,opt,name=execution_payload_envelope,json=executionPayloadEnvelope,proto3,oneof"`
}
// SignRequest_PayloadAttestationData wraps a PayloadAttestationData as one
// variant of the SignRequest.Object oneof (proto field number 125).
type SignRequest_PayloadAttestationData struct {
	PayloadAttestationData *v1alpha1.PayloadAttestationData `protobuf:"bytes,125,opt,name=payload_attestation_data,json=payloadAttestationData,proto3,oneof"`
}
func (*SignRequest_Block) isSignRequest_Object() {}
func (*SignRequest_AttestationData) isSignRequest_Object() {}
@@ -543,6 +557,8 @@ func (*SignRequest_BlockGloas) isSignRequest_Object() {}
func (*SignRequest_ExecutionPayloadEnvelope) isSignRequest_Object() {}
func (*SignRequest_PayloadAttestationData) isSignRequest_Object() {}
type SignResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"`
@@ -792,7 +808,7 @@ var file_proto_prysm_v1alpha1_validator_client_keymanager_proto_rawDesc = []byte
0x68, 0x61, 0x31, 0x2f, 0x67, 0x6c, 0x6f, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x29, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69,
0x74, 0x74, 0x65, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, 0x13, 0x0a, 0x0b, 0x53,
0x74, 0x74, 0x65, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x14, 0x0a, 0x0b, 0x53,
0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75,
0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09,
0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x69, 0x67,
@@ -939,84 +955,91 @@ var file_proto_prysm_v1alpha1_validator_client_keymanager_proto_rawDesc = []byte
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65,
0x6c, 0x6f, 0x70, 0x65, 0x48, 0x00, 0x52, 0x18, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65,
0x12, 0x67, 0x0a, 0x0c, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x6c, 0x6f, 0x74,
0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e,
0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69,
0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x0b, 0x73, 0x69,
0x67, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x6c, 0x6f, 0x74, 0x42, 0x08, 0x0a, 0x06, 0x6f, 0x62, 0x6a,
0x65, 0x63, 0x74, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22,
0xb7, 0x01, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x4b,
0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33,
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e,
0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x3c, 0x0a, 0x06, 0x53,
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10,
0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a,
0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x22, 0xb3, 0x01, 0x0a, 0x15, 0x50, 0x72,
0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x65, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x69, 0x70,
0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x65, 0x65, 0x52,
0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x07, 0x62, 0x75, 0x69, 0x6c,
0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65,
0x12, 0x69, 0x0a, 0x18, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x65,
0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x7d, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74,
0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f,
0x61, 0x64, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74,
0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x74, 0x74, 0x65,
0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x67, 0x0a, 0x0c, 0x73,
0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28,
0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70,
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75,
0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x0b, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67,
0x53, 0x6c, 0x6f, 0x74, 0x42, 0x08, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4a, 0x04,
0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xb7, 0x01, 0x0a, 0x0c, 0x53,
0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73,
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09,
0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x4b, 0x0a, 0x06, 0x73, 0x74, 0x61,
0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65,
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64,
0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65,
0x72, 0x12, 0x1f, 0x0a, 0x08, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x88,
0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x22,
0xa5, 0x01, 0x0a, 0x0d, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x09, 0x67,
0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45,
0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
0x79, 0x70, 0x65, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x55,
0x69, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12,
0x16, 0x0a, 0x06, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52,
0x06, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x22, 0xe7, 0x02, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x70,
0x6f, 0x73, 0x65, 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x12, 0x74, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x5f,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06,
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x3c, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a,
0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06,
0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c,
0x45, 0x44, 0x10, 0x03, 0x22, 0xb3, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65,
0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x23,
0x0a, 0x0d, 0x66, 0x65, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69,
0x65, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e,
0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e,
0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x08,
0x67, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
0x52, 0x08, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a,
0x09, 0x5f, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x22, 0xa5, 0x01, 0x0a, 0x0d, 0x42,
0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65,
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69,
0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61,
0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f,
0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f,
0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x55, 0x69, 0x6e, 0x74, 0x36, 0x34,
0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65,
0x6c, 0x61, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6c, 0x61,
0x79, 0x73, 0x22, 0xe7, 0x02, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x53,
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x74,
0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65,
0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65,
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f,
0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72,
0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61,
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x70, 0x6f,
0x73, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x0e, 0x64, 0x65, 0x66,
0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c,
0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e,
0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x78, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x70, 0x6f,
0x73, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x4b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64,
0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32,
0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x42, 0xcd, 0x01, 0x0a, 0x22, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x42, 0x0f, 0x4b, 0x65, 0x79, 0x6d, 0x61, 0x6e,
0x61, 0x67, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x52, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e,
0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x3b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xaa,
0x02, 0x1e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
0x61, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x56, 0x32,
0xca, 0x02, 0x1e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x56, 0x61, 0x6c, 0x69,
0x64, 0x61, 0x74, 0x6f, 0x72, 0x5c, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x5c, 0x56,
0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x1a, 0x78, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x74, 0x68,
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e,
0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70,
0x6f, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61,
0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0xcd, 0x01, 0x0a,
0x22, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61,
0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73,
0x2e, 0x76, 0x32, 0x42, 0x0f, 0x4b, 0x65, 0x79, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x50,
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x52, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f,
0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70,
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61,
0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3b, 0x76,
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x45, 0x74, 0x68,
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e,
0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x45, 0x74,
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72,
0x5c, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1061,6 +1084,7 @@ var file_proto_prysm_v1alpha1_validator_client_keymanager_proto_goTypes = []any{
(*v1alpha1.BlindedBeaconBlockFulu)(nil), // 24: ethereum.eth.v1alpha1.BlindedBeaconBlockFulu
(*v1alpha1.BeaconBlockGloas)(nil), // 25: ethereum.eth.v1alpha1.BeaconBlockGloas
(*v1alpha1.ExecutionPayloadEnvelope)(nil), // 26: ethereum.eth.v1alpha1.ExecutionPayloadEnvelope
(*v1alpha1.PayloadAttestationData)(nil), // 27: ethereum.eth.v1alpha1.PayloadAttestationData
}
var file_proto_prysm_v1alpha1_validator_client_keymanager_proto_depIdxs = []int32{
7, // 0: ethereum.validator.accounts.v2.SignRequest.block:type_name -> ethereum.eth.v1alpha1.BeaconBlock
@@ -1084,16 +1108,17 @@ var file_proto_prysm_v1alpha1_validator_client_keymanager_proto_depIdxs = []int3
24, // 18: ethereum.validator.accounts.v2.SignRequest.blinded_block_fulu:type_name -> ethereum.eth.v1alpha1.BlindedBeaconBlockFulu
25, // 19: ethereum.validator.accounts.v2.SignRequest.block_gloas:type_name -> ethereum.eth.v1alpha1.BeaconBlockGloas
26, // 20: ethereum.validator.accounts.v2.SignRequest.execution_payload_envelope:type_name -> ethereum.eth.v1alpha1.ExecutionPayloadEnvelope
0, // 21: ethereum.validator.accounts.v2.SignResponse.status:type_name -> ethereum.validator.accounts.v2.SignResponse.Status
4, // 22: ethereum.validator.accounts.v2.ProposerOptionPayload.builder:type_name -> ethereum.validator.accounts.v2.BuilderConfig
6, // 23: ethereum.validator.accounts.v2.ProposerSettingsPayload.proposer_config:type_name -> ethereum.validator.accounts.v2.ProposerSettingsPayload.ProposerConfigEntry
3, // 24: ethereum.validator.accounts.v2.ProposerSettingsPayload.default_config:type_name -> ethereum.validator.accounts.v2.ProposerOptionPayload
3, // 25: ethereum.validator.accounts.v2.ProposerSettingsPayload.ProposerConfigEntry.value:type_name -> ethereum.validator.accounts.v2.ProposerOptionPayload
26, // [26:26] is the sub-list for method output_type
26, // [26:26] is the sub-list for method input_type
26, // [26:26] is the sub-list for extension type_name
26, // [26:26] is the sub-list for extension extendee
0, // [0:26] is the sub-list for field type_name
27, // 21: ethereum.validator.accounts.v2.SignRequest.payload_attestation_data:type_name -> ethereum.eth.v1alpha1.PayloadAttestationData
0, // 22: ethereum.validator.accounts.v2.SignResponse.status:type_name -> ethereum.validator.accounts.v2.SignResponse.Status
4, // 23: ethereum.validator.accounts.v2.ProposerOptionPayload.builder:type_name -> ethereum.validator.accounts.v2.BuilderConfig
6, // 24: ethereum.validator.accounts.v2.ProposerSettingsPayload.proposer_config:type_name -> ethereum.validator.accounts.v2.ProposerSettingsPayload.ProposerConfigEntry
3, // 25: ethereum.validator.accounts.v2.ProposerSettingsPayload.default_config:type_name -> ethereum.validator.accounts.v2.ProposerOptionPayload
3, // 26: ethereum.validator.accounts.v2.ProposerSettingsPayload.ProposerConfigEntry.value:type_name -> ethereum.validator.accounts.v2.ProposerOptionPayload
27, // [27:27] is the sub-list for method output_type
27, // [27:27] is the sub-list for method input_type
27, // [27:27] is the sub-list for extension type_name
27, // [27:27] is the sub-list for extension extendee
0, // [0:27] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_validator_client_keymanager_proto_init() }
@@ -1126,6 +1151,7 @@ func file_proto_prysm_v1alpha1_validator_client_keymanager_proto_init() {
(*SignRequest_BlindedBlockFulu)(nil),
(*SignRequest_BlockGloas)(nil),
(*SignRequest_ExecutionPayloadEnvelope)(nil),
(*SignRequest_PayloadAttestationData)(nil),
}
file_proto_prysm_v1alpha1_validator_client_keymanager_proto_msgTypes[2].OneofWrappers = []any{}
type x struct{}

View File

@@ -84,6 +84,30 @@ service BeaconNodeValidator {
};
}
// Deprecated: Use the REST API instead. GetAttesterDuties retrieves attester duties for the requested validators at the given epoch.
// The response carries a dependent_root; duties must be refetched if that root changes.
rpc GetAttesterDuties(AttesterDutiesRequest) returns (AttesterDutiesResponse) {
  option deprecated = true;
  option (google.api.http) = {
    get : "/eth/v1alpha1/validator/duties/attester"
  };
}
// Deprecated: Use the REST API instead. GetProposerDutiesV2 retrieves proposer duties for the given epoch.
// The response carries a dependent_root; duties must be refetched if that root changes.
rpc GetProposerDutiesV2(ProposerDutiesRequest) returns (ProposerDutiesResponse) {
  option deprecated = true;
  option (google.api.http) = {
    get : "/eth/v1alpha1/validator/duties/proposer"
  };
}
// Deprecated: Use the REST API instead. GetSyncCommitteeDuties retrieves sync committee duties for the requested validators at the given epoch.
// Unlike attester/proposer duties, the response has no dependent_root field.
rpc GetSyncCommitteeDuties(SyncCommitteeDutiesRequest) returns (SyncCommitteeDutiesResponse) {
  option deprecated = true;
  option (google.api.http) = {
    get : "/eth/v1alpha1/validator/duties/sync_committee"
  };
}
// DomainData fetches the current BLS signature domain version information
// from the running beacon node's state. This information is used when
// validators sign blocks and attestations appropriately based on their duty.
@@ -1169,6 +1193,156 @@ message AggregatedSigAndAggregationBitsResponse {
bytes bits = 2;
}
// AttesterDutiesRequest is the request for GetAttesterDuties.
message AttesterDutiesRequest {
// Epoch at which validators should perform attester duties.
uint64 epoch = 1 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
];
// Validator indices to get duties for.
repeated uint64 validator_indices = 2 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/"
"primitives.ValidatorIndex"
];
}
// AttesterDutiesResponse is the response for GetAttesterDuties.
message AttesterDutiesResponse {
  // The block root the duties are dependent on. If this root changes
  // (e.g. after a reorg), previously served duties are stale and must be
  // refetched.
  bytes dependent_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
  // Whether the response was computed optimistically (head not yet fully
  // verified by the execution layer).
  bool execution_optimistic = 2;
  // The attester duties for the requested validators.
  repeated AttesterDuty duties = 3;
}
// AttesterDuty is a single attester duty assignment: which committee a
// validator attests with, and at which slot.
message AttesterDuty {
  // 48 byte BLS public key for the validator.
  bytes pubkey = 1 [
    (ethereum.eth.ext.ssz_size) = "48",
    (ethereum.eth.ext.spec_name) = "pubkey"
  ];
  // The index of the validator in the beacon state.
  uint64 validator_index = 2 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/"
        "primitives.ValidatorIndex"
  ];
  // The committee index the validator is assigned to at the duty slot.
  uint64 committee_index = 3 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/"
        "primitives.CommitteeIndex"
  ];
  // The total number of validators in the committee.
  uint64 committee_length = 4;
  // The number of committees at the slot.
  uint64 committees_at_slot = 5;
  // The validator's position within the committee.
  uint64 validator_committee_index = 6;
  // The slot at which the validator must attest.
  uint64 slot = 7 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
  ];
}
// ProposerDutiesRequest is the request for GetProposerDutiesV2.
// Proposer duties cover every slot of the epoch, so no validator filter is
// needed.
message ProposerDutiesRequest {
  // Epoch at which to get proposer duties. Epoch bounds are enforced
  // server-side.
  uint64 epoch = 1 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
  ];
}
// ProposerDutiesResponse is the response for GetProposerDutiesV2.
message ProposerDutiesResponse {
  // The block root the duties are dependent on. If this root changes
  // (e.g. after a reorg), previously served duties are stale and must be
  // refetched.
  bytes dependent_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
  // Whether the response was computed optimistically (head not yet fully
  // verified by the execution layer).
  bool execution_optimistic = 2;
  // The proposer duties for the epoch, one per slot with a known proposer.
  repeated ProposerDutyV2 duties = 3;
}
// ProposerDutyV2 is a single proposer duty assignment: the validator expected
// to propose a block at a given slot.
message ProposerDutyV2 {
  // 48 byte BLS public key for the validator.
  bytes pubkey = 1 [
    (ethereum.eth.ext.ssz_size) = "48",
    (ethereum.eth.ext.spec_name) = "pubkey"
  ];
  // The index of the validator in the beacon state.
  uint64 validator_index = 2 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/"
        "primitives.ValidatorIndex"
  ];
  // The slot at which the validator must propose a block.
  // BUG FIX: the field's options bracket was left unterminated (`[` with no
  // cast_type option and no `];`), which is invalid proto3. Restored the
  // Slot cast_type option to match AttesterDuty.slot above.
  uint64 slot = 3 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
  ];
}
// SyncCommitteeDutiesRequest is the request for GetSyncCommitteeDuties.
message SyncCommitteeDutiesRequest {
  // Epoch at which to get sync committee duties. Epoch bounds are enforced
  // server-side against the last valid sync-committee epoch.
  uint64 epoch = 1 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"
  ];
  // Validator indices to get duties for. Indices refer to positions in the
  // beacon state validator registry.
  repeated uint64 validator_indices = 2 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/"
        "primitives.ValidatorIndex"
  ];
}
// SyncCommitteeDutiesResponse is the response for GetSyncCommitteeDuties.
// Note: unlike the attester/proposer responses, this carries no dependent
// root field.
message SyncCommitteeDutiesResponse {
  // Whether the response was computed optimistically.
  bool execution_optimistic = 1;
  // The sync committee duties for the requested validators.
  repeated SyncCommitteeDuty duties = 2;
}
// SyncCommitteeDuty is a single sync committee duty assignment.
message SyncCommitteeDuty {
  // 48 byte BLS public key for the validator.
  bytes pubkey = 1 [
    (ethereum.eth.ext.ssz_size) = "48",
    (ethereum.eth.ext.spec_name) = "pubkey"
  ];
  // The index of the validator in the beacon state.
  uint64 validator_index = 2 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/"
        "primitives.ValidatorIndex"
  ];
  // The indices of the validator in the sync committee (a validator may
  // occupy more than one position, hence repeated).
  repeated uint64 validator_sync_committee_indices = 3;
}
// =============================================================================
// Gloas Fork Messages
// =============================================================================

View File

@@ -204,6 +204,19 @@ func (mr *MockBeaconNodeValidatorClientMockRecorder) GetDutiesV2(ctx, in any, op
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDutiesV2", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).GetDutiesV2), varargs...)
}
// GetAttesterDuties mocks base method.
// NOTE(review): gomock-generated-style stub — records the call and returns
// whatever was configured via EXPECT(); avoid hand-editing beyond comments.
func (m *MockBeaconNodeValidatorClient) GetAttesterDuties(arg0 context.Context, arg1 *eth.AttesterDutiesRequest, arg2 ...grpc.CallOption) (*eth.AttesterDutiesResponse, error) {
	m.ctrl.T.Helper()
	// Flatten the fixed arguments and variadic call options into one slice.
	varargs := []any{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetAttesterDuties", varargs...)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.AttesterDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetExecutionPayloadEnvelope mocks base method.
func (m *MockBeaconNodeValidatorClient) GetExecutionPayloadEnvelope(ctx context.Context, in *eth.ExecutionPayloadEnvelopeRequest, opts ...grpc.CallOption) (*eth.ExecutionPayloadEnvelopeResponse, error) {
m.ctrl.T.Helper()
@@ -217,6 +230,53 @@ func (m *MockBeaconNodeValidatorClient) GetExecutionPayloadEnvelope(ctx context.
return ret0, ret1
}
// GetAttesterDuties indicates an expected call of GetAttesterDuties.
func (mr *MockBeaconNodeValidatorClientMockRecorder) GetAttesterDuties(arg0, arg1 any, arg2 ...any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Append variadic option matchers after the fixed-argument matchers.
	varargs := append([]any{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttesterDuties", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).GetAttesterDuties), varargs...)
}
// GetProposerDutiesV2 mocks base method.
// NOTE(review): gomock-generated-style stub — records the call and returns
// whatever was configured via EXPECT().
func (m *MockBeaconNodeValidatorClient) GetProposerDutiesV2(arg0 context.Context, arg1 *eth.ProposerDutiesRequest, arg2 ...grpc.CallOption) (*eth.ProposerDutiesResponse, error) {
	m.ctrl.T.Helper()
	// Flatten the fixed arguments and variadic call options into one slice.
	varargs := []any{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetProposerDutiesV2", varargs...)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.ProposerDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetProposerDutiesV2 indicates an expected call of GetProposerDutiesV2.
func (mr *MockBeaconNodeValidatorClientMockRecorder) GetProposerDutiesV2(arg0, arg1 any, arg2 ...any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Append variadic option matchers after the fixed-argument matchers.
	varargs := append([]any{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposerDutiesV2", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).GetProposerDutiesV2), varargs...)
}
// GetSyncCommitteeDuties mocks base method.
// NOTE(review): gomock-generated-style stub — records the call and returns
// whatever was configured via EXPECT().
func (m *MockBeaconNodeValidatorClient) GetSyncCommitteeDuties(arg0 context.Context, arg1 *eth.SyncCommitteeDutiesRequest, arg2 ...grpc.CallOption) (*eth.SyncCommitteeDutiesResponse, error) {
	m.ctrl.T.Helper()
	// Flatten the fixed arguments and variadic call options into one slice.
	varargs := []any{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetSyncCommitteeDuties", varargs...)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.SyncCommitteeDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetSyncCommitteeDuties indicates an expected call of GetSyncCommitteeDuties.
func (mr *MockBeaconNodeValidatorClientMockRecorder) GetSyncCommitteeDuties(arg0, arg1 any, arg2 ...any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Append variadic option matchers after the fixed-argument matchers.
	varargs := append([]any{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSyncCommitteeDuties", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).GetSyncCommitteeDuties), varargs...)
}
// GetExecutionPayloadEnvelope indicates an expected call of GetExecutionPayloadEnvelope.
func (mr *MockBeaconNodeValidatorClientMockRecorder) GetExecutionPayloadEnvelope(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()

View File

@@ -163,6 +163,14 @@ func (mr *MockBeaconNodeValidatorServerMockRecorder) GetDutiesV2(arg0, arg1 any)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDutiesV2", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).GetDutiesV2), arg0, arg1)
}
// GetAttesterDuties mocks base method.
// Server-side mock: unary signature, so no variadic call options to flatten.
func (m *MockBeaconNodeValidatorServer) GetAttesterDuties(arg0 context.Context, arg1 *eth.AttesterDutiesRequest) (*eth.AttesterDutiesResponse, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetAttesterDuties", arg0, arg1)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.AttesterDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetExecutionPayloadEnvelope mocks base method.
func (m *MockBeaconNodeValidatorServer) GetExecutionPayloadEnvelope(arg0 context.Context, arg1 *eth.ExecutionPayloadEnvelopeRequest) (*eth.ExecutionPayloadEnvelopeResponse, error) {
m.ctrl.T.Helper()
@@ -172,6 +180,42 @@ func (m *MockBeaconNodeValidatorServer) GetExecutionPayloadEnvelope(arg0 context
return ret0, ret1
}
// GetAttesterDuties indicates an expected call of GetAttesterDuties.
func (mr *MockBeaconNodeValidatorServerMockRecorder) GetAttesterDuties(arg0, arg1 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttesterDuties", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).GetAttesterDuties), arg0, arg1)
}
// GetProposerDutiesV2 mocks base method.
// Server-side mock: unary signature, so no variadic call options to flatten.
func (m *MockBeaconNodeValidatorServer) GetProposerDutiesV2(arg0 context.Context, arg1 *eth.ProposerDutiesRequest) (*eth.ProposerDutiesResponse, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetProposerDutiesV2", arg0, arg1)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.ProposerDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetProposerDutiesV2 indicates an expected call of GetProposerDutiesV2.
func (mr *MockBeaconNodeValidatorServerMockRecorder) GetProposerDutiesV2(arg0, arg1 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposerDutiesV2", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).GetProposerDutiesV2), arg0, arg1)
}
// GetSyncCommitteeDuties mocks base method.
// Server-side mock: unary signature, so no variadic call options to flatten.
func (m *MockBeaconNodeValidatorServer) GetSyncCommitteeDuties(arg0 context.Context, arg1 *eth.SyncCommitteeDutiesRequest) (*eth.SyncCommitteeDutiesResponse, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSyncCommitteeDuties", arg0, arg1)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.SyncCommitteeDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// GetSyncCommitteeDuties indicates an expected call of GetSyncCommitteeDuties.
func (mr *MockBeaconNodeValidatorServerMockRecorder) GetSyncCommitteeDuties(arg0, arg1 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSyncCommitteeDuties", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).GetSyncCommitteeDuties), arg0, arg1)
}
// GetExecutionPayloadEnvelope indicates an expected call of GetExecutionPayloadEnvelope.
func (mr *MockBeaconNodeValidatorServerMockRecorder) GetExecutionPayloadEnvelope(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()

View File

@@ -150,6 +150,51 @@ func (mr *MockValidatorClientMockRecorder) Duties(ctx, in any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Duties", reflect.TypeOf((*MockValidatorClient)(nil).Duties), ctx, in)
}
// AttesterDuties mocks base method.
// Client-abstraction mock: takes typed epoch/indices rather than a request proto.
func (m *MockValidatorClient) AttesterDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*eth.AttesterDutiesResponse, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AttesterDuties", ctx, epoch, validatorIndices)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.AttesterDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// AttesterDuties indicates an expected call of AttesterDuties.
func (mr *MockValidatorClientMockRecorder) AttesterDuties(ctx, epoch, validatorIndices any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttesterDuties", reflect.TypeOf((*MockValidatorClient)(nil).AttesterDuties), ctx, epoch, validatorIndices)
}
// ProposerDuties mocks base method.
// Client-abstraction mock: takes a typed epoch rather than a request proto.
func (m *MockValidatorClient) ProposerDuties(ctx context.Context, epoch primitives.Epoch) (*eth.ProposerDutiesResponse, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ProposerDuties", ctx, epoch)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.ProposerDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// ProposerDuties indicates an expected call of ProposerDuties.
func (mr *MockValidatorClientMockRecorder) ProposerDuties(ctx, epoch any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProposerDuties", reflect.TypeOf((*MockValidatorClient)(nil).ProposerDuties), ctx, epoch)
}
// SyncCommitteeDuties mocks base method.
// Client-abstraction mock: takes typed epoch/indices rather than a request proto.
func (m *MockValidatorClient) SyncCommitteeDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*eth.SyncCommitteeDutiesResponse, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SyncCommitteeDuties", ctx, epoch, validatorIndices)
	// Loose type assertions: zero values are returned when not configured.
	ret0, _ := ret[0].(*eth.SyncCommitteeDutiesResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
// SyncCommitteeDuties indicates an expected call of SyncCommitteeDuties.
func (mr *MockValidatorClientMockRecorder) SyncCommitteeDuties(ctx, epoch, validatorIndices any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCommitteeDuties", reflect.TypeOf((*MockValidatorClient)(nil).SyncCommitteeDuties), ctx, epoch, validatorIndices)
}
// EnsureReady mocks base method.
func (m *MockValidatorClient) EnsureReady(ctx context.Context) bool {
m.ctrl.T.Helper()
@@ -445,18 +490,18 @@ func (mr *MockValidatorClientMockRecorder) SubmitValidatorRegistrations(ctx, in
}
// SubscribeCommitteeSubnets mocks base method.
func (m *MockValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *eth.CommitteeSubnetsSubscribeRequest, duties []*eth.ValidatorDuty) (*empty.Empty, error) {
func (m *MockValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *eth.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex, committeesAtSlot []uint64) (*empty.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SubscribeCommitteeSubnets", ctx, in, duties)
ret := m.ctrl.Call(m, "SubscribeCommitteeSubnets", ctx, in, validatorIndices, committeesAtSlot)
ret0, _ := ret[0].(*empty.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SubscribeCommitteeSubnets indicates an expected call of SubscribeCommitteeSubnets.
func (mr *MockValidatorClientMockRecorder) SubscribeCommitteeSubnets(ctx, in, duties any) *gomock.Call {
func (mr *MockValidatorClientMockRecorder) SubscribeCommitteeSubnets(ctx, in, validatorIndices, committeesAtSlot any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeCommitteeSubnets", reflect.TypeOf((*MockValidatorClient)(nil).SubscribeCommitteeSubnets), ctx, in, duties)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeCommitteeSubnets", reflect.TypeOf((*MockValidatorClient)(nil).SubscribeCommitteeSubnets), ctx, in, validatorIndices, committeesAtSlot)
}
// SyncCommitteeContribution mocks base method.

View File

@@ -5,6 +5,12 @@ go_library(
srcs = [
"aggregate.go",
"attest.go",
"distributed.go",
"duties.go",
"duty_store.go",
"health_monitor.go",
"key_reload.go",
"log.go",
@@ -15,6 +21,7 @@ go_library(
"registration.go",
"runner.go",
"service.go",
"subnets.go",
"sync_committee.go",
"validator.go",
"wait_for_activation.go",
@@ -103,6 +110,12 @@ go_test(
srcs = [
"aggregate_test.go",
"attest_test.go",
"distributed_test.go",
"duties_test.go",
"duty_store_test.go",
"health_monitor_test.go",
"key_reload_test.go",
"log_test.go",
@@ -113,6 +126,7 @@ go_test(
"runner_test.go",
"service_test.go",
"slashing_protection_interchange_test.go",
"subnets_test.go",
"sync_committee_test.go",
"validator_test.go",
"wait_for_activation_test.go",

View File

@@ -32,7 +32,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
span.SetAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
fmtKey := fmt.Sprintf("%#x", pubKey[:])
duty, err := v.duty(pubKey)
duty, err := v.attesterDuty(pubKey)
if err != nil {
log.WithError(err).Error("Could not fetch validator assignment")
if v.emitAccountMetrics {
@@ -91,7 +91,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
PublicKey: pubKey[:],
SlotSignature: slotSig,
}
// TODO: look at renaming SubmitAggregateSelectionProof functions as they are GET beacon API
var agg ethpb.AggregateAttAndProof
if postElectra {
res, err := v.validatorClient.SubmitAggregateSelectionProofElectra(ctx, aggSelectionRequest, duty.ValidatorIndex, duty.CommitteeLength)

View File

@@ -26,7 +26,7 @@ func TestSubmitAggregateAndProof_GetDutiesRequestFailure(t *testing.T) {
t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) {
hook := logTest.NewGlobal()
validator, _, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}}
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -45,13 +45,13 @@ func TestSubmitAggregateAndProof_SignFails(t *testing.T) {
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
},
},
}
})
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
@@ -90,13 +90,13 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) {
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
},
},
}
})
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
@@ -143,13 +143,13 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) {
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
},
},
}
})
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
@@ -197,7 +197,7 @@ func TestSubmitAggregateAndProof_Distributed(t *testing.T) {
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
@@ -205,7 +205,7 @@ func TestSubmitAggregateAndProof_Distributed(t *testing.T) {
AttesterSlot: slot,
},
},
}
})
validator.distributed = true
validator.attSelections = make(map[attSelectionKey]iface.BeaconCommitteeSelection)

View File

@@ -1,7 +1,6 @@
package client
import (
"bytes"
"context"
"fmt"
"strings"
@@ -57,7 +56,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
fmtKey := fmt.Sprintf("%#x", pubKey[:])
log := log.WithField("pubkey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:]))).WithField("slot", slot)
duty, err := v.duty(pubKey)
duty, err := v.attesterDuty(pubKey)
if err != nil {
log.WithError(err).Error("Could not fetch validator assignment")
if v.emitAccountMetrics {
@@ -191,20 +190,32 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
}
// Given the validator public key, this gets the validator assignment.
func (v *validator) duty(pubKey [fieldparams.BLSPubkeyLength]byte) (*ethpb.ValidatorDuty, error) {
// attesterDuty returns the current epoch attester duty for the given pubkey.
// Other duty types (sync committee, aggregation) also use this because every
// active validator has exactly one attester duty per epoch, making it a
// reliable source for ValidatorIndex and committee assignment.
func (v *validator) attesterDuty(pubKey [fieldparams.BLSPubkeyLength]byte) (*ethpb.ValidatorDuty, error) {
v.dutiesLock.RLock()
defer v.dutiesLock.RUnlock()
if v.duties == nil {
if v.duties == nil || !v.duties.IsInitialized() {
return nil, errors.New("no duties for validators")
}
for _, duty := range v.duties.CurrentEpochDuties {
if bytes.Equal(pubKey[:], duty.PublicKey) {
return duty, nil
}
dv, ok := v.duties.CurrentAttesterDuty(pubKey)
if !ok {
return nil, fmt.Errorf("pubkey %#x not in duties", bytesutil.Trunc(pubKey[:]))
}
return nil, fmt.Errorf("pubkey %#x not in duties", bytesutil.Trunc(pubKey[:]))
return dutyViewToProto(dv), nil
}
// Given validator's public key, this function returns the signature of an attestation data and its signing root.

View File

@@ -33,7 +33,7 @@ func TestRequestAttestation_ValidatorDutiesRequestFailure(t *testing.T) {
t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) {
hook := logTest.NewGlobal()
validator, _, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}}
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -53,12 +53,12 @@ func TestAttestToBlockHead_SubmitAttestation_EmptyCommittee(t *testing.T) {
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 0,
ValidatorIndex: 0,
}}}
}}})
validator.SubmitAttestation(t.Context(), 0, pubKey)
require.LogsContain(t, hook, "Empty committee")
})
@@ -72,13 +72,13 @@ func TestAttestToBlockHead_SubmitAttestation_RequestFailure(t *testing.T) {
validator, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
defer finish()
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
CommitteeLength: 111,
ValidatorIndex: 0,
}}}
}}})
m.validatorClient.EXPECT().AttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),
@@ -145,14 +145,14 @@ func TestSubmitAttestation_ElectraCommitteeIndex(t *testing.T) {
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: tt.assignedCommitteeIndex,
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
var capturedRequest *ethpb.AttestationDataRequest
// Capture the actual request to verify committee index
@@ -207,7 +207,7 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) {
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
@@ -215,7 +215,7 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) {
ValidatorCommitteeIndex: 4,
ValidatorIndex: validatorIndex,
},
}}
}})
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
@@ -288,14 +288,14 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) {
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
@@ -368,14 +368,14 @@ func TestAttestToBlockHead_BlocksDoubleAtt(t *testing.T) {
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
sourceRoot := bytesutil.ToBytes32([]byte("C"))
@@ -424,14 +424,14 @@ func TestAttestToBlockHead_BlocksSurroundAtt(t *testing.T) {
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
sourceRoot := bytesutil.ToBytes32([]byte("C"))
@@ -480,14 +480,14 @@ func TestAttestToBlockHead_BlocksSurroundedAtt(t *testing.T) {
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
beaconBlockRoot := bytesutil.ToBytes32([]byte("A"))
targetRoot := bytesutil.ToBytes32([]byte("B"))
sourceRoot := bytesutil.ToBytes32([]byte("C"))
@@ -575,13 +575,13 @@ func TestAttestToBlockHead_DoesAttestAfterDelay(t *testing.T) {
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
}}}
}}})
m.validatorClient.EXPECT().AttestationData(
gomock.Any(), // ctx
@@ -618,13 +618,13 @@ func TestAttestToBlockHead_CorrectBitfieldLength(t *testing.T) {
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], validatorKey.PublicKey().Marshal())
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeIndex: 5,
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
}}}
}}})
m.validatorClient.EXPECT().AttestationData(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AttestationDataRequest{}),

View File

@@ -256,12 +256,12 @@ func (c *beaconApiValidatorClient) SubmitValidatorRegistrations(ctx context.Cont
})
}
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.ValidatorDuty) (*empty.Empty, error) {
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex, committeesAtSlot []uint64) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SubscribeCommitteeSubnets")
defer span.End()
return wrapInMetrics[*empty.Empty]("SubscribeCommitteeSubnets", func() (*empty.Empty, error) {
return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, duties)
return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, validatorIndices, committeesAtSlot)
})
}

View File

@@ -228,6 +228,132 @@ func (c *beaconApiValidatorClient) dutiesForEpoch(
return nil
}
func (c *beaconApiValidatorClient) AttesterDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*ethpb.AttesterDutiesResponse, error) {
resp, err := c.dutiesProvider.AttesterDuties(ctx, epoch, validatorIndices)
if err != nil {
return nil, errors.Wrap(err, "failed to get attester duties")
}
dependentRoot, err := hexutil.Decode(resp.DependentRoot)
if err != nil {
return nil, errors.Wrap(err, "failed to decode dependent root")
}
duties := make([]*ethpb.AttesterDuty, len(resp.Data))
for i, d := range resp.Data {
pubkey, err := hexutil.Decode(d.Pubkey)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode pubkey %s", d.Pubkey)
}
valIdx, err := strconv.ParseUint(d.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse validator index %s", d.ValidatorIndex)
}
commIdx, err := strconv.ParseUint(d.CommitteeIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse committee index %s", d.CommitteeIndex)
}
commLen, err := strconv.ParseUint(d.CommitteeLength, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse committee length %s", d.CommitteeLength)
}
commsAtSlot, err := strconv.ParseUint(d.CommitteesAtSlot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse committees at slot %s", d.CommitteesAtSlot)
}
valCommIdx, err := strconv.ParseUint(d.ValidatorCommitteeIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse validator committee index %s", d.ValidatorCommitteeIndex)
}
slot, err := strconv.ParseUint(d.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse slot %s", d.Slot)
}
duties[i] = &ethpb.AttesterDuty{
Pubkey: pubkey,
ValidatorIndex: primitives.ValidatorIndex(valIdx),
CommitteeIndex: primitives.CommitteeIndex(commIdx),
CommitteeLength: commLen,
CommitteesAtSlot: commsAtSlot,
ValidatorCommitteeIndex: valCommIdx,
Slot: primitives.Slot(slot),
}
}
return &ethpb.AttesterDutiesResponse{
DependentRoot: dependentRoot,
ExecutionOptimistic: resp.ExecutionOptimistic,
Duties: duties,
}, nil
}
func (c *beaconApiValidatorClient) ProposerDuties(ctx context.Context, epoch primitives.Epoch) (*ethpb.ProposerDutiesResponse, error) {
resp, err := c.dutiesProvider.ProposerDuties(ctx, epoch)
if err != nil {
return nil, errors.Wrap(err, "failed to get proposer duties")
}
dependentRoot, err := hexutil.Decode(resp.DependentRoot)
if err != nil {
return nil, errors.Wrap(err, "failed to decode dependent root")
}
duties := make([]*ethpb.ProposerDutyV2, len(resp.Data))
for i, d := range resp.Data {
pubkey, err := hexutil.Decode(d.Pubkey)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode pubkey %s", d.Pubkey)
}
valIdx, err := strconv.ParseUint(d.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse validator index %s", d.ValidatorIndex)
}
slot, err := strconv.ParseUint(d.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse slot %s", d.Slot)
}
duties[i] = &ethpb.ProposerDutyV2{
Pubkey: pubkey,
ValidatorIndex: primitives.ValidatorIndex(valIdx),
Slot: primitives.Slot(slot),
}
}
return &ethpb.ProposerDutiesResponse{
DependentRoot: dependentRoot,
ExecutionOptimistic: resp.ExecutionOptimistic,
Duties: duties,
}, nil
}
func (c *beaconApiValidatorClient) SyncCommitteeDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*ethpb.SyncCommitteeDutiesResponse, error) {
syncDuties, err := c.dutiesProvider.SyncDuties(ctx, epoch, validatorIndices)
if err != nil {
return nil, errors.Wrap(err, "failed to get sync committee duties")
}
duties := make([]*ethpb.SyncCommitteeDuty, len(syncDuties))
for i, d := range syncDuties {
pubkey, err := hexutil.Decode(d.Pubkey)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode pubkey %s", d.Pubkey)
}
valIdx, err := strconv.ParseUint(d.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse validator index %s", d.ValidatorIndex)
}
indices := make([]uint64, len(d.ValidatorSyncCommitteeIndices))
for j, idx := range d.ValidatorSyncCommitteeIndices {
parsed, err := strconv.ParseUint(idx, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse sync committee index %s", idx)
}
indices[j] = parsed
}
duties[i] = &ethpb.SyncCommitteeDuty{
Pubkey: pubkey,
ValidatorIndex: primitives.ValidatorIndex(valIdx),
ValidatorSyncCommitteeIndices: indices,
}
}
return &ethpb.SyncCommitteeDutiesResponse{
Duties: duties,
}, nil
}
func (c *beaconApiValidatorClient) validatorsForDuties(ctx context.Context, pubkeys [][]byte) ([]validatorForDuty, error) {
vals := make([]validatorForDuty, 0, len(pubkeys))
stringPubkeysToPubkeys := make(map[string][]byte, len(pubkeys))

View File

@@ -7,27 +7,29 @@ import (
"strconv"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
func (c *beaconApiValidatorClient) subscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.ValidatorDuty) error {
func (c *beaconApiValidatorClient) subscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex, committeesAtSlot []uint64) error {
if in == nil {
return errors.New("committee subnets subscribe request is nil")
}
if len(in.CommitteeIds) != len(in.Slots) || len(in.CommitteeIds) != len(in.IsAggregator) || len(in.CommitteeIds) != len(duties) {
return errors.New("arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `duties` don't have the same length")
if len(in.CommitteeIds) != len(in.Slots) || len(in.CommitteeIds) != len(in.IsAggregator) ||
len(in.CommitteeIds) != len(validatorIndices) || len(in.CommitteeIds) != len(committeesAtSlot) {
return errors.New("arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator`, `validatorIndices` and `committeesAtSlot` don't have the same length")
}
jsonCommitteeSubscriptions := make([]*structs.BeaconCommitteeSubscription, len(in.CommitteeIds))
for index := range in.CommitteeIds {
jsonCommitteeSubscriptions[index] = &structs.BeaconCommitteeSubscription{
CommitteeIndex: strconv.FormatUint(uint64(in.CommitteeIds[index]), 10),
CommitteesAtSlot: strconv.FormatUint(duties[index].CommitteesAtSlot, 10),
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[index], 10),
Slot: strconv.FormatUint(uint64(in.Slots[index]), 10),
IsAggregator: in.IsAggregator[index],
ValidatorIndex: strconv.FormatUint(uint64(duties[index].ValidatorIndex), 10),
ValidatorIndex: strconv.FormatUint(uint64(validatorIndices[index]), 10),
}
}

View File

@@ -55,16 +55,6 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
nil,
).Times(1)
duties := make([]*structs.AttesterDuty, len(subscribeSlots))
for index := range duties {
duties[index] = &structs.AttesterDuty{
ValidatorIndex: strconv.FormatUint(uint64(validatorIndices[index]), 10),
CommitteeIndex: strconv.FormatUint(uint64(committeeIndices[index]), 10),
CommitteesAtSlot: strconv.FormatUint(committeesAtSlot[index], 10),
Slot: strconv.FormatUint(uint64(subscribeSlots[index]), 10),
}
}
validatorClient := &beaconApiValidatorClient{
handler: handler,
}
@@ -75,31 +65,20 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
CommitteeIds: committeeIndices,
IsAggregator: isAggregator,
},
[]*ethpb.ValidatorDuty{
{
ValidatorIndex: validatorIndices[0],
CommitteesAtSlot: committeesAtSlot[0],
},
{
ValidatorIndex: validatorIndices[1],
CommitteesAtSlot: committeesAtSlot[1],
},
{
ValidatorIndex: validatorIndices[2],
CommitteesAtSlot: committeesAtSlot[2],
},
},
validatorIndices,
committeesAtSlot,
)
require.NoError(t, err)
}
func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
const arraySizeMismatchErrorMessage = "arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator` and `duties` don't have the same length"
const arraySizeMismatchErrorMessage = "arrays `in.CommitteeIds`, `in.Slots`, `in.IsAggregator`, `validatorIndices` and `committeesAtSlot` don't have the same length"
testCases := []struct {
name string
subscribeRequest *ethpb.CommitteeSubnetsSubscribeRequest
duties []*ethpb.ValidatorDuty
validatorIndices []primitives.ValidatorIndex
committeesAtSlot []uint64
expectSubscribeRestCall bool
expectedErrorMessage string
}{
@@ -115,16 +94,8 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
Slots: []primitives.Slot{1, 2},
IsAggregator: []bool{false, true},
},
duties: []*ethpb.ValidatorDuty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
{
ValidatorIndex: 2,
CommitteesAtSlot: 2,
},
},
validatorIndices: []primitives.ValidatorIndex{1, 2},
committeesAtSlot: []uint64{1, 2},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
@@ -134,16 +105,8 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
Slots: []primitives.Slot{1},
IsAggregator: []bool{false, true},
},
duties: []*ethpb.ValidatorDuty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
{
ValidatorIndex: 2,
CommitteesAtSlot: 2,
},
},
validatorIndices: []primitives.ValidatorIndex{1, 2},
committeesAtSlot: []uint64{1, 2},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
@@ -153,31 +116,19 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
Slots: []primitives.Slot{1, 2},
IsAggregator: []bool{false},
},
duties: []*ethpb.ValidatorDuty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
{
ValidatorIndex: 2,
CommitteesAtSlot: 2,
},
},
validatorIndices: []primitives.ValidatorIndex{1, 2},
committeesAtSlot: []uint64{1, 2},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
name: "duties size mismatch",
name: "validatorIndices size mismatch",
subscribeRequest: &ethpb.CommitteeSubnetsSubscribeRequest{
CommitteeIds: []primitives.CommitteeIndex{1, 2},
Slots: []primitives.Slot{1, 2},
IsAggregator: []bool{false, true},
},
duties: []*ethpb.ValidatorDuty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
},
validatorIndices: []primitives.ValidatorIndex{1},
committeesAtSlot: []uint64{1, 2},
expectedErrorMessage: arraySizeMismatchErrorMessage,
},
{
@@ -187,12 +138,8 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
CommitteeIds: []primitives.CommitteeIndex{2},
IsAggregator: []bool{false},
},
duties: []*ethpb.ValidatorDuty{
{
ValidatorIndex: 1,
CommitteesAtSlot: 1,
},
},
validatorIndices: []primitives.ValidatorIndex{1},
committeesAtSlot: []uint64{1},
expectSubscribeRestCall: true,
expectedErrorMessage: "foo error",
},
@@ -221,7 +168,7 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
validatorClient := &beaconApiValidatorClient{
handler: handler,
}
err := validatorClient.subscribeCommitteeSubnets(ctx, testCase.subscribeRequest, testCase.duties)
err := validatorClient.subscribeCommitteeSubnets(ctx, testCase.subscribeRequest, testCase.validatorIndices, testCase.committeesAtSlot)
assert.ErrorContains(t, testCase.expectedErrorMessage, err)
})
}

505
validator/client/duties.go Normal file
View File

@@ -0,0 +1,505 @@
package client
import (
"bytes"
"context"
"fmt"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/metadata"
)
// filterBlacklistedKeys returns the validating public keys with any
// slashing-protection-blacklisted (slashable) keys removed, logging a warning
// for each key it drops.
func (v *validator) filterBlacklistedKeys(ctx context.Context) ([][fieldparams.BLSPubkeyLength]byte, error) {
	keys, err := v.km.FetchValidatingPublicKeys(ctx)
	if err != nil {
		return nil, err
	}
	v.blacklistedPubkeysLock.RLock()
	defer v.blacklistedPubkeysLock.RUnlock()
	allowed := make([][fieldparams.BLSPubkeyLength]byte, 0, len(keys))
	for _, key := range keys {
		if !v.blacklistedPubkeys[key] {
			allowed = append(allowed, key)
			continue
		}
		log.WithField(
			"pubkey", fmt.Sprintf("%#x", bytesutil.Trunc(key[:])),
		).Warn("Not including slashable public key from slashing protection import " +
			"in request to update validator duties")
	}
	return allowed, nil
}
// UpdateDuties refreshes the validator's upcoming duty assignments for the
// epoch of the next slot. Post-Gloas it uses the split per-duty endpoints
// (with caching); before Gloas it uses the legacy combined Duties endpoint.
// After a successful refresh it logs the schedule and kicks off subnet
// subscriptions via onDutiesUpdated.
func (v *validator) UpdateDuties(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "validator.UpdateDuties")
	defer span.End()
	filteredKeys, err := v.filterBlacklistedKeys(ctx)
	if err != nil {
		return err
	}
	// Use the epoch of the *next* slot so an update fired at the end of an
	// epoch targets the epoch about to start.
	epoch := slots.ToEpoch(slots.CurrentSlot(v.genesisTime) + 1)
	if epoch >= params.BeaconConfig().GloasForkEpoch {
		if err := v.updateDutiesSplit(ctx, epoch, filteredKeys); err != nil {
			return err
		}
	} else {
		if err := v.updateDutiesLegacy(ctx, epoch, filteredKeys); err != nil {
			return err
		}
	}
	// The fetch may legitimately leave the store uninitialized (e.g. no
	// active indices); in that case there is nothing to log or subscribe.
	v.dutiesLock.RLock()
	initialized := v.duties != nil && v.duties.IsInitialized()
	v.dutiesLock.RUnlock()
	if !initialized {
		return nil
	}
	ss, err := slots.EpochStart(epoch)
	if err != nil {
		return err
	}
	// NOTE(review): logDuties only reads the store here; a write lock is
	// taken — confirm whether RLock would suffice.
	v.dutiesLock.Lock()
	v.logDuties(ss)
	v.dutiesLock.Unlock()
	return v.onDutiesUpdated(ctx)
}
// clearDuties resets the duty store under the write lock; called when a duty
// fetch fails so stale assignments are not acted upon.
func (v *validator) clearDuties() {
	v.dutiesLock.Lock()
	store := v.duties
	if store == nil {
		// Lazily create the store so Reset always has a target.
		store = &dutyStore{}
		v.duties = store
	}
	store.Reset()
	v.dutiesLock.Unlock()
}
// updateDutiesLegacy uses the combined Duties() endpoint for backward compat.
//
// On failure the duty store is cleared so stale assignments are not acted
// upon, and the error is returned to the caller. A nil response with a nil
// error is treated as a failure: previously that case logged an "error" with
// a nil err and then returned nil, silently reporting success.
func (v *validator) updateDutiesLegacy(ctx context.Context, epoch primitives.Epoch, filteredKeys [][fieldparams.BLSPubkeyLength]byte) error {
	req := &ethpb.DutiesRequest{
		Epoch:      epoch,
		PublicKeys: bytesutil.FromBytes48Array(filteredKeys),
	}
	resp, err := v.validatorClient.Duties(ctx, req)
	if err == nil && resp == nil {
		// Surface the nil response explicitly instead of returning nil.
		err = errors.New("received nil duties response")
	}
	if err != nil {
		v.clearDuties()
		log.WithError(err).Error("Error getting validator duties")
		return err
	}
	v.dutiesLock.Lock()
	if v.duties == nil {
		v.duties = &dutyStore{}
	}
	v.duties.SetLegacy(resp, v.pubkeyToStatus)
	v.dutiesLock.Unlock()
	return nil
}
// onDutiesUpdated checks whether every tracked validator has exited (fatal)
// and, otherwise, asynchronously asks the beacon node to start committee
// subnet subscriptions for aggregators.
func (v *validator) onDutiesUpdated(ctx context.Context) error {
	v.dutiesLock.RLock()
	exitedCount, totalCount := v.duties.AllCurrentExitedCount()
	v.dutiesLock.RUnlock()
	if exitedCount != 0 && exitedCount == totalCount {
		return ErrValidatorsAllExited
	}
	// Detach from the caller's context so the subscription call is not
	// cancelled with it, but preserve any outgoing gRPC metadata.
	subnetCtx := context.Background()
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		subnetCtx = metadata.NewOutgoingContext(subnetCtx, md)
	}
	// Non-blocking call for beacon node to start subscriptions for aggregators.
	go func() {
		if err := v.subscribeToSubnets(subnetCtx); err != nil {
			log.WithError(err).Error("Failed to subscribe to subnets")
		}
	}()
	return nil
}
// fetchAttesterDuties fetches attester duties for the given epoch and
// validator indices, serving from the cache when the chain's dependent root
// still matches the cached entry.
//
// Cache validation issues a cheap single-index "probe" request and compares
// its dependent root with the cached one; on a match the cached entry
// (current + next epoch) is returned. Otherwise current- and next-epoch
// duties are fetched in parallel, and a failure of either is fatal.
func (v *validator) fetchAttesterDuties(
	ctx context.Context, epoch primitives.Epoch, indices []primitives.ValidatorIndex,
) (*attesterDutiesCacheEntry, error) {
	// Check cache.
	v.dutiesLock.RLock()
	var cached *attesterDutiesCacheEntry
	if v.duties != nil {
		cached = v.duties.AttesterDutiesCache()
	}
	v.dutiesLock.RUnlock()
	// Guard len(indices) > 0: slicing indices[:1] on an empty slice would
	// either panic (cap 0) or probe a spurious zero validator index left in
	// spare capacity by append.
	if cached != nil && cached.epoch == epoch && len(indices) > 0 {
		probe, err := v.validatorClient.AttesterDuties(ctx, epoch, indices[:1])
		// A probe error is deliberately non-fatal; fall through to a full fetch.
		if err == nil && bytes.Equal(probe.DependentRoot, cached.current.DependentRoot) {
			return cached, nil
		}
	}
	// Cache miss — fetch current and next in parallel.
	var (
		current, next *ethpb.AttesterDutiesResponse
		currErr       error
		nextErr       error
		wg            sync.WaitGroup
	)
	wg.Go(func() {
		current, currErr = v.validatorClient.AttesterDuties(ctx, epoch, indices)
	})
	wg.Go(func() {
		next, nextErr = v.validatorClient.AttesterDuties(ctx, epoch+1, indices)
	})
	wg.Wait()
	if currErr != nil {
		return nil, currErr
	}
	if nextErr != nil {
		return nil, nextErr
	}
	return &attesterDutiesCacheEntry{current: current, next: next, epoch: epoch}, nil
}
// fetchSyncDuties fetches sync committee duties, using the cache when still in the same period.
// Returns nil, nil for pre-Altair epochs.
//
// The current-epoch fetch is fatal on error; the next-epoch fetch is
// best-effort — its error is logged at debug level and cleared *inside* the
// goroutine, so after Wait only currErr can be non-nil.
func (v *validator) fetchSyncDuties(
	ctx context.Context, epoch primitives.Epoch, indices []primitives.ValidatorIndex,
) (*syncDutiesCacheEntry, error) {
	// Sync committees do not exist before the Altair fork.
	if epoch < params.BeaconConfig().AltairForkEpoch {
		return nil, nil
	}
	// Membership is fixed for a whole sync committee period, so a cached
	// entry from the same period is still valid regardless of epoch.
	currentPeriod := uint64(epoch) / uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)
	// Check cache.
	v.dutiesLock.RLock()
	var cached *syncDutiesCacheEntry
	if v.duties != nil {
		cached = v.duties.SyncDutiesCache()
	}
	v.dutiesLock.RUnlock()
	if cached != nil && cached.period == currentPeriod {
		return cached, nil
	}
	// Cache miss — fetch current and next in parallel.
	var (
		current, next *ethpb.SyncCommitteeDutiesResponse
		currErr       error
		nextErr       error
		wg            sync.WaitGroup
	)
	wg.Go(func() {
		current, currErr = v.validatorClient.SyncCommitteeDuties(ctx, epoch, indices)
	})
	wg.Go(func() {
		next, nextErr = v.validatorClient.SyncCommitteeDuties(ctx, epoch+1, indices)
		if nextErr != nil {
			log.WithError(nextErr).Debug("Could not get next epoch sync committee duties")
			nextErr = nil // non-fatal
		}
	})
	wg.Wait()
	if currErr != nil {
		return nil, currErr
	}
	return &syncDutiesCacheEntry{current: current, next: next, epoch: epoch, period: currentPeriod}, nil
}
// fetchProposerDuties fetches proposer duties, using the cache when the epoch matches.
// Post-Fulu, also fetches next-epoch duties (deterministic via proposer_lookahead).
func (v *validator) fetchProposerDuties(
	ctx context.Context, epoch primitives.Epoch,
) (*proposerDutiesCacheEntry, error) {
	// Serve from the cache when it already holds this epoch.
	v.dutiesLock.RLock()
	var cachedEntry *proposerDutiesCacheEntry
	if v.duties != nil {
		cachedEntry = v.duties.ProposerDutiesCache()
	}
	v.dutiesLock.RUnlock()
	if cachedEntry != nil && cachedEntry.epoch == epoch {
		return cachedEntry, nil
	}
	// Cache miss — fetch the current epoch, and (post-Fulu) the next one, in
	// parallel.
	var (
		wg           sync.WaitGroup
		currResp     *ethpb.ProposerDutiesResponse
		nextResp     *ethpb.ProposerDutiesResponse
		currFetchErr error
		nextFetchErr error
	)
	wg.Go(func() {
		currResp, currFetchErr = v.validatorClient.ProposerDuties(ctx, epoch)
	})
	if epoch >= params.BeaconConfig().FuluForkEpoch {
		wg.Go(func() {
			nextResp, nextFetchErr = v.validatorClient.ProposerDuties(ctx, epoch+1)
		})
	}
	wg.Wait()
	// A current-epoch failure is fatal.
	if currFetchErr != nil {
		return nil, currFetchErr
	}
	entry := &proposerDutiesCacheEntry{current: currResp, epoch: epoch}
	// A next-epoch failure is non-fatal; keep the current-epoch result.
	if nextFetchErr != nil {
		log.WithError(nextFetchErr).Debug("Could not get next epoch proposer duties")
		return entry, nil
	}
	entry.next = nextResp
	return entry, nil
}
// updateDutiesSplit fetches duties from the split V3 endpoints with per-duty caching.
//
// Proposer and attester failures are fatal (the store is cleared); a sync
// committee failure is tolerated by falling back to the previously cached
// sync entry.
func (v *validator) updateDutiesSplit(ctx context.Context, epoch primitives.Epoch, filteredKeys [][fieldparams.BLSPubkeyLength]byte) error {
	// Resolve pubkeys → indices via pubkeyToStatus (populated in WaitForActivation).
	// NOTE(review): pubkeyToStatus is read here without holding a lock —
	// confirm no concurrent writer exists at this point.
	indices := make([]primitives.ValidatorIndex, 0, len(filteredKeys))
	indexToPubkey := make(map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte, len(filteredKeys))
	for _, pk := range filteredKeys {
		// Only validators with a known (non-UNKNOWN) status have usable indices.
		if st, ok := v.pubkeyToStatus[pk]; ok && st.status != nil && st.status.Status != ethpb.ValidatorStatus_UNKNOWN_STATUS {
			indices = append(indices, st.index)
			indexToPubkey[st.index] = pk
		}
	}
	// No resolvable indices: nothing to fetch; leave the store untouched.
	if len(indices) == 0 {
		return nil
	}
	// Fetch all three duty types in parallel.
	var (
		propCache *proposerDutiesCacheEntry
		attCache  *attesterDutiesCacheEntry
		syncCache *syncDutiesCacheEntry
		propErr   error
		attErr    error
		syncErr   error
		wg        sync.WaitGroup
	)
	wg.Go(func() { propCache, propErr = v.fetchProposerDuties(ctx, epoch) })
	wg.Go(func() { attCache, attErr = v.fetchAttesterDuties(ctx, epoch, indices) })
	wg.Go(func() { syncCache, syncErr = v.fetchSyncDuties(ctx, epoch, indices) })
	wg.Wait()
	// Proposer or attester failure is fatal.
	if propErr != nil {
		v.clearDuties()
		log.WithError(propErr).Error("Error getting proposer duties")
		return propErr
	}
	if attErr != nil {
		v.clearDuties()
		log.WithError(attErr).Error("Error getting attester duties")
		return attErr
	}
	// Sync failure is non-fatal — reuse cached sync data.
	if syncErr != nil {
		log.WithError(syncErr).Warn("Error getting sync committee duties, reusing cached data")
		v.dutiesLock.RLock()
		if v.duties != nil {
			syncCache = v.duties.SyncDutiesCache()
		}
		v.dutiesLock.RUnlock()
	}
	// Commit all three entries atomically under the write lock.
	v.dutiesLock.Lock()
	if v.duties == nil {
		v.duties = &dutyStore{}
	}
	v.duties.SetSplit(attCache, propCache, syncCache, indexToPubkey, v.pubkeyToStatus)
	v.dutiesLock.Unlock()
	return nil
}
// logDuties emits metrics (when enabled) and a per-slot schedule summary for
// the epoch containing the given slot. Callers hold v.dutiesLock.
func (v *validator) logDuties(slot primitives.Slot) {
	epochStartSlot, err := slots.EpochStart(slots.ToEpoch(slot))
	if err != nil {
		log.WithError(err).Error("Could not calculate epoch start. Ignoring logging duties.")
		return
	}
	// attesterKeys[i] collects truncated pubkeys attesting at slot i of the
	// epoch; proposerKeys[i] holds the (single logged) proposer pubkey.
	attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
	for i := range attesterKeys {
		attesterKeys[i] = make([]string, 0)
	}
	proposerKeys := make([]string, params.BeaconConfig().SlotsPerEpoch)
	var totalProposingKeys, totalAttestingKeys uint64
	for _, duty := range v.duties.CurrentEpochDuties() {
		if v.emitAccountMetrics {
			v.emitCurrentDutyMetrics(duty)
		}
		// Only active/exiting validators have actionable duties this epoch.
		if duty.Status != ethpb.ValidatorStatus_ACTIVE && duty.Status != ethpb.ValidatorStatus_EXITING {
			continue
		}
		truncatedPubkey := fmt.Sprintf("%#x", bytesutil.Trunc(duty.Pubkey[:]))
		// Unsigned subtraction: a duty.Slot before epochStartSlot wraps to a
		// huge value and is caught by the bounds check below.
		attesterSlotInEpoch := duty.Slot - epochStartSlot
		if attesterSlotInEpoch >= params.BeaconConfig().SlotsPerEpoch {
			log.WithField("duty", duty).Warn("Invalid attester slot")
		} else {
			attesterKeys[attesterSlotInEpoch] = append(attesterKeys[attesterSlotInEpoch], truncatedPubkey)
			totalAttestingKeys++
		}
		for _, proposerSlot := range duty.ProposerSlots {
			proposerSlotInEpoch := proposerSlot - epochStartSlot
			if proposerSlotInEpoch >= params.BeaconConfig().SlotsPerEpoch {
				log.WithField("duty", duty).Warn("Invalid proposer slot")
			} else {
				proposerKeys[proposerSlotInEpoch] = truncatedPubkey
				totalProposingKeys++
			}
		}
	}
	if v.emitAccountMetrics {
		for _, duty := range v.duties.NextEpochDuties() {
			if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING {
				v.emitNextDutyMetrics(duty)
			}
		}
	}
	log.WithFields(logrus.Fields{
		"proposerCount": totalProposingKeys,
		"attesterCount": totalAttestingKeys,
	}).Infof("Schedule for epoch %d", slots.ToEpoch(slot))
	v.logSlotSchedule(epochStartSlot, attesterKeys, proposerKeys)
}
// emitCurrentDutyMetrics updates per-validator Prometheus gauges (status,
// next attestation slot, sync committee membership, next proposal slot) for a
// current-epoch duty.
func (v *validator) emitCurrentDutyMetrics(duty *attesterDutyView) {
	pubkey := fmt.Sprintf("%#x", duty.Pubkey)
	// NOTE(review): the index label is formatted with %#x (hex, e.g. "0x2a");
	// confirm dashboards expect hex rather than decimal here.
	ValidatorStatusesGaugeVec.WithLabelValues(pubkey, fmt.Sprintf("%#x", duty.ValidatorIndex)).Set(float64(duty.Status))
	// Remaining gauges only apply to validators with live duties.
	if duty.Status != ethpb.ValidatorStatus_ACTIVE && duty.Status != ethpb.ValidatorStatus_EXITING {
		return
	}
	ValidatorNextAttestationSlotGaugeVec.WithLabelValues(pubkey).Set(float64(duty.Slot))
	if duty.IsSyncCommittee {
		ValidatorInSyncCommitteeGaugeVec.WithLabelValues(pubkey).Set(float64(1))
	} else {
		ValidatorInSyncCommitteeGaugeVec.WithLabelValues(pubkey).Set(float64(0))
	}
	// Multiple proposals in one epoch overwrite the gauge; the last slot wins.
	for _, proposerSlot := range duty.ProposerSlots {
		ValidatorNextProposalSlotGaugeVec.WithLabelValues(pubkey).Set(float64(proposerSlot))
	}
}
// emitNextDutyMetrics updates the next-epoch sync committee membership gauge
// for the given duty.
func (v *validator) emitNextDutyMetrics(duty *attesterDutyView) {
	key := fmt.Sprintf("%#x", duty.Pubkey)
	inCommittee := float64(0)
	if duty.IsSyncCommittee {
		inCommittee = 1
	}
	ValidatorInNextSyncCommitteeGaugeVec.WithLabelValues(key).Set(inCommittee)
}
// logSlotSchedule emits one log line per slot of the epoch that has at least
// one proposer or attester among this validator's keys.
func (v *validator) logSlotSchedule(epochStartSlot primitives.Slot, attesterKeys [][]string, proposerKeys []string) {
	for offset := primitives.Slot(0); offset < params.BeaconConfig().SlotsPerEpoch; offset++ {
		hasProposer := proposerKeys[offset] != ""
		hasAttesters := len(attesterKeys[offset]) > 0
		if !hasProposer && !hasAttesters {
			continue
		}
		slot := epochStartSlot + offset
		startTime, err := slots.StartTime(v.genesisTime, slot)
		if err != nil {
			log.WithError(err).WithField("slot", slot).Error("Slot overflows, unable to log duties!")
			return
		}
		entry := log.WithFields(logrus.Fields{})
		if hasProposer {
			entry = entry.WithField("proposerPubkey", proposerKeys[offset])
		}
		if hasAttesters {
			entry = entry.WithFields(logrus.Fields{
				"slot":            slot,
				"slotInEpoch":     slot % params.BeaconConfig().SlotsPerEpoch,
				"attesterCount":   len(attesterKeys[offset]),
				"attesterPubkeys": attesterKeys[offset],
			})
		}
		// Round up to whole seconds so sub-second waits don't log as "0s".
		untilDuty := (time.Until(startTime) + time.Second).Truncate(time.Second)
		if untilDuty > 0 {
			entry = entry.WithField("timeUntilDuty", untilDuty)
		}
		entry.Infof("Duties schedule")
	}
}
// dependentRootChangeReason checks whether the stored dependent roots differ
// from the head event roots. Returns "previous", "current", or "" (no change).
func (v *validator) dependentRootChangeReason(prevRoot, currRoot []byte) string {
	v.dutiesLock.RLock()
	defer v.dutiesLock.RUnlock()
	// Without initialized duties there is nothing to compare against; report
	// a "previous" change so the caller refreshes.
	if v.duties == nil || !v.duties.IsInitialized() {
		return "previous"
	}
	storedPrev, storedCurr := v.duties.DependentRoots()
	switch {
	case !bytes.Equal(prevRoot, storedPrev):
		return "previous"
	case bytes.Equal(currRoot, params.BeaconConfig().ZeroHash[:]):
		// A zero current root carries no information — treat as unchanged.
		return ""
	case !bytes.Equal(currRoot, storedCurr):
		return "current"
	default:
		return ""
	}
}
// checkDependentRoots inspects a head event's duty dependent roots and, when
// either differs from what the duty store recorded, refreshes duties with a
// deadline at the last slot of the next epoch.
func (v *validator) checkDependentRoots(ctx context.Context, head *structs.HeadEvent) error {
	if head == nil {
		return errors.New("received empty head event")
	}
	prevRoot, err := bytesutil.DecodeHexWithLength(head.PreviousDutyDependentRoot, fieldparams.RootLength)
	if err != nil {
		return errors.Wrap(err, "failed to decode previous duty dependent root")
	}
	// A zero previous root means the event carries no usable duty info.
	if bytes.Equal(prevRoot, params.BeaconConfig().ZeroHash[:]) {
		return nil
	}
	currRoot, err := bytesutil.DecodeHexWithLength(head.CurrentDutyDependentRoot, fieldparams.RootLength)
	if err != nil {
		return errors.Wrap(err, "failed to decode current duty dependent root")
	}
	changeReason := v.dependentRootChangeReason(prevRoot, currRoot)
	if changeReason == "" {
		return nil
	}
	// Bound the refresh by the last slot of the upcoming epoch.
	epoch := slots.ToEpoch(slots.CurrentSlot(v.genesisTime) + 1)
	nextEpochStart, err := slots.EpochStart(epoch + 1)
	if err != nil {
		return errors.Wrap(err, "failed to get epoch start")
	}
	dutiesCtx, cancel := context.WithDeadline(ctx, v.SlotDeadline(nextEpochStart-1))
	defer cancel()
	if err := v.UpdateDuties(dutiesCtx); err != nil {
		return errors.Wrap(err, "failed to update duties")
	}
	log.Infof("Updated duties due to %s dependent root change", changeReason)
	return nil
}

View File

@@ -0,0 +1,710 @@
package client
import (
"errors"
"testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
validatormock "github.com/OffchainLabs/prysm/v7/testing/validator-mock"
logTest "github.com/sirupsen/logrus/hooks/test"
"go.uber.org/mock/gomock"
)
// TestFetchAttesterDuties_CacheHit verifies that when the single-index probe
// returns the same dependent root as the cached entry, the cached duties are
// returned and no full fetch is issued (only one EXPECT is registered).
func TestFetchAttesterDuties_CacheHit(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	depRoot := []byte("dependent-root-xxxxxxxxxxxxxxxx")
	indices := []primitives.ValidatorIndex{1, 2, 3}
	epoch := primitives.Epoch(10)
	cachedCurrent := &ethpb.AttesterDutiesResponse{DependentRoot: depRoot, Duties: []*ethpb.AttesterDuty{{ValidatorIndex: 1}}}
	cachedNext := &ethpb.AttesterDutiesResponse{Duties: []*ethpb.AttesterDuty{{ValidatorIndex: 1}}}
	v := &validator{
		validatorClient: client,
		duties: &dutyStore{
			attester: &attesterDutiesCacheEntry{
				current: cachedCurrent,
				next:    cachedNext,
				epoch:   epoch,
			},
			initialized: true,
		},
	}
	// Probe returns matching dependent root → cache hit.
	client.EXPECT().AttesterDuties(gomock.Any(), epoch, indices[:1]).Return(
		&ethpb.AttesterDutiesResponse{DependentRoot: depRoot}, nil,
	)
	result, err := v.fetchAttesterDuties(t.Context(), epoch, indices)
	require.NoError(t, err)
	assert.Equal(t, cachedCurrent, result.current)
	assert.Equal(t, cachedNext, result.next)
	assert.Equal(t, epoch, result.epoch)
}
// TestFetchAttesterDuties_CacheMiss verifies that a probe returning a
// different dependent root invalidates the cache and triggers a full
// current+next epoch fetch whose results populate the returned entry.
func TestFetchAttesterDuties_CacheMiss(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	oldRoot := []byte("old-root-xxxxxxxxxxxxxxxxxxxxxxx")
	newRoot := []byte("new-root-xxxxxxxxxxxxxxxxxxxxxxx")
	indices := []primitives.ValidatorIndex{1, 2}
	epoch := primitives.Epoch(10)
	v := &validator{
		validatorClient: client,
		duties: &dutyStore{
			attester: &attesterDutiesCacheEntry{
				current: &ethpb.AttesterDutiesResponse{DependentRoot: oldRoot},
				epoch:   epoch,
			},
			initialized: true,
		},
	}
	// Probe returns different dependent root → cache miss.
	client.EXPECT().AttesterDuties(gomock.Any(), epoch, indices[:1]).Return(
		&ethpb.AttesterDutiesResponse{DependentRoot: newRoot}, nil,
	)
	currentResp := &ethpb.AttesterDutiesResponse{DependentRoot: newRoot, Duties: []*ethpb.AttesterDuty{{ValidatorIndex: 1}}}
	nextResp := &ethpb.AttesterDutiesResponse{Duties: []*ethpb.AttesterDuty{{ValidatorIndex: 2}}}
	// Full fetch: current + next epoch.
	client.EXPECT().AttesterDuties(gomock.Any(), epoch, indices).Return(currentResp, nil)
	client.EXPECT().AttesterDuties(gomock.Any(), epoch+1, indices).Return(nextResp, nil)
	result, err := v.fetchAttesterDuties(t.Context(), epoch, indices)
	require.NoError(t, err)
	assert.Equal(t, currentResp, result.current)
	assert.Equal(t, nextResp, result.next)
	assert.Equal(t, epoch, result.epoch)
}
// TestFetchSyncDuties_CacheHit verifies that a cached entry from the same
// sync committee period is returned without any RPC calls (no EXPECTs are
// registered on the mock, so any call would fail the test).
func TestFetchSyncDuties_CacheHit(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.EpochsPerSyncCommitteePeriod = 256
	params.OverrideBeaconConfig(cfg)
	epoch := primitives.Epoch(10)
	// Same computation the production code performs to bucket epochs into periods.
	currentPeriod := uint64(epoch) / uint64(cfg.EpochsPerSyncCommitteePeriod)
	indices := []primitives.ValidatorIndex{1}
	cachedEntry := &syncDutiesCacheEntry{
		current: &ethpb.SyncCommitteeDutiesResponse{Duties: []*ethpb.SyncCommitteeDuty{{ValidatorIndex: 1}}},
		next:    &ethpb.SyncCommitteeDutiesResponse{},
		epoch:   epoch,
		period:  currentPeriod,
	}
	v := &validator{
		validatorClient: client,
		duties: &dutyStore{
			sync:        cachedEntry,
			initialized: true,
		},
	}
	// No RPC calls expected — cache hit.
	result, err := v.fetchSyncDuties(t.Context(), epoch, indices)
	require.NoError(t, err)
	assert.Equal(t, cachedEntry, result)
}
// TestFetchSyncDuties_PreAltair verifies that for an epoch before the Altair
// fork the helper returns a nil entry and a nil error without issuing RPCs.
func TestFetchSyncDuties_PreAltair(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockClient := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 100
	params.OverrideBeaconConfig(cfg)
	v := &validator{
		validatorClient: mockClient,
		duties:          &dutyStore{initialized: true},
	}
	// Epoch 50 precedes Altair (100): expect nil, nil and zero mock calls.
	entry, err := v.fetchSyncDuties(t.Context(), 50, []primitives.ValidatorIndex{1})
	require.NoError(t, err)
	assert.Equal(t, (*syncDutiesCacheEntry)(nil), entry)
}
// TestUpdateDutiesSplit_SyncFailureNonFatal verifies that a failing sync
// committee fetch does not fail the overall split update: the store is still
// initialized and a warning about reusing cached data is logged.
func TestUpdateDutiesSplit_SyncFailureNonFatal(t *testing.T) {
	hook := logTest.NewGlobal()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.EpochsPerSyncCommitteePeriod = 256
	params.OverrideBeaconConfig(cfg)
	kp := randKeypair(t)
	epoch := primitives.Epoch(10)
	idx := primitives.ValidatorIndex(42)
	v := &validator{
		validatorClient: client,
		pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
			kp.pub: {index: idx, status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_ACTIVE}},
		},
		duties: &dutyStore{},
	}
	depRoot := make([]byte, 32)
	// Proposer succeeds.
	client.EXPECT().ProposerDuties(gomock.Any(), epoch).Return(
		&ethpb.ProposerDutiesResponse{DependentRoot: depRoot}, nil,
	)
	// Attester: no cache → straight to parallel fetch (current + next).
	client.EXPECT().AttesterDuties(gomock.Any(), epoch, gomock.Any()).Return(
		&ethpb.AttesterDutiesResponse{
			DependentRoot: depRoot,
			Duties:        []*ethpb.AttesterDuty{{ValidatorIndex: idx, Slot: 320}},
		}, nil,
	)
	client.EXPECT().AttesterDuties(gomock.Any(), epoch+1, gomock.Any()).Return(
		&ethpb.AttesterDutiesResponse{Duties: []*ethpb.AttesterDuty{{ValidatorIndex: idx, Slot: 352}}}, nil,
	)
	// Sync committee: current fails.
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), epoch, gomock.Any()).Return(
		nil, errors.New("sync rpc failed"),
	)
	// Next epoch sync may or may not be called (parallel), allow it.
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), epoch+1, gomock.Any()).Return(
		&ethpb.SyncCommitteeDutiesResponse{}, nil,
	).AnyTimes()
	filteredKeys := [][fieldparams.BLSPubkeyLength]byte{kp.pub}
	err := v.updateDutiesSplit(t.Context(), epoch, filteredKeys)
	// Should succeed despite sync failure.
	require.NoError(t, err)
	assert.Equal(t, true, v.duties.IsInitialized())
	assert.LogsContain(t, hook, "Error getting sync committee duties, reusing cached data")
}
// TestUpdateDutiesSplit_AttesterFailureFatal verifies that a failing attester
// fetch aborts the split update: the error is propagated and the previously
// initialized duty store is cleared.
func TestUpdateDutiesSplit_AttesterFailureFatal(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.EpochsPerSyncCommitteePeriod = 256
	params.OverrideBeaconConfig(cfg)
	kp := randKeypair(t)
	epoch := primitives.Epoch(10)
	idx := primitives.ValidatorIndex(42)
	v := &validator{
		validatorClient: client,
		pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
			kp.pub: {index: idx, status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_ACTIVE}},
		},
		duties: &dutyStore{initialized: true},
	}
	// Proposer succeeds.
	client.EXPECT().ProposerDuties(gomock.Any(), epoch).Return(
		&ethpb.ProposerDutiesResponse{}, nil,
	)
	// Attester current epoch fails.
	client.EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).Return(
		nil, errors.New("attester rpc failed"),
	).AnyTimes()
	// Sync may or may not be called (parallel).
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), gomock.Any(), gomock.Any()).Return(
		&ethpb.SyncCommitteeDutiesResponse{}, nil,
	).AnyTimes()
	filteredKeys := [][fieldparams.BLSPubkeyLength]byte{kp.pub}
	err := v.updateDutiesSplit(t.Context(), epoch, filteredKeys)
	require.ErrorContains(t, "attester rpc failed", err)
	// Duties should be cleared.
	assert.Equal(t, false, v.duties.IsInitialized())
}
// TestFetchProposerDuties_CacheHit verifies that a cached entry for the same
// epoch is returned as-is without any RPC calls.
func TestFetchProposerDuties_CacheHit(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockClient := validatormock.NewMockValidatorClient(ctrl)
	targetEpoch := primitives.Epoch(10)
	cachedEntry := &proposerDutiesCacheEntry{
		current: &ethpb.ProposerDutiesResponse{Duties: []*ethpb.ProposerDutyV2{{ValidatorIndex: 1, Slot: 320}}},
		epoch:   targetEpoch,
	}
	v := &validator{
		validatorClient: mockClient,
		duties: &dutyStore{
			proposer:    cachedEntry,
			initialized: true,
		},
	}
	// No EXPECTs are registered: any RPC would fail the test.
	entry, err := v.fetchProposerDuties(t.Context(), targetEpoch)
	require.NoError(t, err)
	assert.Equal(t, cachedEntry, entry)
}
// TestFetchProposerDuties_CacheMiss verifies that a cached entry for a
// different epoch is ignored and a fresh current-epoch fetch populates the
// returned entry (no next-epoch fetch pre-Fulu).
func TestFetchProposerDuties_CacheMiss(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	epoch := primitives.Epoch(10)
	v := &validator{
		validatorClient: client,
		duties: &dutyStore{
			proposer: &proposerDutiesCacheEntry{
				current: &ethpb.ProposerDutiesResponse{},
				epoch:   epoch - 1, // different epoch
			},
			initialized: true,
		},
	}
	resp := &ethpb.ProposerDutiesResponse{
		DependentRoot: make([]byte, 32),
		Duties:        []*ethpb.ProposerDutyV2{{ValidatorIndex: 1, Slot: 320}},
	}
	client.EXPECT().ProposerDuties(gomock.Any(), epoch).Return(resp, nil)
	result, err := v.fetchProposerDuties(t.Context(), epoch)
	require.NoError(t, err)
	assert.Equal(t, resp, result.current)
	assert.Equal(t, (*ethpb.ProposerDutiesResponse)(nil), result.next)
	assert.Equal(t, epoch, result.epoch)
}
// TestFetchProposerDuties_PostFulu verifies that for an epoch at or beyond
// the Fulu fork both the current- and next-epoch proposer duties are fetched
// and stored on the returned entry.
func TestFetchProposerDuties_PostFulu(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.FuluForkEpoch = 5
	params.OverrideBeaconConfig(cfg)
	epoch := primitives.Epoch(10)
	v := &validator{
		validatorClient: client,
		duties:          &dutyStore{},
	}
	currentResp := &ethpb.ProposerDutiesResponse{
		DependentRoot: make([]byte, 32),
		Duties:        []*ethpb.ProposerDutyV2{{ValidatorIndex: 1, Slot: 320}},
	}
	nextResp := &ethpb.ProposerDutiesResponse{
		DependentRoot: make([]byte, 32),
		Duties:        []*ethpb.ProposerDutyV2{{ValidatorIndex: 2, Slot: 352}},
	}
	client.EXPECT().ProposerDuties(gomock.Any(), epoch).Return(currentResp, nil)
	client.EXPECT().ProposerDuties(gomock.Any(), epoch+1).Return(nextResp, nil)
	result, err := v.fetchProposerDuties(t.Context(), epoch)
	require.NoError(t, err)
	assert.Equal(t, currentResp, result.current)
	assert.Equal(t, nextResp, result.next)
	assert.Equal(t, epoch, result.epoch)
}
func TestFetchProposerDuties_PreFulu(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := validatormock.NewMockValidatorClient(ctrl)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 100
params.OverrideBeaconConfig(cfg)
epoch := primitives.Epoch(10)
v := &validator{
validatorClient: client,
duties: &dutyStore{},
}
resp := &ethpb.ProposerDutiesResponse{
DependentRoot: make([]byte, 32),
Duties: []*ethpb.ProposerDutyV2{{ValidatorIndex: 1, Slot: 320}},
}
// Only current epoch fetched, no next.
client.EXPECT().ProposerDuties(gomock.Any(), epoch).Return(resp, nil)
result, err := v.fetchProposerDuties(t.Context(), epoch)
require.NoError(t, err)
assert.Equal(t, resp, result.current)
assert.Equal(t, (*ethpb.ProposerDutiesResponse)(nil), result.next)
assert.Equal(t, epoch, result.epoch)
}
func TestFetchProposerDuties_PostFulu_NextEpochFailureNonFatal(t *testing.T) {
hook := logTest.NewGlobal()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := validatormock.NewMockValidatorClient(ctrl)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 5
params.OverrideBeaconConfig(cfg)
epoch := primitives.Epoch(10)
v := &validator{
validatorClient: client,
duties: &dutyStore{},
}
currentResp := &ethpb.ProposerDutiesResponse{
DependentRoot: make([]byte, 32),
Duties: []*ethpb.ProposerDutyV2{{ValidatorIndex: 1, Slot: 320}},
}
client.EXPECT().ProposerDuties(gomock.Any(), epoch).Return(currentResp, nil)
client.EXPECT().ProposerDuties(gomock.Any(), epoch+1).Return(nil, errors.New("next epoch failed"))
result, err := v.fetchProposerDuties(t.Context(), epoch)
require.NoError(t, err)
assert.Equal(t, currentResp, result.current)
assert.Equal(t, (*ethpb.ProposerDutiesResponse)(nil), result.next)
assert.Equal(t, epoch, result.epoch)
assert.LogsContain(t, hook, "Could not get next epoch proposer duties")
}
// TestFetchSyncDuties_CacheMiss verifies that with an empty store,
// fetchSyncDuties fetches sync committee duties for both the current and next
// epoch and records the sync committee period (epoch 10 / 256 = period 0).
func TestFetchSyncDuties_CacheMiss(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.EpochsPerSyncCommitteePeriod = 256
	params.OverrideBeaconConfig(cfg)
	epoch := primitives.Epoch(10)
	indices := []primitives.ValidatorIndex{1, 2}
	v := &validator{
		validatorClient: client,
		duties:          &dutyStore{},
	}
	currentResp := &ethpb.SyncCommitteeDutiesResponse{Duties: []*ethpb.SyncCommitteeDuty{{ValidatorIndex: 1}}}
	nextResp := &ethpb.SyncCommitteeDutiesResponse{Duties: []*ethpb.SyncCommitteeDuty{{ValidatorIndex: 2}}}
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), epoch, indices).Return(currentResp, nil)
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), epoch+1, indices).Return(nextResp, nil)
	result, err := v.fetchSyncDuties(t.Context(), epoch, indices)
	require.NoError(t, err)
	assert.Equal(t, currentResp, result.current)
	assert.Equal(t, nextResp, result.next)
	assert.Equal(t, epoch, result.epoch)
	assert.Equal(t, uint64(0), result.period)
}

// TestFetchSyncDuties_NextEpochFailureNonFatal verifies that a failed
// next-epoch sync duty fetch is logged but does not fail the call; the current
// epoch's duties are still returned with a nil next entry.
func TestFetchSyncDuties_NextEpochFailureNonFatal(t *testing.T) {
	hook := logTest.NewGlobal()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.EpochsPerSyncCommitteePeriod = 256
	params.OverrideBeaconConfig(cfg)
	epoch := primitives.Epoch(10)
	indices := []primitives.ValidatorIndex{1}
	v := &validator{
		validatorClient: client,
		duties:          &dutyStore{},
	}
	currentResp := &ethpb.SyncCommitteeDutiesResponse{Duties: []*ethpb.SyncCommitteeDuty{{ValidatorIndex: 1}}}
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), epoch, indices).Return(currentResp, nil)
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), epoch+1, indices).Return(nil, errors.New("next sync failed"))
	result, err := v.fetchSyncDuties(t.Context(), epoch, indices)
	require.NoError(t, err)
	assert.Equal(t, currentResp, result.current)
	assert.Equal(t, (*ethpb.SyncCommitteeDutiesResponse)(nil), result.next)
	assert.LogsContain(t, hook, "Could not get next epoch sync committee duties")
}
// TestUpdateDutiesSplit_ProposerFailureFatal verifies that a proposer-duties
// RPC failure makes updateDutiesSplit fail as a whole and leaves the duty
// store uninitialized, even if the attester/sync fetches (which run in
// parallel) succeed.
func TestUpdateDutiesSplit_ProposerFailureFatal(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.EpochsPerSyncCommitteePeriod = 256
	params.OverrideBeaconConfig(cfg)
	kp := randKeypair(t)
	epoch := primitives.Epoch(10)
	idx := primitives.ValidatorIndex(42)
	v := &validator{
		validatorClient: client,
		pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
			kp.pub: {index: idx, status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_ACTIVE}},
		},
		duties: &dutyStore{initialized: true},
	}
	// Proposer fails.
	client.EXPECT().ProposerDuties(gomock.Any(), epoch).Return(nil, errors.New("proposer rpc failed"))
	// Attester may or may not be called (parallel).
	client.EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).Return(
		&ethpb.AttesterDutiesResponse{DependentRoot: make([]byte, 32)}, nil,
	).AnyTimes()
	// Sync may or may not be called (parallel).
	client.EXPECT().SyncCommitteeDuties(gomock.Any(), gomock.Any(), gomock.Any()).Return(
		&ethpb.SyncCommitteeDutiesResponse{}, nil,
	).AnyTimes()
	err := v.updateDutiesSplit(t.Context(), epoch, [][fieldparams.BLSPubkeyLength]byte{kp.pub})
	require.ErrorContains(t, "proposer rpc failed", err)
	// The previously-initialized store must be invalidated on failure.
	assert.Equal(t, false, v.duties.IsInitialized())
}
// TestDependentRootChangeReason_NoChange verifies that identical previous and
// current dependent roots yield an empty reason (no reorg detected).
func TestDependentRootChangeReason_NoChange(t *testing.T) {
	prevRoot := []byte("prev-root-xxxxxxxxxxxxxxxxxxxxxxx")
	currRoot := []byte("curr-root-xxxxxxxxxxxxxxxxxxxxxxx")
	v := &validator{
		duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
			PrevDependentRoot: prevRoot,
			CurrDependentRoot: currRoot,
		}),
	}
	assert.Equal(t, "", v.dependentRootChangeReason(prevRoot, currRoot))
}

// TestDependentRootChangeReason_PreviousChanged verifies a changed previous
// dependent root is reported as "previous".
func TestDependentRootChangeReason_PreviousChanged(t *testing.T) {
	prevRoot := []byte("prev-root-xxxxxxxxxxxxxxxxxxxxxxx")
	currRoot := []byte("curr-root-xxxxxxxxxxxxxxxxxxxxxxx")
	newPrev := []byte("new-prev-xxxxxxxxxxxxxxxxxxxxxxxx")
	v := &validator{
		duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
			PrevDependentRoot: prevRoot,
			CurrDependentRoot: currRoot,
		}),
	}
	assert.Equal(t, "previous", v.dependentRootChangeReason(newPrev, currRoot))
}

// TestDependentRootChangeReason_CurrentChanged verifies a changed current
// dependent root is reported as "current".
func TestDependentRootChangeReason_CurrentChanged(t *testing.T) {
	prevRoot := []byte("prev-root-xxxxxxxxxxxxxxxxxxxxxxx")
	currRoot := []byte("curr-root-xxxxxxxxxxxxxxxxxxxxxxx")
	newCurr := []byte("new-curr-xxxxxxxxxxxxxxxxxxxxxxxx")
	v := &validator{
		duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
			PrevDependentRoot: prevRoot,
			CurrDependentRoot: currRoot,
		}),
	}
	assert.Equal(t, "current", v.dependentRootChangeReason(prevRoot, newCurr))
}

// TestDependentRootChangeReason_Uninitialized verifies that an uninitialized
// store (no stored roots) is treated as a "previous" root change.
func TestDependentRootChangeReason_Uninitialized(t *testing.T) {
	v := &validator{duties: &dutyStore{}}
	assert.Equal(t, "previous", v.dependentRootChangeReason([]byte("a"), []byte("b")))
}

// TestDependentRootChangeReason_ZeroCurrentRoot verifies that a zero-hash
// current root is ignored rather than treated as a change.
func TestDependentRootChangeReason_ZeroCurrentRoot(t *testing.T) {
	prevRoot := []byte("prev-root-xxxxxxxxxxxxxxxxxxxxxxx")
	currRoot := []byte("curr-root-xxxxxxxxxxxxxxxxxxxxxxx")
	v := &validator{
		duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
			PrevDependentRoot: prevRoot,
			CurrDependentRoot: currRoot,
		}),
	}
	// Zero hash current root should return "" (no change).
	assert.Equal(t, "", v.dependentRootChangeReason(prevRoot, params.BeaconConfig().ZeroHash[:]))
}
// TestUpdateDutiesSplit_NoIndices verifies that when none of the requested
// pubkeys resolve to a validator index, updateDutiesSplit is a no-op success:
// no RPC client is even set on the validator, so any RPC call would panic.
func TestUpdateDutiesSplit_NoIndices(t *testing.T) {
	v := &validator{
		pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{},
		duties:         &dutyStore{},
	}
	// No matching indices → returns nil immediately without RPCs.
	kp := randKeypair(t)
	err := v.updateDutiesSplit(t.Context(), 10, [][fieldparams.BLSPubkeyLength]byte{kp.pub})
	require.NoError(t, err)
}
// TestUpdateDutiesLegacy_OK verifies the legacy combined-duties path: a
// successful Duties RPC populates the store and the duty fields round-trip
// into the store's view representation.
func TestUpdateDutiesLegacy_OK(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	kp := randKeypair(t)
	epoch := primitives.Epoch(5)
	v := &validator{
		validatorClient: client,
		duties:          &dutyStore{},
	}
	resp := &ethpb.ValidatorDutiesContainer{
		PrevDependentRoot: make([]byte, 32),
		CurrDependentRoot: make([]byte, 32),
		CurrentEpochDuties: []*ethpb.ValidatorDuty{
			{
				PublicKey:      kp.pub[:],
				ValidatorIndex: 42,
				AttesterSlot:   160,
				CommitteeIndex: 1,
			},
		},
	}
	client.EXPECT().Duties(gomock.Any(), gomock.Any()).Return(resp, nil)
	err := v.updateDutiesLegacy(t.Context(), epoch, [][fieldparams.BLSPubkeyLength]byte{kp.pub})
	require.NoError(t, err)
	assert.Equal(t, true, v.duties.IsInitialized())
	assert.Equal(t, 1, len(v.duties.CurrentEpochDuties()))
	assert.Equal(t, primitives.ValidatorIndex(42), v.duties.CurrentEpochDuties()[0].ValidatorIndex)
}

// TestUpdateDutiesLegacy_Error verifies that an RPC error propagates and
// invalidates a previously-initialized duty store.
func TestUpdateDutiesLegacy_Error(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	v := &validator{
		validatorClient: client,
		duties:          &dutyStore{initialized: true},
	}
	client.EXPECT().Duties(gomock.Any(), gomock.Any()).Return(nil, errors.New("rpc failed"))
	err := v.updateDutiesLegacy(t.Context(), 5, [][fieldparams.BLSPubkeyLength]byte{})
	require.ErrorContains(t, "rpc failed", err)
	assert.Equal(t, false, v.duties.IsInitialized())
}

// TestUpdateDutiesLegacy_NilResponse verifies that a nil response with no
// error is not treated as a failure, but still leaves the store
// uninitialized (SetLegacy with a nil container clears it).
func TestUpdateDutiesLegacy_NilResponse(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	v := &validator{
		validatorClient: client,
		duties:          &dutyStore{initialized: true},
	}
	client.EXPECT().Duties(gomock.Any(), gomock.Any()).Return(nil, nil)
	err := v.updateDutiesLegacy(t.Context(), 5, [][fieldparams.BLSPubkeyLength]byte{})
	require.NoError(t, err)
	assert.Equal(t, false, v.duties.IsInitialized())
}
// TestOnDutiesUpdated_AllExited verifies that onDutiesUpdated returns
// ErrValidatorsAllExited when every validator in the current epoch duties has
// EXITED status.
func TestOnDutiesUpdated_AllExited(t *testing.T) {
	kp := randKeypair(t)
	v := &validator{
		duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
			CurrentEpochDuties: []*ethpb.ValidatorDuty{
				{PublicKey: kp.pub[:], Status: ethpb.ValidatorStatus_EXITED},
			},
		}),
	}
	err := v.onDutiesUpdated(t.Context())
	require.ErrorIs(t, err, ErrValidatorsAllExited)
}

// TestOnDutiesUpdated_NotAllExited verifies the happy path: with at least one
// non-exited validator, onDutiesUpdated subscribes to committee subnets and
// returns nil.
func TestOnDutiesUpdated_NotAllExited(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := validatormock.NewMockValidatorClient(ctrl)
	kp := randKeypair(t)
	// Use PENDING status so subscribeToSubnets skips the isAggregator call.
	v := &validator{
		validatorClient: client,
		duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
			CurrentEpochDuties: []*ethpb.ValidatorDuty{
				{PublicKey: kp.pub[:], Status: ethpb.ValidatorStatus_PENDING},
			},
		}),
	}
	client.EXPECT().SubscribeCommitteeSubnets(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes()
	err := v.onDutiesUpdated(t.Context())
	require.NoError(t, err)
}
// TestClearDuties verifies that clearDuties transitions an initialized store
// back to the uninitialized state.
func TestClearDuties(t *testing.T) {
	v := &validator{
		duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
			CurrentEpochDuties: []*ethpb.ValidatorDuty{{ValidatorIndex: 1}},
		}),
	}
	assert.Equal(t, true, v.duties.IsInitialized())
	v.clearDuties()
	assert.Equal(t, false, v.duties.IsInitialized())
}

// TestClearDuties_NilStore verifies that clearDuties on a validator with a
// nil duty store allocates a fresh, uninitialized store instead of panicking.
func TestClearDuties_NilStore(t *testing.T) {
	v := &validator{}
	v.clearDuties()
	assert.NotNil(t, v.duties)
	assert.Equal(t, false, v.duties.IsInitialized())
}

View File

@@ -0,0 +1,412 @@
package client
import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// dutyStore stores validator duty data as pre-computed attesterDutyView slices.
// Both the legacy combined endpoint and the split per-duty endpoints eagerly
// convert into the same internal representation, so accessor methods have a
// single code path with no branching on which endpoint populated the store.
//
// NOTE(review): the store carries no lock of its own; callers appear to be
// responsible for synchronizing access — confirm against the owning validator.
type dutyStore struct {
	// Pre-computed duty views for the current and next epoch.
	currentDuties []*attesterDutyView
	nextDuties    []*attesterDutyView
	// Dependent roots (previous comes from the attester response, current from
	// the proposer response on the split path; see SetSplit).
	prevDependentRoot []byte
	currDependentRoot []byte
	// Pubkey -> current duty lookup (for O(1) CurrentAttesterDuty).
	pubkeyToDuty map[[fieldparams.BLSPubkeyLength]byte]*attesterDutyView
	// Sync committee membership (O(1) lookup, independent of duty views).
	syncCurrentMap map[primitives.ValidatorIndex]bool
	syncNextMap    map[primitives.ValidatorIndex]bool
	// Split cache entries (kept for updateDutiesSplit cache-skip logic).
	// These are nil when the store was populated via the legacy endpoint.
	attester *attesterDutiesCacheEntry
	proposer *proposerDutiesCacheEntry
	sync     *syncDutiesCacheEntry
	// Whether initialized with valid data. Accessors return zero values until
	// this is set by SetLegacy/SetSplit.
	initialized bool
}
// attesterDutyView is a unified view of an attester duty plus validator status,
// since the split AttesterDuty proto doesn't carry status.
type attesterDutyView struct {
	Pubkey                  [fieldparams.BLSPubkeyLength]byte
	ValidatorIndex          primitives.ValidatorIndex
	CommitteeIndex          primitives.CommitteeIndex
	CommitteeLength         uint64
	CommitteesAtSlot        uint64
	ValidatorCommitteeIndex uint64
	// Slot is the attestation slot for this duty.
	Slot primitives.Slot
	// ProposerSlots lists the slots this validator proposes in the epoch.
	ProposerSlots []primitives.Slot
	// IsSyncCommittee marks sync-committee membership for the view's epoch.
	IsSyncCommittee bool
	// Status is copied from the legacy duty, or resolved via statusForPubkey
	// (defaulting to ACTIVE) on the split path.
	Status ethpb.ValidatorStatus
}
// Reset clears all duty data, marking the store as uninitialized.
func (ds *dutyStore) Reset() {
	// Overwriting with the zero value nils every slice, map, byte slice, and
	// cache entry, and drops the initialized flag, in a single assignment.
	*ds = dutyStore{}
}
// IsInitialized returns true if any duty data has been populated.
// A nil receiver is treated as uninitialized.
func (ds *dutyStore) IsInitialized() bool {
	return ds != nil && ds.initialized
}
// DependentRoots returns the previous and current dependent roots, or
// (nil, nil) when the store holds no valid data.
func (ds *dutyStore) DependentRoots() (prev, curr []byte) {
	if ds.IsInitialized() {
		return ds.prevDependentRoot, ds.currDependentRoot
	}
	return nil, nil
}
// CurrentEpochDuties returns the current epoch duties as attesterDutyView
// slices, or nil when the store is uninitialized.
func (ds *dutyStore) CurrentEpochDuties() []*attesterDutyView {
	if ds.IsInitialized() {
		return ds.currentDuties
	}
	return nil
}
// NextEpochDuties returns the next epoch duties as attesterDutyView slices,
// or nil when the store is uninitialized.
func (ds *dutyStore) NextEpochDuties() []*attesterDutyView {
	if ds.IsInitialized() {
		return ds.nextDuties
	}
	return nil
}
// CurrentAttesterDuty returns the current epoch duty for a given pubkey.
// The second return value reports whether a duty exists. O(1) lookup.
func (ds *dutyStore) CurrentAttesterDuty(pubkey [fieldparams.BLSPubkeyLength]byte) (*attesterDutyView, bool) {
	if !ds.IsInitialized() {
		return nil, false
	}
	duty, found := ds.pubkeyToDuty[pubkey]
	return duty, found
}
// ProposerSlots returns the proposer slots for a given validator index, or
// nil when the validator has no current-epoch duty or the store is empty.
func (ds *dutyStore) ProposerSlots(idx primitives.ValidatorIndex) []primitives.Slot {
	if !ds.IsInitialized() {
		return nil
	}
	// Linear scan: duty counts are small (local validators only).
	for _, duty := range ds.currentDuties {
		if duty.ValidatorIndex != idx {
			continue
		}
		return duty.ProposerSlots
	}
	return nil
}
// IsSyncCommittee returns whether a validator is in the current sync committee.
func (ds *dutyStore) IsSyncCommittee(idx primitives.ValidatorIndex) bool {
	return ds.IsInitialized() && ds.syncCurrentMap[idx]
}
// IsNextSyncCommittee returns whether a validator is in the next epoch's sync committee.
func (ds *dutyStore) IsNextSyncCommittee(idx primitives.ValidatorIndex) bool {
	return ds.IsInitialized() && ds.syncNextMap[idx]
}
// AllCurrentExitedCount returns the count of EXITED validators in the current
// epoch duties along with the total duty count. Both are zero when the store
// is uninitialized.
func (ds *dutyStore) AllCurrentExitedCount() (exited, total int) {
	if !ds.IsInitialized() {
		return 0, 0
	}
	total = len(ds.currentDuties)
	for _, duty := range ds.currentDuties {
		if duty.Status == ethpb.ValidatorStatus_EXITED {
			exited++
		}
	}
	return exited, total
}
// statusForPubkey looks up validator status from a pubkeyToStatus map,
// defaulting to ACTIVE when the key is absent, the stored status is nil, or
// the map itself is nil (indexing a nil map safely yields the zero value).
func statusForPubkey(pk [fieldparams.BLSPubkeyLength]byte, pubkeyToStatus map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus) ethpb.ValidatorStatus {
	entry, ok := pubkeyToStatus[pk]
	if !ok || entry.status == nil {
		return ethpb.ValidatorStatus_ACTIVE
	}
	return entry.status.Status
}
// legacyDutyToView converts a legacy ValidatorDuty to an attesterDutyView.
// A nil duty converts to a nil view.
func legacyDutyToView(duty *ethpb.ValidatorDuty) *attesterDutyView {
	if duty == nil {
		return nil
	}
	view := &attesterDutyView{
		ValidatorIndex:          duty.ValidatorIndex,
		CommitteeIndex:          duty.CommitteeIndex,
		CommitteeLength:         duty.CommitteeLength,
		CommitteesAtSlot:        duty.CommitteesAtSlot,
		ValidatorCommitteeIndex: duty.ValidatorCommitteeIndex,
		Slot:                    duty.AttesterSlot,
		ProposerSlots:           duty.ProposerSlots,
		IsSyncCommittee:         duty.IsSyncCommittee,
		Status:                  duty.Status,
	}
	copy(view.Pubkey[:], duty.PublicKey)
	return view
}
// SetLegacy stores a legacy combined duties response by converting it to the
// unified attesterDutyView format. A nil container fully resets the store.
// Validator status comes from the container's duties themselves; the
// pubkeyToStatus argument is accepted for signature parity with SetSplit but
// is not consulted on this path.
func (ds *dutyStore) SetLegacy(container *ethpb.ValidatorDutiesContainer, pubkeyToStatus map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus) {
	// Clear cache entries (legacy doesn't use per-duty caching).
	ds.attester = nil
	ds.proposer = nil
	ds.sync = nil
	if container == nil {
		// Reset clears exactly the fields the previous inline assignments
		// did (cache entries above are nil'd again, harmlessly).
		ds.Reset()
		return
	}
	// Convert current epoch duties, building the pubkey lookup and current
	// sync-committee membership set in the same pass.
	ds.currentDuties = make([]*attesterDutyView, 0, len(container.CurrentEpochDuties))
	ds.pubkeyToDuty = make(map[[fieldparams.BLSPubkeyLength]byte]*attesterDutyView, len(container.CurrentEpochDuties))
	ds.syncCurrentMap = make(map[primitives.ValidatorIndex]bool, len(container.CurrentEpochDuties))
	for _, d := range container.CurrentEpochDuties {
		view := legacyDutyToView(d)
		if view == nil {
			continue
		}
		ds.currentDuties = append(ds.currentDuties, view)
		ds.pubkeyToDuty[view.Pubkey] = view
		if view.IsSyncCommittee {
			ds.syncCurrentMap[view.ValidatorIndex] = true
		}
	}
	// Convert next epoch duties and next sync-committee membership.
	ds.nextDuties = make([]*attesterDutyView, 0, len(container.NextEpochDuties))
	ds.syncNextMap = make(map[primitives.ValidatorIndex]bool, len(container.NextEpochDuties))
	for _, d := range container.NextEpochDuties {
		view := legacyDutyToView(d)
		if view == nil {
			continue
		}
		ds.nextDuties = append(ds.nextDuties, view)
		if view.IsSyncCommittee {
			ds.syncNextMap[view.ValidatorIndex] = true
		}
	}
	ds.prevDependentRoot = container.PrevDependentRoot
	ds.currDependentRoot = container.CurrDependentRoot
	ds.initialized = true
}
// proposerSlotsMap builds a map of validator index to proposer slots from a
// proposer duties response. A nil response yields an empty (non-nil) map.
func proposerSlotsMap(resp *ethpb.ProposerDutiesResponse) map[primitives.ValidatorIndex][]primitives.Slot {
	out := make(map[primitives.ValidatorIndex][]primitives.Slot)
	if resp == nil {
		return out
	}
	for _, duty := range resp.Duties {
		out[duty.ValidatorIndex] = append(out[duty.ValidatorIndex], duty.Slot)
	}
	return out
}
// syncMap builds a set of validator indices present in a sync committee
// response. A nil response yields an empty (non-nil) set.
func syncMap(resp *ethpb.SyncCommitteeDutiesResponse) map[primitives.ValidatorIndex]bool {
	members := make(map[primitives.ValidatorIndex]bool)
	if resp == nil {
		return members
	}
	for _, duty := range resp.Duties {
		members[duty.ValidatorIndex] = true
	}
	return members
}
// buildDutyViews converts split attester duties into attesterDutyView slices,
// merging proposer and sync committee data.
//
// Attester duties come first, in response order; validators that only have a
// proposer duty are appended afterwards. NOTE(review): the proposer-only
// portion iterates a map, so its relative order is nondeterministic — callers
// should not rely on ordering beyond the attester prefix.
//
// A nil attResp yields nil. Validators in propSlots without an entry in
// indexToPubkey are skipped (non-local validators).
func buildDutyViews(
	attResp *ethpb.AttesterDutiesResponse,
	propSlots map[primitives.ValidatorIndex][]primitives.Slot,
	syncMembership map[primitives.ValidatorIndex]bool,
	indexToPubkey map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte,
	pubkeyToStatus map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus,
) []*attesterDutyView {
	if attResp == nil {
		return nil
	}
	// Track which indices already got a view so the proposer-only pass below
	// doesn't duplicate them.
	seen := make(map[primitives.ValidatorIndex]bool, len(attResp.Duties))
	views := make([]*attesterDutyView, 0, len(attResp.Duties))
	for _, d := range attResp.Duties {
		seen[d.ValidatorIndex] = true
		// Zero-value pubkey if the index is unknown; status then defaults to
		// ACTIVE via statusForPubkey.
		pk := indexToPubkey[d.ValidatorIndex]
		views = append(views, &attesterDutyView{
			Pubkey:                  pk,
			ValidatorIndex:          d.ValidatorIndex,
			CommitteeIndex:          d.CommitteeIndex,
			CommitteeLength:         d.CommitteeLength,
			CommitteesAtSlot:        d.CommitteesAtSlot,
			ValidatorCommitteeIndex: d.ValidatorCommitteeIndex,
			Slot:                    d.Slot,
			ProposerSlots:           propSlots[d.ValidatorIndex],
			IsSyncCommittee:         syncMembership[d.ValidatorIndex],
			Status:                  statusForPubkey(pk, pubkeyToStatus),
		})
	}
	// Add proposer-only validators not in the attester list (skip non-local validators).
	for idx, pSlots := range propSlots {
		if seen[idx] {
			continue
		}
		pk, ok := indexToPubkey[idx]
		if !ok {
			continue
		}
		views = append(views, &attesterDutyView{
			Pubkey:          pk,
			ValidatorIndex:  idx,
			ProposerSlots:   pSlots,
			IsSyncCommittee: syncMembership[idx],
			Status:          statusForPubkey(pk, pubkeyToStatus),
		})
	}
	return views
}
// SetSplit stores split per-duty responses by converting them to the unified
// attesterDutyView format.
//
// The raw cache entries (att/prop/sc) are always stored — even when the view
// data cannot be built — because updateDutiesSplit's cache-skip logic reads
// them regardless of the initialized flag. Without a current attester
// response, the derived views and lookups are cleared and the store is left
// uninitialized.
func (ds *dutyStore) SetSplit(
	att *attesterDutiesCacheEntry,
	prop *proposerDutiesCacheEntry,
	sc *syncDutiesCacheEntry,
	indexToPubkey map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte,
	pubkeyToStatus map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus,
) {
	// Store cache entries for updateDutiesSplit cache-skip logic.
	ds.attester = att
	ds.proposer = prop
	ds.sync = sc
	if att == nil || att.current == nil {
		// No usable attester data: clear derived state but deliberately keep
		// the cache entries assigned above (so don't call Reset here).
		ds.currentDuties = nil
		ds.nextDuties = nil
		ds.prevDependentRoot = nil
		ds.currDependentRoot = nil
		ds.pubkeyToDuty = nil
		ds.syncCurrentMap = nil
		ds.syncNextMap = nil
		ds.initialized = false
		return
	}
	// Extract dependent roots: previous from the attester response, current
	// from the proposer response.
	ds.prevDependentRoot = att.current.DependentRoot
	if prop != nil && prop.current != nil {
		ds.currDependentRoot = prop.current.DependentRoot
	} else {
		ds.currDependentRoot = nil
	}
	// Build sync membership maps (empty, non-nil maps when sc is nil so
	// lookups below never index a nil map... they would be safe anyway, but
	// this keeps the invariant that an initialized store has non-nil maps).
	if sc != nil {
		ds.syncCurrentMap = syncMap(sc.current)
		ds.syncNextMap = syncMap(sc.next)
	} else {
		ds.syncCurrentMap = make(map[primitives.ValidatorIndex]bool)
		ds.syncNextMap = make(map[primitives.ValidatorIndex]bool)
	}
	// Build current epoch views.
	var propSlotsMap map[primitives.ValidatorIndex][]primitives.Slot
	if prop != nil && prop.current != nil {
		propSlotsMap = proposerSlotsMap(prop.current)
	} else {
		propSlotsMap = make(map[primitives.ValidatorIndex][]primitives.Slot)
	}
	ds.currentDuties = buildDutyViews(att.current, propSlotsMap, ds.syncCurrentMap, indexToPubkey, pubkeyToStatus)
	// Build pubkey lookup for O(1) CurrentAttesterDuty.
	ds.pubkeyToDuty = make(map[[fieldparams.BLSPubkeyLength]byte]*attesterDutyView, len(ds.currentDuties))
	for _, d := range ds.currentDuties {
		ds.pubkeyToDuty[d.Pubkey] = d
	}
	// Build next epoch views.
	var nextPropSlots map[primitives.ValidatorIndex][]primitives.Slot
	if prop != nil && prop.next != nil {
		nextPropSlots = proposerSlotsMap(prop.next)
	} else {
		nextPropSlots = make(map[primitives.ValidatorIndex][]primitives.Slot)
	}
	ds.nextDuties = buildDutyViews(att.next, nextPropSlots, ds.syncNextMap, indexToPubkey, pubkeyToStatus)
	ds.initialized = true
}
// dutyViewToProto converts an attesterDutyView to a ValidatorDuty proto for
// SubscribeCommitteeSubnets compat. A nil view converts to a nil proto,
// mirroring legacyDutyToView's nil handling.
func dutyViewToProto(dv *attesterDutyView) *ethpb.ValidatorDuty {
	if dv == nil {
		return nil
	}
	return &ethpb.ValidatorDuty{
		PublicKey:               dv.Pubkey[:],
		ValidatorIndex:          dv.ValidatorIndex,
		CommitteeIndex:          dv.CommitteeIndex,
		CommitteeLength:         dv.CommitteeLength,
		CommitteesAtSlot:        dv.CommitteesAtSlot,
		ValidatorCommitteeIndex: dv.ValidatorCommitteeIndex,
		AttesterSlot:            dv.Slot,
		ProposerSlots:           dv.ProposerSlots,
		Status:                  dv.Status,
		IsSyncCommittee:         dv.IsSyncCommittee,
	}
}
// newDutyStoreFromLegacy creates a dutyStore pre-populated from a legacy
// ValidatorDutiesContainer. This is a convenience helper primarily used in tests.
func newDutyStoreFromLegacy(container *ethpb.ValidatorDutiesContainer) *dutyStore {
	store := new(dutyStore)
	store.SetLegacy(container, nil)
	return store
}
// AttesterDutiesCache returns the attester duties cache entry (for dependent
// root checking). Safe on a nil receiver, consistent with IsInitialized —
// callers such as clearDuties tolerate a nil store.
func (ds *dutyStore) AttesterDutiesCache() *attesterDutiesCacheEntry {
	if ds == nil {
		return nil
	}
	return ds.attester
}
// ProposerDutiesCache returns the proposer duties cache entry.
// Safe on a nil receiver, consistent with IsInitialized.
func (ds *dutyStore) ProposerDutiesCache() *proposerDutiesCacheEntry {
	if ds == nil {
		return nil
	}
	return ds.proposer
}
// SyncDutiesCache returns the sync duties cache entry.
// Safe on a nil receiver, consistent with IsInitialized.
func (ds *dutyStore) SyncDutiesCache() *syncDutiesCacheEntry {
	if ds == nil {
		return nil
	}
	return ds.sync
}

View File

@@ -0,0 +1,457 @@
package client
import (
"testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
// TestDutyStore_IsInitialized covers the initialized flag across a nil store,
// an empty store, and both population paths (legacy and split), including the
// split path's requirement of a current attester response.
func TestDutyStore_IsInitialized(t *testing.T) {
	t.Run("nil store", func(t *testing.T) {
		var ds *dutyStore
		assert.Equal(t, false, ds.IsInitialized())
	})
	t.Run("empty store", func(t *testing.T) {
		ds := &dutyStore{}
		assert.Equal(t, false, ds.IsInitialized())
	})
	t.Run("legacy initialized", func(t *testing.T) {
		ds := &dutyStore{}
		ds.SetLegacy(&ethpb.ValidatorDutiesContainer{}, nil)
		assert.Equal(t, true, ds.IsInitialized())
	})
	t.Run("split initialized", func(t *testing.T) {
		ds := &dutyStore{}
		ds.SetSplit(
			&attesterDutiesCacheEntry{current: &ethpb.AttesterDutiesResponse{}},
			nil, nil, nil, nil,
		)
		assert.Equal(t, true, ds.IsInitialized())
	})
	t.Run("split without attester", func(t *testing.T) {
		ds := &dutyStore{}
		// No attester entry means the split path cannot initialize the store.
		ds.SetSplit(nil, nil, nil, nil, nil)
		assert.Equal(t, false, ds.IsInitialized())
	})
}
// TestDutyStore_DependentRoots_Legacy verifies that the legacy path stores the
// container's prev/curr dependent roots verbatim.
func TestDutyStore_DependentRoots_Legacy(t *testing.T) {
	ds := &dutyStore{}
	ds.SetLegacy(&ethpb.ValidatorDutiesContainer{
		PrevDependentRoot: []byte("prev"),
		CurrDependentRoot: []byte("curr"),
	}, nil)
	prev, curr := ds.DependentRoots()
	assert.DeepEqual(t, []byte("prev"), prev)
	assert.DeepEqual(t, []byte("curr"), curr)
}

// TestDutyStore_DependentRoots_Split verifies the split path's root sourcing:
// the previous root comes from the attester response and the current root from
// the proposer response.
func TestDutyStore_DependentRoots_Split(t *testing.T) {
	ds := &dutyStore{}
	ds.SetSplit(
		&attesterDutiesCacheEntry{
			current: &ethpb.AttesterDutiesResponse{DependentRoot: []byte("att-root")},
		},
		&proposerDutiesCacheEntry{
			current: &ethpb.ProposerDutiesResponse{DependentRoot: []byte("prop-root")},
		},
		nil, nil, nil,
	)
	prev, curr := ds.DependentRoots()
	assert.DeepEqual(t, []byte("att-root"), prev)
	assert.DeepEqual(t, []byte("prop-root"), curr)
}
// testPubkey builds a BLS pubkey whose first byte is b and remaining bytes are
// zero, giving each test validator a distinct, recognizable key.
func testPubkey(b byte) [fieldparams.BLSPubkeyLength]byte {
	key := [fieldparams.BLSPubkeyLength]byte{}
	key[0] = b
	return key
}
// TestDutyStore_CurrentEpochDuties_Legacy verifies that legacy duties convert
// into attesterDutyView with every field (slot, sync flag, proposer slots,
// status) carried over.
func TestDutyStore_CurrentEpochDuties_Legacy(t *testing.T) {
	pk1 := testPubkey(1)
	pk2 := testPubkey(2)
	ds := &dutyStore{}
	ds.SetLegacy(&ethpb.ValidatorDutiesContainer{
		CurrentEpochDuties: []*ethpb.ValidatorDuty{
			{
				PublicKey:       pk1[:],
				ValidatorIndex:  10,
				AttesterSlot:    5,
				CommitteeIndex:  1,
				ProposerSlots:   []primitives.Slot{3},
				Status:          ethpb.ValidatorStatus_ACTIVE,
				IsSyncCommittee: true,
			},
			{
				PublicKey:      pk2[:],
				ValidatorIndex: 20,
				AttesterSlot:   6,
				Status:         ethpb.ValidatorStatus_EXITING,
			},
		},
	}, nil)
	duties := ds.CurrentEpochDuties()
	require.Equal(t, 2, len(duties))
	assert.Equal(t, primitives.ValidatorIndex(10), duties[0].ValidatorIndex)
	assert.Equal(t, primitives.Slot(5), duties[0].Slot)
	assert.Equal(t, true, duties[0].IsSyncCommittee)
	assert.DeepEqual(t, []primitives.Slot{3}, duties[0].ProposerSlots)
	assert.Equal(t, ethpb.ValidatorStatus_ACTIVE, duties[0].Status)
	assert.Equal(t, primitives.ValidatorIndex(20), duties[1].ValidatorIndex)
	assert.Equal(t, ethpb.ValidatorStatus_EXITING, duties[1].Status)
}

// TestDutyStore_CurrentEpochDuties_Split verifies that the split path merges
// attester, proposer, and sync responses into unified views, resolving pubkeys
// via indexToPubkey and statuses via pubkeyToStatus.
func TestDutyStore_CurrentEpochDuties_Split(t *testing.T) {
	pk1 := testPubkey(1)
	pk2 := testPubkey(2)
	ds := &dutyStore{}
	ds.SetSplit(
		&attesterDutiesCacheEntry{
			current: &ethpb.AttesterDutiesResponse{
				Duties: []*ethpb.AttesterDuty{
					{ValidatorIndex: 10, Slot: 5, CommitteeIndex: 1, CommitteeLength: 128},
					{ValidatorIndex: 20, Slot: 6, CommitteeIndex: 2},
				},
			},
		},
		&proposerDutiesCacheEntry{
			current: &ethpb.ProposerDutiesResponse{
				Duties: []*ethpb.ProposerDutyV2{
					{ValidatorIndex: 10, Slot: 3},
				},
			},
		},
		&syncDutiesCacheEntry{
			current: &ethpb.SyncCommitteeDutiesResponse{
				Duties: []*ethpb.SyncCommitteeDuty{
					{ValidatorIndex: 10},
				},
			},
		},
		map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{
			10: pk1,
			20: pk2,
		},
		map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
			pk1: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_ACTIVE}},
			pk2: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_EXITING}},
		},
	)
	duties := ds.CurrentEpochDuties()
	require.Equal(t, 2, len(duties))
	// Validator 10: attester + proposer + sync.
	assert.Equal(t, pk1, duties[0].Pubkey)
	assert.Equal(t, primitives.ValidatorIndex(10), duties[0].ValidatorIndex)
	assert.Equal(t, primitives.Slot(5), duties[0].Slot)
	assert.DeepEqual(t, []primitives.Slot{3}, duties[0].ProposerSlots)
	assert.Equal(t, true, duties[0].IsSyncCommittee)
	assert.Equal(t, ethpb.ValidatorStatus_ACTIVE, duties[0].Status)
	// Validator 20: attester only.
	assert.Equal(t, pk2, duties[1].Pubkey)
	assert.Equal(t, primitives.ValidatorIndex(20), duties[1].ValidatorIndex)
	assert.Equal(t, false, duties[1].IsSyncCommittee)
	assert.Equal(t, ethpb.ValidatorStatus_EXITING, duties[1].Status)
}
// TestDutyStore_CurrentAttesterDuty_Legacy verifies pubkey-keyed duty lookup
// after legacy population, including the miss case for an unknown key.
func TestDutyStore_CurrentAttesterDuty_Legacy(t *testing.T) {
	pk1 := testPubkey(1)
	pk2 := testPubkey(2)
	ds := &dutyStore{}
	ds.SetLegacy(&ethpb.ValidatorDutiesContainer{
		CurrentEpochDuties: []*ethpb.ValidatorDuty{
			{PublicKey: pk1[:], ValidatorIndex: 10, AttesterSlot: 5},
			{PublicKey: pk2[:], ValidatorIndex: 20, AttesterSlot: 6},
		},
	}, nil)
	duty, ok := ds.CurrentAttesterDuty(pk1)
	require.Equal(t, true, ok)
	assert.Equal(t, primitives.ValidatorIndex(10), duty.ValidatorIndex)
	duty, ok = ds.CurrentAttesterDuty(pk2)
	require.Equal(t, true, ok)
	assert.Equal(t, primitives.ValidatorIndex(20), duty.ValidatorIndex)
	_, ok = ds.CurrentAttesterDuty(testPubkey(99))
	assert.Equal(t, false, ok)
}

// TestDutyStore_CurrentAttesterDuty_Split verifies the same lookup after
// split population, with the pubkey resolved through indexToPubkey.
func TestDutyStore_CurrentAttesterDuty_Split(t *testing.T) {
	pk1 := testPubkey(1)
	ds := &dutyStore{}
	ds.SetSplit(
		&attesterDutiesCacheEntry{
			current: &ethpb.AttesterDutiesResponse{
				Duties: []*ethpb.AttesterDuty{
					{ValidatorIndex: 10, Slot: 5},
				},
			},
		},
		nil, nil,
		map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{10: pk1},
		nil,
	)
	duty, ok := ds.CurrentAttesterDuty(pk1)
	require.Equal(t, true, ok)
	assert.Equal(t, primitives.ValidatorIndex(10), duty.ValidatorIndex)
	_, ok = ds.CurrentAttesterDuty(testPubkey(99))
	assert.Equal(t, false, ok)
}
// TestDutyStore_ProposerSlots_Legacy verifies proposer slot lookup by
// validator index for duties with slots, without slots, and unknown indices.
func TestDutyStore_ProposerSlots_Legacy(t *testing.T) {
	ds := &dutyStore{}
	ds.SetLegacy(&ethpb.ValidatorDutiesContainer{
		CurrentEpochDuties: []*ethpb.ValidatorDuty{
			{ValidatorIndex: 10, ProposerSlots: []primitives.Slot{3, 7}},
			{ValidatorIndex: 20},
		},
	}, nil)
	assert.DeepEqual(t, []primitives.Slot{3, 7}, ds.ProposerSlots(10))
	assert.Equal(t, 0, len(ds.ProposerSlots(20)))
	assert.Equal(t, 0, len(ds.ProposerSlots(99)))
}

// TestDutyStore_ProposerSlots_Split verifies that a proposer-only validator
// (no attester duty; empty attester response) still gets a duty view and its
// proposer slots are retrievable by index.
func TestDutyStore_ProposerSlots_Split(t *testing.T) {
	pk1 := testPubkey(1)
	ds := &dutyStore{}
	ds.SetSplit(
		&attesterDutiesCacheEntry{current: &ethpb.AttesterDutiesResponse{}},
		&proposerDutiesCacheEntry{
			current: &ethpb.ProposerDutiesResponse{
				Duties: []*ethpb.ProposerDutyV2{
					{ValidatorIndex: 10, Slot: 3},
				},
			},
		},
		nil,
		map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{10: pk1},
		nil,
	)
	assert.DeepEqual(t, []primitives.Slot{3}, ds.ProposerSlots(10))
	assert.Equal(t, 0, len(ds.ProposerSlots(99)))
}
// TestDutyStore_IsSyncCommittee_Legacy verifies that current and next sync
// committee membership are tracked independently on the legacy path.
func TestDutyStore_IsSyncCommittee_Legacy(t *testing.T) {
	ds := &dutyStore{}
	ds.SetLegacy(&ethpb.ValidatorDutiesContainer{
		CurrentEpochDuties: []*ethpb.ValidatorDuty{
			{ValidatorIndex: 10, IsSyncCommittee: true},
			{ValidatorIndex: 20, IsSyncCommittee: false},
		},
		NextEpochDuties: []*ethpb.ValidatorDuty{
			{ValidatorIndex: 10, IsSyncCommittee: false},
			{ValidatorIndex: 20, IsSyncCommittee: true},
		},
	}, nil)
	assert.Equal(t, true, ds.IsSyncCommittee(10))
	assert.Equal(t, false, ds.IsSyncCommittee(20))
	assert.Equal(t, false, ds.IsNextSyncCommittee(10))
	assert.Equal(t, true, ds.IsNextSyncCommittee(20))
}

// TestDutyStore_IsSyncCommittee_Split verifies the same membership tracking
// when populated from separate current/next sync committee responses.
func TestDutyStore_IsSyncCommittee_Split(t *testing.T) {
	ds := &dutyStore{}
	ds.SetSplit(
		&attesterDutiesCacheEntry{current: &ethpb.AttesterDutiesResponse{}},
		nil,
		&syncDutiesCacheEntry{
			current: &ethpb.SyncCommitteeDutiesResponse{
				Duties: []*ethpb.SyncCommitteeDuty{{ValidatorIndex: 10}},
			},
			next: &ethpb.SyncCommitteeDutiesResponse{
				Duties: []*ethpb.SyncCommitteeDuty{{ValidatorIndex: 20}},
			},
		},
		nil, nil,
	)
	assert.Equal(t, true, ds.IsSyncCommittee(10))
	assert.Equal(t, false, ds.IsSyncCommittee(20))
	assert.Equal(t, false, ds.IsNextSyncCommittee(10))
	assert.Equal(t, true, ds.IsNextSyncCommittee(20))
}
// TestDutyStore_AllCurrentExitedCount verifies the (exited, total) counts for the current
// epoch under both the legacy container and the split-cache representations.
func TestDutyStore_AllCurrentExitedCount(t *testing.T) {
	t.Run("legacy", func(t *testing.T) {
		pk1 := testPubkey(1)
		pk2 := testPubkey(2)
		pk3 := testPubkey(3)
		ds := &dutyStore{}
		// Legacy duties carry the status inline on each duty entry.
		ds.SetLegacy(&ethpb.ValidatorDutiesContainer{
			CurrentEpochDuties: []*ethpb.ValidatorDuty{
				{PublicKey: pk1[:], Status: ethpb.ValidatorStatus_EXITED},
				{PublicKey: pk2[:], Status: ethpb.ValidatorStatus_EXITED},
				{PublicKey: pk3[:], Status: ethpb.ValidatorStatus_ACTIVE},
			},
		}, nil)
		exited, total := ds.AllCurrentExitedCount()
		// Two of the three validators are exited.
		assert.Equal(t, 2, exited)
		assert.Equal(t, 3, total)
	})
	t.Run("split", func(t *testing.T) {
		pk1 := testPubkey(1)
		pk2 := testPubkey(2)
		ds := &dutyStore{}
		// In split mode the status comes from the separate statuses map, joined to
		// attester duties through the index->pubkey map.
		ds.SetSplit(
			&attesterDutiesCacheEntry{
				current: &ethpb.AttesterDutiesResponse{
					Duties: []*ethpb.AttesterDuty{
						{ValidatorIndex: 10},
						{ValidatorIndex: 20},
					},
				},
			},
			nil, nil,
			map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{10: pk1, 20: pk2},
			map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
				pk1: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_EXITED}},
				pk2: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_ACTIVE}},
			},
		)
		exited, total := ds.AllCurrentExitedCount()
		// One of the two validators is exited.
		assert.Equal(t, 1, exited)
		assert.Equal(t, 2, total)
	})
}
// TestDutyStore_NextEpochDuties_Split verifies the next-epoch duty view assembled from the
// split caches, both with and without a next-epoch proposer-duties entry.
func TestDutyStore_NextEpochDuties_Split(t *testing.T) {
	t.Run("without next proposer", func(t *testing.T) {
		pk1 := testPubkey(1)
		ds := &dutyStore{}
		ds.SetSplit(
			&attesterDutiesCacheEntry{
				current: &ethpb.AttesterDutiesResponse{},
				next: &ethpb.AttesterDutiesResponse{
					Duties: []*ethpb.AttesterDuty{
						{ValidatorIndex: 10, Slot: 40},
					},
				},
			},
			nil,
			&syncDutiesCacheEntry{
				next: &ethpb.SyncCommitteeDutiesResponse{
					Duties: []*ethpb.SyncCommitteeDuty{{ValidatorIndex: 10}},
				},
			},
			map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{10: pk1},
			nil,
		)
		duties := ds.NextEpochDuties()
		require.Equal(t, 1, len(duties))
		// Attester slot and sync-committee membership come through; no proposer cache
		// was set, so proposer slots stay empty.
		assert.Equal(t, primitives.Slot(40), duties[0].Slot)
		assert.Equal(t, true, duties[0].IsSyncCommittee)
		assert.Equal(t, 0, len(duties[0].ProposerSlots))
	})
	t.Run("with next proposer (post-Fulu)", func(t *testing.T) {
		pk1 := testPubkey(1)
		ds := &dutyStore{}
		ds.SetSplit(
			&attesterDutiesCacheEntry{
				current: &ethpb.AttesterDutiesResponse{},
				next: &ethpb.AttesterDutiesResponse{
					Duties: []*ethpb.AttesterDuty{
						{ValidatorIndex: 10, Slot: 40},
					},
				},
			},
			&proposerDutiesCacheEntry{
				current: &ethpb.ProposerDutiesResponse{},
				next: &ethpb.ProposerDutiesResponse{
					Duties: []*ethpb.ProposerDutyV2{
						{ValidatorIndex: 10, Slot: 42},
					},
				},
			},
			nil,
			map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{10: pk1},
			nil,
		)
		duties := ds.NextEpochDuties()
		require.Equal(t, 1, len(duties))
		// The next-epoch proposer entry contributes slot 42 to the same validator's view.
		assert.Equal(t, primitives.Slot(40), duties[0].Slot)
		assert.DeepEqual(t, []primitives.Slot{42}, duties[0].ProposerSlots)
	})
}
// TestDutyStore_SetLegacy_ClearsSplit ensures that installing legacy duties drops every
// previously cached split duty entry while keeping the store initialized.
func TestDutyStore_SetLegacy_ClearsSplit(t *testing.T) {
	store := &dutyStore{}
	store.SetSplit(
		&attesterDutiesCacheEntry{current: &ethpb.AttesterDutiesResponse{}},
		&proposerDutiesCacheEntry{},
		&syncDutiesCacheEntry{},
		nil, nil,
	)
	assert.Equal(t, true, store.IsInitialized())
	assert.NotNil(t, store.AttesterDutiesCache())

	// Switching back to legacy mode must nil out all split caches.
	store.SetLegacy(&ethpb.ValidatorDutiesContainer{}, nil)

	assert.Equal(t, true, store.IsInitialized())
	assert.Equal(t, (*attesterDutiesCacheEntry)(nil), store.AttesterDutiesCache())
	assert.Equal(t, (*proposerDutiesCacheEntry)(nil), store.ProposerDutiesCache())
	assert.Equal(t, (*syncDutiesCacheEntry)(nil), store.SyncDutiesCache())
}
func TestDutyStore_SetSplit_ClearsLegacy(t *testing.T) {
ds := &dutyStore{}
pk := testPubkey(1)
ds.SetLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{{PublicKey: pk[:]}},
}, nil)
assert.Equal(t, true, ds.IsInitialized())
assert.Equal(t, 1, len(ds.CurrentEpochDuties()))
ds.SetSplit(
&attesterDutiesCacheEntry{current: &ethpb.AttesterDutiesResponse{}},
nil, nil, nil, nil,
)
assert.Equal(t, true, ds.IsInitialized())
assert.Equal(t, 0, len(ds.CurrentEpochDuties()))
assert.NotNil(t, ds.AttesterDutiesCache())
}
// TestDutyStore_ProposerOnlyInSplit verifies that a validator appearing only in the
// proposer-duties cache (with no attester duty) still shows up in the current-epoch view
// with its proposer slot populated.
func TestDutyStore_ProposerOnlyInSplit(t *testing.T) {
	attesterKey := testPubkey(1)
	proposerKey := testPubkey(2)
	store := &dutyStore{}
	store.SetSplit(
		&attesterDutiesCacheEntry{
			current: &ethpb.AttesterDutiesResponse{
				Duties: []*ethpb.AttesterDuty{{ValidatorIndex: 10, Slot: 5}},
			},
		},
		&proposerDutiesCacheEntry{
			current: &ethpb.ProposerDutiesResponse{
				Duties: []*ethpb.ProposerDutyV2{{ValidatorIndex: 20, Slot: 3}}, // proposer-only validator
			},
		},
		nil,
		map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{10: attesterKey, 20: proposerKey},
		nil,
	)

	duties := store.CurrentEpochDuties()
	require.Equal(t, 2, len(duties))

	// The proposer-only validator must be present with its proposer slot set.
	found := false
	for _, duty := range duties {
		if duty.ValidatorIndex != 20 {
			continue
		}
		found = true
		assert.DeepEqual(t, []primitives.Slot{3}, duty.ProposerSlots)
	}
	assert.Equal(t, true, found)
}

View File

@@ -148,6 +148,38 @@ func toValidatorDutyV2(duty *ethpb.DutiesV2Response_Duty) (*ethpb.ValidatorDuty,
}, nil
}
// AttesterDuties fetches attester duties for the given epoch and validator indices via
// the GetAttesterDuties gRPC endpoint.
func (c *grpcValidatorClient) AttesterDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*ethpb.AttesterDutiesResponse, error) {
	req := &ethpb.AttesterDutiesRequest{
		Epoch:            epoch,
		ValidatorIndices: validatorIndices,
	}
	resp, err := c.getClient().GetAttesterDuties(ctx, req)
	if err != nil {
		return nil, errors.Wrap(err, "GetAttesterDuties")
	}
	return resp, nil
}
// ProposerDuties fetches proposer duties for the given epoch via the
// GetProposerDutiesV2 gRPC endpoint.
func (c *grpcValidatorClient) ProposerDuties(ctx context.Context, epoch primitives.Epoch) (*ethpb.ProposerDutiesResponse, error) {
	req := &ethpb.ProposerDutiesRequest{Epoch: epoch}
	resp, err := c.getClient().GetProposerDutiesV2(ctx, req)
	if err != nil {
		return nil, errors.Wrap(err, "GetProposerDutiesV2")
	}
	return resp, nil
}
// SyncCommitteeDuties fetches sync committee duties for the given epoch and validator
// indices via the GetSyncCommitteeDuties gRPC endpoint.
func (c *grpcValidatorClient) SyncCommitteeDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*ethpb.SyncCommitteeDutiesResponse, error) {
	req := &ethpb.SyncCommitteeDutiesRequest{
		Epoch:            epoch,
		ValidatorIndices: validatorIndices,
	}
	resp, err := c.getClient().GetSyncCommitteeDuties(ctx, req)
	if err != nil {
		return nil, errors.Wrap(err, "GetSyncCommitteeDuties")
	}
	return resp, nil
}
// CheckDoppelGanger forwards the doppelganger-detection request to the beacon node.
func (c *grpcValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
	return c.getClient().CheckDoppelGanger(ctx, in)
}
@@ -236,7 +268,8 @@ func (c *grpcValidatorClient) SubmitValidatorRegistrations(ctx context.Context,
return c.getClient().SubmitValidatorRegistrations(ctx, in)
}
func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*empty.Empty, error) {
func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex, _ []uint64) (*empty.Empty, error) {
// TODO: change gRPC endpoint to accept validatorIndices, committeesAtSlot
return c.getClient().SubscribeCommitteeSubnets(ctx, in)
}

View File

@@ -121,7 +121,13 @@ func (s *SyncCommitteeSelection) UnmarshalJSON(input []byte) error {
}
type ValidatorClient interface {
// Duties is the pre-GLOAS combined endpoint (GetDuties/GetDutiesV2).
// Post-GLOAS, use the split endpoints below instead.
Duties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.ValidatorDutiesContainer, error)
// Split duty endpoints used post-GLOAS.
AttesterDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*ethpb.AttesterDutiesResponse, error)
ProposerDuties(ctx context.Context, epoch primitives.Epoch) (*ethpb.ProposerDutiesResponse, error)
SyncCommitteeDuties(ctx context.Context, epoch primitives.Epoch, validatorIndices []primitives.ValidatorIndex) (*ethpb.SyncCommitteeDutiesResponse, error)
DomainData(ctx context.Context, in *ethpb.DomainRequest) (*ethpb.DomainResponse, error)
WaitForChainStart(ctx context.Context, in *empty.Empty) (*ethpb.ChainStartResponse, error)
ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error)
@@ -139,7 +145,7 @@ type ValidatorClient interface {
SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error)
SubmitSignedAggregateSelectionProofElectra(ctx context.Context, in *ethpb.SignedAggregateSubmitElectraRequest) (*ethpb.SignedAggregateSubmitResponse, error)
ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error)
SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.ValidatorDuty) (*empty.Empty, error)
SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, validatorIndices []primitives.ValidatorIndex, committeesAtSlot []uint64) (*empty.Empty, error)
CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error)
SyncMessageBlockRoot(ctx context.Context, in *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error)
SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error)

View File

@@ -448,7 +448,7 @@ func TestRunnerPushesProposerSettings_ValidContext(t *testing.T) {
}).MinTimes(1)
// DomainData calls are really fast, no delay needed.
vcm.EXPECT().DomainData(liveCtx, gomock.Any()).Return(&ethpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil).AnyTimes()
vcm.EXPECT().SubscribeCommitteeSubnets(liveCtx, gomock.Any(), gomock.Any()).AnyTimes().Do(func(_, _, _ any) { delay(t) })
vcm.EXPECT().SubscribeCommitteeSubnets(liveCtx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Do(func(_, _, _, _ any) { delay(t) })
vcm.EXPECT().AttestationData(liveCtx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
defer assertValidContext(t, timedCtx, ctx)
delay(t)

117
validator/client/subnets.go Normal file
View File

@@ -0,0 +1,117 @@
package client
import (
"context"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
"github.com/pkg/errors"
)
// subscribeToSubnets iterates through each validator duty, signs each slot, and asks the
// beacon node to eagerly subscribe to subnets so that the aggregator has attestations to
// aggregate. Only ACTIVE and EXITING validators are considered, and each (slot, committee)
// pair is subscribed for at most one aggregator.
//
// Returns an error if selection-proof aggregation, aggregator checks, or the subscription
// RPC fails.
func (v *validator) subscribeToSubnets(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "validator.subscribeToSubnets")
	defer span.End()

	// Snapshot the duties under the read lock so it is not held across signing/RPC calls.
	v.dutiesLock.RLock()
	currentDuties := v.duties.CurrentEpochDuties()
	nextDuties := v.duties.NextEpochDuties()
	v.dutiesLock.RUnlock()

	total := len(currentDuties) + len(nextDuties)
	subscribeSlots := make([]primitives.Slot, 0, total)
	subscribeCommitteeIndices := make([]primitives.CommitteeIndex, 0, total)
	subscribeIsAggregator := make([]bool, 0, total)
	activeDuties := make([]*ethpb.ValidatorDuty, 0, total)
	alreadySubscribed := make(map[[64]byte]bool)

	if v.distributed {
		// Pre-fetch aggregated selection proofs so isAggregator can be answered locally.
		if err := v.aggregatedSelectionProofs(ctx, currentDuties); err != nil {
			return errors.Wrap(err, "could not get aggregated selection proofs")
		}
	}

	// Combine into a fresh slice rather than append(currentDuties, nextDuties...):
	// if the duty store ever returns a slice with spare capacity, appending to it
	// directly would mutate the store's backing array.
	allDuties := make([]*attesterDutyView, 0, total)
	allDuties = append(allDuties, currentDuties...)
	allDuties = append(allDuties, nextDuties...)

	for _, duty := range allDuties {
		if duty.Status != ethpb.ValidatorStatus_ACTIVE && duty.Status != ethpb.ValidatorStatus_EXITING {
			continue
		}
		alreadySubscribedKey := validatorSubnetSubscriptionKey(duty.Slot, duty.CommitteeIndex)
		if _, ok := alreadySubscribed[alreadySubscribedKey]; ok {
			// An aggregator already covers this (slot, committee) pair.
			continue
		}
		aggregator, err := v.isAggregator(ctx, duty.CommitteeLength, duty.Slot, duty.Pubkey, duty.ValidatorIndex)
		if err != nil {
			return errors.Wrap(err, "could not check if a validator is an aggregator")
		}
		if aggregator {
			alreadySubscribed[alreadySubscribedKey] = true
		}
		subscribeSlots = append(subscribeSlots, duty.Slot)
		subscribeCommitteeIndices = append(subscribeCommitteeIndices, duty.CommitteeIndex)
		subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
		activeDuties = append(activeDuties, dutyViewToProto(duty))
	}

	_, err := v.validatorClient.SubscribeCommitteeSubnets(ctx,
		&ethpb.CommitteeSubnetsSubscribeRequest{
			Slots:        subscribeSlots,
			CommitteeIds: subscribeCommitteeIndices,
			IsAggregator: subscribeIsAggregator,
		},
		activeDuties,
	)
	// errors.Wrap returns nil when err is nil, so the success path is unchanged.
	return errors.Wrap(err, "SubscribeCommitteeSubnets")
}
// aggregatedSelectionProofs refreshes the cached beacon committee selections used by
// distributed validators: it signs a selection proof for every ACTIVE/EXITING
// current-epoch duty, submits the batch, and stores the aggregated responses keyed by
// (slot, validator index).
func (v *validator) aggregatedSelectionProofs(ctx context.Context, currentDuties []*attesterDutyView) error {
	ctx, span := trace.StartSpan(ctx, "validator.aggregatedSelectionProofs")
	defer span.End()

	// Hold the lock for the whole refresh so readers never observe a half-built map.
	v.attSelectionLock.Lock()
	defer v.attSelectionLock.Unlock()

	v.attSelections = make(map[attSelectionKey]iface.BeaconCommitteeSelection)

	var selections []iface.BeaconCommitteeSelection
	for _, d := range currentDuties {
		if d.Status != ethpb.ValidatorStatus_ACTIVE && d.Status != ethpb.ValidatorStatus_EXITING {
			continue
		}
		proof, err := v.signSlotWithSelectionProof(ctx, d.Pubkey, d.Slot)
		if err != nil {
			return err
		}
		selections = append(selections, iface.BeaconCommitteeSelection{
			SelectionProof: proof,
			Slot:           d.Slot,
			ValidatorIndex: d.ValidatorIndex,
		})
	}

	aggregated, err := v.validatorClient.AggregatedSelections(ctx, selections)
	if err != nil {
		return err
	}
	for _, sel := range aggregated {
		key := attSelectionKey{slot: sel.Slot, index: sel.ValidatorIndex}
		v.attSelections[key] = sel
	}
	return nil
}
// validatorSubnetSubscriptionKey builds a 64-byte deduplication key for a
// (slot, committee index) pair by concatenating the two fixed-width encodings
// produced by bytesutil.Bytes32.
func validatorSubnetSubscriptionKey(slot primitives.Slot, committeeIndex primitives.CommitteeIndex) [64]byte {
	key := make([]byte, 0, 64)
	key = append(key, bytesutil.Bytes32(uint64(slot))...)
	key = append(key, bytesutil.Bytes32(uint64(committeeIndex))...)
	return bytesutil.ToBytes64(key)
}

View File

@@ -38,7 +38,7 @@ func (v *validator) SubmitSyncCommitteeMessage(ctx context.Context, slot primiti
return
}
duty, err := v.duty(pubKey)
duty, err := v.attesterDuty(pubKey)
if err != nil {
log.WithError(err).Error("Could not fetch validator assignment")
return
@@ -102,7 +102,7 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot p
defer span.End()
span.SetAttributes(trace.StringAttribute("validator", fmt.Sprintf("%#x", pubKey)))
duty, err := v.duty(pubKey)
duty, err := v.attesterDuty(pubKey)
if err != nil {
log.WithError(err).Error("Could not fetch validator assignment")
return

View File

@@ -25,7 +25,7 @@ func TestSubmitSyncCommitteeMessage_ValidatorDutiesRequestFailure(t *testing.T)
t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) {
hook := logTest.NewGlobal()
validator, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}}
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}})
defer finish()
m.validatorClient.EXPECT().SyncMessageBlockRoot(
@@ -51,13 +51,13 @@ func TestSubmitSyncCommitteeMessage_BadDomainData(t *testing.T) {
hook := logTest.NewGlobal()
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
r := []byte{'a'}
m.validatorClient.EXPECT().SyncMessageBlockRoot(
@@ -87,13 +87,13 @@ func TestSubmitSyncCommitteeMessage_CouldNotSubmit(t *testing.T) {
hook := logTest.NewGlobal()
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
r := []byte{'a'}
m.validatorClient.EXPECT().SyncMessageBlockRoot(
@@ -132,13 +132,13 @@ func TestSubmitSyncCommitteeMessage_OK(t *testing.T) {
hook := logTest.NewGlobal()
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
r := []byte{'a'}
m.validatorClient.EXPECT().SyncMessageBlockRoot(
@@ -180,7 +180,7 @@ func TestSubmitSignedContributionAndProof_ValidatorDutiesRequestFailure(t *testi
t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) {
hook := logTest.NewGlobal()
validator, _, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}}
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -198,13 +198,13 @@ func TestSubmitSignedContributionAndProof_SyncSubcommitteeIndexFailure(t *testin
validator, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -230,13 +230,13 @@ func TestSubmitSignedContributionAndProof_NothingToDo(t *testing.T) {
validator, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -262,13 +262,13 @@ func TestSubmitSignedContributionAndProof_BadDomain(t *testing.T) {
validator, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -308,13 +308,13 @@ func TestSubmitSignedContributionAndProof_CouldNotGetContribution(t *testing.T)
validator, m, validatorKey, finish := setupWithKey(t, validatorKey, isSlashingProtectionMinimal)
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -362,13 +362,13 @@ func TestSubmitSignedContributionAndProof_CouldNotSubmitContribution(t *testing.
validator, m, validatorKey, finish := setupWithKey(t, validatorKey, isSlashingProtectionMinimal)
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -444,13 +444,13 @@ func TestSubmitSignedContributionAndProof_Ok(t *testing.T) {
validator, m, validatorKey, finish := setupWithKey(t, validatorKey, isSlashingProtectionMinimal)
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
defer finish()
var pubKey [fieldparams.BLSPubkeyLength]byte
@@ -525,13 +525,13 @@ func TestSubmitSignedContributionAndProof_OncePerPubkeyAndSubcommittee(t *testin
validator, m, validatorKey, finish := setupWithKey(t, validatorKey, isSlashingProtectionMinimal)
validatorIndex := primitives.ValidatorIndex(7)
committee := []primitives.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
validator.duties = &ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
validator.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
PublicKey: validatorKey.PublicKey().Marshal(),
CommitteeLength: uint64(len(committee)),
ValidatorIndex: validatorIndex,
},
}}
}})
defer finish()
// Sync committee aggregator is selected twice in the sync committee

View File

@@ -48,7 +48,6 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/metadata"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/emptypb"
)
@@ -92,7 +91,7 @@ type validator struct {
aggregatedSlotCommitteeIDCache *lru.Cache
attSelections map[attSelectionKey]iface.BeaconCommitteeSelection
interopKeysConfig *local.InteropKeymanagerConfig
duties *ethpb.ValidatorDutiesContainer
duties *dutyStore
signedValidatorRegistrations map[[fieldparams.BLSPubkeyLength]byte]*ethpb.SignedValidatorRegistrationV1
proposerSettings *proposer.Settings
web3SignerConfig *remoteweb3signer.SetupConfig
@@ -126,11 +125,30 @@ type validatorStatus struct {
index primitives.ValidatorIndex
}
<<<<<<< Updated upstream
type attesterDutiesCacheEntry struct {
current, next *ethpb.AttesterDutiesResponse
epoch primitives.Epoch
}
type proposerDutiesCacheEntry struct {
current, next *ethpb.ProposerDutiesResponse
epoch primitives.Epoch
}
type syncDutiesCacheEntry struct {
current, next *ethpb.SyncCommitteeDutiesResponse
epoch primitives.Epoch
period uint64
}
type attSelectionKey struct {
slot primitives.Slot
index primitives.ValidatorIndex
}
=======
>>>>>>> Stashed changes
// Done cleans up the validator.
func (v *validator) Done() {
if v.accountChangedSub != nil {
@@ -535,167 +553,6 @@ func retrieveLatestRecord(recs []*dbCommon.AttestationRecord) *dbCommon.Attestat
return chosenRec
}
// UpdateDuties checks the slot number to determine if the validator's
// list of upcoming assignments needs to be updated. For example, at the
// beginning of a new epoch.
func (v *validator) UpdateDuties(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.UpdateDuties")
defer span.End()
validatingKeys, err := v.km.FetchValidatingPublicKeys(ctx)
if err != nil {
return err
}
// Filter out the slashable public keys from the duties request.
filteredKeys := make([][fieldparams.BLSPubkeyLength]byte, 0, len(validatingKeys))
v.blacklistedPubkeysLock.RLock()
for _, pubKey := range validatingKeys {
if ok := v.blacklistedPubkeys[pubKey]; !ok {
filteredKeys = append(filteredKeys, pubKey)
} else {
log.WithField(
"pubkey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:])),
).Warn("Not including slashable public key from slashing protection import " +
"in request to update validator duties")
}
}
v.blacklistedPubkeysLock.RUnlock()
epoch := slots.ToEpoch(slots.CurrentSlot(v.genesisTime) + 1)
req := &ethpb.DutiesRequest{
Epoch: epoch,
PublicKeys: bytesutil.FromBytes48Array(filteredKeys),
}
// If duties is nil it means we have had no prior duties and just started up.
resp, err := v.validatorClient.Duties(ctx, req)
if err != nil || resp == nil {
v.dutiesLock.Lock()
v.duties = nil // Clear assignments so we know to retry the request.
v.dutiesLock.Unlock()
log.WithError(err).Error("Error getting validator duties")
return err
}
ss, err := slots.EpochStart(epoch)
if err != nil {
return err
}
v.dutiesLock.Lock()
v.duties = resp
v.logDuties(ss, v.duties.CurrentEpochDuties, v.duties.NextEpochDuties)
v.dutiesLock.Unlock()
allExitedCounter := 0
for i := range resp.CurrentEpochDuties {
if resp.CurrentEpochDuties[i].Status == ethpb.ValidatorStatus_EXITED {
allExitedCounter++
}
}
if allExitedCounter != 0 && allExitedCounter == len(resp.CurrentEpochDuties) {
return ErrValidatorsAllExited
}
// Non-blocking call for beacon node to start subscriptions for aggregators.
// Make sure to copy metadata into a new context
md, exists := metadata.FromOutgoingContext(ctx)
ctx = context.Background()
if exists {
ctx = metadata.NewOutgoingContext(ctx, md)
}
go func() {
if err := v.subscribeToSubnets(ctx, resp); err != nil {
log.WithError(err).Error("Failed to subscribe to subnets")
}
}()
return nil
}
// subscribeToSubnets iterates through each validator duty, signs each slot, and asks beacon node
// to eagerly subscribe to subnets so that the aggregator has attestations to aggregate.
func (v *validator) subscribeToSubnets(ctx context.Context, duties *ethpb.ValidatorDutiesContainer) error {
ctx, span := trace.StartSpan(ctx, "validator.subscribeToSubnets")
defer span.End()
subscribeSlots := make([]primitives.Slot, 0, len(duties.CurrentEpochDuties)+len(duties.NextEpochDuties))
subscribeCommitteeIndices := make([]primitives.CommitteeIndex, 0, len(duties.CurrentEpochDuties)+len(duties.NextEpochDuties))
subscribeIsAggregator := make([]bool, 0, len(duties.CurrentEpochDuties)+len(duties.NextEpochDuties))
activeDuties := make([]*ethpb.ValidatorDuty, 0, len(duties.CurrentEpochDuties)+len(duties.NextEpochDuties))
alreadySubscribed := make(map[[64]byte]bool)
if v.distributed {
// Get aggregated selection proofs to calculate isAggregator.
if err := v.aggregatedSelectionProofs(ctx, duties); err != nil {
return errors.Wrap(err, "could not get aggregated selection proofs")
}
}
for _, duty := range duties.CurrentEpochDuties {
pk := bytesutil.ToBytes48(duty.PublicKey)
if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING {
attesterSlot := duty.AttesterSlot
committeeIndex := duty.CommitteeIndex
validatorIndex := duty.ValidatorIndex
alreadySubscribedKey := validatorSubnetSubscriptionKey(attesterSlot, committeeIndex)
if _, ok := alreadySubscribed[alreadySubscribedKey]; ok {
continue
}
aggregator, err := v.isAggregator(ctx, duty.CommitteeLength, attesterSlot, pk, validatorIndex)
if err != nil {
return errors.Wrap(err, "could not check if a validator is an aggregator")
}
if aggregator {
alreadySubscribed[alreadySubscribedKey] = true
}
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
activeDuties = append(activeDuties, duty)
}
}
for _, duty := range duties.NextEpochDuties {
if duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING {
attesterSlot := duty.AttesterSlot
committeeIndex := duty.CommitteeIndex
validatorIndex := duty.ValidatorIndex
alreadySubscribedKey := validatorSubnetSubscriptionKey(attesterSlot, committeeIndex)
if _, ok := alreadySubscribed[alreadySubscribedKey]; ok {
continue
}
aggregator, err := v.isAggregator(ctx, duty.CommitteeLength, attesterSlot, bytesutil.ToBytes48(duty.PublicKey), validatorIndex)
if err != nil {
return errors.Wrap(err, "could not check if a validator is an aggregator")
}
if aggregator {
alreadySubscribed[alreadySubscribedKey] = true
}
subscribeSlots = append(subscribeSlots, attesterSlot)
subscribeCommitteeIndices = append(subscribeCommitteeIndices, committeeIndex)
subscribeIsAggregator = append(subscribeIsAggregator, aggregator)
activeDuties = append(activeDuties, duty)
}
}
_, err := v.validatorClient.SubscribeCommitteeSubnets(ctx,
&ethpb.CommitteeSubnetsSubscribeRequest{
Slots: subscribeSlots,
CommitteeIds: subscribeCommitteeIndices,
IsAggregator: subscribeIsAggregator,
},
activeDuties,
)
return err
}
// RolesAt slot returns the validator roles at the given slot. Returns nil if the
// validator is known to not have a roles at the slot. Returns UNKNOWN if the
// validator assignments are unknown. Otherwise, returns a valid ValidatorRole map.
@@ -706,7 +563,7 @@ func (v *validator) RolesAt(ctx context.Context, slot primitives.Slot) (map[[fie
v.dutiesLock.RLock()
defer v.dutiesLock.RUnlock()
if v.duties == nil {
if v.duties == nil || !v.duties.IsInitialized() {
return nil, errors.New("validator duties are not initialized")
}
@@ -718,14 +575,17 @@ func (v *validator) RolesAt(ctx context.Context, slot primitives.Slot) (map[[fie
syncCommitteeValidators = make(map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte)
)
for validator, duty := range v.duties.CurrentEpochDuties {
for _, duty := range v.duties.CurrentEpochDuties() {
var roles []iface.ValidatorRole
if duty == nil {
continue
}
if len(duty.ProposerSlots) > 0 {
for _, proposerSlot := range duty.ProposerSlots {
pk := bytesutil.ToBytes48(duty.Pubkey)
proposerSlots := v.duties.ProposerSlots(duty.ValidatorIndex)
if len(proposerSlots) > 0 {
for _, proposerSlot := range proposerSlots {
if proposerSlot != 0 && proposerSlot == slot {
roles = append(roles, iface.RoleProposer)
break
@@ -733,13 +593,20 @@ func (v *validator) RolesAt(ctx context.Context, slot primitives.Slot) (map[[fie
}
}
if duty.AttesterSlot == slot {
if duty.Slot == slot {
roles = append(roles, iface.RoleAttester)
aggregator, err := v.isAggregator(ctx, duty.CommitteeLength, slot, bytesutil.ToBytes48(duty.PublicKey), duty.ValidatorIndex)
<<<<<<< Updated upstream
aggregator, err := v.isAggregator(ctx, duty.CommitteeLength, slot, duty.Pubkey, duty.ValidatorIndex)
if err != nil {
aggregator = false
log.WithError(err).Errorf("Could not check if validator %#x is an aggregator", bytesutil.Trunc(duty.PublicKey))
log.WithError(err).Errorf("Could not check if validator %#x is an aggregator", bytesutil.Trunc(duty.Pubkey[:]))
=======
aggregator, err := v.isAggregator(ctx, duty.CommitteeLength, slot, pk, duty.ValidatorIndex)
if err != nil {
aggregator = false
log.WithError(err).Errorf("Could not check if validator %#x is an aggregator", bytesutil.Trunc(pk[:]))
>>>>>>> Stashed changes
}
if aggregator {
roles = append(roles, iface.RoleAggregator)
@@ -751,28 +618,34 @@ func (v *validator) RolesAt(ctx context.Context, slot primitives.Slot) (map[[fie
// the validator checks whether it's in the sync committee of following epoch.
inSyncCommittee := false
if slots.IsEpochEnd(slot) {
if v.duties.NextEpochDuties[validator].IsSyncCommittee {
if v.duties.IsNextSyncCommittee(duty.ValidatorIndex) {
roles = append(roles, iface.RoleSyncCommittee)
inSyncCommittee = true
}
} else {
if duty.IsSyncCommittee {
if v.duties.IsSyncCommittee(duty.ValidatorIndex) {
roles = append(roles, iface.RoleSyncCommittee)
inSyncCommittee = true
}
}
if inSyncCommittee {
syncCommitteeValidators[duty.ValidatorIndex] = bytesutil.ToBytes48(duty.PublicKey)
<<<<<<< Updated upstream
syncCommitteeValidators[duty.ValidatorIndex] = duty.Pubkey
=======
syncCommitteeValidators[duty.ValidatorIndex] = pk
>>>>>>> Stashed changes
}
if len(roles) == 0 {
roles = append(roles, iface.RoleUnknown)
}
var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], duty.PublicKey)
rolesAt[pubKey] = roles
<<<<<<< Updated upstream
rolesAt[duty.Pubkey] = roles
=======
rolesAt[pk] = roles
>>>>>>> Stashed changes
}
aggregator, err := v.isSyncCommitteeAggregator(
@@ -1027,114 +900,6 @@ func (v *validator) getAttestationData(ctx context.Context, slot primitives.Slot
return data, nil
}
// logDuties logs the attester/proposer schedule for the epoch containing slot
// and, when emitAccountMetrics is enabled, updates the per-validator gauges.
// currentEpochDuties drives the schedule and the current sync-committee gauge;
// nextEpochDuties is used only to set/clear the next-epoch sync-committee gauge.
func (v *validator) logDuties(slot primitives.Slot, currentEpochDuties []*ethpb.ValidatorDuty, nextEpochDuties []*ethpb.ValidatorDuty) {
	// One bucket of truncated attester pubkeys per slot of the epoch.
	attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
	for i := range attesterKeys {
		attesterKeys[i] = make([]string, 0)
	}
	// At most one proposer pubkey is recorded per slot.
	proposerKeys := make([]string, params.BeaconConfig().SlotsPerEpoch)
	epochStartSlot, err := slots.EpochStart(slots.ToEpoch(slot))
	if err != nil {
		log.WithError(err).Error("Could not calculate epoch start. Ignoring logging duties.")
		return
	}
	var totalProposingKeys, totalAttestingKeys uint64
	for _, duty := range currentEpochDuties {
		pubkey := fmt.Sprintf("%#x", duty.PublicKey)
		// Status gauge is emitted for every duty, even ones skipped below.
		if v.emitAccountMetrics {
			ValidatorStatusesGaugeVec.WithLabelValues(pubkey, fmt.Sprintf("%#x", duty.ValidatorIndex)).Set(float64(duty.Status))
		}
		// Only interested in validators who are attesting/proposing.
		// Note that SLASHING validators will have duties but their results are ignored by the network so we don't bother with them.
		if duty.Status != ethpb.ValidatorStatus_ACTIVE && duty.Status != ethpb.ValidatorStatus_EXITING {
			continue
		}
		truncatedPubkey := fmt.Sprintf("%#x", bytesutil.Trunc(duty.PublicKey))
		// Unsigned slot arithmetic: an AttesterSlot earlier than epochStartSlot
		// wraps around to a huge value and is caught by the bounds check below.
		attesterSlotInEpoch := duty.AttesterSlot - epochStartSlot
		if attesterSlotInEpoch >= params.BeaconConfig().SlotsPerEpoch {
			log.WithField("duty", duty).Warn("Invalid attester slot")
		} else {
			attesterKeys[attesterSlotInEpoch] = append(attesterKeys[attesterSlotInEpoch], truncatedPubkey)
			totalAttestingKeys++
			if v.emitAccountMetrics {
				ValidatorNextAttestationSlotGaugeVec.WithLabelValues(pubkey).Set(float64(duty.AttesterSlot))
			}
		}
		if v.emitAccountMetrics && duty.IsSyncCommittee {
			ValidatorInSyncCommitteeGaugeVec.WithLabelValues(pubkey).Set(float64(1))
		} else if v.emitAccountMetrics && !duty.IsSyncCommittee {
			// clear the metric out if the validator is not in the current sync committee anymore otherwise it will be left at 1
			ValidatorInSyncCommitteeGaugeVec.WithLabelValues(pubkey).Set(float64(0))
		}
		for _, proposerSlot := range duty.ProposerSlots {
			// Same unsigned-wrap guard as for the attester slot above.
			proposerSlotInEpoch := proposerSlot - epochStartSlot
			if proposerSlotInEpoch >= params.BeaconConfig().SlotsPerEpoch {
				log.WithField("duty", duty).Warn("Invalid proposer slot")
			} else {
				proposerKeys[proposerSlotInEpoch] = truncatedPubkey
				totalProposingKeys++
			}
			// NOTE(review): the gauge is set even when the proposer slot failed
			// the bounds check above — confirm this is intentional.
			if v.emitAccountMetrics {
				ValidatorNextProposalSlotGaugeVec.WithLabelValues(pubkey).Set(float64(proposerSlot))
			}
		}
	}
	for _, duty := range nextEpochDuties {
		// for the next epoch, currently we are only interested in whether the validator is in the next sync committee or not
		pubkey := fmt.Sprintf("%#x", duty.PublicKey)
		// Only interested in validators who are attesting/proposing.
		// Note that slashed validators will have duties but their results are ignored by the network so we don't bother with them.
		if duty.Status != ethpb.ValidatorStatus_ACTIVE && duty.Status != ethpb.ValidatorStatus_EXITING {
			continue
		}
		if v.emitAccountMetrics && duty.IsSyncCommittee {
			ValidatorInNextSyncCommitteeGaugeVec.WithLabelValues(pubkey).Set(float64(1))
		} else if v.emitAccountMetrics && !duty.IsSyncCommittee {
			// clear the metric out if the validator is now not in the next sync committee otherwise it will be left at 1
			ValidatorInNextSyncCommitteeGaugeVec.WithLabelValues(pubkey).Set(float64(0))
		}
	}
	log.WithFields(logrus.Fields{
		"proposerCount": totalProposingKeys,
		"attesterCount": totalAttestingKeys,
	}).Infof("Schedule for epoch %d", slots.ToEpoch(slot))
	// Emit one line per slot that has at least one proposer or attester.
	for i := primitives.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
		startTime, err := slots.StartTime(v.genesisTime, epochStartSlot+i)
		if err != nil {
			log.WithError(err).WithField("slot", slot).Error("Slot overflows, unable to log duties!")
			return
		}
		durationTillDuty := (time.Until(startTime) + time.Second).Truncate(time.Second) // Round up to next second.
		slotLog := log.WithFields(logrus.Fields{})
		isProposer := proposerKeys[i] != ""
		if isProposer {
			slotLog = slotLog.WithField("proposerPubkey", proposerKeys[i])
		}
		isAttester := len(attesterKeys[i]) > 0
		if isAttester {
			slotLog = slotLog.WithFields(logrus.Fields{
				"slot":            epochStartSlot + i,
				"slotInEpoch":     (epochStartSlot + i) % params.BeaconConfig().SlotsPerEpoch,
				"attesterCount":   len(attesterKeys[i]),
				"attesterPubkeys": attesterKeys[i],
			})
		}
		if durationTillDuty > 0 {
			slotLog = slotLog.WithField("timeUntilDuty", durationTillDuty)
		}
		if isProposer || isAttester {
			slotLog.Infof("Duties schedule")
		}
	}
}
// ProposerSettings gets the current proposer settings saved in memory validator
func (v *validator) ProposerSettings() *proposer.Settings {
return v.proposerSettings
@@ -1219,57 +984,6 @@ func (v *validator) StartEventStream(ctx context.Context, topics []string) {
v.validatorClient.StartEventStream(ctx, topics, v.eventsChannel)
}
// checkDependentRoots compares the dependent roots carried by a head event
// against the roots of the currently cached duties and triggers a duties
// refresh when either the previous- or current-epoch root has changed.
// A zero previous/current root short-circuits the corresponding check.
func (v *validator) checkDependentRoots(ctx context.Context, head *structs.HeadEvent) error {
	if head == nil {
		return errors.New("received empty head event")
	}
	prevRoot, err := bytesutil.DecodeHexWithLength(head.PreviousDutyDependentRoot, fieldparams.RootLength)
	if err != nil {
		return errors.Wrap(err, "failed to decode previous duty dependent root")
	}
	if bytes.Equal(prevRoot, params.BeaconConfig().ZeroHash[:]) {
		return nil
	}
	// Bound any duties refresh by one slot before the start of the epoch
	// after next, so a stale update cannot bleed into later epochs.
	epoch := slots.ToEpoch(slots.CurrentSlot(v.genesisTime) + 1)
	ss, err := slots.EpochStart(epoch + 1)
	if err != nil {
		return errors.Wrap(err, "failed to get epoch start")
	}
	dutiesCtx, cancel := context.WithDeadline(ctx, v.SlotDeadline(ss-1))
	defer cancel()

	v.dutiesLock.RLock()
	prevChanged := v.duties == nil || !bytes.Equal(prevRoot, v.duties.PrevDependentRoot)
	v.dutiesLock.RUnlock()
	if prevChanged {
		// Covers the edge case where the initial duties have not been set yet:
		// UpdateDuties locks and recomputes right after the initial run finishes.
		if err := v.UpdateDuties(dutiesCtx); err != nil {
			return errors.Wrap(err, "failed to update duties")
		}
		log.Info("Updated duties due to previous dependent root change")
		return nil
	}

	currRoot, err := bytesutil.DecodeHexWithLength(head.CurrentDutyDependentRoot, fieldparams.RootLength)
	if err != nil {
		return errors.Wrap(err, "failed to decode current duty dependent root")
	}
	if bytes.Equal(currRoot, params.BeaconConfig().ZeroHash[:]) {
		return nil
	}
	v.dutiesLock.RLock()
	currChanged := v.duties == nil || !bytes.Equal(currRoot, v.duties.CurrDependentRoot)
	v.dutiesLock.RUnlock()
	if !currChanged {
		return nil
	}
	if err := v.UpdateDuties(dutiesCtx); err != nil {
		return errors.Wrap(err, "failed to update duties")
	}
	log.Info("Updated duties due to current dependent root change")
	return nil
}
func (v *validator) ProcessEvent(ctx context.Context, event *eventClient.Event) {
if event == nil || event.Data == nil {
log.Warn("Received empty event")
@@ -1518,52 +1232,7 @@ func (v *validator) buildSignedRegReqs(
return signedValRegRequests
}
// aggregatedSelectionProofs rebuilds the validator's beacon committee
// selection-proof cache: it signs a selection proof for every active or
// exiting duty, submits the batch to the client for aggregation, and stores
// the aggregated results keyed by (slot, validator index).
func (v *validator) aggregatedSelectionProofs(ctx context.Context, duties *ethpb.ValidatorDutiesContainer) error {
	ctx, span := trace.StartSpan(ctx, "validator.aggregatedSelectionProofs")
	defer span.End()

	// Hold the selections lock for the whole round trip so readers never
	// observe a partially rebuilt map.
	v.attSelectionLock.Lock()
	defer v.attSelectionLock.Unlock()

	// Discard all previous selections; stale entries must not survive.
	v.attSelections = make(map[attSelectionKey]iface.BeaconCommitteeSelection)

	var selections []iface.BeaconCommitteeSelection
	for _, duty := range duties.CurrentEpochDuties {
		relevant := duty.Status == ethpb.ValidatorStatus_ACTIVE || duty.Status == ethpb.ValidatorStatus_EXITING
		if !relevant {
			continue
		}
		slotSig, err := v.signSlotWithSelectionProof(ctx, bytesutil.ToBytes48(duty.PublicKey), duty.AttesterSlot)
		if err != nil {
			return err
		}
		selections = append(selections, iface.BeaconCommitteeSelection{
			SelectionProof: slotSig,
			Slot:           duty.AttesterSlot,
			ValidatorIndex: duty.ValidatorIndex,
		})
	}

	resp, err := v.validatorClient.AggregatedSelections(ctx, selections)
	if err != nil {
		return err
	}
	// Cache the aggregated proofs for later lookup by slot and index.
	for _, sel := range resp {
		key := attSelectionKey{slot: sel.Slot, index: sel.ValidatorIndex}
		v.attSelections[key] = sel
	}
	return nil
}
<<<<<<< Updated upstream
func (v *validator) attSelection(key attSelectionKey) ([]byte, error) {
v.attSelectionLock.Lock()
defer v.attSelectionLock.Unlock()
@@ -1576,12 +1245,8 @@ func (v *validator) attSelection(key attSelectionKey) ([]byte, error) {
return s.SelectionProof, nil
}
// This constructs a validator subscribed key, it's used to track
// which subnet has already been pending requested. The key is the
// 32-byte encoding of the slot followed by the 32-byte encoding of
// the committee index.
func validatorSubnetSubscriptionKey(slot primitives.Slot, committeeIndex primitives.CommitteeIndex) [64]byte {
	key := make([]byte, 0, 64)
	key = append(key, bytesutil.Bytes32(uint64(slot))...)
	key = append(key, bytesutil.Bytes32(uint64(committeeIndex))...)
	return bytesutil.ToBytes64(key)
}
=======
>>>>>>> Stashed changes
// This tracks all validators' voting status.
type voteStats struct {
startEpoch primitives.Epoch

View File

@@ -363,7 +363,7 @@ func TestUpdateDuties_DoesNothingWhenNotEpochStart_AlreadyExistingAssignments(t
v := validator{
km: newMockKeymanager(t, randKeypair(t)),
validatorClient: client,
duties: &ethpb.ValidatorDutiesContainer{
duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
AttesterSlot: 10,
@@ -376,7 +376,7 @@ func TestUpdateDuties_DoesNothingWhenNotEpochStart_AlreadyExistingAssignments(t
CommitteeIndex: 20,
},
},
},
}),
}
client.EXPECT().Duties(
gomock.Any(),
@@ -394,13 +394,13 @@ func TestUpdateDuties_ReturnsError(t *testing.T) {
v := validator{
validatorClient: client,
km: newMockKeymanager(t, randKeypair(t)),
duties: &ethpb.ValidatorDutiesContainer{
duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
CommitteeIndex: 1,
},
},
},
}),
}
expected := errors.New("bad")
@@ -411,7 +411,7 @@ func TestUpdateDuties_ReturnsError(t *testing.T) {
).Return(nil, expected)
assert.ErrorContains(t, expected.Error(), v.UpdateDuties(t.Context()))
assert.Equal(t, (*ethpb.ValidatorDutiesContainer)(nil), v.duties, "Assignments should have been cleared on failure")
assert.Equal(t, false, v.duties.IsInitialized(), "Assignments should have been cleared on failure")
}
func TestUpdateDuties_OK(t *testing.T) {
@@ -419,6 +419,7 @@ func TestUpdateDuties_OK(t *testing.T) {
defer ctrl.Finish()
client := validatormock.NewMockValidatorClient(ctrl)
kp := randKeypair(t)
resp := &ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
@@ -426,20 +427,27 @@ func TestUpdateDuties_OK(t *testing.T) {
ValidatorIndex: 200,
CommitteeIndex: 100,
CommitteeLength: 4,
PublicKey: []byte("testPubKey_1"),
PublicKey: kp.pub[:],
ProposerSlots: []primitives.Slot{params.BeaconConfig().SlotsPerEpoch + 1},
},
},
}
v := validator{
km: newMockKeymanager(t, randKeypair(t)),
km: newMockKeymanager(t, kp),
validatorClient: client,
pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
kp.pub: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_ACTIVE}},
},
}
client.EXPECT().Duties(
gomock.Any(),
gomock.Any(),
).Return(resp, nil)
client.EXPECT().DomainData(gomock.Any(), gomock.Any()).Return(
&ethpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil,
)
var wg sync.WaitGroup
wg.Add(1)
@@ -447,7 +455,8 @@ func TestUpdateDuties_OK(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*emptypb.Empty, error) {
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex, _ []uint64) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})
@@ -456,10 +465,21 @@ func TestUpdateDuties_OK(t *testing.T) {
util.WaitTimeout(&wg, 2*time.Second)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch+1, v.duties.CurrentEpochDuties[0].ProposerSlots[0], "Unexpected validator assignments")
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, v.duties.CurrentEpochDuties[0].AttesterSlot, "Unexpected validator assignments")
assert.Equal(t, resp.CurrentEpochDuties[0].CommitteeIndex, v.duties.CurrentEpochDuties[0].CommitteeIndex, "Unexpected validator assignments")
assert.Equal(t, resp.CurrentEpochDuties[0].ValidatorIndex, v.duties.CurrentEpochDuties[0].ValidatorIndex, "Unexpected validator assignments")
<<<<<<< Updated upstream
currentDuties := v.duties.CurrentEpochDuties()
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch+1, currentDuties[0].ProposerSlots[0], "Unexpected validator assignments")
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, currentDuties[0].Slot, "Unexpected validator assignments")
assert.Equal(t, resp.CurrentEpochDuties[0].CommitteeIndex, currentDuties[0].CommitteeIndex, "Unexpected validator assignments")
assert.Equal(t, resp.CurrentEpochDuties[0].ValidatorIndex, currentDuties[0].ValidatorIndex, "Unexpected validator assignments")
=======
duty := v.duties.CurrentEpochDuties()[kp.pub]
require.NotNil(t, duty)
proposerSlots := v.duties.ProposerSlots(duty.ValidatorIndex)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch+1, proposerSlots[0], "Unexpected validator assignments")
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, duty.Slot, "Unexpected validator assignments")
assert.Equal(t, resp.CurrentEpochDuties[0].CommitteeIndex, duty.CommitteeIndex, "Unexpected validator assignments")
assert.Equal(t, resp.CurrentEpochDuties[0].ValidatorIndex, duty.ValidatorIndex, "Unexpected validator assignments")
>>>>>>> Stashed changes
}
func TestUpdateDuties_OK_FilterBlacklistedPublicKeys(t *testing.T) {
@@ -494,7 +514,8 @@ func TestUpdateDuties_OK_FilterBlacklistedPublicKeys(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*emptypb.Empty, error) {
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex, _ []uint64) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})
@@ -513,6 +534,8 @@ func TestUpdateDuties_AllValidatorsExited(t *testing.T) {
defer ctrl.Finish()
client := validatormock.NewMockValidatorClient(ctrl)
kp1 := randKeypair(t)
kp2 := randKeypair(t)
resp := &ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
@@ -520,7 +543,7 @@ func TestUpdateDuties_AllValidatorsExited(t *testing.T) {
ValidatorIndex: 200,
CommitteeIndex: 100,
CommitteeLength: 4,
PublicKey: []byte("testPubKey_1"),
PublicKey: kp1.pub[:],
ProposerSlots: []primitives.Slot{params.BeaconConfig().SlotsPerEpoch + 1},
Status: ethpb.ValidatorStatus_EXITED,
},
@@ -529,15 +552,19 @@ func TestUpdateDuties_AllValidatorsExited(t *testing.T) {
ValidatorIndex: 201,
CommitteeIndex: 101,
CommitteeLength: 4,
PublicKey: []byte("testPubKey_2"),
PublicKey: kp2.pub[:],
ProposerSlots: []primitives.Slot{params.BeaconConfig().SlotsPerEpoch + 1},
Status: ethpb.ValidatorStatus_EXITED,
},
},
}
v := validator{
km: newMockKeymanager(t, randKeypair(t)),
km: newMockKeymanager(t, kp1),
validatorClient: client,
pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
kp1.pub: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_EXITED}},
kp2.pub: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_EXITED}},
},
}
client.EXPECT().Duties(
gomock.Any(),
@@ -546,7 +573,6 @@ func TestUpdateDuties_AllValidatorsExited(t *testing.T) {
err := v.UpdateDuties(t.Context())
require.ErrorContains(t, ErrValidatorsAllExited.Error(), err)
}
func TestUpdateDuties_Distributed(t *testing.T) {
@@ -582,6 +608,9 @@ func TestUpdateDuties_Distributed(t *testing.T) {
km: newMockKeymanager(t, keys),
validatorClient: client,
distributed: true,
pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
keys.pub: {status: &ethpb.ValidatorStatusResponse{Status: ethpb.ValidatorStatus_ACTIVE}},
},
}
sigDomain := make([]byte, 32)
@@ -625,7 +654,8 @@ func TestUpdateDuties_Distributed(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*emptypb.Empty, error) {
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex, _ []uint64) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
})
@@ -641,7 +671,7 @@ func TestRolesAt_OK(t *testing.T) {
v, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
defer finish()
v.duties = &ethpb.ValidatorDutiesContainer{
v.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
CommitteeIndex: 1,
@@ -658,7 +688,7 @@ func TestRolesAt_OK(t *testing.T) {
IsSyncCommittee: true,
},
},
}
})
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
@@ -681,7 +711,7 @@ func TestRolesAt_OK(t *testing.T) {
assert.Equal(t, iface.RoleSyncCommittee, roleMap[bytesutil.ToBytes48(validatorKey.PublicKey().Marshal())][2])
// Test sync committee role at epoch boundary.
v.duties = &ethpb.ValidatorDutiesContainer{
v.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
CommitteeIndex: 1,
@@ -698,7 +728,7 @@ func TestRolesAt_OK(t *testing.T) {
IsSyncCommittee: true,
},
},
}
})
m.validatorClient.EXPECT().SyncSubcommitteeIndex(
gomock.Any(), // ctx
@@ -721,7 +751,7 @@ func TestRolesAt_DoesNotAssignProposer_Slot0(t *testing.T) {
v, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal)
defer finish()
v.duties = &ethpb.ValidatorDutiesContainer{
v.duties = newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
CommitteeIndex: 1,
@@ -730,7 +760,7 @@ func TestRolesAt_DoesNotAssignProposer_Slot0(t *testing.T) {
PublicKey: validatorKey.PublicKey().Marshal(),
},
},
}
})
m.validatorClient.EXPECT().DomainData(
gomock.Any(), // ctx
@@ -836,13 +866,13 @@ func TestCheckAndLogValidatorStatus_OK(t *testing.T) {
client := validatormock.NewMockValidatorClient(ctrl)
v := validator{
validatorClient: client,
duties: &ethpb.ValidatorDutiesContainer{
duties: newDutyStoreFromLegacy(&ethpb.ValidatorDutiesContainer{
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
CommitteeIndex: 1,
},
},
},
}),
pubkeyToStatus: make(map[[48]byte]*validatorStatus),
}
v.pubkeyToStatus[bytesutil.ToBytes48(test.status.publicKey)] = test.status
@@ -2868,23 +2898,24 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
ctx := t.Context()
client := validatormock.NewMockValidatorClient(ctrl)
dutiesResp := &ethpb.ValidatorDutiesContainer{
PrevDependentRoot: bytesutil.PadTo([]byte{0x01, 0x02, 0x03}, fieldparams.RootLength),
CurrDependentRoot: bytesutil.PadTo([]byte{0x04, 0x05, 0x06}, fieldparams.RootLength),
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
AttesterSlot: params.BeaconConfig().SlotsPerEpoch,
ValidatorIndex: 200,
CommitteeIndex: 100,
CommitteeLength: 4,
PublicKey: []byte("testPubKey_1"),
ProposerSlots: []primitives.Slot{params.BeaconConfig().SlotsPerEpoch + 1},
},
},
}
v := &validator{
km: newMockKeymanager(t, randKeypair(t)),
validatorClient: client,
duties: &ethpb.ValidatorDutiesContainer{
PrevDependentRoot: bytesutil.PadTo([]byte{0x01, 0x02, 0x03}, fieldparams.RootLength),
CurrDependentRoot: bytesutil.PadTo([]byte{0x04, 0x05, 0x06}, fieldparams.RootLength),
CurrentEpochDuties: []*ethpb.ValidatorDuty{
{
AttesterSlot: params.BeaconConfig().SlotsPerEpoch,
ValidatorIndex: 200,
CommitteeIndex: 100,
CommitteeLength: 4,
PublicKey: []byte("testPubKey_1"),
ProposerSlots: []primitives.Slot{params.BeaconConfig().SlotsPerEpoch + 1},
},
},
},
duties: newDutyStoreFromLegacy(dutiesResp),
}
t.Run("nil head event", func(t *testing.T) {
@@ -2921,10 +2952,11 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*emptypb.Empty, error) {
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex, _ []uint64) (*emptypb.Empty, error) {
return nil, nil
}).AnyTimes()
client.EXPECT().Duties(gomock.Any(), gomock.Any()).Return(v.duties, nil)
client.EXPECT().Duties(gomock.Any(), gomock.Any()).Return(dutiesResp, nil)
err := v.checkDependentRoots(ctx, head)
require.NoError(t, err)
})
@@ -2935,7 +2967,7 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
PreviousDutyDependentRoot: "0x0102030000000000000000000000000000000000000000000000000000000000",
CurrentDutyDependentRoot: "0xe3f7a1b2c489d56f03a6b8d9c7e1fa2456bb09f3de42a67c8910fc3e7a5d4b12",
}
client.EXPECT().Duties(gomock.Any(), gomock.Any()).Return(v.duties, nil)
client.EXPECT().Duties(gomock.Any(), gomock.Any()).Return(dutiesResp, nil)
var wg sync.WaitGroup
wg.Add(1)
@@ -2943,7 +2975,8 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
gomock.Any(),
gomock.Any(),
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*emptypb.Empty, error) {
gomock.Any(),
).DoAndReturn(func(_ context.Context, _ *ethpb.CommitteeSubnetsSubscribeRequest, _ []primitives.ValidatorIndex, _ []uint64) (*emptypb.Empty, error) {
wg.Done()
return nil, nil
}).AnyTimes()
@@ -2959,7 +2992,8 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
}
curr, err := bytesutil.DecodeHexWithLength(head.CurrentDutyDependentRoot, fieldparams.RootLength)
require.NoError(t, err)
require.DeepEqual(t, curr, v.duties.CurrDependentRoot)
_, currRoot := v.duties.DependentRoots()
require.DeepEqual(t, curr, currRoot)
require.NoError(t, v.checkDependentRoots(ctx, head))
})
}