Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 16:08:26 -05:00)
Revert archival deprecations (#5464)
* Revert "Fix `ListValidatorBalances` for v0.11 (#5458)" This reverts commit3763a8ce5d. * Revert "Deprecate archival service (#5445)" This reverts commit4fbcedf541. * Revert "Delete archival DB methods (#5459)" This reverts commit8a3ea1e936. * Revert "Modify `ListBeaconCommittees ` to use new state service (#5411)" This reverts commit497fa6ed50. * Revert "Modify `GetValidatorParticipation` to use new state service (#5409)" This reverts commit046a00aa87. * Revert "Modify `GetValidatorActiveSetChanges` to use new state service (#5408)" This reverts commit5eb6485e14. * Revert "Modify `ListValidatorAssignments` to use new state service (#5365)" This reverts commit5a1a768135.
beacon-chain/archiver/BUILD.bazel (new file, 45 lines)
@@ -0,0 +1,45 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["service.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/archiver",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/params:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
beacon-chain/archiver/service.go (new file, 200 lines)
@@ -0,0 +1,200 @@
package archiver

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "archiver")

// Service defining archiver functionality for persisting checkpointed
// beacon chain information to a database backend for historical purposes.
type Service struct {
	ctx                  context.Context
	cancel               context.CancelFunc
	beaconDB             db.NoHeadAccessDatabase
	headFetcher          blockchain.HeadFetcher
	participationFetcher blockchain.ParticipationFetcher
	stateNotifier        statefeed.Notifier
	lastArchivedEpoch    uint64
}

// Config options for the archiver service.
type Config struct {
	BeaconDB             db.NoHeadAccessDatabase
	HeadFetcher          blockchain.HeadFetcher
	ParticipationFetcher blockchain.ParticipationFetcher
	StateNotifier        statefeed.Notifier
}

// NewArchiverService initializes the service from configuration options.
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
	ctx, cancel := context.WithCancel(ctx)
	return &Service{
		ctx:                  ctx,
		cancel:               cancel,
		beaconDB:             cfg.BeaconDB,
		headFetcher:          cfg.HeadFetcher,
		participationFetcher: cfg.ParticipationFetcher,
		stateNotifier:        cfg.StateNotifier,
	}
}

// Start the archiver service event loop.
func (s *Service) Start() {
	go s.run(s.ctx)
}

// Stop the archiver service event loop.
func (s *Service) Stop() error {
	defer s.cancel()
	return nil
}

// Status reports the healthy status of the archiver. Returning nil means service
// is correctly running without error.
func (s *Service) Status() error {
	return nil
}

// We archive committee information pertaining to the head state's epoch.
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
	proposerSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		return errors.Wrap(err, "could not generate seed")
	}
	attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		return errors.Wrap(err, "could not generate seed")
	}

	info := &pb.ArchivedCommitteeInfo{
		ProposerSeed: proposerSeed[:],
		AttesterSeed: attesterSeed[:],
	}
	if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
		return errors.Wrap(err, "could not archive committee info")
	}
	return nil
}

// We archive active validator set changes that happened during the previous epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
	prevEpoch := epoch - 1
	vals := headState.Validators()
	activations := validators.ActivatedValidatorIndices(prevEpoch, vals)
	slashings := validators.SlashedValidatorIndices(prevEpoch, vals)
	activeValidatorCount, err := helpers.ActiveValidatorCount(headState, prevEpoch)
	if err != nil {
		return errors.Wrap(err, "could not get active validator count")
	}
	exited, err := validators.ExitedValidatorIndices(prevEpoch, vals, activeValidatorCount)
	if err != nil {
		return errors.Wrap(err, "could not determine exited validator indices")
	}
	activeSetChanges := &pb.ArchivedActiveSetChanges{
		Activated: activations,
		Exited:    exited,
		Slashed:   slashings,
	}
	if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, prevEpoch, activeSetChanges); err != nil {
		return errors.Wrap(err, "could not archive active validator set changes")
	}
	return nil
}

// We compute participation metrics by first retrieving the head state and
// matching validator attestations during the epoch.
func (s *Service) archiveParticipation(ctx context.Context, epoch uint64) error {
	p := s.participationFetcher.Participation(epoch)
	participation := &ethpb.ValidatorParticipation{}
	if p != nil {
		participation = &ethpb.ValidatorParticipation{
			EligibleEther:           p.PrevEpoch,
			VotedEther:              p.PrevEpochTargetAttesters,
			GlobalParticipationRate: float32(p.PrevEpochTargetAttesters) / float32(p.PrevEpoch),
		}
	}
	return s.beaconDB.SaveArchivedValidatorParticipation(ctx, epoch, participation)
}

// We archive validator balances and active indices.
func (s *Service) archiveBalances(ctx context.Context, balances []uint64, epoch uint64) error {
	if err := s.beaconDB.SaveArchivedBalances(ctx, epoch, balances); err != nil {
		return errors.Wrap(err, "could not archive balances")
	}
	return nil
}

func (s *Service) run(ctx context.Context) {
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
	defer stateSub.Unsubscribe()
	for {
		select {
		case event := <-stateChannel:
			if event.Type == statefeed.BlockProcessed {
				data, ok := event.Data.(*statefeed.BlockProcessedData)
				if !ok {
					log.Error("Event feed data is not type *statefeed.BlockProcessedData")
					continue
				}
				log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
				headState, err := s.headFetcher.HeadState(ctx)
				if err != nil {
					log.WithError(err).Error("Head state is not available")
					continue
				}
				slot := headState.Slot()
				currentEpoch := helpers.SlotToEpoch(slot)
				if !helpers.IsEpochEnd(slot) && currentEpoch <= s.lastArchivedEpoch {
					continue
				}
				epochToArchive := currentEpoch
				if !helpers.IsEpochEnd(slot) {
					epochToArchive--
				}
				if err := s.archiveCommitteeInfo(ctx, headState, epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive committee info")
					continue
				}
				if err := s.archiveActiveSetChanges(ctx, headState, epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive active validator set changes")
					continue
				}
				if err := s.archiveParticipation(ctx, epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive validator participation")
					continue
				}
				if err := s.archiveBalances(ctx, headState.Balances(), epochToArchive); err != nil {
					log.WithError(err).Error("Could not archive validator balances and active indices")
					continue
				}
				log.WithField(
					"epoch",
					epochToArchive,
				).Debug("Successfully archived beacon chain data during epoch")
				s.lastArchivedEpoch = epochToArchive
			}
		case <-s.ctx.Done():
			log.Debug("Context closed, exiting goroutine")
			return
		case err := <-stateSub.Err():
			log.WithError(err).Error("Subscription to state feed notifier failed")
			return
		}
	}
}
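The run loop above triggers at most one archival pass per epoch: at an epoch's last slot, or, if that epoch-end slot was skipped, at the first block processed in a following epoch. A minimal standalone sketch of that decision rule (not part of this commit; slotsPerEpoch stands in for params.BeaconConfig().SlotsPerEpoch and isEpochEnd mirrors helpers.IsEpochEnd):

package main

import "fmt"

const slotsPerEpoch = 32

// isEpochEnd reports whether slot is the last slot of its epoch.
func isEpochEnd(slot uint64) bool { return (slot+1)%slotsPerEpoch == 0 }

// epochToArchive mirrors the gate in run(): archive the current epoch at its
// last slot; otherwise, once a block lands in a later epoch, archive the
// previous epoch whose end slot was skipped.
func epochToArchive(slot, lastArchived uint64) (uint64, bool) {
	currentEpoch := slot / slotsPerEpoch
	if !isEpochEnd(slot) && currentEpoch <= lastArchived {
		return 0, false // nothing new to archive yet
	}
	epoch := currentEpoch
	if !isEpochEnd(slot) {
		epoch-- // the epoch-end slot was skipped; archive the missed epoch
	}
	return epoch, true
}

func main() {
	fmt.Println(epochToArchive(31, 0)) // 0 true: slot 31 is the last slot of epoch 0
	fmt.Println(epochToArchive(33, 0)) // 0 true: epoch 0's end slot was skipped, archive it late
	fmt.Println(epochToArchive(40, 1)) // 0 false: epoch 1 already archived and slot 40 is mid-epoch
}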
beacon-chain/archiver/service_test.go (new file, 483 lines)
@@ -0,0 +1,483 @@
package archiver

import (
	"context"
	"fmt"
	"io/ioutil"
	"reflect"
	"testing"

	"github.com/gogo/protobuf/proto"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func init() {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(ioutil.Discard)
}

func TestArchiverService_ReceivesBlockProcessedEvent(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	st := testutil.NewBeaconState()
	if err := st.SetSlot(1); err != nil {
		t.Fatal(err)
	}
	svc.headFetcher = &mock.ChainService{
		State: st,
	}

	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)
	testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", event.Data.(*statefeed.BlockProcessedData).BlockRoot))
	testutil.AssertLogsContain(t, hook, "Received block processed event")
}

func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	// The head state is NOT an epoch end.
	st := testutil.NewBeaconState()
	if err := st.SetSlot(params.BeaconConfig().SlotsPerEpoch - 2); err != nil {
		t.Fatal(err)
	}
	svc.headFetcher = &mock.ChainService{
		State: st,
	}
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
	testutil.AssertLogsContain(t, hook, "Received block processed event")
	// The service should ONLY log any archival logs if we receive a
	// head slot that is an epoch end.
	testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
}

func TestArchiverService_ArchivesEvenThroughSkipSlot(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	validatorCount := uint64(100)
	headState, err := setupState(validatorCount)
	if err != nil {
		t.Fatal(err)
	}
	defer dbutil.TeardownDB(t, beaconDB)
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}

	exitRoutine := make(chan bool)
	go func() {
		svc.run(svc.ctx)
		<-exitRoutine
	}()

	// Send out an event every slot, skipping the end slot of the epoch.
	for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch+1; i++ {
		if err := headState.SetSlot(i); err != nil {
			t.Fatal(err)
		}
		svc.headFetcher = &mock.ChainService{
			State: headState,
		}
		if helpers.IsEpochEnd(i) {
			continue
		}
		// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
		for sent := 0; sent == 0; {
			sent = svc.stateNotifier.StateFeed().Send(event)
		}
	}
	if err := svc.Stop(); err != nil {
		t.Fatal(err)
	}
	exitRoutine <- true

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}

	testutil.AssertLogsContain(t, hook, "Received block processed event")
	// Even though there was a skip slot, we should still be able to archive
	// upon the next block event afterwards.
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState, err := setupState(validatorCount)
	if err != nil {
		t.Fatal(err)
	}
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)

	attestedBalance := uint64(1)

	currentEpoch := helpers.CurrentEpoch(headState)
	wanted := &ethpb.ValidatorParticipation{
		VotedEther:              attestedBalance,
		EligibleEther:           validatorCount * params.BeaconConfig().MaxEffectiveBalance,
		GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
	}

	retrieved, err := svc.beaconDB.ArchivedValidatorParticipation(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}

	if !proto.Equal(wanted, retrieved) {
		t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch-1, wanted, retrieved)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState, err := setupState(validatorCount)
	if err != nil {
		t.Fatal(err)
	}
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)

	retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(headState.Balances(), retrieved) {
		t.Errorf(
			"Wanted balances for epoch %d %v, retrieved %v",
			helpers.CurrentEpoch(headState),
			headState.Balances(),
			retrieved,
		)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState, err := setupState(validatorCount)
	if err != nil {
		t.Fatal(err)
	}
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)

	currentEpoch := helpers.CurrentEpoch(headState)
	proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		t.Fatal(err)
	}
	attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		t.Fatal(err)
	}
	wanted := &pb.ArchivedCommitteeInfo{
		ProposerSeed: proposerSeed[:],
		AttesterSeed: attesterSeed[:],
	}

	retrieved, err := svc.beaconDB.ArchivedCommitteeInfo(svc.ctx, helpers.CurrentEpoch(headState))
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(wanted, retrieved) {
		t.Errorf(
			"Wanted committee info for epoch %d %v, retrieved %v",
			helpers.CurrentEpoch(headState),
			wanted,
			retrieved,
		)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState, err := setupState(validatorCount)
	if err != nil {
		t.Fatal(err)
	}
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	prevEpoch := helpers.PrevEpoch(headState)
	delayedActEpoch := helpers.ActivationExitEpoch(prevEpoch)
	val1, err := headState.ValidatorAtIndex(4)
	if err != nil {
		t.Fatal(err)
	}
	val1.ActivationEpoch = delayedActEpoch
	val2, err := headState.ValidatorAtIndex(5)
	if err != nil {
		t.Fatal(err)
	}
	val2.ActivationEpoch = delayedActEpoch
	if err := headState.UpdateValidatorAtIndex(4, val1); err != nil {
		t.Fatal(err)
	}
	if err := headState.UpdateValidatorAtIndex(5, val2); err != nil {
		t.Fatal(err)
	}
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if retrieved == nil {
		t.Fatal("Retrieved indices are nil")
	}
	if len(retrieved.Activated) != 98 {
		t.Error("Did not get wanted active length")
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState, err := setupState(validatorCount)
	if err != nil {
		t.Fatal(err)
	}
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	prevEpoch := helpers.PrevEpoch(headState)
	val1, err := headState.ValidatorAtIndex(95)
	if err != nil {
		t.Fatal(err)
	}
	val1.Slashed = true
	val2, err := headState.ValidatorAtIndex(96)
	if err != nil {
		t.Fatal(err)
	}
	val2.Slashed = true
	if err := headState.UpdateValidatorAtIndex(95, val1); err != nil {
		t.Fatal(err)
	}
	if err := headState.UpdateValidatorAtIndex(96, val2); err != nil {
		t.Fatal(err)
	}
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if retrieved == nil {
		t.Fatal("Retrieved indices are nil")
	}
	if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
		t.Errorf("Wanted indices 95, 96 slashed, received %v", retrieved.Slashed)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState, err := setupState(validatorCount)
	if err != nil {
		t.Fatal(err)
	}
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	prevEpoch := helpers.PrevEpoch(headState)
	val, err := headState.ValidatorAtIndex(95)
	if err != nil {
		t.Fatal(err)
	}
	val.ExitEpoch = prevEpoch
	val.WithdrawableEpoch = prevEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
	if err := headState.UpdateValidatorAtIndex(95, val); err != nil {
		t.Fatal(err)
	}
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}
	triggerStateEvent(t, svc, event)
	testutil.AssertLogsContain(t, hook, "Successfully archived")
	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if retrieved == nil {
		t.Fatal("Retrieved indices are nil")
	}
	if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
		t.Errorf("Wanted indices 95 exited, received %v", retrieved.Exited)
	}
}

func setupState(validatorCount uint64) (*stateTrie.BeaconState, error) {
	validators := make([]*ethpb.Validator, validatorCount)
	balances := make([]uint64, validatorCount)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{
			ExitEpoch:         params.BeaconConfig().FarFutureEpoch,
			WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
		}
		balances[i] = params.BeaconConfig().MaxEffectiveBalance
	}

	atts := []*pb.PendingAttestation{{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}}}

	// We initialize a head state that has attestations from participated
	// validators in a simulated fashion.
	st := testutil.NewBeaconState()
	if err := st.SetSlot((2 * params.BeaconConfig().SlotsPerEpoch) - 1); err != nil {
		return nil, err
	}
	if err := st.SetValidators(validators); err != nil {
		return nil, err
	}
	if err := st.SetBalances(balances); err != nil {
		return nil, err
	}
	if err := st.SetCurrentEpochAttestations(atts); err != nil {
		return nil, err
	}
	return st, nil
}

func setupService(t *testing.T) (*Service, db.Database) {
	beaconDB := dbutil.SetupDB(t)
	ctx, cancel := context.WithCancel(context.Background())
	validatorCount := uint64(100)
	totalBalance := validatorCount * params.BeaconConfig().MaxEffectiveBalance
	mockChainService := &mock.ChainService{}
	return &Service{
		beaconDB:      beaconDB,
		ctx:           ctx,
		cancel:        cancel,
		stateNotifier: mockChainService.StateNotifier(),
		participationFetcher: &mock.ChainService{
			Balance: &precompute.Balance{PrevEpoch: totalBalance, PrevEpochTargetAttesters: 1}},
	}, beaconDB
}

func triggerStateEvent(t *testing.T, svc *Service, event *feed.Event) {
	exitRoutine := make(chan bool)
	go func() {
		svc.run(svc.ctx)
		<-exitRoutine
	}()

	// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
	for sent := 0; sent == 0; {
		sent = svc.stateNotifier.StateFeed().Send(event)
	}
	if err := svc.Stop(); err != nil {
		t.Fatal(err)
	}
	exitRoutine <- true

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
}
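triggerStateEvent above busy-loops on Send because run() subscribes to the state feed in a separate goroutine. A standalone sketch of that idiom (assuming shared/event.Feed keeps go-ethereum's event.Feed semantics, where Send returns the number of subscribers that received the value):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/shared/event"
)

func main() {
	var feed event.Feed
	ch := make(chan int, 1)
	sub := feed.Subscribe(ch) // the tests do this indirectly, inside svc.run
	defer sub.Unsubscribe()

	// Send returns 0 while nobody is subscribed yet, so the tests retry
	// until at least one subscriber has been reached.
	for sent := 0; sent == 0; {
		sent = feed.Send(42)
	}
	fmt.Println(<-ch) // 42
}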
@@ -7,6 +7,7 @@ import (
	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/flags"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/roughtime"

@@ -127,6 +128,13 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui
		return nil, err
	}

	// Only save attestation in DB for archival node.
	if flags.Get().EnableArchive {
		if err := s.beaconDB.SaveAttestation(ctx, a); err != nil {
			return nil, err
		}
	}

	if indexedAtt.AttestingIndices == nil {
		return nil, errors.New("nil attesting indices")
	}
@@ -9,6 +9,7 @@ import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/flags"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	"github.com/prysmaticlabs/prysm/shared/attestationutil"

@@ -243,6 +244,13 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
		s.filterBoundaryCandidates(ctx, root, postState)
	}

	if flags.Get().EnableArchive {
		atts := signed.Block.Body.Attestations
		if err := s.beaconDB.SaveAttestations(ctx, atts); err != nil {
			return errors.Wrapf(err, "could not save block attestations from slot %d", b.Slot)
		}
	}

	// Update justified check point.
	if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
		if err := s.updateJustified(ctx, postState); err != nil {
@@ -49,6 +49,10 @@ type ReadOnlyDatabase interface {
	JustifiedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
	FinalizedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
	// Archival data handlers for storing/retrieving historical beacon node information.
	ArchivedActiveValidatorChanges(ctx context.Context, epoch uint64) (*ethereum_beacon_p2p_v1.ArchivedActiveSetChanges, error)
	ArchivedCommitteeInfo(ctx context.Context, epoch uint64) (*ethereum_beacon_p2p_v1.ArchivedCommitteeInfo, error)
	ArchivedBalances(ctx context.Context, epoch uint64) ([]uint64, error)
	ArchivedValidatorParticipation(ctx context.Context, epoch uint64) (*eth.ValidatorParticipation, error)
	ArchivedPointRoot(ctx context.Context, index uint64) [32]byte
	HasArchivedPoint(ctx context.Context, index uint64) bool
	LastArchivedIndexRoot(ctx context.Context) [32]byte

@@ -93,6 +97,10 @@ type NoHeadAccessDatabase interface {
	SaveJustifiedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
	SaveFinalizedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
	// Archival data handlers for storing/retrieving historical beacon node information.
	SaveArchivedActiveValidatorChanges(ctx context.Context, epoch uint64, changes *ethereum_beacon_p2p_v1.ArchivedActiveSetChanges) error
	SaveArchivedCommitteeInfo(ctx context.Context, epoch uint64, info *ethereum_beacon_p2p_v1.ArchivedCommitteeInfo) error
	SaveArchivedBalances(ctx context.Context, epoch uint64, balances []uint64) error
	SaveArchivedValidatorParticipation(ctx context.Context, epoch uint64, part *eth.ValidatorParticipation) error
	SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, index uint64) error
	SaveLastArchivedIndex(ctx context.Context, index uint64) error
	// Deposit contract related handlers.
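A minimal read-side sketch of the archival getters listed above (totalArchivedBalance is a hypothetical helper, not part of this commit): sum the archived balances of an epoch, e.g. to recompute eligible ether offline.

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/db"
)

// totalArchivedBalance sums the per-validator Gwei balances archived for an epoch.
func totalArchivedBalance(ctx context.Context, d db.ReadOnlyDatabase, epoch uint64) (uint64, error) {
	balances, err := d.ArchivedBalances(ctx, epoch)
	if err != nil {
		return 0, err
	}
	var total uint64
	for _, b := range balances {
		total += b
	}
	return total, nil
}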
@@ -9,6 +9,7 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/proto/beacon/db"
	ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

@@ -162,6 +163,26 @@ func (e Exporter) FinalizedCheckpoint(ctx context.Context) (*eth.Checkpoint, err
	return e.db.FinalizedCheckpoint(ctx)
}

// ArchivedActiveValidatorChanges -- passthrough.
func (e Exporter) ArchivedActiveValidatorChanges(ctx context.Context, epoch uint64) (*ethereum_beacon_p2p_v1.ArchivedActiveSetChanges, error) {
	return e.db.ArchivedActiveValidatorChanges(ctx, epoch)
}

// ArchivedCommitteeInfo -- passthrough.
func (e Exporter) ArchivedCommitteeInfo(ctx context.Context, epoch uint64) (*ethereum_beacon_p2p_v1.ArchivedCommitteeInfo, error) {
	return e.db.ArchivedCommitteeInfo(ctx, epoch)
}

// ArchivedBalances -- passthrough.
func (e Exporter) ArchivedBalances(ctx context.Context, epoch uint64) ([]uint64, error) {
	return e.db.ArchivedBalances(ctx, epoch)
}

// ArchivedValidatorParticipation -- passthrough.
func (e Exporter) ArchivedValidatorParticipation(ctx context.Context, epoch uint64) (*eth.ValidatorParticipation, error) {
	return e.db.ArchivedValidatorParticipation(ctx, epoch)
}

// DepositContractAddress -- passthrough.
func (e Exporter) DepositContractAddress(ctx context.Context) ([]byte, error) {
	return e.db.DepositContractAddress(ctx)

@@ -227,6 +248,26 @@ func (e Exporter) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *eth.C
	return e.db.SaveFinalizedCheckpoint(ctx, checkpoint)
}

// SaveArchivedActiveValidatorChanges -- passthrough.
func (e Exporter) SaveArchivedActiveValidatorChanges(ctx context.Context, epoch uint64, changes *ethereum_beacon_p2p_v1.ArchivedActiveSetChanges) error {
	return e.db.SaveArchivedActiveValidatorChanges(ctx, epoch, changes)
}

// SaveArchivedCommitteeInfo -- passthrough.
func (e Exporter) SaveArchivedCommitteeInfo(ctx context.Context, epoch uint64, info *ethereum_beacon_p2p_v1.ArchivedCommitteeInfo) error {
	return e.db.SaveArchivedCommitteeInfo(ctx, epoch, info)
}

// SaveArchivedBalances -- passthrough.
func (e Exporter) SaveArchivedBalances(ctx context.Context, epoch uint64, balances []uint64) error {
	return e.db.SaveArchivedBalances(ctx, epoch, balances)
}

// SaveArchivedValidatorParticipation -- passthrough.
func (e Exporter) SaveArchivedValidatorParticipation(ctx context.Context, epoch uint64, part *eth.ValidatorParticipation) error {
	return e.db.SaveArchivedValidatorParticipation(ctx, epoch, part)
}

// SaveDepositContractAddress -- passthrough.
func (e Exporter) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
	return e.db.SaveDepositContractAddress(ctx, addr)
@@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "archive.go",
        "archived_point.go",
        "attestations.go",
        "backup.go",

@@ -59,6 +60,7 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "archive_test.go",
        "archived_point_test.go",
        "attestations_test.go",
        "backup_test.go",
beacon-chain/db/kv/archive.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package kv

import (
	"context"
	"encoding/binary"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// ArchivedActiveValidatorChanges retrieval by epoch.
func (k *Store) ArchivedActiveValidatorChanges(ctx context.Context, epoch uint64) (*pb.ArchivedActiveSetChanges, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedActiveValidatorChanges")
	defer span.End()

	buf := bytesutil.Uint64ToBytes(epoch)
	var target *pb.ArchivedActiveSetChanges
	err := k.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(archivedValidatorSetChangesBucket)
		enc := bkt.Get(buf)
		if enc == nil {
			return nil
		}
		target = &pb.ArchivedActiveSetChanges{}
		return decode(enc, target)
	})
	return target, err
}

// SaveArchivedActiveValidatorChanges by epoch.
func (k *Store) SaveArchivedActiveValidatorChanges(ctx context.Context, epoch uint64, changes *pb.ArchivedActiveSetChanges) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveArchivedActiveValidatorChanges")
	defer span.End()
	buf := bytesutil.Uint64ToBytes(epoch)
	enc, err := encode(changes)
	if err != nil {
		return err
	}
	return k.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(archivedValidatorSetChangesBucket)
		return bucket.Put(buf, enc)
	})
}

// ArchivedCommitteeInfo retrieval by epoch.
func (k *Store) ArchivedCommitteeInfo(ctx context.Context, epoch uint64) (*pb.ArchivedCommitteeInfo, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedCommitteeInfo")
	defer span.End()

	buf := bytesutil.Uint64ToBytes(epoch)
	var target *pb.ArchivedCommitteeInfo
	err := k.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(archivedCommitteeInfoBucket)
		enc := bkt.Get(buf)
		if enc == nil {
			return nil
		}
		target = &pb.ArchivedCommitteeInfo{}
		return decode(enc, target)
	})
	return target, err
}

// SaveArchivedCommitteeInfo by epoch.
func (k *Store) SaveArchivedCommitteeInfo(ctx context.Context, epoch uint64, info *pb.ArchivedCommitteeInfo) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveArchivedCommitteeInfo")
	defer span.End()
	buf := bytesutil.Uint64ToBytes(epoch)
	enc, err := encode(info)
	if err != nil {
		return err
	}
	return k.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(archivedCommitteeInfoBucket)
		return bucket.Put(buf, enc)
	})
}

// ArchivedBalances retrieval by epoch.
func (k *Store) ArchivedBalances(ctx context.Context, epoch uint64) ([]uint64, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedBalances")
	defer span.End()

	buf := bytesutil.Uint64ToBytes(epoch)
	var target []uint64
	err := k.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(archivedBalancesBucket)
		enc := bkt.Get(buf)
		if enc == nil {
			return nil
		}
		target = unmarshalBalances(enc)
		return nil
	})
	return target, err
}

// SaveArchivedBalances by epoch.
func (k *Store) SaveArchivedBalances(ctx context.Context, epoch uint64, balances []uint64) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveArchivedBalances")
	defer span.End()
	buf := bytesutil.Uint64ToBytes(epoch)
	enc := marshalBalances(balances)
	return k.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(archivedBalancesBucket)
		return bucket.Put(buf, enc)
	})
}

// ArchivedValidatorParticipation retrieval by epoch.
func (k *Store) ArchivedValidatorParticipation(ctx context.Context, epoch uint64) (*ethpb.ValidatorParticipation, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedValidatorParticipation")
	defer span.End()

	buf := bytesutil.Uint64ToBytes(epoch)
	var target *ethpb.ValidatorParticipation
	err := k.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(archivedValidatorParticipationBucket)
		enc := bkt.Get(buf)
		if enc == nil {
			return nil
		}
		target = &ethpb.ValidatorParticipation{}
		return decode(enc, target)
	})
	return target, err
}

// SaveArchivedValidatorParticipation by epoch.
func (k *Store) SaveArchivedValidatorParticipation(ctx context.Context, epoch uint64, part *ethpb.ValidatorParticipation) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveArchivedValidatorParticipation")
	defer span.End()
	buf := bytesutil.Uint64ToBytes(epoch)
	enc, err := encode(part)
	if err != nil {
		return err
	}
	return k.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(archivedValidatorParticipationBucket)
		return bucket.Put(buf, enc)
	})
}

func marshalBalances(bals []uint64) []byte {
	res := make([]byte, len(bals)*8)
	offset := 0
	for i := 0; i < len(bals); i++ {
		binary.LittleEndian.PutUint64(res[offset:offset+8], bals[i])
		offset += 8
	}
	return res
}

func unmarshalBalances(bals []byte) []uint64 {
	numItems := len(bals) / 8
	res := make([]uint64, numItems)
	offset := 0
	for i := 0; i < numItems; i++ {
		res[i] = binary.LittleEndian.Uint64(bals[offset : offset+8])
		offset += 8
	}
	return res
}
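The codec at the bottom of the file is a fixed-width layout: each balance is 8 little-endian bytes, so n balances occupy exactly 8n bytes and decoding needs no length prefix. A standalone round-trip sketch (not part of this commit):

package main

import (
	"encoding/binary"
	"fmt"
)

func marshalBalances(bals []uint64) []byte {
	res := make([]byte, len(bals)*8)
	for i, b := range bals {
		binary.LittleEndian.PutUint64(res[i*8:(i+1)*8], b)
	}
	return res
}

func unmarshalBalances(enc []byte) []uint64 {
	res := make([]uint64, len(enc)/8)
	for i := range res {
		res[i] = binary.LittleEndian.Uint64(enc[i*8 : (i+1)*8])
	}
	return res
}

func main() {
	in := []uint64{32000000000, 31999999999} // Gwei
	out := unmarshalBalances(marshalBalances(in))
	fmt.Println(out) // [32000000000 31999999999]
}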
beacon-chain/db/kv/archive_test.go (new file, 167 lines)
@@ -0,0 +1,167 @@
package kv

import (
	"context"
	"reflect"
	"testing"

	"github.com/gogo/protobuf/proto"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

func TestStore_ArchivedActiveValidatorChanges(t *testing.T) {
	db := setupDB(t)
	defer teardownDB(t, db)
	ctx := context.Background()
	activated := []uint64{3, 4, 5}
	exited := []uint64{6, 7, 8}
	slashed := []uint64{1212}
	someRoot := [32]byte{1, 2, 3}
	changes := &pbp2p.ArchivedActiveSetChanges{
		Activated: activated,
		Exited:    exited,
		Slashed:   slashed,
		VoluntaryExits: []*ethpb.VoluntaryExit{
			{
				Epoch:          5,
				ValidatorIndex: 6,
			},
			{
				Epoch:          5,
				ValidatorIndex: 7,
			},
			{
				Epoch:          5,
				ValidatorIndex: 8,
			},
		},
		ProposerSlashings: []*ethpb.ProposerSlashing{
			{
				Header_1: &ethpb.SignedBeaconBlockHeader{
					Header: &ethpb.BeaconBlockHeader{
						ProposerIndex: 1212,
						Slot:          10,
						ParentRoot:    someRoot[:],
						StateRoot:     someRoot[:],
						BodyRoot:      someRoot[:],
					},
					Signature: make([]byte, 96),
				},
				Header_2: &ethpb.SignedBeaconBlockHeader{
					Header: &ethpb.BeaconBlockHeader{
						ProposerIndex: 1212,
						Slot:          10,
						ParentRoot:    someRoot[:],
						StateRoot:     someRoot[:],
						BodyRoot:      someRoot[:],
					},
					Signature: make([]byte, 96),
				},
			},
		},
		AttesterSlashings: []*ethpb.AttesterSlashing{
			{
				Attestation_1: &ethpb.IndexedAttestation{
					Data: &ethpb.AttestationData{
						BeaconBlockRoot: someRoot[:],
						Source: &ethpb.Checkpoint{
							Epoch: 5,
							Root:  someRoot[:],
						},
						Target: &ethpb.Checkpoint{
							Epoch: 5,
							Root:  someRoot[:],
						},
					},
				},
				Attestation_2: &ethpb.IndexedAttestation{
					Data: &ethpb.AttestationData{
						BeaconBlockRoot: someRoot[:],
						Source: &ethpb.Checkpoint{
							Epoch: 5,
							Root:  someRoot[:],
						},
						Target: &ethpb.Checkpoint{
							Epoch: 5,
							Root:  someRoot[:],
						},
					},
				},
			},
		},
	}
	epoch := uint64(10)
	if err := db.SaveArchivedActiveValidatorChanges(ctx, epoch, changes); err != nil {
		t.Fatal(err)
	}
	retrieved, err := db.ArchivedActiveValidatorChanges(ctx, epoch)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(changes, retrieved) {
		t.Errorf("Wanted %v, received %v", changes, retrieved)
	}
}

func TestStore_ArchivedCommitteeInfo(t *testing.T) {
	db := setupDB(t)
	defer teardownDB(t, db)
	ctx := context.Background()
	someSeed := [32]byte{1, 2, 3}
	info := &pbp2p.ArchivedCommitteeInfo{
		ProposerSeed: someSeed[:],
		AttesterSeed: someSeed[:],
	}
	epoch := uint64(10)
	if err := db.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
		t.Fatal(err)
	}
	retrieved, err := db.ArchivedCommitteeInfo(ctx, epoch)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(info, retrieved) {
		t.Errorf("Wanted %v, received %v", info, retrieved)
	}
}

func TestStore_ArchivedBalances(t *testing.T) {
	db := setupDB(t)
	defer teardownDB(t, db)
	ctx := context.Background()
	balances := []uint64{2, 3, 4, 5, 6, 7}
	epoch := uint64(10)
	if err := db.SaveArchivedBalances(ctx, epoch, balances); err != nil {
		t.Fatal(err)
	}
	retrieved, err := db.ArchivedBalances(ctx, epoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(balances, retrieved) {
		t.Errorf("Wanted %v, received %v", balances, retrieved)
	}
}

func TestStore_ArchivedValidatorParticipation(t *testing.T) {
	db := setupDB(t)
	defer teardownDB(t, db)
	ctx := context.Background()
	epoch := uint64(10)
	part := &ethpb.ValidatorParticipation{
		GlobalParticipationRate: 0.99,
		EligibleEther:           12202000,
		VotedEther:              12079998,
	}
	if err := db.SaveArchivedValidatorParticipation(ctx, epoch, part); err != nil {
		t.Fatal(err)
	}
	retrieved, err := db.ArchivedValidatorParticipation(ctx, epoch)
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(part, retrieved) {
		t.Errorf("Wanted %v, received %v", part, retrieved)
	}
}
@@ -95,6 +95,10 @@ func NewKVStore(dirPath string, stateSummaryCache *cache.StateSummaryCache) (*St
		voluntaryExitsBucket,
		chainMetadataBucket,
		checkpointBucket,
		archivedValidatorSetChangesBucket,
		archivedCommitteeInfoBucket,
		archivedBalancesBucket,
		archivedValidatorParticipationBucket,
		powchainBucket,
		stateSummaryBucket,
		archivedIndexRootBucket,
@@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
    name = "go_default_library",
    srcs = [
        "archive.go",
        "base.go",
        "config.go",
        "interop.go",
beacon-chain/flags/archive.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package flags

import (
	"gopkg.in/urfave/cli.v2"
)

var (
	// ArchiveEnableFlag defines whether or not the beacon chain should archive
	// historical blocks, attestations, and validator set changes.
	ArchiveEnableFlag = &cli.BoolFlag{
		Name:  "archive",
		Usage: "Whether or not beacon chain should archive historical data including blocks, attestations, and validator set changes",
	}
	// ArchiveValidatorSetChangesFlag defines whether or not the beacon chain should archive
	// historical validator set changes in persistent storage.
	ArchiveValidatorSetChangesFlag = &cli.BoolFlag{
		Name:  "archive-validator-set-changes",
		Usage: "Whether or not beacon chain should archive historical validator set changes",
	}
	// ArchiveBlocksFlag defines whether or not the beacon chain should archive
	// historical block data in persistent storage.
	ArchiveBlocksFlag = &cli.BoolFlag{
		Name:  "archive-blocks",
		Usage: "Whether or not beacon chain should archive historical blocks",
	}
	// ArchiveAttestationsFlag defines whether or not the beacon chain should archive
	// historical attestation data in persistent storage.
	ArchiveAttestationsFlag = &cli.BoolFlag{
		Name:  "archive-attestations",
Usage: "Whether or not beacon chain should archive historical blocks",
	}
)
@@ -9,11 +9,15 @@ import (

 // GlobalFlags specifies all the global flags for the
 // beacon node.
 type GlobalFlags struct {
-	UnsafeSync       bool
-	DisableDiscv5    bool
-	MinimumSyncPeers int
-	MaxPageSize      int
-	DeploymentBlock  int
+	EnableArchive                     bool
+	EnableArchivedValidatorSetChanges bool
+	EnableArchivedBlocks              bool
+	EnableArchivedAttestations        bool
+	UnsafeSync                        bool
+	DisableDiscv5                     bool
+	MinimumSyncPeers                  int
+	MaxPageSize                       int
+	DeploymentBlock                   int
 }

 var globalConfig *GlobalFlags

@@ -35,6 +39,18 @@ func Init(c *GlobalFlags) {
// based on the provided cli context.
func ConfigureGlobalFlags(ctx *cli.Context) {
	cfg := &GlobalFlags{}
	if ctx.Bool(ArchiveEnableFlag.Name) {
		cfg.EnableArchive = true
	}
	if ctx.Bool(ArchiveValidatorSetChangesFlag.Name) {
		cfg.EnableArchivedValidatorSetChanges = true
	}
	if ctx.Bool(ArchiveBlocksFlag.Name) {
		cfg.EnableArchivedBlocks = true
	}
	if ctx.Bool(ArchiveAttestationsFlag.Name) {
		cfg.EnableArchivedAttestations = true
	}
	if ctx.Bool(UnsafeSync.Name) {
		cfg.UnsafeSync = true
	}
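Other packages read these globals through flags.Get(), as the onAttestation and onBlockInitialSyncStateTransition hunks above do. A minimal sketch of the pattern (using Init, the setter visible in the hunk header above, instead of a real CLI context):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/flags"
)

func main() {
	// In the beacon node binary, ConfigureGlobalFlags(ctx) populates this
	// from the CLI; Init sets it directly, which is handy in tests.
	flags.Init(&flags.GlobalFlags{EnableArchive: true})
	if flags.Get().EnableArchive {
		fmt.Println("archival writes enabled")
	}
}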
@@ -43,6 +43,10 @@ var appFlags = []cli.Flag{
	flags.InteropGenesisStateFlag,
	flags.InteropNumValidatorsFlag,
	flags.InteropGenesisTimeFlag,
	flags.ArchiveEnableFlag,
	flags.ArchiveValidatorSetChangesFlag,
	flags.ArchiveBlocksFlag,
	flags.ArchiveAttestationsFlag,
	flags.SlotsPerArchivedPoint,
	cmd.BootstrapNode,
	cmd.NoDiscovery,
@@ -6,6 +6,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/node",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/archiver:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
@@ -16,6 +16,7 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/archiver"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"

@@ -149,6 +150,10 @@ func NewBeaconNode(ctx *cli.Context) (*BeaconNode, error) {
		return nil, err
	}

	if err := beacon.registerArchiverService(ctx); err != nil {
		return nil, err
	}

	if !ctx.Bool(cmd.DisableMonitoringFlag.Name) {
		if err := beacon.registerPrometheusService(ctx); err != nil {
			return nil, err

@@ -619,3 +624,20 @@ func (b *BeaconNode) registerInteropServices(ctx *cli.Context) error {
	}
	return nil
}

func (b *BeaconNode) registerArchiverService(ctx *cli.Context) error {
	if !flags.Get().EnableArchive {
		return nil
	}
	var chainService *blockchain.Service
	if err := b.services.FetchService(&chainService); err != nil {
		return err
	}
	svc := archiver.NewArchiverService(context.Background(), &archiver.Config{
		BeaconDB:             b.db,
		HeadFetcher:          chainService,
		ParticipationFetcher: chainService,
		StateNotifier:        b,
	})
	return b.services.RegisterService(svc)
}
@@ -41,11 +41,13 @@ go_library(
        "//shared/bytesutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/pagination:go_default_library",
        "//shared/params:go_default_library",
        "//shared/sliceutil:go_default_library",
        "@com_github_gogo_protobuf//types:go_default_library",
        "@com_github_patrickmn_go_cache//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",

@@ -73,6 +75,7 @@ go_test(
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/block:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",

@@ -100,5 +103,6 @@ go_test(
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
        "@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
    ],
)
@@ -4,11 +4,15 @@ import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/pagination"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@@ -28,9 +32,14 @@ func (bs *Server) ListValidatorAssignments(
    }

    var res []*ethpb.ValidatorAssignments_CommitteeAssignment
    headState, err := bs.HeadFetcher.HeadState(ctx)
    if err != nil {
        return nil, status.Error(codes.Internal, "Could not get head state")
    }
    filtered := map[uint64]bool{} // track filtered validators to prevent duplication in the response.
    filteredIndices := make([]uint64, 0)
    var requestedEpoch uint64
    requestedEpoch := helpers.CurrentEpoch(headState)

    switch q := req.QueryFilter.(type) {
    case *ethpb.ListValidatorAssignmentsRequest_Genesis:
        if q.Genesis {
@@ -40,24 +49,18 @@ func (bs *Server) ListValidatorAssignments(
        requestedEpoch = q.Epoch
    }

    currentEpoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
    if requestedEpoch > currentEpoch {
    if requestedEpoch > helpers.CurrentEpoch(headState) {
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
            currentEpoch,
            helpers.CurrentEpoch(headState),
            requestedEpoch,
        )
    }

    requestedState, err := bs.StateGen.StateBySlot(ctx, helpers.StartSlot(requestedEpoch))
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not retrieve archived state for epoch %d: %v", requestedEpoch, err)
    }

    // Filter out assignments by public keys.
    for _, pubKey := range req.PublicKeys {
        index, ok := requestedState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
        index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
        if !ok {
            return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x", pubKey)
        }
@@ -72,7 +75,7 @@ func (bs *Server) ListValidatorAssignments(
        }
    }

    activeIndices, err := helpers.ActiveValidatorIndices(requestedState, requestedEpoch)
    activeIndices, err := helpers.ActiveValidatorIndices(headState, requestedEpoch)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not retrieve active validator indices: %v", err)
    }
@@ -93,21 +96,53 @@ func (bs *Server) ListValidatorAssignments(
        return nil, status.Errorf(codes.Internal, "Could not paginate results: %v", err)
    }

    // Initialize all committee related data.
    shouldFetchFromArchive := requestedEpoch < bs.FinalizationFetcher.FinalizedCheckpt().Epoch

    // initialize all committee related data.
    committeeAssignments := map[uint64]*helpers.CommitteeAssignmentContainer{}
    proposerIndexToSlots := make(map[uint64][]uint64)
    committeeAssignments, proposerIndexToSlots, err = helpers.CommitteeAssignments(requestedState, requestedEpoch)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
    archivedInfo := &pb.ArchivedCommitteeInfo{}
    archivedBalances := make([]uint64, 0)
    archivedAssignments := make(map[uint64]*ethpb.ValidatorAssignments_CommitteeAssignment)

    if shouldFetchFromArchive {
        archivedInfo, archivedBalances, err = bs.archivedCommitteeData(ctx, requestedEpoch)
        if err != nil {
            return nil, err
        }
        archivedAssignments, err = archivedValidatorCommittee(
            requestedEpoch,
            archivedInfo,
            activeIndices,
            archivedBalances,
        )
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not retrieve archived assignment for epoch %d: %v", requestedEpoch, err)
        }
    } else {
        committeeAssignments, proposerIndexToSlots, err = helpers.CommitteeAssignments(headState, requestedEpoch)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
        }
    }

    for _, index := range filteredIndices[start:end] {
        if int(index) >= requestedState.NumValidators() {
        if int(index) >= headState.NumValidators() {
            return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= validator count %d",
                index, requestedState.NumValidators())
                index, headState.NumValidators())
        }
        if shouldFetchFromArchive {
            assignment, ok := archivedAssignments[index]
            if !ok {
                return nil, status.Errorf(codes.Internal, "Could not get archived committee assignment for index %d", index)
            }
            pubkey := headState.PubkeyAtIndex(index)
            assignment.PublicKey = pubkey[:]
            res = append(res, assignment)
            continue
        }
        comAssignment := committeeAssignments[index]
        pubkey := requestedState.PubkeyAtIndex(index)
        pubkey := headState.PubkeyAtIndex(index)
        assign := &ethpb.ValidatorAssignments_CommitteeAssignment{
            BeaconCommittees: comAssignment.Committee,
            CommitteeIndex:   comAssignment.CommitteeIndex,
@@ -125,3 +160,93 @@ func (bs *Server) ListValidatorAssignments(
        TotalSize:     int32(len(filteredIndices)),
    }, nil
}
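
The restored path above hinges on a single predicate: assignments for any epoch older than the finalized checkpoint are served from archived data, and anything newer is recomputed from the head state. A standalone sketch of that gating, with made-up epoch numbers for illustration (not values from a running node):

package main

import "fmt"

func main() {
    // Hypothetical finalized checkpoint epoch, standing in for
    // bs.FinalizationFetcher.FinalizedCheckpt().Epoch.
    finalizedEpoch := uint64(100)
    for _, requestedEpoch := range []uint64{50, 100, 101} {
        // Same comparison as shouldFetchFromArchive above.
        shouldFetchFromArchive := requestedEpoch < finalizedEpoch
        fmt.Printf("epoch %d -> fetch from archive: %v\n", requestedEpoch, shouldFetchFromArchive)
    }
    // Prints: epoch 50 -> fetch from archive: true
    //         epoch 100 -> fetch from archive: false
    //         epoch 101 -> fetch from archive: false
}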

// Computes validator assignments for an epoch and validator index using archived committee
// information, archived balances, and a set of active validators.
func archivedValidatorCommittee(
    epoch uint64,
    archivedInfo *pb.ArchivedCommitteeInfo,
    activeIndices []uint64,
    archivedBalances []uint64,
) (map[uint64]*ethpb.ValidatorAssignments_CommitteeAssignment, error) {
    proposerSeed := bytesutil.ToBytes32(archivedInfo.ProposerSeed)
    attesterSeed := bytesutil.ToBytes32(archivedInfo.AttesterSeed)

    startSlot := helpers.StartSlot(epoch)
    proposerIndexToSlots := make(map[uint64][]uint64)
    activeVals := make([]*ethpb.Validator, len(archivedBalances))
    for i, bal := range archivedBalances {
        activeVals[i] = &ethpb.Validator{EffectiveBalance: bal}
    }

    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
        seedWithSlot := append(proposerSeed[:], bytesutil.Bytes8(slot)...)
        seedWithSlotHash := hashutil.Hash(seedWithSlot)
        i, err := helpers.ComputeProposerIndex(activeVals, activeIndices, seedWithSlotHash)
        if err != nil {
            return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
        }
        proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
    }

    assignmentMap := make(map[uint64]*ethpb.ValidatorAssignments_CommitteeAssignment)
    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
        var countAtSlot = uint64(len(activeIndices)) / params.BeaconConfig().SlotsPerEpoch / params.BeaconConfig().TargetCommitteeSize
        if countAtSlot > params.BeaconConfig().MaxCommitteesPerSlot {
            countAtSlot = params.BeaconConfig().MaxCommitteesPerSlot
        }
        if countAtSlot == 0 {
            countAtSlot = 1
        }
        for i := uint64(0); i < countAtSlot; i++ {
            committee, err := helpers.BeaconCommittee(activeIndices, attesterSeed, slot, i)
            if err != nil {
                return nil, errors.Wrap(err, "could not compute committee")
            }
            for _, index := range committee {
                assignmentMap[index] = &ethpb.ValidatorAssignments_CommitteeAssignment{
                    BeaconCommittees: committee,
                    CommitteeIndex:   i,
                    AttesterSlot:     slot,
                    ProposerSlots:    proposerIndexToSlots[index],
                }
            }
        }
    }
    return assignmentMap, nil
}
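
The per-slot committee count computed in the loop above is the active validator count divided by slots per epoch and by the target committee size, clamped to the range [1, MaxCommitteesPerSlot]. A standalone sketch of that arithmetic with hard-coded mainnet-style constants (assumed here purely for illustration):

package main

import "fmt"

// committeeCountAtSlot mirrors the clamped division above.
func committeeCountAtSlot(activeCount uint64) uint64 {
    const (
        slotsPerEpoch        = 32  // assumed mainnet value
        targetCommitteeSize  = 128 // assumed mainnet value
        maxCommitteesPerSlot = 64  // assumed mainnet value
    )
    count := activeCount / slotsPerEpoch / targetCommitteeSize
    if count > maxCommitteesPerSlot {
        count = maxCommitteesPerSlot
    }
    if count == 0 {
        count = 1
    }
    return count
}

func main() {
    fmt.Println(committeeCountAtSlot(128))    // 1 (floors to 0, clamped up to the minimum)
    fmt.Println(committeeCountAtSlot(8192))   // 2 (8192/32/128)
    fmt.Println(committeeCountAtSlot(500000)) // 64 (clamped down to the maximum)
}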

func (bs *Server) archivedCommitteeData(ctx context.Context, requestedEpoch uint64) (*pb.ArchivedCommitteeInfo,
    []uint64, error) {
    archivedInfo, err := bs.BeaconDB.ArchivedCommitteeInfo(ctx, requestedEpoch)
    if err != nil {
        return nil, nil, status.Errorf(
            codes.Internal,
            "Could not retrieve archived committee info for epoch %d",
            requestedEpoch,
        )
    }
    if archivedInfo == nil {
        return nil, nil, status.Errorf(
            codes.NotFound,
            "Could not retrieve data for epoch %d, perhaps --archive in the running beacon node is disabled",
            requestedEpoch,
        )
    }
    archivedBalances, err := bs.BeaconDB.ArchivedBalances(ctx, requestedEpoch)
    if err != nil {
        return nil, nil, status.Errorf(
            codes.Internal,
            "Could not retrieve archived balances for epoch %d",
            requestedEpoch,
        )
    }
    if archivedBalances == nil {
        return nil, nil, status.Errorf(
            codes.NotFound,
            "Could not retrieve data for epoch %d, perhaps --archive in the running beacon node is disabled",
            requestedEpoch,
        )
    }
    return archivedInfo, archivedBalances, nil
}

@@ -13,11 +13,11 @@ import (
    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "github.com/prysmaticlabs/go-ssz"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
    pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
)
@@ -27,9 +27,17 @@ func TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) {
    defer dbTest.TeardownDB(t, db)

    ctx := context.Background()
    st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
        Slot: 0,
    })
    if err != nil {
        t.Fatal(err)
    }
    bs := &Server{
        BeaconDB:           db,
        GenesisTimeFetcher: &mock.ChainService{},
        BeaconDB: db,
        HeadFetcher: &mock.ChainService{
            State: st,
        },
    }

    wanted := "Cannot retrieve information about an epoch in the future"
@@ -37,7 +45,7 @@ func TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) {
        ctx,
        &ethpb.ListValidatorAssignmentsRequest{
            QueryFilter: &ethpb.ListValidatorAssignmentsRequest_Epoch{
                Epoch: helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
                Epoch: 1,
            },
        },
    ); err != nil && !strings.Contains(err.Error(), wanted) {
@@ -50,27 +58,19 @@ func TestServer_ListAssignments_NoResults(t *testing.T) {
    defer dbTest.TeardownDB(t, db)

    ctx := context.Background()
    st := testutil.NewBeaconState()

    b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
    if err := db.SaveBlock(ctx, b); err != nil {
        t.Fatal(err)
    }
    gRoot, err := ssz.HashTreeRoot(b.Block)
    st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
        Slot:        0,
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
        Validators:  []*ethpb.Validator{},
    })
    if err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, gRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveState(ctx, st, gRoot); err != nil {
        t.Fatal(err)
    }

    bs := &Server{
        BeaconDB:           db,
        GenesisTimeFetcher: &mock.ChainService{},
        StateGen:           stategen.New(db, cache.NewStateSummaryCache()),
        BeaconDB: db,
        HeadFetcher: &mock.ChainService{
            State: st,
        },
    }
    wanted := &ethpb.ValidatorAssignments{
        Assignments: make([]*ethpb.ValidatorAssignments_CommitteeAssignment, 0),
@@ -103,26 +103,11 @@ func TestServer_ListAssignments_Pagination_InputOutOfRange(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }

    b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
    if err := db.SaveBlock(ctx, b); err != nil {
        t.Fatal(err)
    }
    gRoot, err := ssz.HashTreeRoot(b.Block)
    if err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, gRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveState(ctx, headState, gRoot); err != nil {
        t.Fatal(err)
    }

    bs := &Server{
        BeaconDB:           db,
        GenesisTimeFetcher: &mock.ChainService{},
        StateGen:           stategen.New(db, cache.NewStateSummaryCache()),
        BeaconDB: db,
        HeadFetcher: &mock.ChainService{
            State: headState,
        },
    }

    wanted := fmt.Sprintf("page start %d >= list %d", 0, 0)
@@ -194,7 +179,7 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing.
    if err := db.SaveState(ctx, s, blockRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, blockRoot); err != nil {
    if err := db.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
        t.Fatal(err)
    }

@@ -208,8 +193,6 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing.
                Epoch: 0,
            },
        },
        GenesisTimeFetcher: &mock.ChainService{},
        StateGen:           stategen.New(db, cache.NewStateSummaryCache()),
    }

    res, err := bs.ListValidatorAssignments(context.Background(), &ethpb.ListValidatorAssignmentsRequest{
@@ -294,7 +277,7 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_FromArchive(t *testin
    if err := db.SaveState(ctx, s, blockRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, blockRoot); err != nil {
    if err := db.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
        t.Fatal(err)
    }

@@ -302,13 +285,35 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_FromArchive(t *testin
    // we request assignments for epoch 0, it looks within the archived data.
    bs := &Server{
        BeaconDB: db,
        HeadFetcher: &mock.ChainService{
            State: s,
        },
        FinalizationFetcher: &mock.ChainService{
            FinalizedCheckPoint: &ethpb.Checkpoint{
                Epoch: 10,
            },
        },
        GenesisTimeFetcher: &mock.ChainService{},
        StateGen:           stategen.New(db, cache.NewStateSummaryCache()),
    }

    // We then store archived data into the DB.
    currentEpoch := helpers.CurrentEpoch(s)
    proposerSeed, err := helpers.Seed(s, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
    if err != nil {
        t.Fatal(err)
    }
    attesterSeed, err := helpers.Seed(s, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
    if err != nil {
        t.Fatal(err)
    }
    if err := db.SaveArchivedCommitteeInfo(context.Background(), 0, &pbp2p.ArchivedCommitteeInfo{
        ProposerSeed: proposerSeed[:],
        AttesterSeed: attesterSeed[:],
    }); err != nil {
        t.Fatal(err)
    }

    if err := db.SaveArchivedBalances(context.Background(), 0, balances); err != nil {
        t.Fatal(err)
    }

    // Construct the wanted assignments.
@@ -374,19 +379,20 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T)
    if err := db.SaveState(ctx, s, blockRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, blockRoot); err != nil {
    if err := db.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
        t.Fatal(err)
    }

    bs := &Server{
        BeaconDB: db,
        HeadFetcher: &mock.ChainService{
            State: s,
        },
        FinalizationFetcher: &mock.ChainService{
            FinalizedCheckPoint: &ethpb.Checkpoint{
                Epoch: 0,
            },
        },
        GenesisTimeFetcher: &mock.ChainService{},
        StateGen:           stategen.New(db, cache.NewStateSummaryCache()),
    }

    pubKey1 := make([]byte, params.BeaconConfig().BLSPubkeyLength)
@@ -456,19 +462,20 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin
    if err := db.SaveState(ctx, s, blockRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, blockRoot); err != nil {
    if err := db.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
        t.Fatal(err)
    }

    bs := &Server{
        BeaconDB: db,
        HeadFetcher: &mock.ChainService{
            State: s,
        },
        FinalizationFetcher: &mock.ChainService{
            FinalizedCheckPoint: &ethpb.Checkpoint{
                Epoch: 0,
            },
        },
        GenesisTimeFetcher: &mock.ChainService{},
        StateGen:           stategen.New(db, cache.NewStateSummaryCache()),
    }

    req := &ethpb.ListValidatorAssignmentsRequest{Indices: []uint64{1, 2, 3, 4, 5, 6}, PageSize: 2, PageToken: "1"}

@@ -889,7 +889,6 @@ func TestServer_StreamIndexedAttestations_ContextCanceled(t *testing.T) {
}

func TestServer_StreamIndexedAttestations_OK(t *testing.T) {
    params.UseMainnetConfig()
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)
    exitRoutine := make(chan bool)
@@ -899,18 +898,11 @@ func TestServer_StreamIndexedAttestations_OK(t *testing.T) {

    numValidators := 64
    headState, privKeys := testutil.DeterministicGenesisState(t, uint64(numValidators))
    b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
    if err := db.SaveBlock(ctx, b); err != nil {
        t.Fatal(err)
    randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
    for i := 0; i < len(randaoMixes); i++ {
        randaoMixes[i] = make([]byte, 32)
    }
    gRoot, err := ssz.HashTreeRoot(b.Block)
    if err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, gRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveState(ctx, headState, gRoot); err != nil {
    if err := headState.SetRandaoMixes(randaoMixes); err != nil {
        t.Fatal(err)
    }

@@ -1003,7 +995,6 @@ func TestServer_StreamIndexedAttestations_OK(t *testing.T) {
        },
        AttestationNotifier:         chainService.OperationNotifier(),
        CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 1),
        StateGen:                    stategen.New(db, cache.NewStateSummaryCache()),
    }

    mockStream := mockRPC.NewMockBeaconChain_StreamIndexedAttestationsServer(ctrl)

@@ -5,6 +5,7 @@ import (

    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
@@ -19,39 +20,31 @@ func (bs *Server) ListBeaconCommittees(
    req *ethpb.ListCommitteesRequest,
) (*ethpb.BeaconCommittees, error) {

    currentSlot := bs.GenesisTimeFetcher.CurrentSlot()
    var requestedSlot uint64
    var requestingGenesis bool
    var startSlot uint64
    headSlot := bs.GenesisTimeFetcher.CurrentSlot()
    switch q := req.QueryFilter.(type) {
    case *ethpb.ListCommitteesRequest_Epoch:
        requestedSlot = helpers.StartSlot(q.Epoch)
        startSlot = helpers.StartSlot(q.Epoch)
    case *ethpb.ListCommitteesRequest_Genesis:
        requestedSlot = 0
        requestingGenesis = q.Genesis
        if !requestingGenesis {
            startSlot = headSlot
        }
    default:
        requestedSlot = currentSlot
        startSlot = headSlot
    }

    requestedEpoch := helpers.SlotToEpoch(requestedSlot)
    currentEpoch := helpers.SlotToEpoch(currentSlot)
    if requestedEpoch > currentEpoch {
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information for a future epoch, current epoch %d, requesting %d",
            currentEpoch,
            requestedEpoch,
        )
    }

    committees, activeIndices, err := bs.retrieveCommitteesForEpoch(ctx, requestedEpoch)
    committees, activeIndices, err := bs.retrieveCommitteesForEpoch(ctx, helpers.SlotToEpoch(startSlot))
    if err != nil {
        return nil, status.Errorf(
            codes.Internal,
            "Could not retrieve committees for epoch %d: %v",
            requestedEpoch,
            helpers.SlotToEpoch(startSlot),
            err,
        )
    }
    return &ethpb.BeaconCommittees{
        Epoch:                requestedEpoch,
        Epoch:                helpers.SlotToEpoch(startSlot),
        Committees:           committees,
        ActiveValidatorCount: uint64(len(activeIndices)),
    }, nil
@@ -61,21 +54,70 @@ func (bs *Server) retrieveCommitteesForEpoch(
    ctx context.Context,
    epoch uint64,
) (map[uint64]*ethpb.BeaconCommittees_CommitteesList, []uint64, error) {
    var attesterSeed [32]byte
    var activeIndices []uint64
    var err error
    startSlot := helpers.StartSlot(epoch)
    requestedState, err := bs.StateGen.StateBySlot(ctx, startSlot)
    if err != nil {
        return nil, nil, status.Error(codes.Internal, "Could not get state")
    }
    seed, err := helpers.Seed(requestedState, epoch, params.BeaconConfig().DomainBeaconAttester)
    if err != nil {
        return nil, nil, status.Error(codes.Internal, "Could not get seed")
    }
    activeIndices, err := helpers.ActiveValidatorIndices(requestedState, epoch)
    if err != nil {
        return nil, nil, status.Error(codes.Internal, "Could not get active indices")
    currentEpoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
    if helpers.SlotToEpoch(startSlot)+1 < currentEpoch {
        activeIndices, err = bs.HeadFetcher.HeadValidatorsIndices(helpers.SlotToEpoch(startSlot))
        if err != nil {
            return nil, nil, status.Errorf(
                codes.Internal,
                "Could not retrieve active indices for epoch %d: %v",
                helpers.SlotToEpoch(startSlot),
                err,
            )
        }
        archivedCommitteeInfo, err := bs.BeaconDB.ArchivedCommitteeInfo(ctx, helpers.SlotToEpoch(startSlot))
        if err != nil {
            return nil, nil, status.Errorf(
                codes.Internal,
                "Could not request archival data for epoch %d: %v",
                helpers.SlotToEpoch(startSlot),
                err,
            )
        }
        if archivedCommitteeInfo == nil {
            return nil, nil, status.Errorf(
                codes.NotFound,
                "Could not retrieve data for epoch %d, perhaps --archive in the running beacon node is disabled",
                helpers.SlotToEpoch(startSlot),
            )
        }
        attesterSeed = bytesutil.ToBytes32(archivedCommitteeInfo.AttesterSeed)
    } else if helpers.SlotToEpoch(startSlot)+1 == currentEpoch || helpers.SlotToEpoch(startSlot) == currentEpoch {
        // Otherwise, we use current beacon state to calculate the committees.
        requestedEpoch := helpers.SlotToEpoch(startSlot)
        activeIndices, err = bs.HeadFetcher.HeadValidatorsIndices(requestedEpoch)
        if err != nil {
            return nil, nil, status.Errorf(
                codes.Internal,
                "Could not retrieve active indices for requested epoch %d: %v",
                requestedEpoch,
                err,
            )
        }
        attesterSeed, err = bs.HeadFetcher.HeadSeed(requestedEpoch)
        if err != nil {
            return nil, nil, status.Errorf(
                codes.Internal,
                "Could not retrieve attester seed for requested epoch %d: %v",
                requestedEpoch,
                err,
            )
        }
    } else {
        // Otherwise, we are requesting data from the future and we return an error.
        return nil, nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
            currentEpoch,
            helpers.SlotToEpoch(startSlot),
        )
    }

    committeesListsBySlot, err := computeCommittees(startSlot, activeIndices, seed)
    committeesListsBySlot, err := computeCommittees(startSlot, activeIndices, attesterSeed)
    if err != nil {
        return nil, nil, status.Errorf(
            codes.InvalidArgument,

@@ -3,22 +3,22 @@ package beacon
import (
    "context"
    "encoding/binary"
    "reflect"
    "testing"
    "time"

    "github.com/gogo/protobuf/proto"
    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "github.com/prysmaticlabs/go-ssz"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/db"
    dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
    pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/roughtime"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "gopkg.in/d4l3k/messagediff.v1"
)

func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
@@ -27,30 +27,23 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
    helpers.ClearCache()

    numValidators := 128
    ctx := context.Background()
    headState := setupActiveValidators(t, db, numValidators)

    randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
    for i := 0; i < len(randaoMixes); i++ {
        randaoMixes[i] = make([]byte, 32)
    }
    if err := headState.SetRandaoMixes(randaoMixes); err != nil {
        t.Fatal(err)
    }

    m := &mock.ChainService{
        State:   headState,
        Genesis: roughtime.Now().Add(time.Duration(-1*int64((headState.Slot()*params.BeaconConfig().SecondsPerSlot))) * time.Second),
    }
    bs := &Server{
        HeadFetcher:        m,
        GenesisTimeFetcher: m,
        StateGen:           stategen.New(db, cache.NewStateSummaryCache()),
    }
    b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
    if err := db.SaveBlock(ctx, b); err != nil {
        t.Fatal(err)
    }
    gRoot, err := ssz.HashTreeRoot(b.Block)
    if err != nil {
        t.Fatal(err)
    }
    if err := db.SaveGenesisBlockRoot(ctx, gRoot); err != nil {
        t.Fatal(err)
    }
    if err := db.SaveState(ctx, headState, gRoot); err != nil {
        t.Fatal(err)
    }

    activeIndices, err := helpers.ActiveValidatorIndices(headState, 0)
@@ -82,6 +75,172 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
    }
}

func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)
    helpers.ClearCache()

    numValidators := 128
    headState := setupActiveValidators(t, db, numValidators)

    mixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
    for i := 0; i < len(mixes); i++ {
        mixes[i] = make([]byte, 32)
    }
    if err := headState.SetRandaoMixes(mixes); err != nil {
        t.Fatal(err)
    }
    if err := headState.SetSlot(params.BeaconConfig().SlotsPerEpoch * 2); err != nil {
        t.Fatal(err)
    }

    m := &mock.ChainService{
        State:   headState,
        Genesis: roughtime.Now().Add(time.Duration(-1*int64((headState.Slot()*params.BeaconConfig().SecondsPerSlot))) * time.Second),
    }
    bs := &Server{
        HeadFetcher:        m,
        GenesisTimeFetcher: m,
    }

    activeIndices, err := helpers.ActiveValidatorIndices(headState, 1)
    if err != nil {
        t.Fatal(err)
    }
    attesterSeed, err := helpers.Seed(headState, 1, params.BeaconConfig().DomainBeaconAttester)
    if err != nil {
        t.Fatal(err)
    }
    startSlot := helpers.StartSlot(1)
    wanted, err := computeCommittees(startSlot, activeIndices, attesterSeed)
    if err != nil {
        t.Fatal(err)
    }

    tests := []struct {
        req *ethpb.ListCommitteesRequest
        res *ethpb.BeaconCommittees
    }{
        {
            req: &ethpb.ListCommitteesRequest{
                QueryFilter: &ethpb.ListCommitteesRequest_Epoch{Epoch: 1},
            },
            res: &ethpb.BeaconCommittees{
                Epoch:                1,
                Committees:           wanted,
                ActiveValidatorCount: uint64(numValidators),
            },
        },
    }
    helpers.ClearCache()
    for i, test := range tests {
        res, err := bs.ListBeaconCommittees(context.Background(), test.req)
        if err != nil {
            t.Fatal(err)
        }
        if !proto.Equal(res, test.res) {
            diff, _ := messagediff.PrettyDiff(res, test.res)
            t.Errorf("%d/ Diff between responses %s", i, diff)
        }
    }
}

func TestServer_ListBeaconCommittees_FromArchive(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)
    helpers.ClearCache()
    ctx := context.Background()

    numValidators := 128
    balances := make([]uint64, numValidators)
    validators := make([]*ethpb.Validator, 0, numValidators)
    for i := 0; i < numValidators; i++ {
        pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
        binary.LittleEndian.PutUint64(pubKey, uint64(i))
        balances[i] = uint64(i)
        validators = append(validators, &ethpb.Validator{
            PublicKey:             pubKey,
            ActivationEpoch:       0,
            ExitEpoch:             params.BeaconConfig().FarFutureEpoch,
            WithdrawalCredentials: make([]byte, 32),
        })
    }
    headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{Validators: validators, Balances: balances})
    if err != nil {
        t.Fatal(err)
    }

    randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
    for i := 0; i < len(randaoMixes); i++ {
        randaoMixes[i] = make([]byte, 32)
    }
    if err := headState.SetRandaoMixes(randaoMixes); err != nil {
        t.Fatal(err)
    }

    if err := headState.SetSlot(params.BeaconConfig().SlotsPerEpoch * 10); err != nil {
        t.Fatal(err)
    }

    // Store the genesis seed.
    seed, err := helpers.Seed(headState, 0, params.BeaconConfig().DomainBeaconAttester)
    if err != nil {
        t.Fatal(err)
    }
    if err := db.SaveArchivedCommitteeInfo(ctx, 0, &pbp2p.ArchivedCommitteeInfo{
        AttesterSeed: seed[:],
    }); err != nil {
        t.Fatal(err)
    }

    m := &mock.ChainService{
        State: headState,
    }
    bs := &Server{
        BeaconDB:           db,
        HeadFetcher:        m,
        GenesisTimeFetcher: m,
    }

    activeIndices, err := helpers.ActiveValidatorIndices(headState, 0)
    if err != nil {
        t.Fatal(err)
    }

    wanted, err := computeCommittees(0, activeIndices, seed)
    if err != nil {
        t.Fatal(err)
    }
    res1, err := bs.ListBeaconCommittees(context.Background(), &ethpb.ListCommitteesRequest{
        QueryFilter: &ethpb.ListCommitteesRequest_Genesis{
            Genesis: true,
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    res2, err := bs.ListBeaconCommittees(context.Background(), &ethpb.ListCommitteesRequest{
        QueryFilter: &ethpb.ListCommitteesRequest_Epoch{
            Epoch: 0,
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(res1, res2) {
        t.Fatal(err)
    }
    wantedRes := &ethpb.BeaconCommittees{
        Epoch:                0,
        Committees:           wanted,
        ActiveValidatorCount: uint64(numValidators),
    }
    if !reflect.DeepEqual(wantedRes, res1) {
        t.Errorf("Wanted %v", wantedRes)
        t.Errorf("Received %v", res1)
    }
}

func setupActiveValidators(t *testing.T, db db.Database, count int) *stateTrie.BeaconState {
    balances := make([]uint64, count)
    validators := make([]*ethpb.Validator, 0, count)

@@ -30,35 +30,51 @@ func (bs *Server) ListValidatorBalances(
            req.PageSize, flags.Get().MaxPageSize)
    }

    currentEpoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
    requestedEpoch := currentEpoch
    switch q := req.QueryFilter.(type) {
    case *ethpb.ListValidatorBalancesRequest_Epoch:
        requestedEpoch = q.Epoch
    case *ethpb.ListValidatorBalancesRequest_Genesis:
        requestedEpoch = 0
    default:
        requestedEpoch = currentEpoch
    }

    if requestedEpoch > currentEpoch {
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
            currentEpoch,
            requestedEpoch,
        )
    }
    res := make([]*ethpb.ValidatorBalances_Balance, 0)
    filtered := map[uint64]bool{} // Track filtered validators to prevent duplication in the response.

    requestedState, err := bs.StateGen.StateBySlot(ctx, helpers.StartSlot(requestedEpoch))
    headState, err := bs.HeadFetcher.HeadState(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get state")
        return nil, status.Error(codes.Internal, "Could not get head state")
    }

    var requestingGenesis bool
    var epoch uint64
    switch q := req.QueryFilter.(type) {
    case *ethpb.ListValidatorBalancesRequest_Epoch:
        epoch = q.Epoch
    case *ethpb.ListValidatorBalancesRequest_Genesis:
        requestingGenesis = q.Genesis
    default:
        epoch = helpers.CurrentEpoch(headState)
    }

    var balances []uint64
    validators := headState.Validators()
    if requestingGenesis || epoch < helpers.CurrentEpoch(headState) {
        balances, err = bs.BeaconDB.ArchivedBalances(ctx, epoch)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not retrieve balances for epoch %d", epoch)
        }
        if balances == nil {
            return nil, status.Errorf(
                codes.NotFound,
                "Could not retrieve data for epoch %d, perhaps --archive in the running beacon node is disabled",
                0,
            )
        }
    } else if epoch == helpers.CurrentEpoch(headState) {
        balances = headState.Balances()
    } else {
        // Otherwise, we are requesting data from the future and we return an error.
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
            helpers.CurrentEpoch(headState),
            epoch,
        )
    }

    validators := requestedState.Validators()
    balances := requestedState.Balances()
    balancesCount := len(balances)
    for _, pubKey := range req.PublicKeys {
        // Skip empty public key.
@@ -66,7 +82,7 @@ func (bs *Server) ListValidatorBalances(
            continue
        }
        pubkeyBytes := bytesutil.ToBytes48(pubKey)
        index, ok := requestedState.ValidatorIndexByPubkey(pubkeyBytes)
        index, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
        if !ok {
            return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x", pubkeyBytes)
        }
@@ -88,6 +104,10 @@ func (bs *Server) ListValidatorBalances(

    for _, index := range req.Indices {
        if int(index) >= len(balances) {
            if epoch <= helpers.CurrentEpoch(headState) {
                return nil, status.Errorf(codes.OutOfRange, "Validator index %d does not exist in historical balances",
                    index)
            }
            return nil, status.Errorf(codes.OutOfRange, "Validator index %d >= balance list %d",
                index, len(balances))
        }
@@ -110,7 +130,7 @@ func (bs *Server) ListValidatorBalances(
    // Otherwise, attempting to paginate 0 balances below would result in an error.
    if balancesCount == 0 {
        return &ethpb.ValidatorBalances{
            Epoch:         requestedEpoch,
            Epoch:         epoch,
            Balances:      make([]*ethpb.ValidatorBalances_Balance, 0),
            TotalSize:     int32(0),
            NextPageToken: strconv.Itoa(0),
@@ -129,7 +149,7 @@ func (bs *Server) ListValidatorBalances(
    if len(req.Indices) == 0 && len(req.PublicKeys) == 0 {
        // Return everything.
        for i := start; i < end; i++ {
            pubkey := requestedState.PubkeyAtIndex(uint64(i))
            pubkey := headState.PubkeyAtIndex(uint64(i))
            res = append(res, &ethpb.ValidatorBalances_Balance{
                PublicKey: pubkey[:],
                Index:     uint64(i),
@@ -137,7 +157,7 @@ func (bs *Server) ListValidatorBalances(
            })
        }
        return &ethpb.ValidatorBalances{
            Epoch:         requestedEpoch,
            Epoch:         epoch,
            Balances:      res,
            TotalSize:     int32(balancesCount),
            NextPageToken: nextPageToken,
@@ -145,7 +165,7 @@ func (bs *Server) ListValidatorBalances(
    }

    return &ethpb.ValidatorBalances{
        Epoch:         requestedEpoch,
        Epoch:         epoch,
        Balances:      res[start:end],
        TotalSize:     int32(balancesCount),
        NextPageToken: nextPageToken,
@@ -340,18 +360,60 @@ func (bs *Server) GetValidator(
func (bs *Server) GetValidatorActiveSetChanges(
    ctx context.Context, req *ethpb.GetValidatorActiveSetChangesRequest,
) (*ethpb.ActiveSetChanges, error) {
    currentEpoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
    headState, err := bs.HeadFetcher.HeadState(ctx)
    if err != nil {
        return nil, status.Error(codes.Internal, "Could not get head state")
    }
    currentEpoch := helpers.CurrentEpoch(headState)
    requestedEpoch := currentEpoch
    requestingGenesis := false

    var requestedEpoch uint64
    switch q := req.QueryFilter.(type) {
    case *ethpb.GetValidatorActiveSetChangesRequest_Genesis:
        requestingGenesis = q.Genesis
        requestedEpoch = 0
    case *ethpb.GetValidatorActiveSetChangesRequest_Epoch:
        requestedEpoch = q.Epoch
    default:
        requestedEpoch = currentEpoch
    }
    if requestedEpoch > currentEpoch {

    activatedIndices := make([]uint64, 0)
    exitedIndices := make([]uint64, 0)
    slashedIndices := make([]uint64, 0)
    ejectedIndices := make([]uint64, 0)
    if requestingGenesis || requestedEpoch < currentEpoch {
        archivedChanges, err := bs.BeaconDB.ArchivedActiveValidatorChanges(ctx, requestedEpoch)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not fetch archived active validator changes: %v", err)
        }
        if archivedChanges == nil {
            return nil, status.Errorf(
                codes.NotFound,
                "Did not find any data for epoch %d - perhaps no active set change occurred during the epoch",
                requestedEpoch,
            )
        }
        activatedIndices = archivedChanges.Activated
        exitedIndices = archivedChanges.Exited
        slashedIndices = archivedChanges.Slashed
        ejectedIndices = archivedChanges.Ejected
    } else if requestedEpoch == currentEpoch {
        activeValidatorCount, err := helpers.ActiveValidatorCount(headState, helpers.PrevEpoch(headState))
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not get active validator count: %v", err)
        }
        vals := headState.Validators()
        activatedIndices = validators.ActivatedValidatorIndices(helpers.PrevEpoch(headState), vals)
        exitedIndices, err = validators.ExitedValidatorIndices(helpers.PrevEpoch(headState), vals, activeValidatorCount)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not determine exited validator indices: %v", err)
        }
        slashedIndices = validators.SlashedValidatorIndices(helpers.PrevEpoch(headState), vals)
        ejectedIndices, err = validators.EjectedValidatorIndices(helpers.PrevEpoch(headState), vals, activeValidatorCount)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not determine ejected validator indices: %v", err)
        }
    } else {
        // We are requesting data from the future and we return an error.
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
@@ -360,51 +422,25 @@ func (bs *Server) GetValidatorActiveSetChanges(
        )
    }

    activatedIndices := make([]uint64, 0)
    exitedIndices := make([]uint64, 0)
    slashedIndices := make([]uint64, 0)
    ejectedIndices := make([]uint64, 0)

    requestedState, err := bs.StateGen.StateBySlot(ctx, helpers.StartSlot(requestedEpoch))
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
    }

    activeValidatorCount, err := helpers.ActiveValidatorCount(requestedState, helpers.CurrentEpoch(requestedState))
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get active validator count: %v", err)
    }
    vs := requestedState.Validators()
    activatedIndices = validators.ActivatedValidatorIndices(helpers.CurrentEpoch(requestedState), vs)
    exitedIndices, err = validators.ExitedValidatorIndices(helpers.CurrentEpoch(requestedState), vs, activeValidatorCount)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not determine exited validator indices: %v", err)
    }
    slashedIndices = validators.SlashedValidatorIndices(helpers.CurrentEpoch(requestedState), vs)
    ejectedIndices, err = validators.EjectedValidatorIndices(helpers.CurrentEpoch(requestedState), vs, activeValidatorCount)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not determine ejected validator indices: %v", err)
    }

    // Retrieve public keys for the indices.
    // We retrieve the public keys for the indices.
    activatedKeys := make([][]byte, len(activatedIndices))
    exitedKeys := make([][]byte, len(exitedIndices))
    slashedKeys := make([][]byte, len(slashedIndices))
    ejectedKeys := make([][]byte, len(ejectedIndices))
    for i, idx := range activatedIndices {
        pubkey := requestedState.PubkeyAtIndex(idx)
        pubkey := headState.PubkeyAtIndex(idx)
        activatedKeys[i] = pubkey[:]
    }
    for i, idx := range exitedIndices {
        pubkey := requestedState.PubkeyAtIndex(idx)
        pubkey := headState.PubkeyAtIndex(idx)
        exitedKeys[i] = pubkey[:]
    }
    for i, idx := range slashedIndices {
        pubkey := requestedState.PubkeyAtIndex(idx)
        pubkey := headState.PubkeyAtIndex(idx)
        slashedKeys[i] = pubkey[:]
    }
    for i, idx := range ejectedIndices {
        pubkey := requestedState.PubkeyAtIndex(idx)
        pubkey := headState.PubkeyAtIndex(idx)
        ejectedKeys[i] = pubkey[:]
    }
    return &ethpb.ActiveSetChanges{
@@ -426,57 +462,81 @@ func (bs *Server) GetValidatorActiveSetChanges(
func (bs *Server) GetValidatorParticipation(
    ctx context.Context, req *ethpb.GetValidatorParticipationRequest,
) (*ethpb.ValidatorParticipationResponse, error) {
    currentEpoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot())

    var requestedEpoch uint64
    switch q := req.QueryFilter.(type) {
    case *ethpb.GetValidatorParticipationRequest_Genesis:
        requestedEpoch = 0
    case *ethpb.GetValidatorParticipationRequest_Epoch:
        requestedEpoch = q.Epoch
    default:
        // Prevent underflow and ensure participation is always queried for previous epoch.
        if currentEpoch > 1 {
            requestedEpoch = currentEpoch - 1
        }
    }

    if requestedEpoch >= currentEpoch {
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch until it is older than the current epoch, current epoch %d, requesting %d",
            currentEpoch,
            requestedEpoch,
        )
    }

    requestedState, err := bs.StateGen.StateBySlot(ctx, helpers.StartSlot(requestedEpoch+1))
    if err != nil {
        return nil, status.Error(codes.Internal, "Could not get state")
    }

    v, b, err := precompute.New(ctx, requestedState)
    if err != nil {
        return nil, status.Error(codes.Internal, "Could not set up pre compute instance")
    }
    _, b, err = precompute.ProcessAttestations(ctx, requestedState, v, b)
    if err != nil {
        return nil, status.Error(codes.Internal, "Could not pre compute attestations")
    }

    headState, err := bs.HeadFetcher.HeadState(ctx)
    if err != nil {
        return nil, status.Error(codes.Internal, "Could not get head state")
    }

    currentEpoch := helpers.CurrentEpoch(headState)
    prevEpoch := helpers.PrevEpoch(headState)

    var requestedEpoch uint64
    var requestingGenesis bool
    switch q := req.QueryFilter.(type) {
    case *ethpb.GetValidatorParticipationRequest_Genesis:
        requestingGenesis = q.Genesis
        requestedEpoch = 0
    case *ethpb.GetValidatorParticipationRequest_Epoch:
        requestedEpoch = q.Epoch
    default:
        requestedEpoch = prevEpoch
    }

    // If the request is from genesis or another past epoch, we look into our archived
    // data to find it and return it if it exists.
    if requestingGenesis || requestedEpoch < prevEpoch {
        participation, err := bs.BeaconDB.ArchivedValidatorParticipation(ctx, requestedEpoch)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not fetch archived participation: %v", err)
        }
        if participation == nil {
            return nil, status.Errorf(
                codes.NotFound,
                "Could not retrieve data for epoch %d, perhaps --archive in the running beacon node is disabled",
                0,
            )
        }
        return &ethpb.ValidatorParticipationResponse{
            Epoch:         requestedEpoch,
            Finalized:     requestedEpoch <= headState.FinalizedCheckpointEpoch(),
            Participation: participation,
        }, nil
    } else if requestedEpoch == currentEpoch {
        // We cannot retrieve participation for an epoch currently in progress.
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch currently in progress, current epoch %d, requesting %d",
            currentEpoch,
            requestedEpoch,
        )
    } else if requestedEpoch > currentEpoch {
        // We are requesting data from the future and we return an error.
        return nil, status.Errorf(
            codes.InvalidArgument,
            "Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
            currentEpoch,
            requestedEpoch,
        )
    }

    p := bs.ParticipationFetcher.Participation(requestedEpoch)
    if p == nil {
        p = &precompute.Balance{}
    }
    participation := &ethpb.ValidatorParticipation{
        EligibleEther: p.PrevEpoch,
        VotedEther:    p.PrevEpochTargetAttesters,
    }
    participation.GlobalParticipationRate = float32(0)
    // only divide if prevEpoch is non zero
    if p.PrevEpoch != 0 {
        participation.GlobalParticipationRate = float32(p.PrevEpochTargetAttesters) / float32(p.PrevEpoch)
    }

    return &ethpb.ValidatorParticipationResponse{
        Epoch:     requestedEpoch,
        Finalized: requestedEpoch <= headState.FinalizedCheckpointEpoch(),
        Participation: &ethpb.ValidatorParticipation{
            GlobalParticipationRate: float32(b.PrevEpochTargetAttesters) / float32(b.PrevEpoch),
            VotedEther:              b.PrevEpochTargetAttesters,
            EligibleEther:           b.PrevEpoch,
        },
        Epoch:         requestedEpoch,
        Finalized:     requestedEpoch <= headState.FinalizedCheckpointEpoch(),
        Participation: participation,
    }, nil
}
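
The global participation rate returned above is simply voted ether over eligible ether for the previous epoch, with a guard against a zero denominator. A tiny standalone sketch of that arithmetic, with invented gwei totals rather than real network data:

package main

import "fmt"

func main() {
    // Hypothetical previous-epoch totals, standing in for
    // p.PrevEpoch and p.PrevEpochTargetAttesters above.
    prevEpochEligible := uint64(3200000000000000)
    prevEpochTargetAttesters := uint64(3040000000000000)

    rate := float32(0)
    if prevEpochEligible != 0 { // only divide if the eligible total is non zero
        rate = float32(prevEpochTargetAttesters) / float32(prevEpochEligible)
    }
    fmt.Printf("global participation rate: %.2f\n", rate) // 0.95
}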

File diff suppressed because it is too large
@@ -259,9 +259,9 @@ func (s *Service) Start() {
        BlockNotifier:               s.blockNotifier,
        AttestationNotifier:         s.operationNotifier,
        Broadcaster:                 s.p2p,
        StateGen:                    s.stateGen,
        ReceivedAttestationsBuffer:  make(chan *ethpb.Attestation, 100),
        CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 100),
        StateGen:                    s.stateGen,
    }
    ethpb.RegisterNodeServer(s.grpcServer, nodeServer)
    ethpb.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)

@@ -130,6 +130,15 @@ var appHelpFlagGroups = []flagGroup{
            flags.InteropNumValidatorsFlag,
        },
    },
    {
        Name: "archive",
        Flags: []cli.Flag{
            flags.ArchiveEnableFlag,
            flags.ArchiveValidatorSetChangesFlag,
            flags.ArchiveBlocksFlag,
            flags.ArchiveAttestationsFlag,
        },
    },
}

func init() {

@@ -46,6 +46,7 @@ go_library(
ssz_proto_files(
    name = "ssz_proto_files",
    srcs = [
        "archive.proto",
        "messages.proto",
        "types.proto",
    ],

1157
proto/beacon/p2p/v1/archive.pb.go
generated
Executable file
File diff suppressed because it is too large
46
proto/beacon/p2p/v1/archive.proto
Normal file
@@ -0,0 +1,46 @@
syntax = "proto3";

package ethereum.beacon.p2p.v1;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "eth/v1alpha1/beacon_block.proto";

// ArchivedActiveSetChanges represents the changes to the active validator registry
// between epoch N and N-1. In particular, it keeps track of validator indices
// which were newly activated, exited, or slashed, along with those particular
// block operations.
message ArchivedActiveSetChanges {
  // Activated validator indices.
  repeated uint64 activated = 1;

  // Exited validator indices.
  repeated uint64 exited = 2;

  // Slashed validator indices.
  repeated uint64 slashed = 3;

  // Ejected validator indices.
  repeated uint64 ejected = 4;

  // Voluntary exit objects corresponding 1-to-1 to the exited indices
  // list in this same data structure.
  repeated ethereum.eth.v1alpha1.VoluntaryExit voluntary_exits = 6;

  // Proposer slashing objects corresponding 1-to-1 to the slashed proposer indices
  // list in this same data structure.
  repeated ethereum.eth.v1alpha1.ProposerSlashing proposer_slashings = 7;

  // Attester slashing objects corresponding 1-to-1 to the slashed attester indices
  // list in this same data structure.
  repeated ethereum.eth.v1alpha1.AttesterSlashing attester_slashings = 8;
}

// ArchivedCommitteeInfo represents the minimal data required to reconstruct
// validator committee assignments for an epoch N.
message ArchivedCommitteeInfo {
  // Proposer seed represents the random seed used in shuffling proposers.
  bytes proposer_seed = 1 [(gogoproto.moretags) = "ssz-size:\"32\""];

  // Attester seed represents the random seed used in shuffling attesters.
  bytes attester_seed = 2 [(gogoproto.moretags) = "ssz-size:\"32\""];
}
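
These two messages are what the archival DB methods exercised in the tests above persist per epoch. A condensed, hypothetical round trip for ArchivedCommitteeInfo in the style of those tests (imports as in the test files earlier in this diff; the zeroed seed is a placeholder, not a real shuffling seed):

// Sketch only: mirrors the SaveArchivedCommitteeInfo / ArchivedCommitteeInfo
// calls used in the tests and server code above.
func TestArchivedCommitteeInfo_RoundTrip(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)
    ctx := context.Background()

    seed := make([]byte, 32) // placeholder 32-byte seed
    if err := db.SaveArchivedCommitteeInfo(ctx, 0, &pbp2p.ArchivedCommitteeInfo{
        ProposerSeed: seed,
        AttesterSeed: seed,
    }); err != nil {
        t.Fatal(err)
    }
    info, err := db.ArchivedCommitteeInfo(ctx, 0)
    if err != nil {
        t.Fatal(err)
    }
    if info == nil {
        t.Fatal("expected archived committee info, got nil")
    }
    // info.AttesterSeed is what seeds helpers.BeaconCommittee when
    // recomputing committees for the archived epoch.
}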