Compare commits

...

8 Commits

Author SHA1 Message Date
Kasey Kirkham
5a4adba3ae WIP refactoring blockchain service startup 2022-08-09 11:11:29 -05:00
Kasey Kirkham
eb2303f02f wip refactoring powchain<>blockchain init 2022-08-08 14:12:57 -05:00
Kasey Kirkham
cd7fd7ebb6 big refactor to use WaitForClock interface 2022-08-08 14:12:08 -05:00
Kasey Kirkham
c10cbd9291 decreasing stategen interface surface area 2022-07-29 11:06:03 -05:00
Radosław Kapka
e5ab259ee1 Capella changes to protobufs (#11119)
* manual proto changes

* generated files

* missed comment
2022-07-27 18:39:15 +02:00
Raul Jordan
9e74c3d641 Better Log In Case Contract Code Not Found At Address (#11118) 2022-07-27 15:22:16 +00:00
terencechain
59dcea81c2 Clean up push proposer setting method (#11091)
* Clean up push proposer setting method

* Update validator_test.go

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-07-26 16:57:23 +00:00
terencechain
9149dc2aae Run ./hack/update-go-pbs.sh (#11107) 2022-07-26 16:45:16 +00:00
150 changed files with 5562 additions and 2771 deletions

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"clock.go",
"error.go",
"execution_engine.go",
"head.go",
@@ -52,6 +53,7 @@ go_library(
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/geninit:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
@@ -59,7 +61,6 @@ go_library(
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -27,8 +27,8 @@ type ChainInfoFetcher interface {
GenesisFetcher
CanonicalFetcher
ForkFetcher
TimeFetcher
HeadDomainFetcher
ClockProvider
}
// HeadUpdater defines a common interface for methods in blockchain service
@@ -37,9 +37,7 @@ type HeadUpdater interface {
UpdateHead(context.Context) error
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
type CurrentSlotter interface {
CurrentSlot() types.Slot
}
@@ -220,11 +218,6 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
return s.head.state.Eth1Data()
}
// GenesisTime returns the genesis time of beacon chain.
func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}
// GenesisValidatorsRoot returns the genesis validators
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
@@ -370,10 +363,44 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
s.clock = NewClock(t)
close(s.clockReady)
}
func (s *Service) setClock(c Clock) {
s.clock = c
close(s.clockReady)
}
func (s *Service) genesisTime() time.Time {
c, err := s.WaitForClock(context.TODO())
if err != nil {
panic(err)
}
return c.GenesisTime()
}
// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
// ClockProvider implements WaitForClock, yielding a clock type that can be used to get
// the genesis time, slot values derived from genesis, or a possibly synthetic time.Now value
type ClockProvider interface {
WaitForClock(context.Context) (Clock, error)
}
// WaitForClock will block until the Clock is ready. This provides a synchronization mechanism for services that
// need the clock to be available before they can start running.
func (s *Service) WaitForClock(ctx context.Context) (Clock, error) {
if s.clockReady != nil {
select {
case <-s.clockReady:
return s.clock, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
return s.clock, nil
}

View File

@@ -28,7 +28,6 @@ import (
// Ensure Service implements chain info interface.
var _ ChainInfoFetcher = (*Service)(nil)
var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)
// prepareForkchoiceState prepares a beacon state with the given data to mock
@@ -202,9 +201,9 @@ func TestHeadState_CanRetrieve(t *testing.T) {
}
func TestGenesisTime_CanRetrieve(t *testing.T) {
c := &Service{genesisTime: time.Unix(999, 0)}
c := &Service{clock: NewClock(time.Unix(999, 0))}
wanted := time.Unix(999, 0)
assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
assert.Equal(t, wanted, c.genesisTime(), "Did not get wanted genesis time")
}
func TestCurrentFork_CanRetrieve(t *testing.T) {
@@ -304,6 +303,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
}
func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
@@ -474,7 +474,7 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := context.Background()
c := &Service{genesisTime: time.Now()}
c := &Service{clock: NewClock(time.Now())}
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, opt)

View File

@@ -0,0 +1,69 @@
package blockchain
import (
	"time"

	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/time/slots"
)
// Clock abstracts important time-related concerns in the beacon chain:
// - genesis time
// - provides a time.Now() construct that can be overridden in tests
// - synchronization point for code that needs to know the genesis time
// - CurrentSlot: convenience conversion for current time -> slot
// - support backwards compatibility with the TimeFetcher interface
type Clock interface {
	GenesisTime() time.Time
	CurrentSlot() types.Slot
	Now() time.Time
}
// clock is a type that fulfills the TimeFetcher interface. This can be used in a number of places where
// blockchain.ChainInfoFetcher has historically been used. The embedded time.Time holds the genesis time,
// and now is the callback used to observe "current" time (time.Now by default, injectable for tests).
type clock struct {
	time.Time
	now Now
}
// Compile-time check that clock satisfies the Clock interface.
var _ Clock = &clock{}
// clock provides an accessor to the embedded time, also fulfilling the blockchain.TimeFetcher interface.
func (gt clock) GenesisTime() time.Time {
return gt.Time
}
// CurrentSlot returns the current slot relative to the genesis time the clock embeds,
// measured against the (possibly test-injected) now() callback.
func (gt clock) CurrentSlot() types.Slot {
	return slots.Duration(gt.Time, gt.now())
}
// Now provides a value for time.Now() that can be overridden in tests.
func (gt clock) Now() time.Time {
	return gt.now()
}
// ClockOpt is a functional option to change the behavior of a clock value made by NewClock.
// It is primarily intended as a way to inject an alternate time.Now() callback (WithNow) for testing.
type ClockOpt func(*clock)
// WithNow allows tests in particular to inject an alternate implementation of time.Now (vs using system time).
func WithNow(n Now) ClockOpt {
	return func(gt *clock) {
		gt.now = n
	}
}
// NewClock constructs a clock value anchored at the given genesis time.
// Behavior may be tuned through optional ClockOpt values; unless WithNow supplies
// a custom Now implementation, time.Now (system time) is used by default.
func NewClock(t time.Time, opts ...ClockOpt) clock {
	c := clock{Time: t}
	for _, apply := range opts {
		apply(&c)
	}
	// Fall back to the system clock when no WithNow option was provided.
	if c.now == nil {
		c.now = time.Now
	}
	return c
}
// Now is a function that can return the current time. This will be time.Now by default, but can be overridden for tests.
type Now func() time.Time

View File

@@ -0,0 +1,31 @@
package blockchain
import (
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/require"
"testing"
"time"
)
// TestClock_GenesisTime verifies that NewClock stores the given time and that
// GenesisTime returns exactly that value.
func TestClock_GenesisTime(t *testing.T) {
	n := time.Now()
	gt := NewClock(n)
	gtt := gt.GenesisTime()
	require.Equal(t, gt.Time, gtt)
	require.Equal(t, n, gtt)
}
// TestWithNow checks that a Now callback injected via WithNow drives CurrentSlot:
// with "now" frozen exactly one day after genesis, CurrentSlot must report a full
// day's worth of slots.
func TestWithNow(t *testing.T) {
	const expectedSlots uint64 = 7200 // a day worth of slots
	genesis := time.Unix(0, 0)
	frozen := genesis.Add(time.Duration(params.BeaconConfig().SecondsPerSlot*expectedSlots) * time.Second)
	c := NewClock(genesis, WithNow(func() time.Time { return frozen }))
	// in this scenario, "genesis" is exactly 24 hours before "now"
	// so "now" should be 7200 slots after "genesis"
	require.Equal(t, types.Slot(expectedSlots), c.CurrentSlot())
}

View File

@@ -315,7 +315,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
t, err := slots.ToTime(uint64(s.genesisTime().Unix()), slot)
if err != nil {
return false, nil, 0, err
}

View File

@@ -535,7 +535,7 @@ func Test_NotifyNewPayload(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
r, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -800,7 +800,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
require.NoError(t, err)
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
service.SetGenesisTime(time.Now().Add(-time.Second * 12 * 2 * 128))
parentBlk := util.NewBeaconBlockBellatrix()
wrappedParentBlock, err := wrapper.WrappedSignedBeaconBlock(parentBlk)
@@ -903,7 +903,7 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
require.NoError(t, err)
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
service.SetGenesisTime(time.Now().Add(-time.Second * 12 * 2 * 128))
payload := &v1.ExecutionPayload{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),

View File

@@ -219,7 +219,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
// Chain setup
// 0 -- 1 -- 2 -- 3
@@ -273,7 +273,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
// Chain setup
// 0 -- 1 -- 2 -- 3
@@ -339,7 +339,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
// Chain setup
// 0 -- 1 -- 2
@@ -393,7 +393,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
// Chain setup
// 0 -- 1 -- 2 -- 3
@@ -452,7 +452,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
// Chain setup
// 0 -- 1 -- 2 -- 3
@@ -522,7 +522,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
// Chain setup
// 0 -- 1 -- 2

View File

@@ -29,7 +29,7 @@ func TestService_getBlock(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(ctx, r1, b)
require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
got, err := s.getBlock(ctx, r1)
require.NoError(t, err)
require.DeepEqual(t, b, got)
@@ -59,7 +59,7 @@ func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(ctx, r1, b)
require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r1))
// block in db

View File

@@ -59,7 +59,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
return err
}
genesisTime := uint64(s.genesisTime.Unix())
genesisTime := uint64(s.genesisTime().Unix())
// Verify attestation target is from current epoch or previous epoch.
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {

View File

@@ -132,8 +132,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
if err := s.cfg.BeaconDB.SaveBlock(ctx, signed); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", signed.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, blockRoot, postState); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
@@ -538,20 +542,6 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
}
}
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
return nil
}
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {

View File

@@ -24,7 +24,11 @@ import (
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() types.Slot {
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
c, err := s.WaitForClock(context.TODO())
if err != nil {
panic(err)
}
return c.CurrentSlot()
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
@@ -48,7 +52,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.BeaconBlock
}
// Verify block slot time is not from the future.
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := slots.VerifyTime(uint64(s.genesisTime().Unix()), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
}

View File

@@ -873,7 +873,7 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
}
func TestCurrentSlot_HandlesOverflow(t *testing.T) {
svc := Service{genesisTime: prysmTime.Now().Add(1 * time.Hour)}
svc := Service{clock: NewClock(prysmTime.Now().Add(1 * time.Hour))}
slot := svc.CurrentSlot()
require.Equal(t, types.Slot(0), slot, "Unexpected slot")

View File

@@ -4,8 +4,6 @@ import (
"bytes"
"context"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -38,7 +36,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err != nil {
return nil, err
}
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
if err := slots.ValidateClock(ss, uint64(s.genesisTime().Unix())); err != nil {
return nil, err
}
return s.getAttPreState(ctx, target)
@@ -102,19 +100,14 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
break
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
log.Warn("ProcessAttestations routine waiting for genesis time")
c, err := s.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("timeout waiting for genesis time in spawnProcessAttestationsRoutine")
}
log.Warn("Genesis time received, now available to process attestations")
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
st := slots.NewSlotTicker(c.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
@@ -219,7 +212,7 @@ func (s *Service) processAttestations(ctx context.Context) {
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := slots.VerifyTime(uint64(s.genesisTime().Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}
@@ -233,7 +226,7 @@ func (s *Service) processAttestations(ctx context.Context) {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime()) {
continue
}

View File

@@ -33,7 +33,7 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
chainService.SetGenesisTime(time.Now())
e := types.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
@@ -102,7 +102,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.SetGenesisTime(prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -209,7 +209,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.SetGenesisTime(prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
copied := genesisState.Copy()

View File

@@ -7,17 +7,12 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
)
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error
@@ -53,18 +48,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
return err
}
// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
return err
}
// Reports on block and fork choice metrics.
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
justified := s.CurrentJustifiedCheckpt()
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime().Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
@@ -160,25 +150,3 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
}
return nil
}
// This checks whether it's time to start saving hot state to DB.
// It's time when there's `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality types.Epoch
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
if currentEpoch > finalized.Epoch {
sinceFinality = currentEpoch - finalized.Epoch
}
if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
s.cfg.StateGen.EnableSaveHotStateToDB(ctx)
return nil
}
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}

View File

@@ -2,10 +2,6 @@ package blockchain
import (
"context"
"sync"
"testing"
"time"
blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
@@ -21,7 +17,8 @@ import (
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
"sync"
"testing"
)
func TestService_ReceiveBlock(t *testing.T) {
@@ -284,40 +281,3 @@ func TestService_HasBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, s.HasBlock(context.Background(), r))
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
}
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
}
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}

View File

@@ -6,6 +6,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
"runtime"
"sync"
"time"
@@ -17,7 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
@@ -30,7 +30,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -53,7 +52,6 @@ type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
@@ -65,6 +63,9 @@ type Service struct {
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
processAttestationsLock sync.Mutex
clock Clock
clockReady chan struct{}
clockWaiter geninit.ClockWaiter
}
// config options for the service.
@@ -138,6 +139,70 @@ func (s *Service) Start() {
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
}
func (s *Service) CombinedStart(genesis state.BeaconState) error {
// TODO: we only do this on the genesis code path, not from disk - why?
/*
// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}
*/
gt := time.Unix(int64(genesis.GenesisTime()), 0)
s.SetGenesisTime(gt)
s.cfg.AttService.SetGenesisTime(genesis.GenesisTime())
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New()
} else {
s.cfg.ForkChoiceStore = protoarray.New()
}
// TODO: move head to use fork choice - we currently do not call initializeHeadFromDB!!
gb, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
if err := wrapper.BeaconBlockIsNil(gb); err != nil {
return errors.Wrap(err, "nil value from database block query")
}
gbr, err := gb.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute hash_tree_root of genesis block")
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, genesis, gbr); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
s.cfg.ForkChoiceStore.SetGenesisTime(genesis.GenesisTime())
obr, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(s.ctx)
if err == nil {
// this means checkpoint sync was used, use the database origin root value
s.originBlockRoot = obr
} else {
if !errors.Is(err, db.ErrNotFound) {
return errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
}
// this means we got ErrNotFound, meaning checkpoint sync wasn't used, so the node should
// be synced from genesis. In this case, use the genesis block root as origin.
s.originBlockRoot = gbr
}
// TODO: should we set origin root when its the genesis block root, or is this method only for checkpoint sync?
s.cfg.ForkChoiceStore.SetOriginRoot(s.originBlockRoot)
spawnCountdownIfPreGenesis(s.ctx, gt, genesis)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: gt,
GenesisValidatorsRoot: genesis.GenesisValidatorsRoot(),
},
})
return nil
}
// Stop the blockchain service's main event loop and associated goroutines.
func (s *Service) Stop() error {
defer s.cancel()
@@ -173,7 +238,7 @@ func (s *Service) Status() error {
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.SetGenesisTime(time.Unix(int64(saved.GenesisTime()), 0)) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
originRoot, err := s.originRootFromSavedState(s.ctx)
@@ -185,7 +250,12 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := s.initializeHeadFromDB(s.ctx); err != nil {
return errors.Wrap(err, "could not set up chain info")
}
spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
gs, err := s.cfg.BeaconDB.GenesisState(s.ctx)
if err != nil {
return err
}
gt := time.Unix(int64(gs.GenesisTime()), 0)
spawnCountdownIfPreGenesis(s.ctx, gt, gs)
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
@@ -218,7 +288,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
forkChoicer.SetGenesisTime(uint64(s.genesisTime.Unix()))
forkChoicer.SetGenesisTime(uint64(s.genesisTime().Unix()))
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
if err != nil {
@@ -247,7 +317,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
StartTime: s.genesisTime(),
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
},
})
@@ -295,48 +365,11 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
return errors.New("no finalized epoch in the database")
}
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
var finalizedState state.BeaconState
finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
finalizedState, err := s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
if flags.Get().HeadSync {
headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
}
headEpoch := slots.ToEpoch(headBlock.Block().Slot())
var epochsSinceFinality types.Epoch
if headEpoch > finalized.Epoch {
epochsSinceFinality = headEpoch - finalized.Epoch
}
// Head sync when node is far enough beyond known finalized epoch,
// this becomes really useful during long period of non-finality.
if epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint {
headRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
finalizedState, err := s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d."+
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block().Slot())
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
s.setHead(headRoot, headBlock, headState)
return nil
} else {
log.Warnf("Finalized checkpoint at slot %d is too close to the current head slot, "+
"resetting head from the checkpoint ('--%s' flag is ignored).",
finalizedState.Slot(), flags.HeadSync.Name)
}
}
if finalizedState == nil || finalizedState.IsNil() {
return errors.New("finalized state can't be nil")
}
@@ -352,34 +385,15 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
func (s *Service) startFromPOWChain() error {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.cfg.ChainStartFetcher == nil {
return errors.New("not configured web3Service for POW chain")
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
s.onPowchainStart(s.ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state notifier failed")
return
}
c, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("timeout while waiting for genesis during blockchain service startup")
return
}
log.WithField("starttime", c.GenesisTime()).Debug("Received chain start event")
s.onPowchainStart(s.ctx, c.GenesisTime())
s.setClock(c)
}()
return nil
@@ -388,8 +402,7 @@ func (s *Service) startFromPOWChain() error {
// onPowchainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState)
if err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
@@ -414,29 +427,10 @@ func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
func (s *Service) initializeBeaconChain(
ctx context.Context,
genesisTime time.Time,
preGenesisState state.BeaconState,
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
func (s *Service) initializeBeaconChain(ctx context.Context, genesisTime time.Time) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
defer span.End()
s.genesisTime = genesisTime
unixTime := uint64(genesisTime.Unix())
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
if err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}
if err := s.saveGenesisData(ctx, genesisState); err != nil {
return nil, errors.Wrap(err, "could not save genesis data")
}
log.Info("Initialized beacon chain genesis state")
// Clear out all pre-genesis data now that the state is initialized.
s.cfg.ChainStartFetcher.ClearPreGenesisData()
s.SetGenesisTime(genesisTime)
// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
@@ -447,15 +441,11 @@ func (s *Service) initializeBeaconChain(
}
s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())
return genesisState, nil
}
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis data")
}
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
return fmt.Errorf("could not load genesis block: %v", err)
@@ -476,7 +466,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime().Unix()))
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil
@@ -494,19 +484,14 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
return s.cfg.BeaconDB.HasBlock(ctx, root)
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
func spawnCountdownIfPreGenesis(ctx context.Context, gt time.Time, gs state.BeaconState) {
// only proceed if this function runs before genesis time
if prysmTime.Now().After(gt) {
return
}
gState, err := db.GenesisState(ctx)
gr, err := gs.HashTreeRoot(ctx)
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
log.WithError(err).Fatal("Could not compute hash_tree_root of genesis state")
}
gRoot, err := gState.HashTreeRoot(ctx)
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
go slots.CountdownToGenesis(ctx, gt, uint64(gs.NumValidators()), gr)
}

View File

@@ -136,7 +136,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
chainService, err := NewService(ctx, opts...)
require.NoError(t, err, "Unable to setup chain service")
chainService.genesisTime = time.Unix(1, 0) // non-zero time
chainService.SetGenesisTime(time.Unix(1, 0)) // non-zero time
return chainService
}

View File

@@ -11,6 +11,7 @@ go_library(
],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",

View File

@@ -5,6 +5,7 @@ package testing
import (
"bytes"
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"sync"
"time"
@@ -64,6 +65,7 @@ type ChainService struct {
ReceiveBlockMockErr error
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
Clock blockchain.Clock
}
// ForkChoicer mocks the same method in the chain service
@@ -326,6 +328,13 @@ func (s *ChainService) GenesisTime() time.Time {
return s.Genesis
}
// WaitForClock mocks the clock-provider interface on the chain service. Unlike
// the real implementation it never blocks: if no Clock was injected on the
// mock, a fresh Clock anchored at time.Now() is returned immediately.
// The ctx parameter exists only to satisfy the interface and is unused here.
func (s *ChainService) WaitForClock(ctx context.Context) (blockchain.Clock, error) {
	if s.Clock == nil {
		return blockchain.NewClock(time.Now()), nil
	}
	return s.Clock, nil
}
// GenesisValidatorsRoot mocks the same method in the chain service.
func (s *ChainService) GenesisValidatorsRoot() [32]byte {
return s.ValidatorsRoot
@@ -336,7 +345,7 @@ func (s *ChainService) CurrentSlot() types.Slot {
if s.Slot != nil {
return *s.Slot
}
return types.Slot(uint64(time.Now().Unix()-s.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
return s.Clock.CurrentSlot()
}
// Participation mocks the same method in the chain service.
@@ -464,3 +473,11 @@ func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterS
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
return s.FinalizedRoots[blockRoot]
}
// NewMockClock returns a Clock pinned to a fixed "current" time, positioned
// slotsAfterGenesis slots after a synthetic genesis. The genesis instant is
// back-dated by slotsAfterGenesis * SecondsPerSlot so that CurrentSlot()
// deterministically reports slotsAfterGenesis.
func NewMockClock(now time.Time, slotsAfterGenesis types.Slot) blockchain.Clock {
	offset := uint64(slotsAfterGenesis) * params.BeaconConfig().SecondsPerSlot
	genesis := now.Add(-1 * time.Second * time.Duration(offset))
	return blockchain.NewClock(genesis, blockchain.WithNow(func() time.Time {
		// Return the fixed "now", not genesis: returning genesis here made
		// CurrentSlot() always 0, defeating the slotsAfterGenesis parameter.
		return now
	}))
}

View File

@@ -0,0 +1,69 @@
package geninit
import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/time/slots"
"time"
)
// Clock abstracts important time-related concerns in the beacon chain:
// - genesis time
// - provides a time.Now() construct that can be overridden in tests
// - synchronization point for code that needs to know the genesis time
// - CurrentSlot: convenience conversion for current time -> slot
// - supports backwards compatibility with the TimeFetcher interface
type Clock interface {
	GenesisTime() time.Time
	CurrentSlot() types.Slot
	Now() time.Time
}
// clock is the concrete Clock implementation. It also fulfills the
// TimeFetcher interface, so it can be used in a number of places where
// blockchain.ChainInfoFetcher has historically been used.
type clock struct {
	genesis time.Time
	now     Now
}

var _ Clock = &clock{}

// GenesisTime returns the genesis timestamp this clock is anchored to.
func (c clock) GenesisTime() time.Time {
	return c.genesis
}

// CurrentSlot converts the current time (as reported by the injected now
// callback) into a slot number relative to genesis.
func (c clock) CurrentSlot() types.Slot {
	return slots.Duration(c.genesis, c.now())
}

// Now reports the current time; this is time.Now by default but can be
// overridden for tests via WithNow.
func (c clock) Now() time.Time {
	return c.now()
}
// ClockOpt is a functional option to change the behavior of a clock value made by NewClock.
// It is primarily intended as a way to inject an alternate time.Now() callback (WithNow) for testing.
type ClockOpt func(*clock)

// WithNow allows tests in particular to inject an alternate implementation of
// time.Now (vs using system time), making CurrentSlot/Now deterministic.
func WithNow(n Now) ClockOpt {
	return func(gt *clock) {
		gt.now = n
	}
}
// NewClock constructs a Clock anchored at the given genesis time value.
// Optional ClockOpts can be provided; if an implementation of the Now function
// type is not injected (via WithNow), time.Now (system time) is used by default.
// Returning the Clock interface (rather than the unexported clock struct)
// keeps unexported types out of the package's exported API surface.
func NewClock(t time.Time, opts ...ClockOpt) Clock {
	gt := clock{genesis: t}
	for _, o := range opts {
		o(&gt)
	}
	if gt.now == nil {
		gt.now = time.Now
	}
	return gt
}
// Now is a function type that can return the current time. This will be
// time.Now (system time) by default, but can be overridden for tests via WithNow.
type Now func() time.Time

View File

@@ -0,0 +1,84 @@
package geninit
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"go.opencensus.io/trace"
"time"
"github.com/prysmaticlabs/prysm/runtime"
log "github.com/sirupsen/logrus"
)
// Service coordinates genesis initialization: it waits for the powchain layer
// to report the genesis clock, persists genesis data to the database, and then
// releases downstream components blocked on the genesis clock.
type Service struct {
	ctx           context.Context            // NOTE(review): storing ctx in a struct is unidiomatic Go; confirm it cannot be passed per-call
	powWaiter     ClockWaiter                // source of the genesis clock (fed by the powchain service)
	genesisSetter ClockSetter                // downstream sync point, released once genesis data is saved
	powFetcher    powchain.ChainStartFetcher // provides chain-start eth1 data and the pre-genesis state
	d             db.HeadAccessDatabase      // destination for the genesis state/block
}

// Compile-time assertion that Service implements the runtime.Service lifecycle.
var _ runtime.Service = &Service{}

// ServiceOption mutates a Service during construction (functional options).
type ServiceOption func(*Service)
// New constructs the genesis-init Service, wiring together the powchain clock
// source (pw), the downstream clock sync point (gs), the chain-start data
// fetcher (f), and the database handle (d). Additional configuration can be
// applied through opts.
func New(ctx context.Context, pw ClockWaiter, gs ClockSetter, f powchain.ChainStartFetcher, d db.HeadAccessDatabase, opts ...ServiceOption) (*Service, error) {
	svc := &Service{
		ctx:           ctx,
		powWaiter:     pw,
		genesisSetter: gs,
		powFetcher:    f,
		d:             d,
	}
	for _, opt := range opts {
		opt(svc)
	}
	return svc, nil
}
// Start launches the service's event loop in a background goroutine and
// returns immediately, per the runtime.Service contract.
func (s *Service) Start() {
	go s.run()
}
// run blocks until the powchain layer determines the genesis clock, then
// persists genesis data and publishes the clock to downstream waiters.
// It is launched as a goroutine by Start and is bounded by s.ctx.
func (s *Service) run() {
	c, err := s.powWaiter.WaitForClock(s.ctx)
	if err != nil {
		// Must bail out here: the original fell through and passed the nil
		// Clock to saveGenesis, which would dereference it.
		log.WithError(err).Error("timeout waiting for genesis timestamp")
		return
	}
	if err := s.saveGenesis(c); err != nil {
		log.Fatalf("Could not initialize beacon chain, %s", err.Error())
	}
	s.genesisSetter.SetGenesisClock(c)
}
// saveGenesis derives the genesis beacon state from the chain-start eth1 data
// and the pre-genesis state, then persists it to the database. Pre-genesis
// bookkeeping is cleared once the state is safely stored.
func (s *Service) saveGenesis(c Clock) error {
	ctx, span := trace.StartSpan(s.ctx, "beacon-chain.geninit.Service.saveGenesis")
	defer span.End()

	eth1Data := s.powFetcher.ChainStartEth1Data()
	preState := s.powFetcher.PreGenesisState()
	genesisState, err := transition.OptimizedGenesisBeaconState(uint64(c.GenesisTime().Unix()), preState, eth1Data)
	if err != nil {
		return err
	}
	if err := s.d.SaveGenesisData(ctx, genesisState); err != nil {
		return errors.Wrap(err, "db error, could not save genesis data")
	}
	log.Info("Initialized beacon chain genesis state")

	// Clear out all pre-genesis data now that the state is initialized.
	s.powFetcher.ClearPreGenesisData()
	return nil
}
// Stop is a no-op required by the runtime.Service contract; the background
// goroutine's lifetime is bounded by s.ctx rather than an explicit stop.
func (s *Service) Stop() error {
	return nil
}
// Status always reports healthy; this service tracks no error state.
func (s *Service) Status() error {
	return nil
}
// GenesisReady wraps a genesis timestamp.
// NOTE(review): this type is not referenced anywhere in this file and its
// field is unexported — confirm it is still needed or remove it.
type GenesisReady struct {
	time time.Time
}

View File

@@ -0,0 +1,50 @@
package geninit
import (
"context"
"time"
)
// ClockWaiter blocks callers until a genesis Clock becomes available, or the
// given context is done.
type ClockWaiter interface {
	WaitForClock(context.Context) (Clock, error)
}

// ClockSetter is the write side of the genesis synchronization point: exactly
// one component should publish the genesis clock, either as a raw genesis
// time or as a fully-formed Clock.
type ClockSetter interface {
	SetGenesisTime(time.Time)
	SetGenesisClock(Clock)
}
// ClockSync is a one-shot synchronization point between the component that
// determines the genesis clock (a ClockSetter) and consumers that must not
// proceed until it is known (ClockWaiters). The zero value is not usable;
// construct with NewClockSync.
//
// Unlike the earlier version, the ready channel is never mutated after
// construction: the old `w.ready = nil` write raced with concurrent readers
// in WaitForClock. Closing the channel alone is sufficient — a receive from a
// closed channel returns immediately, and the close/receive pair establishes
// the happens-before edge that makes the read of w.c safe.
type ClockSync struct {
	ready chan struct{}
	c     Clock
}

// SetGenesisTime builds a Clock anchored at the given genesis time and
// publishes it to all waiters. It must be called at most once across both
// setter methods (a second call panics on the already-closed channel).
func (w *ClockSync) SetGenesisTime(g time.Time) {
	w.SetGenesisClock(NewClock(g))
}

// SetGenesisClock publishes the given Clock to all waiters. It must be called
// at most once across both setter methods. The write to w.c is sequenced
// before close(w.ready), so any reader that observes the closed channel also
// observes the Clock.
func (w *ClockSync) SetGenesisClock(c Clock) {
	w.c = c
	close(w.ready)
}

// WaitForClock blocks until the Clock has been published or ctx is done.
// Once the clock is set, subsequent calls return it immediately.
func (w *ClockSync) WaitForClock(ctx context.Context) (Clock, error) {
	select {
	case <-w.ready:
		return w.c, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// NewClockSync initializes a ClockSync with its internal ready channel.
func NewClockSync() *ClockSync {
	return &ClockSync{
		ready: make(chan struct{}),
	}
}

View File

@@ -29,6 +29,7 @@ go_library(
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",
"//beacon-chain/geninit:go_default_library",
"//beacon-chain/monitor:go_default_library",
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
"github.com/prysmaticlabs/prysm/beacon-chain/monitor"
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
@@ -211,8 +212,17 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
powSync := geninit.NewClockSync()
genSync := geninit.NewClockSync()
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
powService, err := beacon.registerPOWChainService(powSync)
if err != nil {
return nil, err
}
log.Debugln("Registering Genesis Init Service")
if err := beacon.registerGenesisInitService(powSync, genSync, powService, beacon.db); err != nil {
return nil, err
}
@@ -629,17 +639,17 @@ func (b *BeaconNode) registerBlockchainService() error {
return b.services.RegisterService(blockchainService)
}
func (b *BeaconNode) registerPOWChainService() error {
func (b *BeaconNode) registerPOWChainService(gcs geninit.ClockSetter) (*powchain.Service, error) {
if b.cliCtx.Bool(testSkipPowFlag) {
return b.services.RegisterService(&powchain.Service{})
return nil, b.services.RegisterService(&powchain.Service{})
}
bs, err := powchain.NewPowchainCollector(b.ctx)
if err != nil {
return err
return nil, err
}
depositContractAddr, err := powchain.DepositContractAddress()
if err != nil {
return err
return nil, err
}
// skipcq: CRT-D0001
@@ -652,13 +662,23 @@ func (b *BeaconNode) registerPOWChainService() error {
powchain.WithStateGen(b.stateGen),
powchain.WithBeaconNodeStatsUpdater(bs),
powchain.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
powchain.WithGenesisClockSetter(gcs),
)
web3Service, err := powchain.NewService(b.ctx, opts...)
srv, err := powchain.NewService(b.ctx, opts...)
if err != nil {
return errors.Wrap(err, "could not register proof-of-work chain web3Service")
return nil, errors.Wrap(err, "could not register proof-of-work chain web3Service")
}
return b.services.RegisterService(web3Service)
err = b.services.RegisterService(srv)
return srv, err
}
func (b *BeaconNode) registerGenesisInitService(w geninit.ClockWaiter, s geninit.ClockSetter, f powchain.ChainStartFetcher, d db.HeadAccessDatabase) error {
g, err := geninit.New(b.ctx, w, s, f, d)
if err != nil {
return err
}
return b.services.RegisterService(g)
}
func (b *BeaconNode) registerSyncService() error {
@@ -821,7 +841,6 @@ func (b *BeaconNode) registerRPCService() error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
@@ -844,6 +863,7 @@ func (b *BeaconNode) registerRPCService() error {
MaxMsgSize: maxMsgSize,
ProposerIdsCache: b.proposerIdsCache,
BlockBuilder: b.fetchBuilderService(),
ClockProvider: chainService,
})
return b.services.RegisterService(rpcService)

View File

@@ -32,6 +32,7 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/geninit:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

View File

@@ -255,6 +255,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
log.WithFields(logrus.Fields{
"ChainStartTime": chainStartTime,
}).Info("Minimum number of validators reached for beacon-chain to start")
s.genSync.SetGenesisTime(chainStartTime)
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.ChainStarted,
Data: &statefeed.ChainStartedData{

View File

@@ -5,6 +5,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/network"
@@ -123,3 +124,10 @@ func WithFinalizedStateAtStartup(st state.BeaconState) Option {
return nil
}
}
func WithGenesisClockSetter(cs geninit.ClockSetter) Option {
return func(s *Service) error {
s.genSync = cs
return nil
}
}

View File

@@ -27,6 +27,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
@@ -161,6 +162,7 @@ type Service struct {
lastReceivedMerkleIndex int64 // Keeps track of the last received index to prevent log spam.
runError error
preGenesisState state.BeaconState
genSync geninit.ClockSetter
}
// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
@@ -580,7 +582,10 @@ func (s *Service) initPOWService() {
if err := s.processPastLogs(ctx); err != nil {
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to process past deposit contract logs")
errorLogger(
err,
"Unable to process past deposit contract logs, perhaps your execution client is not fully synced",
)
continue
}
// Cache eth1 headers from our voting period.

View File

@@ -46,6 +46,7 @@ go_library(
"@com_github_grpc_ecosystem_go_grpc_middleware//recovery:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//plugin/ocgrpc:go_default_library",
"@org_golang_google_grpc//:go_default_library",
@@ -61,6 +62,7 @@ go_test(
srcs = ["service_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",

View File

@@ -80,6 +80,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//api/grpc:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",

View File

@@ -53,7 +53,11 @@ func (e *blockIdParseError) Error() string {
// GetWeakSubjectivity computes the starting epoch of the current weak subjectivity period, and then also
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, c, bs.OptimisticModeFetcher); err != nil {
// This is already a grpc error, so we can't wrap it any further
return nil, err
}
@@ -70,7 +74,11 @@ func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*eth
if err != nil {
return nil, status.Errorf(codes.Internal, "could not get weak subjectivity slot: %v", err)
}
cbr, err := bs.CanonicalHistory.BlockRootForSlot(ctx, wsSlot)
h, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
cbr, err := h.BlockRootForSlot(ctx, wsSlot)
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("could not find highest block below slot %d", wsSlot))
}

View File

@@ -24,7 +24,6 @@ import (
type Server struct {
BeaconDB db.ReadOnlyDatabase
ChainInfoFetcher blockchain.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
BlockReceiver blockchain.BlockReceiver
BlockNotifier blockfeed.Notifier
OperationNotifier operation.Notifier
@@ -37,8 +36,9 @@ type Server struct {
HeadFetcher blockchain.HeadFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
V1Alpha1ValidatorServer *v1alpha1validator.Server
SyncChecker sync.Checker
CanonicalHistory *stategen.CanonicalHistory
HeadUpdater blockchain.HeadUpdater
SyncChecker sync.Checker
CanonicalHistoryWaiter stategen.CanonicalHistoryWaiter
HeadUpdater blockchain.HeadUpdater
ExecutionPayloadReconstructor powchain.ExecutionPayloadReconstructor
ClockProvider blockchain.ClockProvider
}

View File

@@ -30,10 +30,11 @@ func (bs *Server) GetGenesis(ctx context.Context, _ *emptypb.Empty) (*ethpb.Gene
ctx, span := trace.StartSpan(ctx, "beacon.GetGenesis")
defer span.End()
genesisTime := bs.GenesisTimeFetcher.GenesisTime()
if genesisTime.IsZero() {
return nil, status.Errorf(codes.NotFound, "Chain genesis info is not yet known")
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
genesisTime := c.GenesisTime()
validatorRoot := bs.ChainInfoFetcher.GenesisValidatorsRoot()
if bytes.Equal(validatorRoot[:], params.BeaconConfig().ZeroHash[:]) {
return nil, status.Errorf(codes.NotFound, "Chain genesis info is not yet known")

View File

@@ -2,6 +2,7 @@ package beacon
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"testing"
"time"
@@ -29,11 +30,11 @@ func TestGetGenesis(t *testing.T) {
t.Run("OK", func(t *testing.T) {
chainService := &chainMock.ChainService{
Genesis: genesis,
Clock: blockchain.NewClock(genesis),
ValidatorsRoot: validatorsRoot,
}
s := Server{
GenesisTimeFetcher: chainService,
ClockProvider: chainService,
ChainInfoFetcher: chainService,
}
resp, err := s.GetGenesis(ctx, &emptypb.Empty{})
@@ -46,11 +47,11 @@ func TestGetGenesis(t *testing.T) {
t.Run("No genesis time", func(t *testing.T) {
chainService := &chainMock.ChainService{
Genesis: time.Time{},
Clock: blockchain.NewClock(time.Time{}),
ValidatorsRoot: validatorsRoot,
}
s := Server{
GenesisTimeFetcher: chainService,
ClockProvider: chainService,
ChainInfoFetcher: chainService,
}
_, err := s.GetGenesis(ctx, &emptypb.Empty{})
@@ -59,11 +60,11 @@ func TestGetGenesis(t *testing.T) {
t.Run("No genesis validators root", func(t *testing.T) {
chainService := &chainMock.ChainService{
Genesis: genesis,
Clock: blockchain.NewClock(genesis),
ValidatorsRoot: [32]byte{},
}
s := Server{
GenesisTimeFetcher: chainService,
ClockProvider: chainService,
ChainInfoFetcher: chainService,
}
_, err := s.GetGenesis(ctx, &emptypb.Empty{})

View File

@@ -27,7 +27,11 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
ctx, span := trace.StartSpan(ctx, "beacon.ListSyncCommittees")
defer span.End()
currentSlot := bs.GenesisTimeFetcher.CurrentSlot()
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentSlot := c.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
currentPeriodStartEpoch, err := slots.SyncCommitteePeriodStartEpoch(currentEpoch)
if err != nil {

View File

@@ -3,6 +3,7 @@ package beacon
import (
"bytes"
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"strconv"
"strings"
"testing"
@@ -161,11 +162,9 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
chainService := &mock.ChainService{Clock: blockchain.NewClock(time.Now())}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
ClockProvider: chainService,
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
@@ -204,11 +203,9 @@ func TestListSyncCommittees(t *testing.T) {
util.SaveBlock(t, ctx, db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &mock.ChainService{Optimistic: true}
chainService := &mock.ChainService{Optimistic: true, Clock: blockchain.NewClock(time.Now())}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
ClockProvider: chainService,
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
@@ -262,11 +259,9 @@ func TestListSyncCommitteesFuture(t *testing.T) {
}))
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
chainService := &mock.ChainService{Clock: blockchain.NewClock(time.Now())}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
ClockProvider: chainService,
StateFetcher: &futureSyncMockFetcher{
BeaconState: st,
},

View File

@@ -21,7 +21,7 @@ func ValidateSync(
ctx context.Context,
syncChecker sync.Checker,
headFetcher blockchain.HeadFetcher,
timeFetcher blockchain.TimeFetcher,
clock blockchain.Clock,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
) error {
if !syncChecker.Syncing() {
@@ -36,7 +36,7 @@ func ValidateSync(
syncDetailsContainer := &syncDetailsContainer{
SyncDetails: &SyncDetailsJson{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(clock.CurrentSlot()-headSlot), 10),
IsSyncing: true,
IsOptimistic: isOptimistic,
},

View File

@@ -270,11 +270,19 @@ func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.S
return nil, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
}
c, err := ns.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
headSlot := ns.HeadFetcher.HeadSlot()
sd, err := c.CurrentSlot().SafeSubSlot(headSlot)
if err != nil {
return nil, status.Errorf(codes.Internal, "error computing sync_distance, head > current slot, %s", err)
}
return &ethpb.SyncingResponse{
Data: &ethpb.SyncInfo{
HeadSlot: headSlot,
SyncDistance: ns.GenesisTimeFetcher.CurrentSlot() - headSlot,
SyncDistance: sd,
IsSyncing: ns.SyncChecker.Syncing(),
IsOptimistic: isOptimistic,
},

View File

@@ -173,7 +173,7 @@ func TestSyncStatus(t *testing.T) {
s := &Server{
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: syncChecker,
}

View File

@@ -22,6 +22,6 @@ type Server struct {
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
MetadataProvider p2p.MetadataProvider
GenesisTimeFetcher blockchain.TimeFetcher
HeadFetcher blockchain.HeadFetcher
ClockProvider blockchain.ClockProvider
}

View File

@@ -48,6 +48,7 @@ go_test(
srcs = ["validator_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",

View File

@@ -15,7 +15,6 @@ import (
type Server struct {
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
@@ -24,4 +23,5 @@ type Server struct {
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server *v1alpha1validator.Server
ClockProvider blockchain.ClockProvider
}

View File

@@ -42,12 +42,16 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
ctx, span := trace.StartSpan(ctx, "validator.GetAttesterDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
cs := vs.TimeFetcher.CurrentSlot()
cs := c.CurrentSlot()
currentEpoch := slots.ToEpoch(cs)
if req.Epoch > currentEpoch+1 {
return nil, status.Errorf(codes.InvalidArgument, "Request epoch %d can not be greater than next epoch %d", req.Epoch, currentEpoch+1)
@@ -125,12 +129,16 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
ctx, span := trace.StartSpan(ctx, "validator.GetProposerDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
cs := vs.TimeFetcher.CurrentSlot()
cs := c.CurrentSlot()
currentEpoch := slots.ToEpoch(cs)
if req.Epoch > currentEpoch+1 {
return nil, status.Errorf(codes.InvalidArgument, "Request epoch %d can not be greater than next epoch %d", req.Epoch, currentEpoch+1)
@@ -202,12 +210,16 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
currentEpoch := slots.ToEpoch(vs.TimeFetcher.CurrentSlot())
currentEpoch := slots.ToEpoch(c.CurrentSlot())
lastValidEpoch := syncCommitteeDutiesLastValidEpoch(currentEpoch)
if req.Epoch > lastValidEpoch {
return nil, status.Errorf(codes.InvalidArgument, "Epoch is too far in the future. Maximum valid epoch is %v.", lastValidEpoch)
@@ -272,7 +284,11 @@ func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockReq
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -289,7 +305,11 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -353,7 +373,11 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -424,7 +448,11 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -491,7 +519,11 @@ func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.Produ
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -633,6 +665,10 @@ func (vs *Server) SubmitAggregateAndProofs(ctx context.Context, req *ethpbv1.Sub
ctx, span := trace.StartSpan(ctx, "validator.SubmitAggregateAndProofs")
defer span.End()
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
for _, agg := range req.Data {
if agg == nil || agg.Message == nil || agg.Message.Aggregate == nil || agg.Message.Aggregate.Data == nil {
return nil, status.Error(codes.InvalidArgument, "Signed aggregate request can't be nil")
@@ -648,7 +684,7 @@ func (vs *Server) SubmitAggregateAndProofs(ctx context.Context, req *ethpbv1.Sub
// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(agg.Message.Aggregate.Data.Slot,
vs.TimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
c.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, status.Error(codes.InvalidArgument, "Attestation slot is no longer valid from current time")
}
}
@@ -683,7 +719,11 @@ func (vs *Server) SubmitBeaconCommitteeSubscription(ctx context.Context, req *et
ctx, span := trace.StartSpan(ctx, "validator.SubmitBeaconCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -760,7 +800,11 @@ func (vs *Server) SubmitSyncCommitteeSubscription(ctx context.Context, req *ethp
ctx, span := trace.StartSpan(ctx, "validator.SubmitSyncCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, c, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}

View File

@@ -3,6 +3,7 @@ package validator
import (
"context"
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"strconv"
"testing"
"time"
@@ -75,13 +76,12 @@ func TestGetAttesterDuties(t *testing.T) {
pubKeys[i] = deposits[i].Data.PublicKey
}
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
State: bs, Root: genesisRoot[:],
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: chain,
}
@@ -155,12 +155,13 @@ func TestGetAttesterDuties(t *testing.T) {
indices[i] = uint64(i)
}
chainSlot := params.BeaconConfig().SlotsPerEpoch.Mul(2)
clock := mockChain.NewMockClock(time.Now(), chainSlot)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
State: bs, Root: genesisRoot[:], Clock: clock,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
@@ -224,13 +225,12 @@ func TestGetAttesterDuties(t *testing.T) {
util.SaveBlock(t, ctx, db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot, Optimistic: true,
State: bs, Root: genesisRoot[:], Optimistic: true,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
@@ -251,7 +251,7 @@ func TestGetAttesterDuties_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.GetAttesterDuties(context.Background(), &ethpbv1.AttesterDutiesRequest{})
@@ -282,13 +282,12 @@ func TestGetProposerDuties(t *testing.T) {
pubKeys[i] = deposits[i].Data.PublicKey
}
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
State: bs, Root: genesisRoot[:],
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
@@ -334,12 +333,15 @@ func TestGetProposerDuties(t *testing.T) {
indices[i] = uint64(i)
}
chainSlot := params.BeaconConfig().SlotsPerEpoch.Mul(2)
clock := mockChain.NewMockClock(time.Now(), chainSlot)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
State: bs,
Root: genesisRoot[:],
Clock: clock,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
@@ -383,13 +385,12 @@ func TestGetProposerDuties(t *testing.T) {
util.SaveBlock(t, ctx, db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot, Optimistic: true,
State: bs, Root: genesisRoot[:], Optimistic: true,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
@@ -409,7 +410,7 @@ func TestGetProposerDuties_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.GetProposerDuties(context.Background(), &ethpbv1.ProposerDutiesRequest{})
@@ -437,11 +438,11 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
db := dbutil.SetupDB(t)
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
mockChainService := &mockChain.ChainService{Clock: blockchain.NewClock(genesisTime)}
vs := &Server{
StateFetcher: &testutil.MockFetcher{BeaconState: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
ClockProvider: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
@@ -576,11 +577,14 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
return newSyncPeriodSt
}
}
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
clock := mockChain.NewMockClock(time.Now(), newSyncPeriodStartSlot)
mockChainService := &mockChain.ChainService{
Clock: clock,
}
vs := &Server{
StateFetcher: &testutil.MockFetcher{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
ClockProvider: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
@@ -610,11 +614,14 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
util.SaveBlock(t, ctx, db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Optimistic: true}
mockChainService := &mockChain.ChainService{
Clock: blockchain.NewClock(genesisTime),
Optimistic: true,
}
vs := &Server{
StateFetcher: &testutil.MockFetcher{BeaconState: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
ClockProvider: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
@@ -635,7 +642,7 @@ func TestGetSyncCommitteeDuties_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.GetSyncCommitteeDuties(context.Background(), &ethpbv2.SyncCommitteeDutiesRequest{})
@@ -745,7 +752,7 @@ func TestProduceBlock_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
@@ -1011,7 +1018,7 @@ func TestProduceBlockV2(t *testing.T) {
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
ClockProvider: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1500,7 +1507,7 @@ func TestProduceBlockV2SSZ(t *testing.T) {
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
ClockProvider: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1691,7 +1698,7 @@ func TestProduceBlockV2_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.ProduceBlockV2(context.Background(), &ethpbv1.ProduceBlockRequest{})
@@ -1705,7 +1712,7 @@ func TestProduceBlockV2SSZ_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.ProduceBlockV2SSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
@@ -1971,7 +1978,7 @@ func TestProduceBlindedBlock(t *testing.T) {
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
ClockProvider: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -2460,7 +2467,7 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
ClockProvider: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: bs, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -2651,7 +2658,7 @@ func TestProduceBlindedBlock_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.ProduceBlindedBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
@@ -2665,7 +2672,7 @@ func TestProduceBlindedBlockSSZ_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.ProduceBlindedBlockSSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
@@ -2704,6 +2711,7 @@ func TestProduceAttestationData(t *testing.T) {
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
v1Alpha1Server := &v1alpha1validator.Server{
P2P: &p2pmock.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -2714,8 +2722,8 @@ func TestProduceAttestationData(t *testing.T) {
FinalizationFetcher: &mockChain.ChainService{
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
},
TimeFetcher: &mockChain.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
ClockProvider: &mockChain.ChainService{
Clock: blockchain.NewClock(gent),
},
StateNotifier: chainService.StateNotifier(),
}
@@ -2925,11 +2933,11 @@ func TestSubmitBeaconCommitteeSubscription(t *testing.T) {
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
State: bs, Root:genesisRoot[:], Slot: &chainSlot,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: &v1alpha1validator.Server{},
}
@@ -3040,7 +3048,7 @@ func TestSubmitBeaconCommitteeSubscription_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.SubmitBeaconCommitteeSubscription(context.Background(), &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{})
@@ -3069,11 +3077,12 @@ func TestSubmitSyncCommitteeSubscription(t *testing.T) {
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
State: bs,
Root: genesisRoot[:],
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: &v1alpha1validator.Server{},
}
@@ -3199,7 +3208,7 @@ func TestSubmitSyncCommitteeSubscription_SyncNotReady(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
ClockProvider: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.SubmitSyncCommitteeSubscription(context.Background(), &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{})
@@ -3236,11 +3245,12 @@ func TestSubmitAggregateAndProofs(t *testing.T) {
t.Run("OK", func(t *testing.T) {
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
Genesis: time.Now(), Slot: &chainSlot,
Clock: blockchain.NewClock(time.Now()),
Slot: &chainSlot,
}
broadcaster := &p2pmock.MockBroadcaster{}
vs := Server{
TimeFetcher: chain,
ClockProvider: chain,
Broadcaster: broadcaster,
}
@@ -3458,11 +3468,11 @@ func TestSubmitAggregateAndProofs(t *testing.T) {
t.Run("invalid attestation time", func(t *testing.T) {
chainSlot := types.Slot(0)
chain := &mockChain.ChainService{
Genesis: time.Now().Add(time.Hour * 2), Slot: &chainSlot,
Clock: blockchain.NewClock(time.Now().Add(time.Hour * 2)), Slot: &chainSlot,
}
broadcaster := &p2pmock.MockBroadcaster{}
vs := Server{
TimeFetcher: chain,
ClockProvider: chain,
Broadcaster: broadcaster,
}

View File

@@ -83,6 +83,7 @@ go_test(
embed = [":go_default_library"],
shard_count = 4,
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",

View File

@@ -32,6 +32,10 @@ func (bs *Server) ListValidatorAssignments(
)
}
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
var res []*ethpb.ValidatorAssignments_CommitteeAssignment
filtered := map[types.ValidatorIndex]bool{} // track filtered validators to prevent duplication in the response.
filteredIndices := make([]types.ValidatorIndex, 0)
@@ -45,7 +49,7 @@ func (bs *Server) ListValidatorAssignments(
requestedEpoch = q.Epoch
}
currentEpoch := slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
currentEpoch := slots.ToEpoch(c.CurrentSlot())
if requestedEpoch > currentEpoch {
return nil, status.Errorf(
codes.InvalidArgument,
@@ -59,7 +63,11 @@ func (bs *Server) ListValidatorAssignments(
if err != nil {
return nil, err
}
requestedState, err := bs.ReplayerBuilder.ReplayerForSlot(startSlot).ReplayBlocks(ctx)
b, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
requestedState, err := b.ReplayerForSlot(startSlot).ReplayBlocks(ctx)
if err != nil {
msg := fmt.Sprintf("could not replay all blocks from the closest stored state (at slot %d) "+
"to the requested epoch (%d) - %v", startSlot, requestedEpoch, err)

View File

@@ -28,16 +28,18 @@ func TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) {
ctx := context.Background()
bs := &Server{
BeaconDB: db,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, db)
c, err := bs.ClockProvider.WaitForClock(ctx)
require.NoError(t, err)
wanted := errNoEpochInfoError
_, err := bs.ListValidatorAssignments(
_, err = bs.ListValidatorAssignments(
ctx,
&ethpb.ListValidatorAssignmentsRequest{
QueryFilter: &ethpb.ListValidatorAssignmentsRequest_Epoch{
Epoch: slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
Epoch: slots.ToEpoch(c.CurrentSlot()) + 1,
},
},
)
@@ -59,7 +61,7 @@ func TestServer_ListAssignments_NoResults(t *testing.T) {
bs := &Server{
BeaconDB: db,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(st)),
}
@@ -121,7 +123,7 @@ func TestServer_ListAssignments_Pagination_InputOutOfRange(t *testing.T) {
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(s)),
}
@@ -197,7 +199,7 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing.
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(s)),
}
@@ -264,7 +266,7 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T)
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(db),
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(s)),
}
@@ -335,7 +337,7 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(db),
}

View File

@@ -340,7 +340,12 @@ func (bs *Server) StreamIndexedAttestations(
func (bs *Server) collectReceivedAttestations(ctx context.Context) {
attsByRoot := make(map[[32]byte][]*ethpb.Attestation)
twoThirdsASlot := 2 * slots.DivideSlotBy(3) /* 2/3 slot duration */
ticker := slots.NewSlotTickerWithOffset(bs.GenesisTimeFetcher.GenesisTime(), twoThirdsASlot, params.BeaconConfig().SecondsPerSlot)
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
log.WithError(err).Error("timeout while waiting for genesis timestamp in collectReceivedAttestations")
return
}
ticker := slots.NewSlotTickerWithOffset(c.GenesisTime(), twoThirdsASlot, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():

View File

@@ -3,11 +3,6 @@ package beacon
import (
"context"
"fmt"
"sort"
"strconv"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/prysmaticlabs/go-bitfield"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
@@ -36,6 +31,9 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/emptypb"
"sort"
"strconv"
"testing"
)
func TestServer_ListAttestations_NoResults(t *testing.T) {
@@ -567,7 +565,7 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) {
bs := &Server{
BeaconDB: db,
GenesisTimeFetcher: &chainMock.ChainService{State: state},
ClockProvider: &chainMock.ChainService{State: state},
HeadFetcher: &chainMock.ChainService{State: state},
StateGen: stategen.New(db),
}
@@ -666,9 +664,7 @@ func TestServer_ListIndexedAttestations_OldEpoch(t *testing.T) {
bs := &Server{
BeaconDB: db,
GenesisTimeFetcher: &chainMock.ChainService{
Genesis: time.Now(),
},
ClockProvider: &chainMock.ChainService{},
StateGen: stategen.New(db),
}
err = db.SaveStateSummary(ctx, &ethpb.StateSummary{
@@ -831,9 +827,7 @@ func TestServer_StreamIndexedAttestations_ContextCanceled(t *testing.T) {
server := &Server{
Ctx: ctx,
AttestationNotifier: chainService.OperationNotifier(),
GenesisTimeFetcher: &chainMock.ChainService{
Genesis: time.Now(),
},
ClockProvider: &chainMock.ChainService{},
}
exitRoutine := make(chan bool)
@@ -935,9 +929,7 @@ func TestServer_StreamIndexedAttestations_OK(t *testing.T) {
HeadFetcher: &chainMock.ChainService{
State: headState,
},
GenesisTimeFetcher: &chainMock.ChainService{
Genesis: time.Now(),
},
ClockProvider: &chainMock.ChainService{},
AttestationNotifier: chainService.OperationNotifier(),
CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 1),
StateGen: stategen.New(db),

View File

@@ -23,7 +23,11 @@ func (bs *Server) ListBeaconCommittees(
ctx context.Context,
req *ethpb.ListCommitteesRequest,
) (*ethpb.BeaconCommittees, error) {
currentSlot := bs.GenesisTimeFetcher.CurrentSlot()
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentSlot := c.CurrentSlot()
var requestedSlot types.Slot
switch q := req.QueryFilter.(type) {
case *ethpb.ListCommitteesRequest_Epoch:
@@ -74,7 +78,11 @@ func (bs *Server) retrieveCommitteesForEpoch(
if err != nil {
return nil, nil, err
}
requestedState, err := bs.ReplayerBuilder.ReplayerForSlot(startSlot).ReplayBlocks(ctx)
b, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, nil, status.Error(codes.Internal, err.Error())
}
requestedState, err := b.ReplayerForSlot(startSlot).ReplayBlocks(ctx)
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "error replaying blocks for state at slot %d: %v", startSlot, err)
}

View File

@@ -3,6 +3,7 @@ package beacon
import (
"context"
"encoding/binary"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"math"
"testing"
"time"
@@ -35,12 +36,13 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
headState := setupActiveValidators(t, numValidators)
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)
m := &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
Clock: blockchain.NewClock(gent),
}
bs := &Server{
HeadFetcher: m,
GenesisTimeFetcher: m,
ClockProvider: m,
StateGen: stategen.New(db),
}
b := util.NewBeaconBlock()
@@ -106,13 +108,14 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
require.NoError(t, db.SaveState(ctx, headState, gRoot))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)
m := &mock.ChainService{
State: headState,
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
Clock: blockchain.NewClock(gent),
}
bs := &Server{
HeadFetcher: m,
GenesisTimeFetcher: m,
ClockProvider: m,
StateGen: stategen.New(db),
}
addDefaultReplayerBuilder(bs, db)
@@ -162,12 +165,13 @@ func TestRetrieveCommitteesForRoot(t *testing.T) {
headState := setupActiveValidators(t, numValidators)
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)
m := &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
Clock: blockchain.NewClock(gent),
}
bs := &Server{
HeadFetcher: m,
GenesisTimeFetcher: m,
ClockProvider: m,
StateGen: stategen.New(db),
}
b := util.NewBeaconBlock()

View File

@@ -33,7 +33,6 @@ type Server struct {
FinalizationFetcher blockchain.FinalizationFetcher
DepositFetcher depositcache.DepositFetcher
BlockFetcher powchain.POWBlockFetcher
GenesisTimeFetcher blockchain.TimeFetcher
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
AttestationNotifier operation.Notifier
@@ -44,8 +43,9 @@ type Server struct {
ReceivedAttestationsBuffer chan *ethpb.Attestation
CollectedAttestationsBuffer chan []*ethpb.Attestation
StateGen stategen.StateManager
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
HeadUpdater blockchain.HeadUpdater
SyncChecker sync.Checker
CanonicalHistoryWaiter stategen.CanonicalHistoryWaiter
HeadUpdater blockchain.HeadUpdater
OptimisticModeFetcher blockchain.OptimisticModeFetcher
ClockProvider blockchain.ClockProvider
}

View File

@@ -38,10 +38,11 @@ func (bs *Server) ListValidatorBalances(
req.PageSize, cmd.Get().MaxRPCPageSize)
}
if bs.GenesisTimeFetcher == nil {
return nil, status.Errorf(codes.Internal, "Nil genesis time fetcher")
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentEpoch := slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
currentEpoch := slots.ToEpoch(c.CurrentSlot())
requestedEpoch := currentEpoch
switch q := req.QueryFilter.(type) {
case *ethpb.ListValidatorBalancesRequest_Epoch:
@@ -65,7 +66,11 @@ func (bs *Server) ListValidatorBalances(
if err != nil {
return nil, err
}
requestedState, err := bs.ReplayerBuilder.ReplayerForSlot(startSlot).ReplayBlocks(ctx)
b, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
requestedState, err := b.ReplayerForSlot(startSlot).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", startSlot, err))
}
@@ -193,7 +198,11 @@ func (bs *Server) ListValidators(
req.PageSize, cmd.Get().MaxRPCPageSize)
}
currentEpoch := slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentEpoch := slots.ToEpoch(c.CurrentSlot())
requestedEpoch := currentEpoch
switch q := req.QueryFilter.(type) {
@@ -213,14 +222,17 @@ func (bs *Server) ListValidators(
requestedEpoch = q.Epoch
}
var reqState state.BeaconState
var err error
if requestedEpoch != currentEpoch {
var s types.Slot
s, err = slots.EpochStart(requestedEpoch)
if err != nil {
return nil, err
}
reqState, err = bs.ReplayerBuilder.ReplayerForSlot(s).ReplayBlocks(ctx)
b, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
reqState, err = b.ReplayerForSlot(s).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", s, err))
}
@@ -391,7 +403,11 @@ func (bs *Server) GetValidator(
func (bs *Server) GetValidatorActiveSetChanges(
ctx context.Context, req *ethpb.GetValidatorActiveSetChangesRequest,
) (*ethpb.ActiveSetChanges, error) {
currentEpoch := slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentEpoch := slots.ToEpoch(c.CurrentSlot())
var requestedEpoch types.Epoch
switch q := req.QueryFilter.(type) {
@@ -415,7 +431,11 @@ func (bs *Server) GetValidatorActiveSetChanges(
if err != nil {
return nil, err
}
requestedState, err := bs.ReplayerBuilder.ReplayerForSlot(s).ReplayBlocks(ctx)
b, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
requestedState, err := b.ReplayerForSlot(s).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", s, err))
}
@@ -476,7 +496,11 @@ func (bs *Server) GetValidatorActiveSetChanges(
func (bs *Server) GetValidatorParticipation(
ctx context.Context, req *ethpb.GetValidatorParticipationRequest,
) (*ethpb.ValidatorParticipationResponse, error) {
currentSlot := bs.GenesisTimeFetcher.CurrentSlot()
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentSlot := c.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
var requestedEpoch types.Epoch
@@ -512,7 +536,11 @@ func (bs *Server) GetValidatorParticipation(
}
// CanonicalHistoryWaiter ensures that a canonical chain is followed to the slot
beaconState, err := bs.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayBlocks(ctx)
rb, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
beaconState, err := rb.ReplayerForSlot(endSlot).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", endSlot, err))
}
@@ -660,12 +688,16 @@ func (bs *Server) GetValidatorPerformance(
if bs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
headState, err := bs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
currSlot := bs.GenesisTimeFetcher.CurrentSlot()
currSlot := c.CurrentSlot()
if currSlot > headState.Slot() {
headRoot, err := bs.HeadFetcher.HeadRoot(ctx)
@@ -817,7 +849,11 @@ func (bs *Server) GetIndividualVotes(
ctx context.Context,
req *ethpb.IndividualVotesRequest,
) (*ethpb.IndividualVotesRespond, error) {
currentEpoch := slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
c, err := bs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentEpoch := slots.ToEpoch(c.CurrentSlot())
if req.Epoch > currentEpoch {
return nil, status.Errorf(
codes.InvalidArgument,
@@ -831,7 +867,11 @@ func (bs *Server) GetIndividualVotes(
if err != nil {
return nil, err
}
st, err := bs.ReplayerBuilder.ReplayerForSlot(s).ReplayBlocks(ctx)
b, err := bs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
st, err := b.ReplayerForSlot(s).ReplayBlocks(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to replay blocks for state at epoch %d: %v", req.Epoch, err)
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/binary"
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"sort"
"strconv"
"testing"
@@ -51,19 +52,21 @@ func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing
require.NoError(t, err)
require.NoError(t, st.SetSlot(0))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
HeadFetcher: &mock.ChainService{
State: st,
},
BeaconDB: beaconDB,
}
c, err := bs.ClockProvider.WaitForClock(ctx)
require.NoError(t, err)
wanted := errNoEpochInfoError
_, err = bs.GetValidatorActiveSetChanges(
ctx,
&ethpb.GetValidatorActiveSetChangesRequest{
QueryFilter: &ethpb.GetValidatorActiveSetChangesRequest_Epoch{
Epoch: slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
Epoch: slots.ToEpoch(c.CurrentSlot()) + 1,
},
},
)
@@ -82,15 +85,17 @@ func TestServer_ListValidatorBalances_CannotRequestFutureEpoch(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: st,
},
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
wanted := errNoEpochInfoError
c, err := bs.ClockProvider.WaitForClock(ctx)
require.NoError(t, err)
_, err = bs.ListValidatorBalances(
ctx,
&ethpb.ListValidatorBalancesRequest{
QueryFilter: &ethpb.ListValidatorBalancesRequest_Epoch{
Epoch: slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
Epoch: slots.ToEpoch(c.CurrentSlot()) + 1,
},
},
)
@@ -105,7 +110,7 @@ func TestServer_ListValidatorBalances_NoResults(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(0))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
@@ -172,7 +177,7 @@ func TestServer_ListValidatorBalances_DefaultResponse_NoArchive(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, beaconDB.SaveState(ctx, st, gRoot))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
HeadFetcher: &mock.ChainService{
State: st,
@@ -201,7 +206,7 @@ func TestServer_ListValidatorBalances_PaginationOutOfRange(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
HeadFetcher: &mock.ChainService{
State: headState,
@@ -250,7 +255,7 @@ func TestServer_ListValidatorBalances_Pagination_Default(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
HeadFetcher: &mock.ChainService{
State: headState,
@@ -334,7 +339,7 @@ func TestServer_ListValidatorBalances_Pagination_CustomPageSizes(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
HeadFetcher: &mock.ChainService{
State: headState,
@@ -402,7 +407,7 @@ func TestServer_ListValidatorBalances_OutOfRange(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
HeadFetcher: &mock.ChainService{
State: headState,
@@ -425,10 +430,7 @@ func TestServer_ListValidators_CannotRequestFutureEpoch(t *testing.T) {
require.NoError(t, st.SetSlot(0))
bs := &Server{
BeaconDB: beaconDB,
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
HeadFetcher: &mock.ChainService{
State: st,
},
@@ -449,12 +451,10 @@ func TestServer_ListValidators_CannotRequestFutureEpoch(t *testing.T) {
func TestServer_ListValidators_reqStateIsNil(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
secondsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
gent := time.Now().Add(time.Duration(-1*int64(secondsPerEpoch)) * time.Second)
bs := &Server{
BeaconDB: beaconDB,
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 1.
Genesis: time.Now().Add(time.Duration(-1*int64(secondsPerEpoch)) * time.Second),
},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
HeadFetcher: &mock.ChainService{
State: nil,
},
@@ -492,10 +492,7 @@ func TestServer_ListValidators_NoResults(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, st, gRoot))
bs := &Server{
BeaconDB: beaconDB,
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
HeadFetcher: &mock.ChainService{
State: st,
},
@@ -562,10 +559,7 @@ func TestServer_ListValidators_OnlyActiveValidators(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: st,
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
@@ -630,10 +624,7 @@ func TestServer_ListValidators_InactiveInTheMiddle(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: st,
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
@@ -665,7 +656,7 @@ func TestServer_ListValidatorBalances_UnknownValidatorInResponse(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, gRoot))
bs := &Server{
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
HeadFetcher: &mock.ChainService{
State: headState,
@@ -715,10 +706,7 @@ func TestServer_ListValidators_NoPagination(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
FinalizationFetcher: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
@@ -748,10 +736,7 @@ func TestServer_ListValidators_StategenNotUsed(t *testing.T) {
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
}
received, err := bs.ListValidators(context.Background(), &ethpb.ListValidatorsRequest{})
@@ -786,10 +771,7 @@ func TestServer_ListValidators_IndicesPubKeys(t *testing.T) {
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
@@ -822,10 +804,7 @@ func TestServer_ListValidators_Pagination(t *testing.T) {
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
@@ -959,10 +938,7 @@ func TestServer_ListValidators_PaginationOutOfRange(t *testing.T) {
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
@@ -1003,10 +979,7 @@ func TestServer_ListValidators_DefaultPageSize(t *testing.T) {
Epoch: 0,
},
},
GenesisTimeFetcher: &mock.ChainService{
// We are in epoch 0.
Genesis: time.Now(),
},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
@@ -1045,13 +1018,12 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, r))
secondsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
gent := time.Now().Add(time.Duration(-1*int64(uint64(epochs)*secondsPerEpoch)) * time.Second)
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*int64(uint64(epochs)*secondsPerEpoch)) * time.Second),
},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -1124,13 +1096,12 @@ func TestServer_ListValidators_ProcessHeadStateSlots(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, st, gRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
secondsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
gent := time.Now().Add(time.Duration(-1*int64(secondsPerEpoch)) * time.Second)
bs := &Server{
HeadFetcher: &mock.ChainService{
State: st,
},
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*int64(secondsPerEpoch)) * time.Second),
},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
StateGen: stategen.New(beaconDB),
}
@@ -1283,7 +1254,7 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
FinalizationFetcher: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
},
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetValidatorActiveSetChanges(ctx, &ethpb.GetValidatorActiveSetChangesRequest{
@@ -1487,16 +1458,18 @@ func TestServer_GetValidatorParticipation_CannotRequestFutureEpoch(t *testing.T)
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
StateGen: stategen.New(beaconDB),
}
wanted := "Cannot retrieve information about an epoch"
c, err := bs.ClockProvider.WaitForClock(ctx)
require.NoError(t, err)
_, err = bs.GetValidatorParticipation(
ctx,
&ethpb.GetValidatorParticipationRequest{
QueryFilter: &ethpb.GetValidatorParticipationRequest_Epoch{
Epoch: slots.ToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) + 1,
Epoch: slots.ToEpoch(c.CurrentSlot()) + 1,
},
},
)
@@ -1550,13 +1523,12 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
gent := prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
BeaconDB: beaconDB,
HeadFetcher: m,
StateGen: stategen.New(beaconDB),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bRoot: true,
@@ -1629,13 +1601,12 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
m := &mock.ChainService{State: headState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
gent := prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
BeaconDB: beaconDB,
HeadFetcher: m,
StateGen: stategen.New(beaconDB),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
CanonicalFetcher: &mock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bRoot: true,
@@ -1726,13 +1697,12 @@ func runGetValidatorParticipationCurrentAndPrevEpoch(t *testing.T, genState stat
m := &mock.ChainService{State: genState}
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
gent := prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
BeaconDB: beaconDB,
HeadFetcher: m,
StateGen: stategen.New(beaconDB),
GenesisTimeFetcher: &mock.ChainService{
Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second),
},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
FinalizationFetcher: &mock.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 100}},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -1837,11 +1807,12 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
farFuture := params.BeaconConfig().FarFutureSlot
@@ -1901,13 +1872,14 @@ func TestGetValidatorPerformance_Indices(t *testing.T) {
}
require.NoError(t, headState.SetValidators(validators))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -1974,13 +1946,14 @@ func TestGetValidatorPerformance_IndicesPubkeys(t *testing.T) {
require.NoError(t, headState.SetValidators(validators))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -2053,11 +2026,12 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
@@ -2123,11 +2097,13 @@ func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
clock := blockchain.NewClock(gent)
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: clock},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
@@ -2194,12 +2170,14 @@ func setupValidators(t testing.TB, _ db.Database, count int) ([]*ethpb.Validator
}
func TestServer_GetIndividualVotes_RequestFutureSlot(t *testing.T) {
ds := &Server{GenesisTimeFetcher: &mock.ChainService{}}
ds := &Server{ClockProvider: &mock.ChainService{}}
c, err := ds.ClockProvider.WaitForClock(context.Background())
require.NoError(t, err)
req := &ethpb.IndividualVotesRequest{
Epoch: slots.ToEpoch(ds.GenesisTimeFetcher.CurrentSlot()) + 1,
Epoch: slots.ToEpoch(c.CurrentSlot()) + 1,
}
wanted := errNoEpochInfoError
_, err := ds.GetIndividualVotes(context.Background(), req)
_, err = ds.GetIndividualVotes(context.Background(), req)
assert.ErrorContains(t, wanted, err)
}
@@ -2229,7 +2207,7 @@ func TestServer_GetIndividualVotes_ValidatorsDontExist(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -2325,7 +2303,7 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -2390,7 +2368,7 @@ func TestServer_GetIndividualVotes_WorkingAltair(t *testing.T) {
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -2476,7 +2454,7 @@ func TestServer_GetIndividualVotes_AltairEndOfEpoch(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
@@ -2564,7 +2542,7 @@ func TestServer_GetIndividualVotes_BellatrixEndOfEpoch(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)

View File

@@ -45,6 +45,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db/testing:go_default_library",

View File

@@ -41,12 +41,14 @@ func (ds *Server) GetBlock(
// GetInclusionSlot of an attestation in block.
func (ds *Server) GetInclusionSlot(ctx context.Context, req *pbrpc.InclusionSlotRequest) (*pbrpc.InclusionSlotResponse, error) {
ds.GenesisTimeFetcher.CurrentSlot()
c, err := ds.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
// Attestation has one epoch to get included in the chain. This blocks users from requesting too soon.
epochBack := types.Slot(0)
if ds.GenesisTimeFetcher.CurrentSlot() > params.BeaconConfig().SlotsPerEpoch {
epochBack = ds.GenesisTimeFetcher.CurrentSlot() - params.BeaconConfig().SlotsPerEpoch
if c.CurrentSlot() > params.BeaconConfig().SlotsPerEpoch {
epochBack = c.CurrentSlot() - params.BeaconConfig().SlotsPerEpoch
}
if epochBack < req.Slot {
return nil, fmt.Errorf("attestation has one epoch window, please request slot older than %d", epochBack)

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -51,10 +52,11 @@ func TestServer_GetAttestationInclusionSlot(t *testing.T) {
db := dbTest.SetupDB(t)
ctx := context.Background()
offset := int64(2 * params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
bs := &Server{
BeaconDB: db,
StateGen: stategen.New(db),
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(gent)},
}
s, _ := util.DeterministicGenesisState(t, 2048)

View File

@@ -25,13 +25,13 @@ import (
// gated behind the feature flag --enable-debug-rpc-endpoints.
type Server struct {
BeaconDB db.NoHeadAccessDatabase
GenesisTimeFetcher blockchain.TimeFetcher
StateGen *stategen.State
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
PeerManager p2p.PeerManager
PeersFetcher p2p.PeersProvider
ReplayerBuilder stategen.ReplayerBuilder
PeersFetcher p2p.PeersProvider
CanonicalHistoryWaiter stategen.CanonicalHistoryWaiter
ClockProvider blockchain.ClockProvider
}
// SetLoggingLevel of a beacon node according to a request type,

View File

@@ -18,7 +18,11 @@ func (ds *Server) GetBeaconState(
) (*pbrpc.SSZResponse, error) {
switch q := req.QueryFilter.(type) {
case *pbrpc.BeaconStateRequest_Slot:
currentSlot := ds.GenesisTimeFetcher.CurrentSlot()
c, err := ds.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
currentSlot := c.CurrentSlot()
requestedSlot := q.Slot
if requestedSlot > currentSlot {
return nil, status.Errorf(
@@ -29,7 +33,11 @@ func (ds *Server) GetBeaconState(
)
}
st, err := ds.ReplayerBuilder.ReplayerForSlot(q.Slot).ReplayBlocks(ctx)
b, err := ds.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
st, err := b.ReplayerForSlot(q.Slot).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("error replaying blocks for state at slot %d: %v", q.Slot, err))
}

View File

@@ -39,7 +39,7 @@ func TestServer_GetBeaconState(t *testing.T) {
require.NoError(t, db.SaveState(ctx, st, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, db)
_, err = bs.GetBeaconState(ctx, &pbrpc.BeaconStateRequest{})
@@ -95,13 +95,15 @@ func TestServer_GetBeaconState(t *testing.T) {
}
func TestServer_GetBeaconState_RequestFutureSlot(t *testing.T) {
ds := &Server{GenesisTimeFetcher: &mock.ChainService{}}
ds := &Server{ClockProvider: &mock.ChainService{}}
c, err := ds.ClockProvider.WaitForClock(context.Background())
require.NoError(t, err)
req := &pbrpc.BeaconStateRequest{
QueryFilter: &pbrpc.BeaconStateRequest_Slot{
Slot: ds.GenesisTimeFetcher.CurrentSlot() + 1,
Slot: c.CurrentSlot() + 1,
},
}
wanted := "Cannot retrieve information about a slot in the future"
_, err := ds.GetBeaconState(context.Background(), req)
_, err = ds.GetBeaconState(context.Background(), req)
assert.ErrorContains(t, wanted, err)
}

View File

@@ -30,6 +30,7 @@ go_test(
srcs = ["server_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",

View File

@@ -38,11 +38,11 @@ type Server struct {
BeaconDB db.ReadOnlyDatabase
PeersFetcher p2p.PeersProvider
PeerManager p2p.PeerManager
GenesisTimeFetcher blockchain.TimeFetcher
GenesisFetcher blockchain.GenesisFetcher
POWChainInfoFetcher powchain.ChainInfoFetcher
BeaconMonitoringHost string
BeaconMonitoringPort int
ClockProvider blockchain.ClockProvider
}
// GetSyncStatus checks the current network sync status of the node.
@@ -59,7 +59,11 @@ func (ns *Server) GetGenesis(ctx context.Context, _ *empty.Empty) (*ethpb.Genesi
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve contract address from db: %v", err)
}
genesisTime := ns.GenesisTimeFetcher.GenesisTime()
c, err := ns.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
genesisTime := c.GenesisTime()
var defaultGenesisTime time.Time
var gt *timestamp.Timestamp
if genesisTime == defaultGenesisTime {

View File

@@ -3,6 +3,7 @@ package node
import (
"context"
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"testing"
"time"
@@ -51,7 +52,7 @@ func TestNodeServer_GetGenesis(t *testing.T) {
genValRoot := bytesutil.ToBytes32([]byte("I am root"))
ns := &Server{
BeaconDB: db,
GenesisTimeFetcher: &mock.ChainService{},
ClockProvider: &mock.ChainService{},
GenesisFetcher: &mock.ChainService{
State: st,
ValidatorsRoot: genValRoot,
@@ -64,7 +65,7 @@ func TestNodeServer_GetGenesis(t *testing.T) {
assert.Equal(t, res.GenesisTime.Seconds, pUnix.Seconds)
assert.DeepEqual(t, genValRoot[:], res.GenesisValidatorsRoot)
ns.GenesisTimeFetcher = &mock.ChainService{Genesis: time.Unix(10, 0)}
ns.ClockProvider = &mock.ChainService{Clock: blockchain.NewClock(time.Unix(10, 0))}
res, err = ns.GetGenesis(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
pUnix = timestamppb.New(time.Unix(10, 0))

View File

@@ -117,6 +117,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/builder/testing:go_default_library",
"//beacon-chain/cache:go_default_library",

View File

@@ -129,9 +129,13 @@ func (vs *Server) SubmitSignedAggregateSelectionProof(
return nil, status.Error(codes.InvalidArgument, "Signed signatures can't be zero hashes")
}
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout waiting for genesis time initialization, %s", err)
}
// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
vs.TimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
c.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, status.Error(codes.InvalidArgument, "Attestation slot is no longer valid from current time")
}

View File

@@ -2,6 +2,7 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"reflect"
"testing"
"time"
@@ -55,7 +56,7 @@ func TestSubmitAggregateAndProof_CantFindValidatorIndex(t *testing.T) {
server := &Server{
HeadFetcher: &mock.ChainService{State: s},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
priv, err := bls.RandKey()
@@ -83,7 +84,7 @@ func TestSubmitAggregateAndProof_IsAggregatorAndNoAtts(t *testing.T) {
HeadFetcher: &mock.ChainService{State: s},
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttPool: attestations.NewPool(),
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
priv, err := bls.RandKey()
@@ -117,7 +118,7 @@ func TestSubmitAggregateAndProof_UnaggregateOk(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttPool: attestations.NewPool(),
P2P: &mockp2p.MockBroadcaster{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
priv, err := bls.RandKey()
@@ -155,7 +156,7 @@ func TestSubmitAggregateAndProof_AggregateOk(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttPool: attestations.NewPool(),
P2P: &mockp2p.MockBroadcaster{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
priv, err := bls.RandKey()
@@ -195,7 +196,7 @@ func TestSubmitAggregateAndProof_AggregateNotOk(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttPool: attestations.NewPool(),
P2P: &mockp2p.MockBroadcaster{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
priv, err := bls.RandKey()
@@ -324,7 +325,7 @@ func TestSubmitAggregateAndProof_PreferOwnAttestation(t *testing.T) {
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttPool: attestations.NewPool(),
P2P: &mockp2p.MockBroadcaster{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
priv, err := bls.RandKey()
@@ -375,7 +376,7 @@ func TestSubmitAggregateAndProof_SelectsMostBitsWhenOwnAttestationNotPresent(t *
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttPool: attestations.NewPool(),
P2P: &mockp2p.MockBroadcaster{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
priv, err := bls.RandKey()
@@ -428,8 +429,8 @@ func TestSubmitSignedAggregateSelectionProof_ZeroHashesSignatures(t *testing.T)
}
func TestSubmitSignedAggregateSelectionProof_InvalidSlot(t *testing.T) {
c := &mock.ChainService{Genesis: time.Now()}
aggregatorServer := &Server{TimeFetcher: c}
c := &mock.ChainService{Clock: blockchain.NewClock(time.Now())}
aggregatorServer := &Server{ClockProvider: c}
req := &ethpb.SignedAggregateSubmitRequest{
SignedAggregateAndProof: &ethpb.SignedAggregateAttestationAndProof{
Signature: []byte{'a'},

View File

@@ -41,15 +41,16 @@ func (vs *Server) StreamDuties(req *ethpb.DutiesRequest, stream ethpb.BeaconNode
return status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
c, err := vs.ClockProvider.WaitForClock(stream.Context())
if err != nil {
return status.Errorf(codes.Internal, "timeout waiting for genesis timestamp, %s", err)
}
// If we are post-genesis time, then set the current epoch to
// the number epochs since the genesis time, otherwise 0 by default.
genesisTime := vs.TimeFetcher.GenesisTime()
if genesisTime.IsZero() {
return status.Error(codes.Unavailable, "genesis time is not set")
}
genesisTime := c.GenesisTime()
var currentEpoch types.Epoch
if genesisTime.Before(prysmTime.Now()) {
currentEpoch = slots.EpochsSinceGenesis(vs.TimeFetcher.GenesisTime())
if genesisTime.Before(c.Now()) {
currentEpoch = slots.EpochsSinceGenesis(genesisTime)
}
req.Epoch = currentEpoch
res, err := vs.duties(stream.Context(), req)
@@ -66,7 +67,7 @@ func (vs *Server) StreamDuties(req *ethpb.DutiesRequest, stream ethpb.BeaconNode
defer stateSub.Unsubscribe()
secondsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
epochTicker := slots.NewSlotTicker(vs.TimeFetcher.GenesisTime(), secondsPerEpoch)
epochTicker := slots.NewSlotTicker(genesisTime, secondsPerEpoch)
for {
select {
// Ticks every epoch to submit assignments to connected validator clients.
@@ -82,7 +83,7 @@ func (vs *Server) StreamDuties(req *ethpb.DutiesRequest, stream ethpb.BeaconNode
case ev := <-stateChannel:
// If a reorg occurred, we recompute duties for the connected validator clients
// and send another response over the server stream right away.
currentEpoch = slots.EpochsSinceGenesis(vs.TimeFetcher.GenesisTime())
currentEpoch = slots.EpochsSinceGenesis(genesisTime)
if ev.Type == statefeed.Reorg {
data, ok := ev.Data.(*ethpbv1.EventChainReorg)
if !ok {
@@ -108,7 +109,11 @@ func (vs *Server) StreamDuties(req *ethpb.DutiesRequest, stream ethpb.BeaconNode
// Compute the validator duties from the head state's corresponding epoch
// for validators public key / indices requested.
func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
currentEpoch := slots.ToEpoch(vs.TimeFetcher.CurrentSlot())
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
currentEpoch := slots.ToEpoch(c.CurrentSlot())
if req.Epoch > currentEpoch+1 {
return nil, status.Errorf(codes.Unavailable, "Request epoch %d can not be greater than next epoch %d", req.Epoch, currentEpoch+1)
}

View File

@@ -3,6 +3,7 @@ package validator
import (
"context"
"encoding/binary"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"testing"
"time"
@@ -57,11 +58,13 @@ func TestGetDuties_OK(t *testing.T) {
}
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
State: bs,
Root: genesisRoot[:],
Clock: blockchain.NewClock(time.Now()),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
@@ -141,12 +144,15 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
}
slot := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * params.BeaconConfig().SecondsPerSlot
gent := time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
State: bs,
Root: genesisRoot[:],
Clock: blockchain.NewClock(gent),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
@@ -247,12 +253,15 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
}
slot := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * params.BeaconConfig().SecondsPerSlot
gent := time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
State: bs,
Root: genesisRoot[:],
Clock: blockchain.NewClock(gent),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
@@ -336,15 +345,18 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
require.NoError(t, helpers.UpdateSyncCommitteeCache(bs))
slot := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * params.BeaconConfig().SecondsPerSlot
gent := time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second)
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
State: bs,
Root: genesisRoot[:],
Clock: blockchain.NewClock(gent),
}
depositCache, err := depositcache.New()
require.NoError(t, err)
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
@@ -365,10 +377,10 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
func TestGetDuties_SlotOutOfUpperBound(t *testing.T) {
chain := &mockChain.ChainService{
Genesis: time.Now(),
Clock: blockchain.NewClock(time.Now()),
}
vs := &Server{
TimeFetcher: chain,
ClockProvider: chain,
}
req := &ethpb.DutiesRequest{
Epoch: types.Epoch(chain.CurrentSlot()/params.BeaconConfig().SlotsPerEpoch + 2),
@@ -400,11 +412,13 @@ func TestGetDuties_CurrentEpoch_ShouldNotFail(t *testing.T) {
}
chain := &mockChain.ChainService{
State: bState, Root: genesisRoot[:], Genesis: time.Now(),
State: bState,
Root: genesisRoot[:],
Clock: blockchain.NewClock(time.Now()),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
@@ -439,11 +453,12 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) {
}
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
State: bs, Root: genesisRoot[:],
Clock: blockchain.NewClock(time.Now()),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
ClockProvider: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
@@ -506,13 +521,13 @@ func TestStreamDuties_OK(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
c := &mockChain.ChainService{
Genesis: time.Now(),
Clock: blockchain.NewClock(time.Now()),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
ClockProvider: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
@@ -564,13 +579,13 @@ func TestStreamDuties_OK_ChainReorg(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
c := &mockChain.ChainService{
Genesis: time.Now(),
Clock: blockchain.NewClock(time.Now()),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
ClockProvider: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

View File

@@ -42,7 +42,11 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
return nil, err
}
if err := helpers.ValidateAttestationTime(req.Slot, vs.TimeFetcher.GenesisTime(),
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if err := helpers.ValidateAttestationTime(req.Slot, c.GenesisTime(),
params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("invalid request: %v", err))
}

View File

@@ -2,6 +2,7 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"math/rand"
"sync"
"testing"
@@ -121,6 +122,7 @@ func TestGetAttestationData_OK(t *testing.T) {
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -131,8 +133,8 @@ func TestGetAttestationData_OK(t *testing.T) {
FinalizationFetcher: &mock.ChainService{
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
},
TimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
ClockProvider: &mock.ChainService{
Clock: blockchain.NewClock(gent),
},
StateNotifier: chainService.StateNotifier(),
}
@@ -178,7 +180,7 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
as := &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
HeadFetcher: &mock.ChainService{},
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
}
@@ -192,7 +194,7 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
require.NoError(t, err)
as = &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
AttestationCache: cache.NewAttestationCache(),
@@ -252,6 +254,8 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
clock := blockchain.NewClock(gent)
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
AttestationCache: cache.NewAttestationCache(),
@@ -260,7 +264,7 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: clock},
StateNotifier: chainService.StateNotifier(),
}
@@ -299,11 +303,13 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) {
}
slot := types.Slot(2)
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
clock := blockchain.NewClock(gent)
server := &Server{
HeadFetcher: &mock.ChainService{State: state},
AttestationCache: cache.NewAttestationCache(),
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: clock},
StateNotifier: chainService.StateNotifier(),
}
@@ -347,10 +353,12 @@ func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) {
slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
clock := blockchain.NewClock(gent)
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: clock},
}
req := &ethpb.AttestationDataRequest{
@@ -411,17 +419,19 @@ func TestServer_GetAttestationData_HeadStateSlotGreaterThanRequestSlot(t *testin
beaconstate := beaconState.Copy()
require.NoError(t, beaconstate.SetSlot(beaconstate.Slot()-1))
require.NoError(t, db.SaveState(ctx, beaconstate, blockRoot2))
chainService := &mock.ChainService{
Genesis: time.Now(),
}
offset = int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
gent := time.Now().Add(time.Duration(-1*offset) * time.Second)
clock := blockchain.NewClock(gent)
chainService := &mock.ChainService{
Clock: clock,
}
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
FinalizationFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint()},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: chainService,
StateNotifier: chainService.StateNotifier(),
StateGen: stategen.New(db),
}
@@ -486,6 +496,8 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
gent := prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)
clock := blockchain.NewClock(gent)
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -496,7 +508,7 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
FinalizationFetcher: &mock.ChainService{
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
},
TimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
ClockProvider: &mock.ChainService{Clock: clock},
StateNotifier: chainService.StateNotifier(),
}

View File

@@ -2,6 +2,7 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"testing"
"time"
@@ -39,11 +40,15 @@ func TestProposeExit_Notification(t *testing.T) {
// Set genesis time to be 100 epochs ago.
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
genesisTime := time.Now().Add(time.Duration(-100*offset) * time.Second)
mockChainService := &mockChain.ChainService{State: beaconState, Root: genesisRoot[:], Genesis: genesisTime}
mockChainService := &mockChain.ChainService{
State: beaconState,
Root: genesisRoot[:],
Clock: blockchain.NewClock(genesisTime),
}
server := &Server{
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
ClockProvider: mockChainService,
StateNotifier: mockChainService.StateNotifier(),
OperationNotifier: mockChainService.OperationNotifier(),
ExitPool: voluntaryexits.NewPool(),
@@ -106,11 +111,15 @@ func TestProposeExit_NoPanic(t *testing.T) {
// Set genesis time to be 100 epochs ago.
offset := int64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
genesisTime := time.Now().Add(time.Duration(-100*offset) * time.Second)
mockChainService := &mockChain.ChainService{State: beaconState, Root: genesisRoot[:], Genesis: genesisTime}
mockChainService := &mockChain.ChainService{
State: beaconState,
Root: genesisRoot[:],
Clock: blockchain.NewClock(genesisTime),
}
server := &Server{
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
ClockProvider: mockChainService,
StateNotifier: mockChainService.StateNotifier(),
OperationNotifier: mockChainService.OperationNotifier(),
ExitPool: voluntaryexits.NewPool(),

View File

@@ -2,6 +2,7 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"testing"
"time"
@@ -441,7 +442,7 @@ func TestServer_GetBellatrixBeaconBlock_HappyCase(t *testing.T) {
proposerServer := &Server{
HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
TimeFetcher: &blockchainTest.ChainService{Genesis: time.Now()},
ClockProvider: &blockchainTest.ChainService{Clock: blockchain.NewClock(time.Now())},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &blockchainTest.ChainService{},
HeadUpdater: &blockchainTest.ChainService{},
@@ -570,7 +571,7 @@ func TestServer_GetBellatrixBeaconBlock_BuilderCase(t *testing.T) {
proposerServer := &Server{
FinalizationFetcher: &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}},
HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false, Block: wb1},
TimeFetcher: &blockchainTest.ChainService{Genesis: time.Unix(int64(beaconState.GenesisTime()), 0)},
ClockProvider: &blockchainTest.ChainService{Clock: blockchain.NewClock(time.Unix(int64(beaconState.GenesisTime()), 0))},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &blockchainTest.ChainService{},
HeadUpdater: &blockchainTest.ChainService{},

View File

@@ -2,6 +2,7 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"math/big"
"testing"
"time"
@@ -2329,7 +2330,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
}
proposerServer := &Server{
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
@@ -2394,8 +2395,8 @@ func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
clockp := &mock.ChainService{Clock: blockchain.NewClock(time.Now())}
proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, ClockProvider: clockp}
req := &ethpb.BlockRequest{
Slot: bellatrixSlot + 1,
}

View File

@@ -47,7 +47,6 @@ type Server struct {
HeadUpdater blockchain.HeadUpdater
ForkFetcher blockchain.ForkFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher
BlockFetcher powchain.POWBlockFetcher
DepositFetcher depositcache.DepositFetcher
ChainStartFetcher powchain.ChainStartFetcher
@@ -67,10 +66,11 @@ type Server struct {
PendingDepositsFetcher depositcache.PendingDepositsFetcher
OperationNotifier opfeed.Notifier
StateGen stategen.StateManager
ReplayerBuilder stategen.ReplayerBuilder
CanonicalHistoryWaiter stategen.CanonicalHistoryWaiter
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller powchain.EngineCaller
BlockBuilder builder.BlockBuilder
ClockProvider blockchain.ClockProvider
}
// WaitForActivation checks if a validator public key exists in the active validator registry of the current
@@ -124,7 +124,7 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
}
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.PublicKey))
if !ok {
return nil, status.Errorf(codes.Internal, "Could not find validator index for public key %#x not found", req.PublicKey)
return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x not found", req.PublicKey)
}
return &ethpb.ValidatorIndexResponse{Index: index}, nil

View File

@@ -146,7 +146,11 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
if err != nil {
return nil, status.Error(codes.Internal, "Could not get previous epoch's end")
}
prevState, err := vs.ReplayerBuilder.ReplayerForSlot(prevEpochEnd).ReplayBlocks(ctx)
b, err := vs.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
prevState, err := b.ReplayerForSlot(prevEpochEnd).ReplayBlocks(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get previous state")
}
@@ -252,7 +256,11 @@ func (vs *Server) activationStatus(
// Spec:
// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md
func (vs *Server) optimisticStatus(ctx context.Context) error {
if slots.ToEpoch(vs.TimeFetcher.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
c, err := vs.ClockProvider.WaitForClock(ctx)
if err != nil {
return status.Errorf(codes.Internal, "timeout while waiting for genesis timestamp, %s", err)
}
if slots.ToEpoch(c.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return nil
}
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
@@ -363,15 +371,6 @@ func (vs *Server) validatorStatus(
}
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
endSlot, err := slots.EpochEnd(epoch)
if err != nil {
return nil, err
}
// replay to first slot of following epoch
return vs.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayToSlot(ctx, endSlot+1)
}
func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequest) (bool, *ethpb.DoppelGangerResponse) {
validatorsAreRecent := true
resp := &ethpb.DoppelGangerResponse{

View File

@@ -2,6 +2,7 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"reflect"
"testing"
"time"
@@ -604,7 +605,8 @@ func TestActivationStatus_OK(t *testing.T) {
func TestOptimisticStatus(t *testing.T) {
params.SetupTestConfigCleanup(t)
server := &Server{OptimisticModeFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
clockp := &mockChain.ChainService{Clock: blockchain.NewClock(time.Now())}
server := &Server{OptimisticModeFetcher: &mockChain.ChainService{}, ClockProvider: clockp}
err := server.optimisticStatus(context.Background())
require.NoError(t, err)
@@ -612,14 +614,14 @@ func TestOptimisticStatus(t *testing.T) {
cfg.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(cfg)
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true}, ClockProvider: clockp}
err = server.optimisticStatus(context.Background())
s, ok := status.FromError(err)
require.Equal(t, true, ok)
require.DeepEqual(t, codes.Unavailable, s.Code())
require.ErrorContains(t, errOptimisticMode.Error(), err)
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false}, ClockProvider: clockp}
err = server.optimisticStatus(context.Background())
require.NoError(t, err)
}

View File

@@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
@@ -27,8 +28,8 @@ import (
func TestGetSyncMessageBlockRoot_OK(t *testing.T) {
r := []byte{'a'}
server := &Server{
HeadFetcher: &mock.ChainService{Root: r},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Root: r},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
}
res, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
@@ -43,7 +44,7 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
server := &Server{
HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
}
_, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
@@ -54,7 +55,7 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
server = &Server{
HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
ClockProvider: &mock.ChainService{Clock: blockchain.NewClock(time.Now())},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
}
_, err = server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
@@ -103,14 +104,16 @@ func TestGetSyncSubcommitteeIndex_Ok(t *testing.T) {
func TestGetSyncCommitteeContribution_FiltersDuplicates(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, 10)
chain := &mock.ChainService{
State: st,
SyncCommitteeIndices: []types.CommitteeIndex{10},
Clock: blockchain.NewClock(time.Now()),
}
server := &Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
HeadFetcher: &mock.ChainService{
State: st,
SyncCommitteeIndices: []types.CommitteeIndex{10},
},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: chain,
ClockProvider: chain,
}
secKey, err := bls.RandKey()
require.NoError(t, err)

View File

@@ -4,7 +4,6 @@ package rpc
import (
"context"
"errors"
"fmt"
"net"
"sync"
@@ -13,6 +12,7 @@ import (
recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
@@ -90,7 +90,6 @@ type Config struct {
POWChainService powchain.Chain
ChainStartFetcher powchain.ChainStartFetcher
POWChainInfoFetcher powchain.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
GenesisFetcher blockchain.GenesisFetcher
EnableDebugRPCEndpoints bool
MockEth1Votes bool
@@ -115,6 +114,7 @@ type Config struct {
ProposerIdsCache *cache.ProposerPayloadIDsCache
OptimisticModeFetcher blockchain.OptimisticModeFetcher
BlockBuilder builder.BlockBuilder
ClockProvider blockchain.ClockProvider
}
// NewService instantiates a new RPC service instance that will
@@ -175,7 +175,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
// paranoid build time check to ensure ChainInfoFetcher implements required interfaces
var _ stategen.CanonicalChecker = blockchain.ChainInfoFetcher(nil)
var _ stategen.CurrentSlotter = blockchain.ChainInfoFetcher(nil)
// Start the gRPC server.
func (s *Service) Start() {
@@ -186,7 +185,8 @@ func (s *Service) Start() {
stateCache = s.cfg.StateGen.CombinedCache()
}
withCache := stategen.WithCache(stateCache)
ch := stategen.NewCanonicalHistory(s.cfg.BeaconDB, s.cfg.ChainInfoFetcher, s.cfg.ChainInfoFetcher, withCache)
//ch := stategen.NewCanonicalHistory(s.cfg.BeaconDB, s.cfg.ChainInfoFetcher, s.cfg.ChainInfoFetcher, withCache)
rbw := NewCanonicalHistoryWaiter(s.cfg.ChainInfoFetcher, s.cfg.BeaconDB, s.cfg.ChainInfoFetcher, withCache)
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
@@ -197,7 +197,6 @@ func (s *Service) Start() {
HeadUpdater: s.cfg.HeadUpdater,
ForkFetcher: s.cfg.ForkFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
BlockFetcher: s.cfg.POWChainService,
DepositFetcher: s.cfg.DepositFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
@@ -215,16 +214,16 @@ func (s *Service) Start() {
SlashingsPool: s.cfg.SlashingsPool,
StateGen: s.cfg.StateGen,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ReplayerBuilder: ch,
CanonicalHistoryWaiter: rbw,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
BlockBuilder: s.cfg.BlockBuilder,
ClockProvider: s.cfg.ClockProvider,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
HeadUpdater: s.cfg.HeadUpdater,
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
AttestationsPool: s.cfg.AttestationsPool,
@@ -232,13 +231,14 @@ func (s *Service) Start() {
Broadcaster: s.cfg.Broadcaster,
V1Alpha1Server: validatorServer,
StateFetcher: &statefetcher.StateProvider{
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
StateGenService: s.cfg.StateGen,
CanonicalHistoryWaiter: rbw,
ClockProvider: s.cfg.ClockProvider,
},
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ClockProvider: s.cfg.ClockProvider,
}
nodeServer := &nodev1alpha1.Server{
@@ -247,24 +247,24 @@ func (s *Service) Start() {
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
GenesisFetcher: s.cfg.GenesisFetcher,
POWChainInfoFetcher: s.cfg.POWChainInfoFetcher,
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
ClockProvider: s.cfg.ClockProvider,
}
nodeServerV1 := &node.Server{
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
PeersFetcher: s.cfg.PeersFetcher,
PeerManager: s.cfg.PeerManager,
MetadataProvider: s.cfg.MetadataProvider,
HeadFetcher: s.cfg.HeadFetcher,
ClockProvider: s.cfg.ClockProvider,
}
beaconChainServer := &beaconv1alpha1.Server{
@@ -280,7 +280,6 @@ func (s *Service) Start() {
ChainStartFetcher: s.cfg.ChainStartFetcher,
DepositFetcher: s.cfg.DepositFetcher,
BlockFetcher: s.cfg.POWChainService,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
AttestationNotifier: s.cfg.OperationNotifier,
@@ -289,26 +288,26 @@ func (s *Service) Start() {
SyncChecker: s.cfg.SyncService,
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
ReplayerBuilder: ch,
CanonicalHistoryWaiter: rbw,
ClockProvider: s.cfg.ClockProvider,
}
beaconChainServerV1 := &beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
CanonicalHistoryWaiter: rbw,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
StateFetcher: &statefetcher.StateProvider{
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
StateGenService: s.cfg.StateGen,
CanonicalHistoryWaiter: rbw,
ClockProvider: s.cfg.ClockProvider,
},
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
@@ -316,6 +315,7 @@ func (s *Service) Start() {
V1Alpha1ValidatorServer: validatorServer,
SyncChecker: s.cfg.SyncService,
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
ClockProvider: s.cfg.ClockProvider,
}
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
@@ -331,24 +331,24 @@ func (s *Service) Start() {
if s.cfg.EnableDebugRPCEndpoints {
log.Info("Enabled debug gRPC endpoints")
debugServer := &debugv1alpha1.Server{
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BeaconDB: s.cfg.BeaconDB,
StateGen: s.cfg.StateGen,
HeadFetcher: s.cfg.HeadFetcher,
ForkFetcher: s.cfg.ForkFetcher,
PeerManager: s.cfg.PeerManager,
PeersFetcher: s.cfg.PeersFetcher,
ReplayerBuilder: ch,
BeaconDB: s.cfg.BeaconDB,
StateGen: s.cfg.StateGen,
HeadFetcher: s.cfg.HeadFetcher,
ForkFetcher: s.cfg.ForkFetcher,
PeerManager: s.cfg.PeerManager,
PeersFetcher: s.cfg.PeersFetcher,
CanonicalHistoryWaiter: rbw,
ClockProvider: s.cfg.ClockProvider,
}
debugServerV1 := &debug.Server{
BeaconDB: s.cfg.BeaconDB,
HeadFetcher: s.cfg.HeadFetcher,
StateFetcher: &statefetcher.StateProvider{
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
BeaconDB: s.cfg.BeaconDB,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
StateGenService: s.cfg.StateGen,
CanonicalHistoryWaiter: rbw,
ClockProvider: s.cfg.ClockProvider,
},
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
@@ -429,3 +429,27 @@ func (s *Service) logNewClientConnection(ctx context.Context) {
}
}
}
// NewCanonicalHistoryWaiter wires a ClockProvider together with the history
// accessor and canonicality checker needed to build a stategen.CanonicalHistory
// once the genesis clock becomes available.
func NewCanonicalHistoryWaiter(c blockchain.ClockProvider, h stategen.HistoryAccessor, cc stategen.CanonicalChecker, opts ...stategen.CanonicalHistoryOption) stategen.CanonicalHistoryWaiter {
	waiter := &canonicalHistoryWaiter{
		c:    c,
		h:    h,
		cc:   cc,
		opts: opts,
	}
	return waiter
}
// canonicalHistoryWaiter implements stategen.CanonicalHistoryWaiter by deferring
// construction of a CanonicalHistory until the genesis clock is known.
type canonicalHistoryWaiter struct {
	c blockchain.ClockProvider // blocks until the genesis clock is available
	h stategen.HistoryAccessor // read access to block/state history
	cc stategen.CanonicalChecker // decides whether a given root is canonical
	opts []stategen.CanonicalHistoryOption // forwarded to NewCanonicalHistory
}
// WaitForCanonicalHistory blocks until the genesis clock can be obtained from
// the ClockProvider (or the context is canceled), then constructs and returns
// a CanonicalHistory backed by that clock.
func (w *canonicalHistoryWaiter) WaitForCanonicalHistory(ctx context.Context) (*stategen.CanonicalHistory, error) {
	clock, err := w.c.WaitForClock(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "timeout waiting for genesis timestamp")
	}
	ch := stategen.NewCanonicalHistory(w.h, w.cc, clock, w.opts...)
	return ch, nil
}

View File

@@ -3,6 +3,7 @@ package rpc
import (
"context"
"errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"io"
"testing"
"time"
@@ -24,7 +25,7 @@ func init() {
func TestLifecycle_OK(t *testing.T) {
hook := logTest.NewGlobal()
chainService := &mock.ChainService{
Genesis: time.Now(),
Clock: blockchain.NewClock(time.Now()),
}
rpcService := NewService(context.Background(), &Config{
Port: "7348",
@@ -32,9 +33,9 @@ func TestLifecycle_OK(t *testing.T) {
BlockReceiver: chainService,
AttestationReceiver: chainService,
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
POWChainService: &mockPOW.POWChain{},
StateNotifier: chainService.StateNotifier(),
ClockProvider: chainService,
})
rpcService.Start()
@@ -55,12 +56,12 @@ func TestStatus_CredentialError(t *testing.T) {
func TestRPC_InsecureEndpoint(t *testing.T) {
hook := logTest.NewGlobal()
chainService := &mock.ChainService{Genesis: time.Now()}
chainService := &mock.ChainService{Clock: blockchain.NewClock(time.Now())}
rpcService := NewService(context.Background(), &Config{
Port: "7777",
SyncService: &mockSync.Sync{IsSyncing: false},
BlockReceiver: chainService,
GenesisTimeFetcher: chainService,
ClockProvider: chainService,
AttestationReceiver: chainService,
HeadFetcher: chainService,
POWChainService: &mockPOW.POWChain{},

View File

@@ -24,6 +24,7 @@ go_test(
srcs = ["fetcher_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

View File

@@ -81,9 +81,9 @@ type Fetcher interface {
type StateProvider struct {
BeaconDB db.ReadOnlyDatabase
ChainInfoFetcher blockchain.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
StateGenService stategen.StateManager
ReplayerBuilder stategen.ReplayerBuilder
StateGenService stategen.StateManager
CanonicalHistoryWaiter stategen.CanonicalHistoryWaiter
ClockProvider blockchain.ClockProvider
}
// State returns the BeaconState for a given identifier. The identifier can be one of:
@@ -200,14 +200,22 @@ func (p *StateProvider) StateBySlot(ctx context.Context, target types.Slot) (sta
ctx, span := trace.StartSpan(ctx, "statefetcher.StateBySlot")
defer span.End()
if target > p.GenesisTimeFetcher.CurrentSlot() {
c, err := p.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, errors.Wrap(err, "timeout while waiting for genesis timestamp")
}
if target > c.CurrentSlot() {
return nil, errors.New("requested slot is in the future")
}
if target > p.ChainInfoFetcher.HeadSlot() {
return nil, errors.New("requested slot number is higher than head slot number")
}
st, err := p.ReplayerBuilder.ReplayerForSlot(target).ReplayBlocks(ctx)
b, err := p.ReplayerBuilder(ctx)
if err != nil {
return nil, err
}
st, err := b.ReplayerForSlot(target).ReplayBlocks(ctx)
if err != nil {
msg := fmt.Sprintf("error while replaying history to slot=%d", target)
return nil, errors.Wrap(err, msg)
@@ -285,7 +293,11 @@ func (p *StateProvider) stateRootByHex(ctx context.Context, stateId []byte) ([]b
}
func (p *StateProvider) stateRootBySlot(ctx context.Context, slot types.Slot) ([]byte, error) {
currentSlot := p.GenesisTimeFetcher.CurrentSlot()
c, err := p.ClockProvider.WaitForClock(ctx)
if err != nil {
return nil, errors.Wrap(err, "timeout waiting for genesis time")
}
currentSlot := c.CurrentSlot()
if slot > currentSlot {
return nil, errors.New("slot cannot be in the future")
}
@@ -304,3 +316,7 @@ func (p *StateProvider) stateRootBySlot(ctx context.Context, slot types.Slot) ([
}
return blks[0].Block().StateRoot(), nil
}
// ReplayerBuilder blocks until the prerequisites for canonical history (the
// genesis clock) are available, then returns the stategen.ReplayerBuilder
// used by StateBySlot to replay states to a target slot.
func (p *StateProvider) ReplayerBuilder(ctx context.Context) (stategen.ReplayerBuilder, error) {
	return p.CanonicalHistoryWaiter.WaitForCanonicalHistory(ctx)
}

View File

@@ -2,6 +2,7 @@ package statefetcher
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"strconv"
"strings"
"testing"
@@ -77,11 +78,11 @@ func TestGetState(t *testing.T) {
cc := &mockstategen.MockCanonicalChecker{Is: true}
cs := &mockstategen.MockCurrentSlotter{Slot: bs.Slot() + 1}
ch := stategen.NewCanonicalHistory(db, cc, cs)
currentSlot := types.Slot(0)
clock := blockchain.NewClock(time.Now())
p := StateProvider{
BeaconDB: db,
ReplayerBuilder: ch,
GenesisTimeFetcher: &chainMock.ChainService{Slot: &currentSlot},
ClockProvider: &chainMock.ChainService{Clock: clock},
ChainInfoFetcher: &chainMock.ChainService{State: bs},
}
@@ -161,8 +162,10 @@ func TestGetState(t *testing.T) {
})
t.Run("slot", func(t *testing.T) {
clock := NewMockClock(time.Now(), headSlot)
p := StateProvider{
GenesisTimeFetcher: &chainMock.ChainService{Slot: &headSlot},
ClockProvider: &chainMock.ChainService{Clock: clock},
ChainInfoFetcher: &chainMock.ChainService{
CanonicalRoots: map[[32]byte]bool{
bytesutil.ToBytes32(newBeaconState.LatestBlockHeader().ParentRoot): true,
@@ -346,8 +349,9 @@ func TestGetStateRoot(t *testing.T) {
require.NoError(t, db.SaveState(ctx, st, root))
slot := types.Slot(40)
clock := NewMockClock(time.Now(), slot)
p := StateProvider{
GenesisTimeFetcher: &chainMock.ChainService{Slot: &slot},
ClockProvider: &chainMock.ChainService{Clock: clock},
BeaconDB: db,
}
@@ -357,11 +361,7 @@ func TestGetStateRoot(t *testing.T) {
})
t.Run("slot_too_big", func(t *testing.T) {
p := StateProvider{
GenesisTimeFetcher: &chainMock.ChainService{
Genesis: time.Now(),
},
}
p := StateProvider{ClockProvider: &chainMock.ChainService{Clock: blockchain.NewClock(time.Now())}}
_, err := p.StateRoot(ctx, []byte(strconv.FormatUint(1, 10)))
assert.ErrorContains(t, "slot cannot be in the future", err)
})
@@ -380,7 +380,8 @@ func TestNewStateNotFoundError(t *testing.T) {
func TestStateBySlot_FutureSlot(t *testing.T) {
slot := types.Slot(100)
p := StateProvider{GenesisTimeFetcher: &chainMock.ChainService{Slot: &slot}}
clock := NewMockClock(time.Now(), slot)
p := StateProvider{ClockProvider: &chainMock.ChainService{Clock: clock}}
_, err := p.StateBySlot(context.Background(), 101)
assert.ErrorContains(t, "requested slot is in the future", err)
}
@@ -389,8 +390,17 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) {
st, err := statenative.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 100})
require.NoError(t, err)
currentSlot := types.Slot(102)
mock := &chainMock.ChainService{State: st, Slot: &currentSlot}
p := StateProvider{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
clock := NewMockClock(time.Now(), currentSlot)
mock := &chainMock.ChainService{State: st, Slot: &currentSlot, Clock: clock}
p := StateProvider{ChainInfoFetcher: mock, ClockProvider: mock}
_, err = p.StateBySlot(context.Background(), 101)
assert.ErrorContains(t, "requested slot number is higher than head slot number", err)
}
// NewMockClock returns a blockchain.Clock whose genesis time is backdated so
// that, at the supplied "now" instant, exactly slotsAfterGenesis slots have
// elapsed. The clock's notion of the current time is pinned to the supplied
// instant so tests are deterministic regardless of wall-clock drift.
func NewMockClock(now time.Time, slotsAfterGenesis types.Slot) blockchain.Clock {
	offset := uint64(slotsAfterGenesis) * params.BeaconConfig().SecondsPerSlot
	genesis := now.Add(-1 * time.Second * time.Duration(offset))
	// Pin "now" to the caller-supplied instant. Returning genesis here (as the
	// previous version did) would make CurrentSlot always report 0 instead of
	// slotsAfterGenesis, defeating the purpose of the mock.
	return blockchain.NewClock(genesis, blockchain.WithNow(func() time.Time {
		return now
	}))
}

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
testonly = True,
srcs = [
"mock_genesis_timefetcher.go",
"mock_powchain_info_fetcher.go",
"mock_state_fetcher.go",
],
@@ -12,7 +11,6 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
],
)

View File

@@ -1,21 +0,0 @@
package testutil
import (
"time"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)
// MockGenesisTimeFetcher is a fake implementation of the blockchain.TimeFetcher
type MockGenesisTimeFetcher struct {
Genesis time.Time
}
func (m *MockGenesisTimeFetcher) GenesisTime() time.Time {
return m.Genesis
}
func (m *MockGenesisTimeFetcher) CurrentSlot() types.Slot {
return types.Slot(uint64(time.Now().Unix()-m.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
}

View File

@@ -26,6 +26,7 @@ go_library(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//cache/lru:go_default_library",
@@ -70,6 +71,7 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",

View File

@@ -17,7 +17,7 @@ var ErrNoDataForSlot = errors.New("cannot retrieve data for slot")
// HasState returns true if the state exists in cache or in DB.
func (s *State) HasState(ctx context.Context, blockRoot [32]byte) (bool, error) {
has, err := s.HasStateInCache(ctx, blockRoot)
has, err := s.hasStateInCache(ctx, blockRoot)
if err != nil {
return false, err
}
@@ -27,8 +27,8 @@ func (s *State) HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
return s.beaconDB.HasState(ctx, blockRoot), nil
}
// HasStateInCache returns true if the state exists in cache.
func (s *State) HasStateInCache(_ context.Context, blockRoot [32]byte) (bool, error) {
// hasStateInCache returns true if the state exists in cache.
func (s *State) hasStateInCache(_ context.Context, blockRoot [32]byte) (bool, error) {
if s.hotStateCache.has(blockRoot) {
return true, nil
}
@@ -87,7 +87,7 @@ func (s *State) StateByRootInitialSync(ctx context.Context, blockRoot [32]byte)
return cachedInfo.state, nil
}
startState, err := s.LastAncestorState(ctx, blockRoot)
startState, err := s.latestAncestor(ctx, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor state")
}
@@ -102,11 +102,11 @@ func (s *State) StateByRootInitialSync(ctx context.Context, blockRoot [32]byte)
return startState, nil
}
blks, err := s.LoadBlocks(ctx, startState.Slot()+1, summary.Slot, bytesutil.ToBytes32(summary.Root))
blks, err := s.loadBlocks(ctx, startState.Slot()+1, summary.Slot, bytesutil.ToBytes32(summary.Root))
if err != nil {
return nil, errors.Wrap(err, "could not load blocks")
}
startState, err = s.ReplayBlocks(ctx, startState, blks, summary.Slot)
startState, err = s.replayBlocks(ctx, startState, blks, summary.Slot)
if err != nil {
return nil, errors.Wrap(err, "could not replay blocks")
}
@@ -125,13 +125,13 @@ func (s *State) stateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
}
if summary == nil {
return s.RecoverStateSummary(ctx, blockRoot)
return s.recoverStateSummary(ctx, blockRoot)
}
return summary, nil
}
// RecoverStateSummary recovers state summary object of a given block root by using the saved block in DB.
func (s *State) RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
func (s *State) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
if s.beaconDB.HasBlock(ctx, blockRoot) {
b, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
@@ -185,7 +185,7 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
// Since the requested state is not in caches or DB, start replaying using the last
// available ancestor state which is retrieved using input block's root.
startState, err := s.LastAncestorState(ctx, blockRoot)
startState, err := s.latestAncestor(ctx, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor state")
}
@@ -197,17 +197,17 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
return startState, nil
}
blks, err := s.LoadBlocks(ctx, startState.Slot()+1, targetSlot, bytesutil.ToBytes32(summary.Root))
blks, err := s.loadBlocks(ctx, startState.Slot()+1, targetSlot, bytesutil.ToBytes32(summary.Root))
if err != nil {
return nil, errors.Wrap(err, "could not load blocks for hot state using root")
}
replayBlockCount.Observe(float64(len(blks)))
return s.ReplayBlocks(ctx, startState, blks, targetSlot)
return s.replayBlocks(ctx, startState, blks, targetSlot)
}
// LastAncestorState returns the highest available ancestor state of the input block root.
// latestAncestor returns the highest available ancestor state of the input block root.
// It recursively looks up block's parent until a corresponding state of the block root
// is found in the caches or DB.
//
@@ -215,8 +215,8 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
// 1) block parent state is the last finalized state
// 2) block parent state is the epoch boundary state and exists in epoch boundary cache
// 3) block parent state is in DB
func (s *State) LastAncestorState(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.LastAncestorState")
func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.latestAncestor")
defer span.End()
if s.isFinalizedRoot(blockRoot) && s.finalizedState() != nil {

View File

@@ -385,7 +385,7 @@ func TestLastAncestorState_CanGetUsingDB(t *testing.T) {
util.SaveBlock(t, ctx, service.beaconDB, b3)
require.NoError(t, service.beaconDB.SaveState(ctx, b1State, r1))
lastState, err := service.LastAncestorState(ctx, r3)
lastState, err := service.latestAncestor(ctx, r3)
require.NoError(t, err)
assert.Equal(t, b1State.Slot(), lastState.Slot(), "Did not get wanted state")
}
@@ -425,7 +425,7 @@ func TestLastAncestorState_CanGetUsingCache(t *testing.T) {
util.SaveBlock(t, ctx, service.beaconDB, b3)
service.hotStateCache.put(r1, b1State)
lastState, err := service.LastAncestorState(ctx, r3)
lastState, err := service.latestAncestor(ctx, r3)
require.NoError(t, err)
assert.Equal(t, b1State.Slot(), lastState.Slot(), "Did not get wanted state")
}
@@ -483,7 +483,7 @@ func TestState_HasStateInCache(t *testing.T) {
{rMiss, false},
}
for _, tc := range tt {
got, err := service.HasStateInCache(ctx, tc.root)
got, err := service.hasStateInCache(ctx, tc.root)
require.NoError(t, err)
require.Equal(t, tc.want, got)
}

View File

@@ -12,8 +12,6 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -4,9 +4,7 @@ import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// MockStateManager is a fake implementation of StateManager.
@@ -43,35 +41,11 @@ func (_ *MockStateManager) MigrateToCold(_ context.Context, _ [32]byte) error {
panic("implement me")
}
// ReplayBlocks --
func (_ *MockStateManager) ReplayBlocks(
_ context.Context,
_ state.BeaconState,
_ []interfaces.SignedBeaconBlock,
_ types.Slot,
) (state.BeaconState, error) {
panic("implement me")
}
// LoadBlocks --
func (_ *MockStateManager) LoadBlocks(
_ context.Context,
_, _ types.Slot,
_ [32]byte,
) ([]interfaces.SignedBeaconBlock, error) {
panic("implement me")
}
// HasState --
func (_ *MockStateManager) HasState(_ context.Context, _ [32]byte) (bool, error) {
panic("implement me")
}
// HasStateInCache --
func (_ *MockStateManager) HasStateInCache(_ context.Context, _ [32]byte) (bool, error) {
panic("implement me")
}
// StateByRoot --
func (m *MockStateManager) StateByRoot(_ context.Context, blockRoot [32]byte) (state.BeaconState, error) {
return m.StatesByRoot[blockRoot], nil
@@ -87,14 +61,6 @@ func (m *MockStateManager) StateBySlot(_ context.Context, slot types.Slot) (stat
return m.StatesBySlot[slot], nil
}
// RecoverStateSummary --
func (_ *MockStateManager) RecoverStateSummary(
_ context.Context,
_ [32]byte,
) (*ethpb.StateSummary, error) {
panic("implement me")
}
// SaveState --
func (_ *MockStateManager) SaveState(_ context.Context, _ [32]byte, _ state.BeaconState) error {
panic("implement me")
@@ -105,16 +71,6 @@ func (_ *MockStateManager) ForceCheckpoint(_ context.Context, _ []byte) error {
panic("implement me")
}
// EnableSaveHotStateToDB --
func (_ *MockStateManager) EnableSaveHotStateToDB(_ context.Context) {
panic("implement me")
}
// DisableSaveHotStateToDB --
func (_ *MockStateManager) DisableSaveHotStateToDB(_ context.Context) error {
panic("implement me")
}
// AddStateForRoot --
func (m *MockStateManager) AddStateForRoot(state state.BeaconState, blockRoot [32]byte) {
m.StatesByRoot[blockRoot] = state

View File

@@ -25,13 +25,13 @@ import (
// ReplayBlocks replays the input blocks on the input state until the target slot is reached.
//
// WARNING Blocks passed to the function must be in decreasing slots order.
func (_ *State) ReplayBlocks(
func (_ *State) replayBlocks(
ctx context.Context,
state state.BeaconState,
signed []interfaces.SignedBeaconBlock,
targetSlot types.Slot,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.ReplayBlocks")
ctx, span := trace.StartSpan(ctx, "stateGen.replayBlocks")
defer span.End()
var err error
@@ -77,9 +77,9 @@ func (_ *State) ReplayBlocks(
return state, nil
}
// LoadBlocks loads the blocks between start slot and end slot by recursively fetching from end block root.
// loadBlocks loads the blocks between start slot and end slot by recursively fetching from end block root.
// The Blocks are returned in slot-descending order.
func (s *State) LoadBlocks(ctx context.Context, startSlot, endSlot types.Slot, endBlockRoot [32]byte) ([]interfaces.SignedBeaconBlock, error) {
func (s *State) loadBlocks(ctx context.Context, startSlot, endSlot types.Slot, endBlockRoot [32]byte) ([]interfaces.SignedBeaconBlock, error) {
// Nothing to load for invalid range.
if endSlot < startSlot {
return nil, fmt.Errorf("start slot %d >= end slot %d", startSlot, endSlot)

View File

@@ -43,7 +43,7 @@ func TestReplayBlocks_AllSkipSlots(t *testing.T) {
service := New(beaconDB)
targetSlot := params.BeaconConfig().SlotsPerEpoch - 1
newState, err := service.ReplayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
require.NoError(t, err)
assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots")
}
@@ -72,7 +72,7 @@ func TestReplayBlocks_SameSlot(t *testing.T) {
service := New(beaconDB)
targetSlot := beaconState.Slot()
newState, err := service.ReplayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
require.NoError(t, err)
assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots")
}
@@ -106,7 +106,7 @@ func TestReplayBlocks_LowerSlotBlock(t *testing.T) {
b.Block.Slot = beaconState.Slot() - 1
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
newState, err := service.ReplayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{wsb}, targetSlot)
newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{wsb}, targetSlot)
require.NoError(t, err)
assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots")
}
@@ -132,7 +132,7 @@ func TestReplayBlocks_ThroughForkBoundary(t *testing.T) {
service := New(testDB.SetupDB(t))
targetSlot := params.BeaconConfig().SlotsPerEpoch
newState, err := service.ReplayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
require.NoError(t, err)
// Verify state is version Altair.
@@ -162,7 +162,7 @@ func TestReplayBlocks_ThroughBellatrixForkBoundary(t *testing.T) {
service := New(testDB.SetupDB(t))
targetSlot := params.BeaconConfig().SlotsPerEpoch * 2
newState, err := service.ReplayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.SignedBeaconBlock{}, targetSlot)
require.NoError(t, err)
// Verify state is version Altair.
@@ -179,7 +179,7 @@ func TestLoadBlocks_FirstBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.LoadBlocks(ctx, 0, 8, roots[len(roots)-1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 8, roots[len(roots)-1])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -208,7 +208,7 @@ func TestLoadBlocks_SecondBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.LoadBlocks(ctx, 0, 5, roots[5])
filteredBlocks, err := s.loadBlocks(ctx, 0, 5, roots[5])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -235,7 +235,7 @@ func TestLoadBlocks_ThirdBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.LoadBlocks(ctx, 0, 7, roots[7])
filteredBlocks, err := s.loadBlocks(ctx, 0, 7, roots[7])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -264,7 +264,7 @@ func TestLoadBlocks_SameSlots(t *testing.T) {
roots, savedBlocks, err := tree2(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.LoadBlocks(ctx, 0, 3, roots[6])
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[6])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -291,7 +291,7 @@ func TestLoadBlocks_SameEndSlots(t *testing.T) {
roots, savedBlocks, err := tree3(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.LoadBlocks(ctx, 0, 2, roots[2])
filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[2])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -317,7 +317,7 @@ func TestLoadBlocks_SameEndSlotsWith2blocks(t *testing.T) {
roots, savedBlocks, err := tree4(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.LoadBlocks(ctx, 0, 2, roots[1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[1])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -341,7 +341,7 @@ func TestLoadBlocks_BadStart(t *testing.T) {
roots, _, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
_, err = s.LoadBlocks(ctx, 0, 5, roots[8])
_, err = s.loadBlocks(ctx, 0, 5, roots[8])
assert.ErrorContains(t, "end block roots don't match", err)
}

View File

@@ -173,3 +173,10 @@ type ReplayerBuilder interface {
// slots via process_slots.
ReplayerForSlot(target types.Slot) Replayer
}
// CanonicalHistoryWaiter is designed for situations where services need to start up
// before it is possible to construct a CanonicalHistory. When WaitForCanonicalHistory
// is called, it will block until the prerequisites for a replayer are available,
// or the parent context is canceled.
type CanonicalHistoryWaiter interface {
	WaitForCanonicalHistory(context.Context) (*CanonicalHistory, error)
}

View File

@@ -6,16 +6,15 @@ package stategen
import (
"context"
"errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"sync"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
@@ -25,21 +24,15 @@ var defaultHotStateDBInterval types.Slot = 128
// logic of maintaining both hot and cold states in DB.
type StateManager interface {
Resume(ctx context.Context, fState state.BeaconState) (state.BeaconState, error)
HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
DeleteStateFromCaches(ctx context.Context, blockRoot [32]byte) error
ForceCheckpoint(ctx context.Context, root []byte) error
SaveState(ctx context.Context, blockRoot [32]byte, st state.BeaconState) error
SaveFinalizedState(fSlot types.Slot, fRoot [32]byte, fState state.BeaconState)
MigrateToCold(ctx context.Context, fRoot [32]byte) error
ReplayBlocks(ctx context.Context, state state.BeaconState, signed []interfaces.SignedBeaconBlock, targetSlot types.Slot) (state.BeaconState, error)
LoadBlocks(ctx context.Context, startSlot, endSlot types.Slot, endBlockRoot [32]byte) ([]interfaces.SignedBeaconBlock, error)
HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, error)
StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState
StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
SaveState(ctx context.Context, blockRoot [32]byte, st state.BeaconState) error
ForceCheckpoint(ctx context.Context, root []byte) error
EnableSaveHotStateToDB(_ context.Context)
DisableSaveHotStateToDB(ctx context.Context) error
DeleteStateFromCaches(ctx context.Context, blockRoot [32]byte) error
}
// State is a concrete implementation of StateManager.
@@ -51,6 +44,12 @@ type State struct {
epochBoundaryStateCache *epochBoundaryState
saveHotStateDB *saveHotStateDbConfig
backfillStatus *backfill.Status
fc minimumForkChoicer
cs CurrentSlotter
}
type minimumForkChoicer interface {
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
}
// This tracks the config in the event of long non-finality,
@@ -81,6 +80,18 @@ func WithBackfillStatus(bfs *backfill.Status) StateGenOption {
}
}
func WithMinimumForkChoicer(fc minimumForkChoicer) StateGenOption {
return func(sg *State) {
sg.fc = fc
}
}
func WithCurrentSlotter(cs CurrentSlotter) StateGenOption {
return func(sg *State) {
sg.cs = cs
}
}
// New returns a new state management object.
func New(beaconDB db.NoHeadAccessDatabase, opts ...StateGenOption) *State {
s := &State{

View File

@@ -2,6 +2,8 @@ package stategen
import (
"context"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"math"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -13,12 +15,38 @@ import (
"go.opencensus.io/trace"
)
var errForkChoiceFinalizedNil = errors.New("nil finalized checkpoint returned from forkchoice store")
// number of epochs of non-finality before the hot state store will begin saving snapshots for faster recovery.
var hotStateSaveThreshold = types.Epoch(100)
// SaveState saves the state in the cache and/or DB.
func (s *State) SaveState(ctx context.Context, blockRoot [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "stateGen.SaveState")
defer span.End()
return s.saveStateByRoot(ctx, blockRoot, st)
if err := s.saveStateByRoot(ctx, blockRoot, st); err != nil {
return err
}
return s.toggleHotStateSaving(ctx)
}
// toggleHotStateSaving enables saving hot states to the DB once the chain has
// gone more than hotStateSaveThreshold epochs past the finalized checkpoint
// (long non-finality), and disables it otherwise. Returns
// errForkChoiceFinalizedNil when forkchoice has no finalized checkpoint.
func (s *State) toggleHotStateSaving(ctx context.Context) error {
	finalized := s.fc.FinalizedCheckpoint()
	if finalized == nil {
		return errForkChoiceFinalizedNil
	}
	currentEpoch := slots.ToEpoch(s.cs.CurrentSlot())
	// The epoch comparison guards the subtraction against unsigned underflow.
	if currentEpoch > finalized.Epoch && currentEpoch-finalized.Epoch > hotStateSaveThreshold {
		s.EnableSaveHotStateToDB(ctx)
		return nil
	}
	return s.DisableSaveHotStateToDB(ctx)
}
// ForceCheckpoint initiates a cold state save of the given block root's state. This method does not update the

View File

@@ -2,14 +2,14 @@ package stategen
import (
"context"
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
"testing"
)
func TestSaveState_HotStateCanBeSaved(t *testing.T) {
@@ -201,3 +201,28 @@ func TestEnableSaveHotStateToDB_AlreadyDisabled(t *testing.T) {
require.LogsDoNotContain(t, hook, "Exiting mode to save hot states in DB")
require.Equal(t, false, service.saveHotStateDB.enabled)
}
// mockMinForkChoicer is a minimal test double for minimumForkChoicer that
// returns a fixed finalized checkpoint.
type mockMinForkChoicer struct {
	finalizedCheckpoint *forkchoicetypes.Checkpoint // returned as-is; may be nil
}

// Compile-time assertion that the mock satisfies minimumForkChoicer.
var _ minimumForkChoicer = &mockMinForkChoicer{}

// FinalizedCheckpoint returns the configured checkpoint (nil unless set).
func (fc *mockMinForkChoicer) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
	return fc.finalizedCheckpoint
}
// TestCheckSaveHotStateDB_EnableDisable verifies that toggleHotStateSaving
// enables hot-state persistence when finality lags the current epoch by more
// than hotStateSaveThreshold, and disables it again once finality catches up.
func TestCheckSaveHotStateDB_EnableDisable(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	service := New(beaconDB)
	// A non-nil zero-value checkpoint (epoch 0, zero root) stands in for
	// finality at genesis; a nil checkpoint would make the toggle error out.
	service.fc = &mockMinForkChoicer{finalizedCheckpoint: &forkchoicetypes.Checkpoint{}}
	// Set the current slot one epoch PAST the threshold so that
	// currentEpoch-finalized.Epoch strictly exceeds hotStateSaveThreshold.
	service.cs = &mockCurrentSlotter{Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(hotStateSaveThreshold + 1))}
	require.NoError(t, service.toggleHotStateSaving(context.Background()))
	require.Equal(t, true, service.saveHotStateDB.enabled)

	// Move the finalized checkpoint up so the gap is back under the threshold,
	// re-run the toggle, and assert hot state saving is disabled again.
	service.fc = &mockMinForkChoicer{finalizedCheckpoint: &forkchoicetypes.Checkpoint{Epoch: hotStateSaveThreshold}}
	require.NoError(t, service.toggleHotStateSaving(context.Background()))
	require.Equal(t, false, service.saveHotStateDB.enabled)
}

View File

@@ -12,7 +12,11 @@ import (
// Is a background routine that observes for new incoming forks. Depending on the epoch
// it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
slotTicker := slots.NewSlotTicker(s.cfg.chain.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
clock, err := s.cfg.chain.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("timeout waiting for genesis time in forkWatcher()")
}
slotTicker := slots.NewSlotTicker(clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
for {
select {
// In the event of a node restart, we will still end up subscribing to the correct
@@ -40,7 +44,11 @@ func (s *Service) forkWatcher() {
// it registers the appropriate gossip and rpc topics.
func (s *Service) registerForUpcomingFork(currEpoch types.Epoch) error {
genRoot := s.cfg.chain.GenesisValidatorsRoot()
isNextForkEpoch, err := forks.IsForkNextEpoch(s.cfg.chain.GenesisTime(), genRoot[:])
clock, err := s.cfg.chain.WaitForClock(s.ctx)
if err != nil {
return errors.Wrap(err, "timeout waiting for genesis time in registerForUpcomingFork")
}
isNextForkEpoch, err := forks.IsForkNextEpoch(clock.GenesisTime(), genRoot[:])
if err != nil {
return errors.Wrap(err, "Could not retrieve next fork epoch")
}

View File

@@ -2,6 +2,7 @@ package sync
import (
"fmt"
"github.com/prysmaticlabs/prysm/async"
"reflect"
"strings"
@@ -91,57 +92,61 @@ var (
)
)
func (s *Service) updateMetrics() {
// do not update metrics if genesis time
// has not been initialized
if s.cfg.chain.GenesisTime().IsZero() {
func (s *Service) spawnUpdateMetrics() {
// do not update metrics if genesis time has not been initialized
c, err := s.cfg.chain.WaitForClock(s.ctx)
if err != nil {
		log.WithError(err).Error("timeout while waiting for genesis time in spawnUpdateMetrics")
return
}
// We update the dynamic subnet topics.
digest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Debugf("Could not compute fork digest")
}
indices := s.aggregatorSubnetIndices(s.cfg.chain.CurrentSlot())
syncIndices := cache.SyncSubnetIDs.GetAllSubnets(slots.ToEpoch(s.cfg.chain.CurrentSlot()))
attTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})]
syncTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.SyncCommitteeMessage{})]
attTopic += s.cfg.p2p.Encoding().ProtocolSuffix()
syncTopic += s.cfg.p2p.Encoding().ProtocolSuffix()
if flags.Get().SubscribeToAllSubnets {
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
s.collectMetricForSubnet(attTopic, digest, i)
async.RunEvery(s.ctx, syncMetricsInterval, func() {
// We update the dynamic subnet topics.
digest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Debugf("Could not compute fork digest")
}
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
s.collectMetricForSubnet(syncTopic, digest, i)
indices := s.aggregatorSubnetIndices(c.CurrentSlot())
syncIndices := cache.SyncSubnetIDs.GetAllSubnets(slots.ToEpoch(c.CurrentSlot()))
attTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})]
syncTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.SyncCommitteeMessage{})]
attTopic += s.cfg.p2p.Encoding().ProtocolSuffix()
syncTopic += s.cfg.p2p.Encoding().ProtocolSuffix()
if flags.Get().SubscribeToAllSubnets {
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
s.collectMetricForSubnet(attTopic, digest, i)
}
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
s.collectMetricForSubnet(syncTopic, digest, i)
}
} else {
for _, committeeIdx := range indices {
s.collectMetricForSubnet(attTopic, digest, committeeIdx)
}
for _, committeeIdx := range syncIndices {
s.collectMetricForSubnet(syncTopic, digest, committeeIdx)
}
}
} else {
for _, committeeIdx := range indices {
s.collectMetricForSubnet(attTopic, digest, committeeIdx)
}
for _, committeeIdx := range syncIndices {
s.collectMetricForSubnet(syncTopic, digest, committeeIdx)
}
}
// We update all other gossip topics.
for _, topic := range p2p.AllTopics() {
// We already updated attestation subnet topics.
if strings.Contains(topic, p2p.GossipAttestationMessage) || strings.Contains(topic, p2p.GossipSyncCommitteeMessage) {
continue
// We update all other gossip topics.
for _, topic := range p2p.AllTopics() {
// We already updated attestation subnet topics.
if strings.Contains(topic, p2p.GossipAttestationMessage) || strings.Contains(topic, p2p.GossipSyncCommitteeMessage) {
continue
}
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
if !strings.Contains(topic, "%x") {
topicPeerCount.WithLabelValues(topic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(topic))))
continue
}
formattedTopic := fmt.Sprintf(topic, digest)
topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(formattedTopic))))
}
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
if !strings.Contains(topic, "%x") {
topicPeerCount.WithLabelValues(topic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(topic))))
continue
}
formattedTopic := fmt.Sprintf(topic, digest)
topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(formattedTopic))))
}
for _, topic := range s.cfg.p2p.PubSub().GetTopics() {
subscribedTopicPeerCount.WithLabelValues(topic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(topic))))
}
for _, topic := range s.cfg.p2p.PubSub().GetTopics() {
subscribedTopicPeerCount.WithLabelValues(topic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(topic))))
}
})
}
func (s *Service) collectMetricForSubnet(topic string, digest [4]byte, index uint64) {

View File

@@ -3,6 +3,7 @@ package sync
import (
"context"
"encoding/hex"
"github.com/pkg/errors"
"sync"
pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -42,10 +43,14 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "processPendingAtts")
defer span.End()
c, err := s.cfg.chain.WaitForClock(ctx)
if err != nil {
return errors.Wrap(err, "timeout while waiting for genesis timestamp")
}
// Before a node processes pending attestations queue, it verifies
// the attestations in the queue are still valid. Attestations will
// be deleted from the queue if invalid (ie. getting staled from falling too many slots behind).
s.validatePendingAtts(ctx, s.cfg.chain.CurrentSlot())
s.validatePendingAtts(ctx, c.CurrentSlot())
s.pendingAttsLock.RLock()
roots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
@@ -75,7 +80,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
} else {
// Pending attestation's missing block has not arrived yet.
log.WithFields(logrus.Fields{
"currentSlot": s.cfg.chain.CurrentSlot(),
"currentSlot": c.CurrentSlot(),
"attSlot": attestations[0].Message.Aggregate.Data.Slot,
"attCount": len(attestations),
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),

Some files were not shown because too many files have changed in this diff Show More