Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: eip4844-ch... build-blk- (35 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9efacb5c3c | |
| | 0325741318 | |
| | 150e8aa14d | |
| | f4307a902c | |
| | d257ef1742 | |
| | aad7aa79d4 | |
| | 2eb2f87913 | |
| | 9214364c5e | |
| | fb65421678 | |
| | 35e3eeddf9 | |
| | 2618a114e5 | |
| | aac47640b4 | |
| | 17cfc60bdd | |
| | 8d001d49d4 | |
| | 5ecebbe15a | |
| | 5ae7e17ada | |
| | 17488028a6 | |
| | 163f65f8a3 | |
| | 6ebe5ea8b2 | |
| | 5ea0417df2 | |
| | 61fed336e1 | |
| | fed6a351cc | |
| | 98949d8075 | |
| | 5cbbd26df4 | |
| | 83ccb15e66 | |
| | f5bfb8e9eb | |
| | b00f7f4592 | |
| | 191e603e28 | |
| | 76c729f9fa | |
| | 797cc360c7 | |
| | 79da486508 | |
| | 5bb1491d8f | |
| | c0ee781638 | |
| | 4d5827c527 | |
| | 88f03967dd | |
@@ -2,8 +2,8 @@
[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
[](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[](https://discord.gg/CTYGPUJ)
[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
@@ -70,15 +70,16 @@ type Gateway struct {
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
	g := &Gateway{
		ctx: ctx,
		cfg: &config{
			router: mux.NewRouter(),
		},
		cfg: &config{},
	}
	for _, opt := range opts {
		if err := opt(g); err != nil {
			return nil, err
		}
	}
	if g.cfg.router == nil {
		g.cfg.router = mux.NewRouter()
	}
	return g, nil
}
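The constructor change above defers the default router until after all options have run, so a caller-supplied router can no longer be silently clobbered by the constructor's own default. A minimal, self-contained sketch of that options-then-defaults pattern, using hypothetical names rather than the real prysm gateway types:

```go
package main

import "fmt"

// router stands in for mux.Router in this sketch.
type router struct{ name string }

type config struct{ router *router }

type gateway struct{ cfg *config }

type option func(*gateway) error

// withRouter mirrors an option that injects a caller-supplied router.
func withRouter(r *router) option {
	return func(g *gateway) error { g.cfg.router = r; return nil }
}

// newGateway applies options first and only then fills in defaults,
// so user-supplied values are never overwritten by the constructor.
func newGateway(opts ...option) (*gateway, error) {
	g := &gateway{cfg: &config{}}
	for _, opt := range opts {
		if err := opt(g); err != nil {
			return nil, err
		}
	}
	if g.cfg.router == nil {
		g.cfg.router = &router{name: "default"}
	}
	return g, nil
}

func main() {
	g, _ := newGateway(withRouter(&router{name: "custom"}))
	fmt.Println(g.cfg.router.name) // prints "custom", not "default"
}
```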
@@ -10,11 +10,6 @@ import (

type Option func(g *Gateway) error

func (g *Gateway) SetRouter(r *mux.Router) *Gateway {
	g.cfg.router = r
	return g
}

func WithPbHandlers(handlers []*PbMux) Option {
	return func(g *Gateway) error {
		g.cfg.pbHandlers = handlers
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "chain_info.go",
        "chain_info_forkchoice.go",
        "error.go",
        "execution_engine.go",
        "forkchoice_update_execution.go",
@@ -7,7 +7,6 @@ import (

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"

@@ -15,6 +14,7 @@ import (
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
	"go.opencensus.io/trace"

@@ -37,12 +37,16 @@ type ChainInfoFetcher interface {
// of locking forkchoice
type ForkchoiceFetcher interface {
	Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
}

// HeadUpdater defines a common interface for methods in blockchain service
// which allow to update the head info
type HeadUpdater interface {
	CachedHeadRoot() [32]byte
	GetProposerHead() [32]byte
	SetForkChoiceGenesisTime(uint64)
	UpdateHead(context.Context, primitives.Slot)
	HighestReceivedBlockSlot() primitives.Slot
	ReceivedBlocksLastEpoch() (uint64, error)
	InsertNode(context.Context, state.BeaconState, [32]byte) error
	ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
	NewSlot(context.Context, primitives.Slot) error
	ProposerBoost() [32]byte
}

// TimeFetcher retrieves the Ethereum consensus data that's related to time.

@@ -76,7 +80,6 @@ type HeadFetcher interface {

// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
	ForkChoicer() forkchoice.ForkChoicer
	CurrentFork() *ethpb.Fork
	GenesisFetcher
	TimeFetcher

@@ -93,6 +96,8 @@ type FinalizationFetcher interface {
	FinalizedCheckpt() *ethpb.Checkpoint
	CurrentJustifiedCheckpt() *ethpb.Checkpoint
	PreviousJustifiedCheckpt() *ethpb.Checkpoint
	UnrealizedJustifiedPayloadBlockHash() ([32]byte, error)
	FinalizedBlockHash() [32]byte
	InForkchoice([32]byte) bool
	IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}

@@ -105,25 +110,25 @@ type OptimisticModeFetcher interface {

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	cp := s.ForkChoicer().FinalizedCheckpoint()
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
	return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	cp := s.cfg.ForkChoiceStore.PreviousJustifiedCheckpoint()
	return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	cp := s.ForkChoicer().JustifiedCheckpoint()
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	cp := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
	return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
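The rewritten getters above read the checkpoint while holding the store's read lock and hand back a copy of the root rather than a reference into the store. A standalone sketch of that access pattern; the checkpoint and store types here are illustrative stand-ins, not the real prysm types:

```go
package main

import (
	"fmt"
	"sync"
)

// fcCheckpoint is how a fork-choice store might hold a checkpoint internally.
type fcCheckpoint struct {
	Epoch uint64
	Root  [32]byte
}

// apiCheckpoint is what callers receive; Root is a freshly allocated slice.
type apiCheckpoint struct {
	Epoch uint64
	Root  []byte
}

type store struct {
	mu        sync.RWMutex
	finalized fcCheckpoint
}

// FinalizedCheckpt reads under RLock and returns a defensive copy, so callers
// never hold memory that writers to the store may mutate later.
func (s *store) FinalizedCheckpt() *apiCheckpoint {
	s.mu.RLock()
	defer s.mu.RUnlock()
	cp := s.finalized
	root := make([]byte, len(cp.Root))
	copy(root, cp.Root[:]) // same idea as bytesutil.SafeCopyBytes
	return &apiCheckpoint{Epoch: cp.Epoch, Root: root}
}

func main() {
	s := &store{finalized: fcCheckpoint{Epoch: 3, Root: [32]byte{'a'}}}
	cp := s.FinalizedCheckpt()
	fmt.Printf("epoch=%d root=%x...\n", cp.Epoch, cp.Root[:1])
}
```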
@@ -286,8 +291,8 @@ func (s *Service) CurrentFork() *ethpb.Fork {

// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	// If the block has not been finalized, check fork choice store to see if the block is canonical
	if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
		return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil

@@ -297,14 +302,6 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
	return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}

// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	return s.cfg.ForkChoiceStore.Tips()
}

// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
	s.headLock.RLock()

@@ -329,11 +326,6 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
	return v.PublicKey(), nil
}

// ForkChoicer returns the forkchoice interface.
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
	return s.cfg.ForkChoiceStore
}

// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
	if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {

@@ -343,8 +335,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
	headRoot := s.head.root
	s.headLock.RUnlock()

	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
	if err == nil {
		return optimistic, nil

@@ -360,14 +352,14 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	if s.ForkChoicer().FinalizedCheckpoint().Root == root {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	if s.cfg.ForkChoiceStore.FinalizedCheckpoint().Root == root {
		return true
	}
	// If node exists in our store, then it is not
	// finalized.
	if s.ForkChoicer().HasNode(root) {
	if s.cfg.ForkChoiceStore.HasNode(root) {
		return false
	}
	return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)

@@ -377,17 +369,17 @@ func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
// This in particular means that the blockroot is a descendant of the
// finalized checkpoint
func (s *Service) InForkchoice(root [32]byte) bool {
	s.ForkChoicer().RLock()
	defer s.ForkChoicer().RUnlock()
	return s.ForkChoicer().HasNode(root)
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.HasNode(root)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
	s.ForkChoicer().RLock()
	s.cfg.ForkChoiceStore.RLock()
	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
	s.ForkChoicer().RUnlock()
	s.cfg.ForkChoiceStore.RUnlock()
	if err == nil {
		return optimistic, nil
	}

@@ -410,7 +402,10 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
	}

	if ss == nil {
		return true, errInvalidNilSummary
		ss, err = s.recoverStateSummary(ctx, root)
		if err != nil {
			return true, err
		}
	}
	validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
	if err != nil {

@@ -436,7 +431,10 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
		return false, err
	}
	if lastValidated == nil {
		return false, errInvalidNilSummary
		lastValidated, err = s.recoverStateSummary(ctx, root)
		if err != nil {
			return false, err
		}
	}

	if ss.Slot > lastValidated.Slot {

@@ -465,9 +463,9 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
	r := bytesutil.ToBytes32(root)
	// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
	// This is most optimal outcome.
	s.ForkChoicer().RLock()
	s.cfg.ForkChoiceStore.RLock()
	ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
	s.ForkChoicer().RUnlock()
	s.cfg.ForkChoiceStore.RUnlock()
	if err != nil {
		// Try getting ancestor root from DB when failed to retrieve from fork choice store.
		// This is the second line of defense for retrieving ancestor root.

@@ -485,7 +483,17 @@ func (s *Service) SetGenesisTime(t time.Time) {
	s.genesisTime = t
}

// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
	return s.cfg.ForkChoiceStore
func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
	if s.cfg.BeaconDB.HasBlock(ctx, blockRoot) {
		b, err := s.cfg.BeaconDB.Block(ctx, blockRoot)
		if err != nil {
			return nil, err
		}
		summary := &ethpb.StateSummary{Slot: b.Block().Slot(), Root: blockRoot[:]}
		if err := s.cfg.BeaconDB.SaveStateSummary(ctx, summary); err != nil {
			return nil, err
		}
		return summary, nil
	}
	return nil, errBlockDoesNotExist
}
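The new recoverStateSummary path above rebuilds a missing state summary from the block stored in the database and persists it before IsOptimisticForRoot continues, instead of failing with errInvalidNilSummary. A rough sketch of that fallback flow against a hypothetical in-memory database, not the real prysm BeaconDB interface:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type stateSummary struct {
	Slot uint64
	Root []byte
}

// fakeDB is an illustrative stand-in for the beacon database.
type fakeDB struct {
	summaries map[[32]byte]*stateSummary
	blocks    map[[32]byte]uint64 // block root -> slot
}

var errBlockDoesNotExist = errors.New("could not find block in DB")

// summaryOrRecover returns the stored summary, or rebuilds and saves one from
// the block when the summary is missing, mirroring the recovery fallback above.
func summaryOrRecover(_ context.Context, d *fakeDB, root [32]byte) (*stateSummary, error) {
	if ss, ok := d.summaries[root]; ok {
		return ss, nil
	}
	slot, ok := d.blocks[root]
	if !ok {
		return nil, errBlockDoesNotExist
	}
	ss := &stateSummary{Slot: slot, Root: root[:]}
	d.summaries[root] = ss // persist the recovered summary for next time
	return ss, nil
}

func main() {
	root := [32]byte{'b'}
	d := &fakeDB{summaries: map[[32]byte]*stateSummary{}, blocks: map[[32]byte]uint64{root: 10}}
	ss, err := summaryOrRecover(context.Background(), d, root)
	fmt.Println(ss.Slot, err) // 10 <nil>
}
```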
beacon-chain/blockchain/chain_info_forkchoice.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package blockchain

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)

// CachedHeadRoot returns the corresponding value from Forkchoice
func (s *Service) CachedHeadRoot() [32]byte {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.CachedHeadRoot()
}

// GetProposerHead returns the corresponding value from forkchoice
func (s *Service) GetProposerHead() [32]byte {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.GetProposerHead()
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}

// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}

// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.ReceivedBlocksLastEpoch()
}

// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}

// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
}

// NewSlot returns the corresponding value from forkchoice
func (s *Service) NewSlot(ctx context.Context, slot primitives.Slot) error {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	return s.cfg.ForkChoiceStore.NewSlot(ctx, slot)
}

// ProposerBoost wraps the corresponding method from forkchoice
func (s *Service) ProposerBoost() [32]byte {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	return s.cfg.ForkChoiceStore.ProposerBoost()
}

// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.Tips()
}

// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
func (s *Service) UnrealizedJustifiedPayloadBlockHash() ([32]byte, error) {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}

// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
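Every wrapper in the new file above follows the same shape: take the store's lock (read lock for getters, write lock for mutators) and delegate a single call. A compact sketch of that self-locking facade with a toy store instead of the real forkchoice.ForkChoicer:

```go
package main

import (
	"fmt"
	"sync"
)

// toyStore stands in for the fork-choice store; the real one embeds its own
// RWMutex and exposes Lock/RLock to callers.
type toyStore struct {
	sync.RWMutex
	headRoot [32]byte
	genesis  uint64
}

func (t *toyStore) cachedHeadRoot() [32]byte { return t.headRoot }
func (t *toyStore) setGenesisTime(ts uint64) { t.genesis = ts }

// service mirrors the blockchain Service: it never hands the store out,
// it exposes small methods that lock around a single delegated call.
type service struct{ store *toyStore }

// CachedHeadRoot is read-only, so a read lock suffices.
func (s *service) CachedHeadRoot() [32]byte {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.store.cachedHeadRoot()
}

// SetForkChoiceGenesisTime mutates the store, so it takes the write lock.
func (s *service) SetForkChoiceGenesisTime(ts uint64) {
	s.store.Lock()
	defer s.store.Unlock()
	s.store.setGenesisTime(ts)
}

func main() {
	s := &service{store: &toyStore{headRoot: [32]byte{'h'}}}
	s.SetForkChoiceGenesisTime(1606824023)
	r := s.CachedHeadRoot()
	fmt.Printf("%x\n", r[:1])
}
```

The design choice this reflects: callers never touch the store or its lock directly, so forgetting to lock (or double-locking) becomes much harder.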
@@ -71,12 +71,6 @@ func TestHeadRoot_Nil(t *testing.T) {
	assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}

func TestService_ForkChoiceStore(t *testing.T) {
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	p := c.ForkChoiceStore()
	require.Equal(t, primitives.Epoch(0), p.FinalizedCheckpoint().Epoch)
}

func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)

@@ -125,6 +119,46 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
	require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
}

func TestFinalizedBlockHash(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	fcs := doublylinkedtree.New()
	opts := []Option{
		WithDatabase(beaconDB),
		WithForkChoiceStore(fcs),
		WithStateGen(stategen.New(beaconDB, fcs)),
	}
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)

	r := [32]byte{'f'}
	cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
	bState, _ := util.DeterministicGenesisState(t, 10)
	require.NoError(t, beaconDB.SaveState(ctx, bState, r))

	require.NoError(t, fcs.UpdateFinalizedCheckpoint(cp))
	h := service.FinalizedBlockHash()
	require.Equal(t, params.BeaconConfig().ZeroHash, h)
	require.Equal(t, r, fcs.FinalizedCheckpoint().Root)
}

func TestUnrealizedJustifiedBlockHash(t *testing.T) {
	ctx := context.Background()
	service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
	ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
	service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))

	h, err := service.UnrealizedJustifiedPayloadBlockHash()
	require.NoError(t, err)
	require.Equal(t, params.BeaconConfig().ZeroHash, h)
	require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
}

func TestHeadSlot_CanRetrieve(t *testing.T) {
	c := &Service{}
	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})

@@ -484,15 +518,10 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
	validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

	_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
	require.ErrorContains(t, "nil summary returned from the DB", err)

	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
	optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
	require.NoError(t, err)
	require.Equal(t, true, optimistic)

	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
	cp := &ethpb.Checkpoint{
		Epoch: 1,
		Root: validatedRoot[:],

@@ -554,12 +583,31 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {

}

func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10
	br, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
	util.SaveBlock(t, context.Background(), beaconDB, b)
	_, err = c.IsOptimisticForRoot(ctx, br)
	assert.NoError(t, err)
	summ, err := beaconDB.StateSummary(ctx, br)
	assert.NoError(t, err)
	assert.NotNil(t, summ)
	assert.Equal(t, 10, int(summ.Slot))
	assert.DeepEqual(t, br[:], summ.Root)
}

func TestService_IsFinalized(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()
	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
	r1 := [32]byte{'a'}
	require.NoError(t, c.ForkChoiceStore().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
	require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
		Root: r1,
	}))
	b := util.NewBeaconBlock()
@@ -15,8 +15,8 @@ var (
	errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
	// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
	errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
	// errInvalidNilSummary is returned when a nil summary is returned from the DB.
	errInvalidNilSummary = errors.New("nil summary returned from the DB")
	// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
	errBlockDoesNotExist = errors.New("could not find block in DB")
	// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
	errWrongBlockCount = errors.New("wrong number of blocks or block roots")
	// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
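These are package-level sentinel errors, so call sites can compare against them (directly or through errors.Is) instead of matching message strings. A tiny illustration of that usage, reusing the names declared above in an otherwise standalone sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel errors, declared once and reused, as in the var block above.
var (
	errInvalidNilSummary = errors.New("nil summary returned from the DB")
	errBlockDoesNotExist = errors.New("could not find block in DB")
)

func lookup(found bool) error {
	if !found {
		// Wrapping with %w keeps the sentinel reachable through errors.Is.
		return fmt.Errorf("state summary lookup: %w", errBlockDoesNotExist)
	}
	return nil
}

func main() {
	err := lookup(false)
	fmt.Println(errors.Is(err, errBlockDoesNotExist)) // true
	fmt.Println(errors.Is(err, errInvalidNilSummary)) // false
}
```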
@@ -60,8 +60,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkchoiceUpdateArg
		log.WithError(err).Error("Could not get execution payload for head block")
		return nil, nil
	}
	finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
	justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
	finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
	justifiedHash, err := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
	if err != nil {
		log.WithError(err).Error("Could not get unrealized justified payload block hash")
		justifiedHash = finalizedHash
	}
	fcs := &enginev1.ForkchoiceState{
		HeadBlockHash: headPayload.BlockHash(),
		SafeBlockHash: justifiedHash[:],

@@ -88,7 +92,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkchoiceUpdateArg
		if len(lastValidHash) == 0 {
			lastValidHash = defaultLatestValidHash
		}
		invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
		invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
		if err != nil {
			log.WithError(err).Error("Could not set head root to invalid")
			return nil, nil

@@ -224,7 +228,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
		if err != nil {
			return false, err
		}
		invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
		invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
		if err != nil {
			return false, err
		}
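The first hunk above now asks the store for the unrealized justified payload hash and, on error, falls back to the finalized hash before building the engine's forkchoice state. A sketch of that degradation path; the ForkchoiceState struct here is a simplified stand-in for the engine API type, not the real enginev1 message:

```go
package main

import (
	"errors"
	"fmt"
)

// forkchoiceState is a simplified stand-in for enginev1.ForkchoiceState.
type forkchoiceState struct {
	HeadBlockHash      []byte
	SafeBlockHash      []byte
	FinalizedBlockHash []byte
}

// buildForkchoiceState prefers the unrealized justified hash for the safe
// block but degrades to the finalized hash when it cannot be computed.
func buildForkchoiceState(head, finalized [32]byte, justified func() ([32]byte, error)) *forkchoiceState {
	justifiedHash, err := justified()
	if err != nil {
		// Same idea as the hunk above: log-and-fall-back rather than abort.
		justifiedHash = finalized
	}
	return &forkchoiceState{
		HeadBlockHash:      head[:],
		SafeBlockHash:      justifiedHash[:],
		FinalizedBlockHash: finalized[:],
	}
}

func main() {
	failing := func() ([32]byte, error) { return [32]byte{}, errors.New("not yet computed") }
	fcs := buildForkchoiceState([32]byte{'h'}, [32]byte{'f'}, failing)
	fmt.Printf("safe=%x finalized=%x\n", fcs.SafeBlockHash[:1], fcs.FinalizedBlockHash[:1])
}
```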
@@ -89,13 +89,13 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
// shouldOverrideFCU checks whether the incoming block is still subject to being
// reorged or not by the next proposer.
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
	headWeight, err := s.ForkChoicer().Weight(newHeadRoot)
	headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
	if err != nil {
		log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
	}
	currentSlot := s.CurrentSlot()
	if proposingSlot == currentSlot {
		proposerHead := s.ForkChoicer().GetProposerHead()
		proposerHead := s.cfg.ForkChoiceStore.GetProposerHead()
		if proposerHead != newHeadRoot {
			return true
		}

@@ -106,7 +106,7 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
			params.BeaconConfig().SecondsPerSlot)
		lateBlockFailedAttemptSecondThreshold.Inc()
	} else {
		if s.ForkChoicer().ShouldOverrideFCU() {
		if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
			return true
		}
		secs, err := slots.SecondsSinceSlotStart(currentSlot,
@@ -28,6 +28,8 @@ import (
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
// This function is only used in spec-tests, it does save the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
	if err != nil {
		return errors.Wrap(err, "could not update head")

@@ -94,18 +96,18 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
	}
	oldHeadRoot := bytesutil.ToBytes32(r)
	if headBlock.Block().ParentRoot() != oldHeadRoot {
		commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
		commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
		if err != nil {
			log.WithError(err).Error("Could not find common ancestor root")
			commonRoot = params.BeaconConfig().ZeroHash
		}
		dis := headSlot + newHeadSlot - 2*forkSlot
		dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
		oldWeight, err := s.ForkChoicer().Weight(oldHeadRoot)
		oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
		}
		newWeight, err := s.ForkChoicer().Weight(newHeadRoot)
		newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
		}

@@ -123,7 +125,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
		reorgDistance.Observe(float64(dis))
		reorgDepth.Observe(float64(dep))

	isOptimistic, err := s.ForkChoicer().IsOptimistic(newHeadRoot)
	isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
	if err != nil {
		return errors.Wrap(err, "could not check if node is optimistically synced")
	}

@@ -363,7 +365,7 @@ func (s *Service) notifyNewHeadEvent(
// This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
	commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
	commonAncestorRoot, _, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, newHeadRoot, orphanedRoot)
	switch {
	// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
	case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):

@@ -401,6 +403,19 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
			}
			saveOrphanedAttCount.Inc()
		}
		for _, as := range orphanedBlk.Block().Body().AttesterSlashings() {
			if err := s.cfg.SlashingPool.InsertAttesterSlashing(ctx, s.headStateReadOnly(ctx), as); err != nil {
				log.WithError(err).Error("Could not insert reorg attester slashing")
			}
		}
		for _, vs := range orphanedBlk.Block().Body().ProposerSlashings() {
			if err := s.cfg.SlashingPool.InsertProposerSlashing(ctx, s.headStateReadOnly(ctx), vs); err != nil {
				log.WithError(err).Error("Could not insert reorg proposer slashing")
			}
		}
		for _, v := range orphanedBlk.Block().Body().VoluntaryExits() {
			s.cfg.ExitPool.InsertVoluntaryExit(v)
		}
		if orphanedBlk.Version() >= version.Capella {
			changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
			if err != nil {
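The expanded saveOrphanedOperations loop above recycles more than attestations: attester and proposer slashings, voluntary exits, and (post-Capella) BLS-to-execution changes from each orphaned block go back into their pools so they can be included again on the new chain. A schematic sketch of that loop over deliberately simplified stand-in types, not the real prysm pool interfaces:

```go
package main

import "fmt"

// orphanedBlock is a toy stand-in for an orphaned beacon block body.
type orphanedBlock struct {
	attestations      []string
	attesterSlashings []string
	proposerSlashings []string
	voluntaryExits    []string
}

// pools collects the operation pools the real service writes back into.
type pools struct {
	atts, slashings, exits []string
}

// recycleOrphanedOps walks the orphaned blocks and pushes every recoverable
// operation back into a pool, mirroring the expanded loop above.
func recycleOrphanedOps(blocks []orphanedBlock, p *pools) {
	for _, b := range blocks {
		p.atts = append(p.atts, b.attestations...)
		p.slashings = append(p.slashings, b.attesterSlashings...)
		p.slashings = append(p.slashings, b.proposerSlashings...)
		p.exits = append(p.exits, b.voluntaryExits...)
	}
}

func main() {
	p := &pools{}
	recycleOrphanedOps([]orphanedBlock{{
		attestations:      []string{"att-1"},
		attesterSlashings: []string{"as-1"},
		proposerSlashings: []string{"ps-1"},
		voluntaryExits:    []string{"exit-1"},
	}}, p)
	fmt.Println(len(p.atts), len(p.slashings), len(p.exits)) // 1 2 1
}
```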
@@ -308,7 +308,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
		require.NoError(t, err)
		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
		require.NoError(t, err)
		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
		util.SaveBlock(t, ctx, beaconDB, blk)
	}

@@ -326,6 +326,88 @@ func TestSaveOrphanedAtts(t *testing.T) {
	require.DeepEqual(t, wantAtts, atts)
}

func TestSaveOrphanedOps(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	config := params.BeaconConfig()
	config.ShardCommitteePeriod = 0
	params.OverrideBeaconConfig(config)

	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	service := setupBeaconChain(t, beaconDB)
	service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

	// Chain setup
	// 0 -- 1 -- 2 -- 3
	// \-4
	st, keys := util.DeterministicGenesisState(t, 64)
	service.head = &head{state: st}
	blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
	assert.NoError(t, err)

	util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
	rG, err := blkG.Block.HashTreeRoot()
	require.NoError(t, err)

	blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
	assert.NoError(t, err)
	blk1.Block.ParentRoot = rG[:]
	r1, err := blk1.Block.HashTreeRoot()
	require.NoError(t, err)

	blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
	assert.NoError(t, err)
	blk2.Block.ParentRoot = r1[:]
	r2, err := blk2.Block.HashTreeRoot()
	require.NoError(t, err)

	blkConfig := util.DefaultBlockGenConfig()
	blkConfig.NumBLSChanges = 5
	blkConfig.NumProposerSlashings = 1
	blkConfig.NumAttesterSlashings = 1
	blkConfig.NumVoluntaryExits = 1
	blk3, err := util.GenerateFullBlock(st, keys, blkConfig, 3)
	assert.NoError(t, err)
	blk3.Block.ParentRoot = r2[:]
	r3, err := blk3.Block.HashTreeRoot()
	require.NoError(t, err)

	blk4 := util.NewBeaconBlock()
	blk4.Block.Slot = 4
	blk4.Block.ParentRoot = rG[:]
	r4, err := blk4.Block.HashTreeRoot()
	require.NoError(t, err)
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

	for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
		r, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
		require.NoError(t, err)
		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
		util.SaveBlock(t, ctx, beaconDB, blk)
	}

	require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
	require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
	wantAtts := []*ethpb.Attestation{
		blk3.Block.Body.Attestations[0],
		blk2.Block.Body.Attestations[0],
		blk1.Block.Body.Attestations[0],
	}
	atts := service.cfg.AttPool.AggregatedAttestations()
	sort.Slice(atts, func(i, j int) bool {
		return atts[i].Data.Slot > atts[j].Data.Slot
	})
	require.DeepEqual(t, wantAtts, atts)
	require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
	require.Equal(t, 1, len(service.cfg.SlashingPool.PendingAttesterSlashings(ctx, st, false)))
	exits, err := service.cfg.ExitPool.PendingExits()
	require.NoError(t, err)
	require.Equal(t, 1, len(exits))
}

func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)

@@ -373,7 +455,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
		require.NoError(t, err)
		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
		require.NoError(t, err)
		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
		util.SaveBlock(t, ctx, beaconDB, blk)
	}

@@ -431,7 +513,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
		require.NoError(t, err)
		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
		require.NoError(t, err)
		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
		util.SaveBlock(t, ctx, beaconDB, blk)
	}

@@ -490,7 +572,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
		require.NoError(t, err)
		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
		require.NoError(t, err)
		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
		util.SaveBlock(t, ctx, beaconDB, blk)
	}
@@ -108,13 +108,13 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
	}

	// Verify that the parent block is in forkchoice
	if !s.ForkChoicer().HasNode(b.ParentRoot()) {
	if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
		return ErrNotDescendantOfFinalized
	}

	// Save current justified and finalized epochs for future use.
	currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
	currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
	currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
	currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
	preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
	preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch

@@ -187,18 +187,18 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
		}()
	}

	justified := s.ForkChoicer().JustifiedCheckpoint()
	justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
	start := time.Now()
	headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
	if err != nil {
		log.WithError(err).Warn("Could not update head")
	}
	if blockRoot != headRoot {
		receivedWeight, err := s.ForkChoicer().Weight(blockRoot)
		receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
		}
		headWeight, err := s.ForkChoicer().Weight(headRoot)
		headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
		if err != nil {
			log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
		}

@@ -252,7 +252,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB

	// Save finalized check point to db and more.
	postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
	finalized := s.ForkChoicer().FinalizedCheckpoint()
	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
	if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
		if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
			return err

@@ -576,7 +576,7 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
	for _, slashing := range slashings {
		indices := blocks.SlashableAttesterIndices(slashing)
		for _, index := range indices {
			s.ForkChoicer().InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
			s.cfg.ForkChoiceStore.InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
		}
	}
}

@@ -671,9 +671,7 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
		for {
			select {
			case <-ticker.C():
				if err := s.fillMissingBlockPayloadId(ctx); err != nil {
					log.WithError(err).Error("Could not fill missing payload ID")
				}
				s.lateBlockTasks(ctx)

			case <-ctx.Done():
				log.Debug("Context closed, exiting routine")

@@ -683,11 +681,13 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
	}()
}

// fillMissingBlockPayloadId is called 4 seconds into the slot and calls FCU if we are proposing next slot
// and the cache has been missed
func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
// it also updates the next slot cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
	if s.CurrentSlot() == s.HeadSlot() {
		return nil
		return
	}
	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.MissedSlot,

@@ -697,21 +697,31 @@ func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
	_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
	// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
	if !has || id != [8]byte{} {
		return nil
		return
	}
	s.headLock.RLock()
	headBlock, err := s.headBlock()
	if err != nil {
		s.headLock.RUnlock()
		return err
		log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
		return
	}
	headState := s.headState(ctx)
	headRoot := s.headRoot()
	headState := s.headState(ctx)
	s.headLock.RUnlock()
	_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
		headState: headState,
		headRoot:  headRoot,
		headBlock: headBlock.Block(),
	})
	return err
	if err != nil {
		log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
	}
	lastRoot, lastState := transition.LastCachedState()
	if lastState == nil {
		lastRoot, lastState = headRoot[:], headState
	}
	if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
		log.WithError(err).Debug("could not update next slot state cache")
	}
}
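The lateBlockTasks rewrite above turns an error-returning helper into a fire-and-forget task: it runs from a ticker, logs failures instead of propagating them, and tops up the next-slot cache. A stripped-down sketch of that shape; the timing constants and callback names are illustrative, not the real prysm routine:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// lateBlockTasks-style worker: it never returns an error to the ticker loop;
// every failure is logged and the routine keeps running.
func lateBlockTasks(ctx context.Context, currentSlot, headSlot func() uint64, updateCache func(context.Context) error) {
	if currentSlot() == headSlot() {
		return // a block arrived for this slot, nothing to do
	}
	// ... emit a missed-slot event and send forkchoice-updated here ...
	if err := updateCache(ctx); err != nil {
		fmt.Println("could not update next slot state cache:", err) // log, don't return
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	slot := uint64(0)
	for {
		select {
		case <-ticker.C:
			slot++
			lateBlockTasks(ctx,
				func() uint64 { return slot },      // current slot advances
				func() uint64 { return 0 },         // head is stuck, so tasks run
				func(context.Context) error { return nil })
		case <-ctx.Done():
			return
		}
	}
}
```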
@@ -85,7 +85,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.ReadOnlyBeaconBlock) error {
	finalized := s.ForkChoicer().FinalizedCheckpoint()
	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
	finalizedSlot, err := slots.EpochStart(finalized.Epoch)
	if err != nil {
		return err

@@ -177,7 +177,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

	// Fork choice only matters from last finalized slot.
	finalized := s.ForkChoicer().FinalizedCheckpoint()
	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
	fSlot, err := slots.EpochStart(finalized.Epoch)
	if err != nil {
		return err

@@ -203,7 +203,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
	if len(pendingNodes) == 1 {
		return nil
	}
	if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
	if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
		return ErrNotDescendantOfFinalized
	}
	return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
@@ -139,7 +139,7 @@ func TestStore_OnBlock(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fRoot := bytesutil.ToBytes32(roots[0])
			require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
			require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
			root, err := tt.blk.Block.HashTreeRoot()
			assert.NoError(t, err)
			wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)

@@ -288,9 +288,9 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
	r0 := bytesutil.ToBytes32(roots[0])
	state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
	require.NoError(t, err)
	require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
	fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
	require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

	err = service.fillInForkChoiceMissingBlocks(
		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())

@@ -339,9 +339,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
	r0 := bytesutil.ToBytes32(roots[0])
	state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
	require.NoError(t, err)
	require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
	fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
	require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

	err = service.fillInForkChoiceMissingBlocks(
		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())

@@ -406,7 +406,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
	beaconState, _ := util.DeterministicGenesisState(t, 32)

	// Set finalized epoch to 2.
	require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
	err = service.fillInForkChoiceMissingBlocks(
		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
	require.NoError(t, err)

@@ -1363,7 +1363,7 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
	}
	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
	blk := util.HydrateBeaconBlock(&ethpb.BeaconBlock{Slot: 1})
	wb, err := consensusblocks.NewBeaconBlock(blk)
	require.NoError(t, err)

@@ -1459,7 +1459,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
		require.NoError(t, err)
	}
	// Check that we haven't justified the second epoch yet
	jc := service.ForkChoicer().JustifiedCheckpoint()
	jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(0), jc.Epoch)

	// import a block that justifies the second epoch

@@ -1474,14 +1474,14 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
	require.NoError(t, err)
	err = service.onBlock(ctx, wsb, firstInvalidRoot)
	require.NoError(t, err)
	jc = service.ForkChoicer().JustifiedCheckpoint()
	jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(2), jc.Epoch)

	sjc := validHeadState.CurrentJustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(0), sjc.Epoch)
	lvh := b.Block.Body.ExecutionPayload.ParentHash
	// check our head
	require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())

	// import another block to find out that it was invalid
	mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}

@@ -1500,7 +1500,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
	// Check that forkchoice's head is the last invalid block imported. The
	// store's headroot is the previous head (since the invalid block did
	// not finish importing) one and that the node is optimistic
	require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
	headRoot, err := service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))

@@ -1522,7 +1522,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
	require.NoError(t, err)
	// Check the newly imported block is head, it justified the right
	// checkpoint and the node is no longer optimistic
	require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
	sjc = service.CurrentJustifiedCheckpt()
	require.Equal(t, jc.Epoch, sjc.Epoch)
	require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))

@@ -1619,7 +1619,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
		require.NoError(t, err)
	}
	// Check that we haven't justified the second epoch yet
	jc := service.ForkChoicer().JustifiedCheckpoint()
	jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(0), jc.Epoch)

	// import a block that justifies the second epoch

@@ -1634,14 +1634,14 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
	require.NoError(t, err)
	err = service.onBlock(ctx, wsb, firstInvalidRoot)
	require.NoError(t, err)
	jc = service.ForkChoicer().JustifiedCheckpoint()
	jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(2), jc.Epoch)

	sjc := validHeadState.CurrentJustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(0), sjc.Epoch)
	lvh := b.Block.Body.ExecutionPayload.ParentHash
	// check our head
	require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())

	// import another block to find out that it was invalid
	mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}

@@ -1660,7 +1660,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
	// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
	// not finish importing and it was never imported to forkchoice). Check
	// also that the node is optimistic
	require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
	headRoot, err := service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))

@@ -1682,7 +1682,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
	require.NoError(t, err)
	// Check the newly imported block is head, it justified the right
	// checkpoint and the node is no longer optimistic
	require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
	sjc = service.CurrentJustifiedCheckpt()
	require.Equal(t, jc.Epoch, sjc.Epoch)
	require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))

@@ -1802,9 +1802,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
		require.NoError(t, err)
	}
	// Check that we have justified the second epoch
	jc := service.ForkChoicer().JustifiedCheckpoint()
	jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(2), jc.Epoch)
	invalidHeadRoot := service.ForkChoicer().CachedHeadRoot()
	invalidHeadRoot := service.cfg.ForkChoiceStore.CachedHeadRoot()

	// import block 19 to find out that the whole chain 13--18 was in fact
	// invalid

@@ -1825,7 +1825,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
	// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
	// not finish importing and it was never imported to forkchoice). Check
	// also that the node is optimistic
	require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
	headRoot, err := service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, invalidHeadRoot, bytesutil.ToBytes32(headRoot))

@@ -1855,7 +1855,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, service.onBlock(ctx, wsb, root))
	// Check that the head is still INVALID and the node is still optimistic
	require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
	optimistic, err = service.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, true, optimistic)

@@ -1877,7 +1877,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
		require.NoError(t, err)
	}
	// Head should still be INVALID and the node optimistic
	require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
	optimistic, err = service.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, true, optimistic)

@@ -1893,7 +1893,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
	require.NoError(t, err)
	err = service.onBlock(ctx, wsb, root)
	require.NoError(t, err)
	require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
	require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
	sjc = service.CurrentJustifiedCheckpt()
	require.Equal(t, primitives.Epoch(4), sjc.Epoch)
	optimistic, err = service.IsOptimistic(ctx)

@@ -2011,7 +2011,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
		require.NoError(t, service.onBlock(ctx, wsb, root))
	}
	// Check that we have justified the second epoch
	jc := service.ForkChoicer().JustifiedCheckpoint()
	jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(2), jc.Epoch)

	// import block 19 to find out that the whole chain 13--18 was in fact

@@ -2045,7 +2045,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
	require.NoError(t, service.StartFromSavedState(genesisState))

	// Forkchoice has the genesisRoot loaded at startup
	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
	// Service's store has the finalized state as headRoot
	headRoot, err := service.HeadRoot(ctx)
	require.NoError(t, err)

@@ -2072,7 +2072,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
	// We use onBlockBatch here because the valid chain is missing in forkchoice
	require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
	// Check that the head is now VALID and the node is not optimistic
	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
	headRoot, err = service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, root, bytesutil.ToBytes32(headRoot))

@@ -2153,6 +2153,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
}

func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
	logHook := logTest.NewGlobal()
	fc := doublylinkedtree.New()
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)

@@ -2164,7 +2165,8 @@ func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {

	service, err := NewService(ctx, opts...)
	require.NoError(t, err)
	require.NoError(t, service.fillMissingBlockPayloadId(ctx), 0)
	service.lateBlockTasks(ctx)
	require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}

// Helper function to simulate the block being on time or delayed for proposer
@@ -100,17 +100,15 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-pat.C():
|
||||
s.ForkChoicer().Lock()
|
||||
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
|
||||
s.ForkChoicer().Unlock()
|
||||
case <-st.C():
|
||||
s.ForkChoicer().Lock()
|
||||
if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
|
||||
log.WithError(err).Error("could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
s.UpdateHead(s.ctx, s.CurrentSlot())
|
||||
s.ForkChoicer().Unlock()
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -120,7 +118,8 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
|
||||
// The caller of this function MUST hold a lock in forkchoice
|
||||
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
|
||||
start := time.Now()
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
// This function is only called at 10 seconds or 0 seconds into the slot
|
||||
disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
|
||||
if !features.Get().DisableReorgLateBlocks {
|
||||
@@ -134,6 +133,10 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not compute head from new attestations")
|
||||
// Fallback to our current head root in the event of a failure.
|
||||
s.headLock.RLock()
|
||||
newHeadRoot = s.headRoot()
|
||||
s.headLock.RUnlock()
|
||||
}
|
||||
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
|
||||
|
||||
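Read together, the two hunks above shift lock ownership: the attestation/slot routine now holds the forkchoice lock only around NewSlot, while UpdateHead takes (and defers release of) the lock itself. A sketch of the resulting call pattern, reassembled from the lines shown here; the helper name is hypothetical:

```go
// onSlotTick is a hypothetical helper illustrating the new locking convention:
// NewSlot runs under an explicit forkchoice lock, while UpdateHead is called
// unlocked because it now acquires s.cfg.ForkChoiceStore's lock internally.
func (s *Service) onSlotTick() {
	s.cfg.ForkChoiceStore.Lock()
	if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
		log.WithError(err).Error("could not process new slot")
	}
	s.cfg.ForkChoiceStore.Unlock()

	s.UpdateHead(s.ctx, s.CurrentSlot())
}
```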
@@ -67,12 +67,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
cp := s.ForkChoicer().FinalizedCheckpoint()
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
finalized := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
|
||||
// Log block sync status.
|
||||
cp = s.ForkChoicer().JustifiedCheckpoint()
|
||||
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
justified := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
|
||||
log.WithError(err).Error("Unable to log block sync status")
|
||||
@@ -123,7 +123,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
cp := s.ForkChoicer().FinalizedCheckpoint()
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
finalized := ðpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
}
|
||||
@@ -131,7 +131,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
finalized := s.ForkChoicer().FinalizedCheckpoint()
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
@@ -152,8 +152,8 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
|
||||
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
|
||||
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
|
||||
s.ForkChoicer().Lock()
|
||||
defer s.ForkChoicer().Unlock()
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
|
||||
}
|
||||
|
||||
@@ -179,10 +179,14 @@ func (s *Service) prunePostBlockOperationPools(ctx context.Context, blk interfac
|
||||
return errors.Wrap(err, "could not process BLSToExecutionChanges")
|
||||
}
|
||||
|
||||
// Mark attester slashings as seen so we don't include same ones in future blocks.
|
||||
// Mark slashings as seen so we don't include the same ones in future blocks.
|
||||
for _, as := range blk.Block().Body().AttesterSlashings() {
|
||||
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
|
||||
}
|
||||
for _, ps := range blk.Block().Body().ProposerSlashings() {
|
||||
s.cfg.SlashingPool.MarkIncludedProposerSlashing(ps)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -207,7 +211,7 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
|
||||
currentEpoch := slots.ToEpoch(s.CurrentSlot())
|
||||
// Prevent `sinceFinality` going underflow.
|
||||
var sinceFinality primitives.Epoch
|
||||
finalized := s.ForkChoicer().FinalizedCheckpoint()
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
|
||||
@@ -23,6 +23,8 @@ import (
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
@@ -131,6 +133,8 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithDepositCache(depositCache),
|
||||
WithChainStartFetcher(web3Service),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithSlashingPool(slashings.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(fc),
|
||||
|
||||
@@ -27,6 +27,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -70,13 +71,8 @@ type ChainService struct {
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
}
|
||||
|
||||
// ForkChoicer mocks the same method in the chain service
|
||||
func (s *ChainService) ForkChoicer() forkchoice.ForkChoicer {
|
||||
return s.ForkChoiceStore
|
||||
}
|
||||
|
||||
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
|
||||
r, err := s.ForkChoicer().AncestorRoot(ctx, bytesutil.ToBytes32(root), slot)
|
||||
r, err := s.ForkChoiceStore.AncestorRoot(ctx, bytesutil.ToBytes32(root), slot)
|
||||
return r[:], err
|
||||
}
|
||||
|
||||
@@ -473,7 +469,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("could not update head")
|
||||
}
|
||||
err = s.ForkChoicer().InsertNode(ctx, st, root)
|
||||
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("could not insert node to forkchoice")
|
||||
}
|
||||
@@ -520,3 +516,84 @@ func prepareForkchoiceState(
|
||||
st, err := state_native.InitializeFromProtoBellatrix(base)
|
||||
return st, blockRoot, err
|
||||
}
|
||||
|
||||
// CachedHeadRoot mocks the same method in the chain service
|
||||
func (s *ChainService) CachedHeadRoot() [32]byte {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.CachedHeadRoot()
|
||||
}
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// GetProposerHead mocks the same method in the chain service
|
||||
func (s *ChainService) GetProposerHead() [32]byte {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.GetProposerHead()
|
||||
}
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// SetForkchoiceGenesisTime mocks the same method in the chain service
|
||||
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
s.ForkChoiceStore.SetGenesisTime(timestamp)
|
||||
}
|
||||
}
|
||||
|
||||
// ReceivedBlocksLastEpoch mocks the same method in the chain service
|
||||
func (s *ChainService) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.ReceivedBlocksLastEpoch()
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// HighestReceivedBlockSlot mocks the same method in the chain service
|
||||
func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.HighestReceivedBlockSlot()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// InsertNode mocks the same method in the chain service
|
||||
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForkChoiceDump mocks the same method in the chain service
|
||||
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.ForkChoiceDump(ctx)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NewSlot mocks the same method in the chain service
|
||||
func (s *ChainService) NewSlot(ctx context.Context, slot primitives.Slot) error {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.NewSlot(ctx, slot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposerBoost mocks the same method in the chain service
|
||||
func (s *ChainService) ProposerBoost() [32]byte {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.ProposerBoost()
|
||||
}
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// FinalizedBlockHash mocks the same method in the chain service
|
||||
func (s *ChainService) FinalizedBlockHash() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// UnrealizedJustifiedPayloadBlockHash mocks the same method in the chain service
|
||||
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() ([32]byte, error) {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
|
||||
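The mock ChainService now forwards these calls to a real forkchoice store when one is attached and falls back to zero values otherwise. A short test-style sketch (the surrounding test harness and variable names are assumed):

```go
fc := doublylinkedtree.New()
chain := &mock.ChainService{ForkChoiceStore: fc}

// Non-nil store: calls are delegated to the doubly-linked-tree implementation.
_ = chain.CachedHeadRoot()
_, _ = chain.ReceivedBlocksLastEpoch()

// Nil store: the mock returns zero values instead of panicking.
empty := &mock.ChainService{}
_ = empty.HighestReceivedBlockSlot() // 0
_ = empty.ProposerBoost()            // [32]byte{}
```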
@@ -78,7 +78,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
wsVerifier: wv,
|
||||
}
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
|
||||
cp := s.ForkChoicer().FinalizedCheckpoint()
|
||||
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
|
||||
if tt.wantErr == nil {
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -265,7 +265,7 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
|
||||
finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
|
||||
increment := cfg.EffectiveBalanceIncrement
|
||||
factor := cfg.BaseRewardFactor
|
||||
baseRewardMultiplier := increment * factor / math.IntegerSquareRoot(bal.ActiveCurrentEpoch)
|
||||
baseRewardMultiplier := increment * factor / math.CachedSquareRoot(bal.ActiveCurrentEpoch)
|
||||
leak := helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch)
|
||||
|
||||
// Modified in Altair and Bellatrix.
|
||||
|
||||
@@ -58,5 +58,5 @@ func BaseRewardPerIncrement(activeBalance uint64) (uint64, error) {
|
||||
return 0, errors.New("active balance can't be 0")
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
return cfg.EffectiveBalanceIncrement * cfg.BaseRewardFactor / math.IntegerSquareRoot(activeBalance), nil
|
||||
return cfg.EffectiveBalanceIncrement * cfg.BaseRewardFactor / math.CachedSquareRoot(activeBalance), nil
|
||||
}
|
||||
|
||||
@@ -72,7 +72,7 @@ func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Val
|
||||
prevEpoch := time.PrevEpoch(state)
|
||||
finalizedEpoch := state.FinalizedCheckpointEpoch()
|
||||
|
||||
sqrtActiveCurrentEpoch := math.IntegerSquareRoot(pBal.ActiveCurrentEpoch)
|
||||
sqrtActiveCurrentEpoch := math.CachedSquareRoot(pBal.ActiveCurrentEpoch)
|
||||
for i, v := range vp {
|
||||
rewards[i], penalties[i] = attestationDelta(pBal, sqrtActiveCurrentEpoch, v, prevEpoch, finalizedEpoch)
|
||||
}
|
||||
@@ -161,7 +161,7 @@ func ProposersDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Valida
|
||||
rewards := make([]uint64, numofVals)
|
||||
|
||||
totalBalance := pBal.ActiveCurrentEpoch
|
||||
balanceSqrt := math.IntegerSquareRoot(totalBalance)
|
||||
balanceSqrt := math.CachedSquareRoot(totalBalance)
|
||||
// Balance square root cannot be 0, this prevents division by 0.
|
||||
if balanceSqrt == 0 {
|
||||
balanceSqrt = 1
|
||||
|
||||
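These reward paths now call math.CachedSquareRoot instead of math.IntegerSquareRoot. The implementation is not part of this diff; purely as an illustration, a cached integer square root could memoize the last input/output pair as in the sketch below (package and function names here are hypothetical, not Prysm's code):

```go
package mathutil

import "sync"

var lastSqrt struct {
	sync.Mutex
	in, out uint64
}

// cachedSquareRoot returns floor(sqrt(n)), remembering the last result. Active
// balance totals repeat within an epoch, so repeated calls with the same input
// skip the Newton iteration. Illustrative sketch only.
func cachedSquareRoot(n uint64) uint64 {
	if n < 2 {
		return n
	}
	lastSqrt.Lock()
	defer lastSqrt.Unlock()
	if n == lastSqrt.in {
		return lastSqrt.out
	}
	x, y := n, (n+1)/2
	for y < x {
		x = y
		y = (x + n/x) / 2
	}
	lastSqrt.in, lastSqrt.out = n, x
	return x
}
```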
@@ -5,15 +5,20 @@ import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
)
|
||||
|
||||
type nextSlotCache struct {
|
||||
sync.RWMutex
|
||||
root []byte
|
||||
state state.BeaconState
|
||||
sync.Mutex
|
||||
prevRoot []byte
|
||||
lastRoot []byte
|
||||
prevState state.BeaconState
|
||||
lastState state.BeaconState
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -29,19 +34,22 @@ var (
|
||||
})
|
||||
)
|
||||
|
||||
// NextSlotState returns the saved state if the input root matches the root in `nextSlotCache`. Returns nil otherwise.
|
||||
// This is useful to check before processing slots. With a cache hit, it will return last processed state with slot plus
|
||||
// one advancement.
|
||||
func NextSlotState(_ context.Context, root []byte) (state.BeaconState, error) {
|
||||
nsc.RLock()
|
||||
defer nsc.RUnlock()
|
||||
if !bytes.Equal(root, nsc.root) || bytes.Equal(root, []byte{}) {
|
||||
nextSlotCacheMiss.Inc()
|
||||
return nil, nil
|
||||
// NextSlotState returns the saved state for the given block root.
// It returns the most recently updated state if its root matches; otherwise it returns the
// previously updated state if that root matches. If neither root matches, it returns nil.
|
||||
func NextSlotState(root []byte, wantedSlot types.Slot) state.BeaconState {
|
||||
nsc.Lock()
|
||||
defer nsc.Unlock()
|
||||
if bytes.Equal(root, nsc.lastRoot) && nsc.lastState.Slot() <= wantedSlot {
|
||||
nextSlotCacheHit.Inc()
|
||||
return nsc.lastState.Copy()
|
||||
}
|
||||
nextSlotCacheHit.Inc()
|
||||
// Returning copied state.
|
||||
return nsc.state.Copy(), nil
|
||||
if bytes.Equal(root, nsc.prevRoot) && nsc.prevState.Slot() <= wantedSlot {
|
||||
nextSlotCacheHit.Inc()
|
||||
return nsc.prevState.Copy()
|
||||
}
|
||||
nextSlotCacheMiss.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateNextSlotCache updates the `nextSlotCache`. It saves the input state after advancing the state slot by 1
|
||||
@@ -52,13 +60,25 @@ func UpdateNextSlotCache(ctx context.Context, root []byte, state state.BeaconSta
|
||||
copied := state.Copy()
|
||||
copied, err := ProcessSlots(ctx, copied, copied.Slot()+1)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not process slots")
|
||||
}
|
||||
|
||||
nsc.Lock()
|
||||
defer nsc.Unlock()
|
||||
|
||||
nsc.root = root
|
||||
nsc.state = copied
|
||||
nsc.prevRoot = nsc.lastRoot
|
||||
nsc.prevState = nsc.lastState
|
||||
nsc.lastRoot = bytesutil.SafeCopyBytes(root)
|
||||
nsc.lastState = copied
|
||||
return nil
|
||||
}
|
||||
|
||||
// LastCachedState returns the last cached root and state in the cache
|
||||
func LastCachedState() ([]byte, state.BeaconState) {
|
||||
nsc.Lock()
|
||||
defer nsc.Unlock()
|
||||
if nsc.lastState == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return bytesutil.SafeCopyBytes(nsc.lastRoot), nsc.lastState.Copy()
|
||||
}
|
||||
|
||||
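The cache now holds the last two advanced states (lastRoot/lastState and prevRoot/prevState) and lookups take both the parent root and the wanted slot. A usage sketch mirroring ProcessSlotsUsingNextSlotCache further down in this diff; the helper name and wiring are assumptions:

```go
import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// advanceToSlot consults the two-entry next-slot cache before falling back to
// full slot processing. NextSlotState already returns a copy, advanced by one
// slot past the block at parentRoot, or nil on a miss.
func advanceToSlot(ctx context.Context, parentState state.BeaconState, parentRoot []byte, slot primitives.Slot) (state.BeaconState, error) {
	st := parentState
	if cached := transition.NextSlotState(parentRoot, slot); cached != nil {
		st = cached
	}
	if st.Slot() == slot {
		return st, nil
	}
	return transition.ProcessSlots(ctx, st, slot)
}
```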
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
@@ -13,18 +14,36 @@ import (
|
||||
func TestTrailingSlotState_RoundTrip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := []byte{'a'}
|
||||
s, err := transition.NextSlotState(ctx, r)
|
||||
require.NoError(t, err)
|
||||
s := transition.NextSlotState(r, 0)
|
||||
require.Equal(t, nil, s)
|
||||
|
||||
s, _ = util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, transition.UpdateNextSlotCache(ctx, r, s))
|
||||
s, err = transition.NextSlotState(ctx, r)
|
||||
require.NoError(t, err)
|
||||
s = transition.NextSlotState(r, 1)
|
||||
require.Equal(t, primitives.Slot(1), s.Slot())
|
||||
|
||||
lastRoot, lastState := transition.LastCachedState()
|
||||
require.DeepEqual(t, r, lastRoot)
|
||||
require.Equal(t, s.Slot(), lastState.Slot())
|
||||
|
||||
require.NoError(t, transition.UpdateNextSlotCache(ctx, r, s))
|
||||
s, err = transition.NextSlotState(ctx, r)
|
||||
require.NoError(t, err)
|
||||
s = transition.NextSlotState(r, 2)
|
||||
require.Equal(t, primitives.Slot(2), s.Slot())
|
||||
|
||||
lastRoot, lastState = transition.LastCachedState()
|
||||
require.DeepEqual(t, r, lastRoot)
|
||||
require.Equal(t, s.Slot(), lastState.Slot())
|
||||
}
|
||||
|
||||
func TestTrailingSlotState_StateAdvancedBeyondRequest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := []byte{'a'}
|
||||
s := transition.NextSlotState(r, 0)
|
||||
require.Equal(t, nil, s)
|
||||
|
||||
s, _ = util.DeterministicGenesisState(t, 1)
|
||||
assert.NoError(t, s.SetSlot(2))
|
||||
require.NoError(t, transition.UpdateNextSlotCache(ctx, r, s))
|
||||
s = transition.NextSlotState(r, 1)
|
||||
require.Equal(t, nil, s)
|
||||
}
|
||||
|
||||
@@ -147,25 +147,15 @@ func ProcessSlotsUsingNextSlotCache(
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlotsUsingNextSlotCache")
|
||||
defer span.End()
|
||||
|
||||
// Check whether the parent state has been advanced by 1 slot in next slot cache.
|
||||
nextSlotState, err := NextSlotState(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cachedStateExists := nextSlotState != nil && !nextSlotState.IsNil()
|
||||
// If the next slot state is not nil (i.e. cache hit).
|
||||
// We replace next slot state with parent state.
|
||||
if cachedStateExists {
|
||||
nextSlotState := NextSlotState(parentRoot, slot)
|
||||
if nextSlotState != nil {
|
||||
parentState = nextSlotState
|
||||
}
|
||||
|
||||
// In the event our cached state has advanced our
|
||||
// state to the desired slot, we exit early.
|
||||
if cachedStateExists && parentState.Slot() == slot {
|
||||
if parentState.Slot() == slot {
|
||||
return parentState, nil
|
||||
}
|
||||
// Since the next slot cache only advances the state by one slot,
// we check whether there are more slots that need to be processed.
|
||||
|
||||
var err error
|
||||
parentState, err = ProcessSlots(ctx, parentState, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process slots")
|
||||
|
||||
@@ -405,8 +405,8 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
||||
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
||||
web3Service.httpLogger = testAcc.Backend
|
||||
web3Service.latestEth1Data.LastRequestedBlock = 0
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
bConfig := params.MinimalSpecConfig().Copy()
|
||||
bConfig.MinGenesisTime = 0
|
||||
bConfig.SecondsPerETH1Block = 10
|
||||
@@ -444,8 +444,8 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
||||
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
|
||||
// Set up our subscriber now to listen for the chain started event.
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
@@ -502,8 +502,8 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
||||
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
||||
web3Service.httpLogger = testAcc.Backend
|
||||
web3Service.latestEth1Data.LastRequestedBlock = 0
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
bConfig := params.MinimalSpecConfig().Copy()
|
||||
bConfig.SecondsPerETH1Block = 10
|
||||
params.OverrideBeaconConfig(bConfig)
|
||||
@@ -540,14 +540,14 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
||||
for i := uint64(0); i < 1500; i++ {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
wantedGenesisTime := testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
wantedGenesisTime := testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
|
||||
// Forward the chain to account for the follow distance
|
||||
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
|
||||
// Set the genesis time 500 blocks ahead of the last
|
||||
// deposit log.
|
||||
|
||||
@@ -210,14 +210,14 @@ func TestFollowBlock_OK(t *testing.T) {
|
||||
|
||||
web3Service = setDefaultMocks(web3Service)
|
||||
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
||||
baseHeight := testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
baseHeight := testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
// process follow_distance blocks
|
||||
for i := 0; i < int(params.BeaconConfig().Eth1FollowDistance); i++ {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
// set current height
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
|
||||
h, err := web3Service.followedBlockHeight(context.Background())
|
||||
require.NoError(t, err)
|
||||
@@ -229,8 +229,8 @@ func TestFollowBlock_OK(t *testing.T) {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
// set current height
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
|
||||
h, err = web3Service.followedBlockHeight(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
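These test changes track a go-ethereum API update: Blockchain().CurrentBlock() now returns a block header, so height and timestamp are read as fields rather than methods. A minimal sketch of the new accessors, using the simulated backend already present in these tests:

```go
head := testAcc.Backend.Blockchain().CurrentBlock() // *types.Header in recent go-ethereum
height := head.Number.Uint64()                      // was CurrentBlock().NumberU64()
blockTime := head.Time                              // was CurrentBlock().Time()
_ = height
_ = blockTime
```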
@@ -508,6 +508,17 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
|
||||
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() ([32]byte, error) {
|
||||
root := f.store.unrealizedJustifiedCheckpoint.Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
return node.payloadHash, nil
|
||||
}
|
||||
|
||||
// ForkChoiceDump returns a full dump of forkchoice.
|
||||
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, error) {
|
||||
jc := &v1.Checkpoint{
|
||||
|
||||
@@ -740,5 +740,22 @@ func TestForkchoice_UpdateJustifiedBalances(t *testing.T) {
|
||||
require.Equal(t, uint64(7), f.numActiveValidators)
|
||||
require.Equal(t, uint64(430)/32, f.store.committeeWeight)
|
||||
require.DeepEqual(t, balances, f.justifiedBalances)
|
||||
|
||||
}
|
||||
|
||||
func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(0, 0)
|
||||
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
f.store.unrealizedJustifiedCheckpoint.Root = [32]byte{'a'}
|
||||
got, err := f.UnrealizedJustifiedPayloadBlockHash()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'A'}, got)
|
||||
|
||||
f.store.unrealizedJustifiedCheckpoint.Root = [32]byte{'b'}
|
||||
_, err = f.UnrealizedJustifiedPayloadBlockHash()
|
||||
require.ErrorIs(t, err, ErrNilNode)
|
||||
}
|
||||
|
||||
@@ -57,6 +57,7 @@ type Getter interface {
|
||||
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
JustifiedPayloadBlockHash() [32]byte
|
||||
UnrealizedJustifiedPayloadBlockHash() ([32]byte, error)
|
||||
NodeCount() int
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
|
||||
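Adding UnrealizedJustifiedPayloadBlockHash to the Getter interface lets callers resolve the payload hash through any forkchoice handle rather than reaching into the store. A minimal hedged example:

```go
// Sketch only: fc is any implementation satisfying the forkchoice Getter interface.
func unrealizedJustifiedPayloadHash(fc forkchoice.Getter) ([32]byte, error) {
	// The doubly-linked-tree implementation returns ErrNilNode when the
	// unrealized justified root is not yet tracked in forkchoice.
	return fc.UnrealizedJustifiedPayloadBlockHash()
}
```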
@@ -61,8 +61,10 @@ go_library(
|
||||
"//runtime/prereqs:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
@@ -66,6 +67,13 @@ func configureBuilderCircuitBreaker(cliCtx *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cliCtx.IsSet(flags.LocalBlockValueBoost.Name) {
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.LocalBlockValueBoost = cliCtx.Uint64(flags.LocalBlockValueBoost.Name)
|
||||
if err := params.SetActive(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -189,3 +197,7 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
|
||||
return params.SetActive(c)
|
||||
}
|
||||
|
||||
func configureFastSSZHashingAlgorithm() {
|
||||
fastssz.EnableVectorizedHTR = true
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"syscall"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/pkg/errors"
|
||||
apigateway "github.com/prysmaticlabs/prysm/v4/api/gateway"
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
@@ -148,6 +149,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
if err := configureExecutionSetting(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configureFastSSZHashingAlgorithm()
|
||||
|
||||
// Initializes any forks here.
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
@@ -252,12 +254,13 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
}
|
||||
|
||||
log.Debugln("Registering RPC Service")
|
||||
if err := beacon.registerRPCService(); err != nil {
|
||||
router := mux.NewRouter()
|
||||
if err := beacon.registerRPCService(router); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering GRPC Gateway Service")
|
||||
if err := beacon.registerGRPCGateway(); err != nil {
|
||||
if err := beacon.registerGRPCGateway(router); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -535,6 +538,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
HostAddress: cliCtx.String(cmd.P2PHost.Name),
|
||||
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
|
||||
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
|
||||
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
|
||||
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
|
||||
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
|
||||
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
|
||||
@@ -733,7 +737,7 @@ func (b *BeaconNode) registerSlasherService() error {
|
||||
return b.services.RegisterService(slasherSrv)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerRPCService() error {
|
||||
func (b *BeaconNode) registerRPCService(router *mux.Router) error {
|
||||
var chainService *blockchain.Service
|
||||
if err := b.services.FetchService(&chainService); err != nil {
|
||||
return err
|
||||
@@ -798,10 +802,10 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
PeerManager: p2pService,
|
||||
MetadataProvider: p2pService,
|
||||
ChainInfoFetcher: chainService,
|
||||
HeadUpdater: chainService,
|
||||
HeadFetcher: chainService,
|
||||
CanonicalFetcher: chainService,
|
||||
ForkFetcher: chainService,
|
||||
ForkchoiceFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
@@ -829,6 +833,7 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
MaxMsgSize: maxMsgSize,
|
||||
ProposerIdsCache: b.proposerIdsCache,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
Router: router,
|
||||
})
|
||||
|
||||
return b.services.RegisterService(rpcService)
|
||||
@@ -857,7 +862,7 @@ func (b *BeaconNode) registerPrometheusService(_ *cli.Context) error {
|
||||
return b.services.RegisterService(service)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerGRPCGateway() error {
|
||||
func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
|
||||
if b.cliCtx.Bool(flags.DisableGRPCGateway.Name) {
|
||||
return nil
|
||||
}
|
||||
@@ -883,6 +888,7 @@ func (b *BeaconNode) registerGRPCGateway() error {
|
||||
}
|
||||
|
||||
opts := []apigateway.Option{
|
||||
apigateway.WithRouter(router),
|
||||
apigateway.WithGatewayAddr(gatewayAddress),
|
||||
apigateway.WithRemoteAddr(selfAddress),
|
||||
apigateway.WithPbHandlers(muxs),
|
||||
|
||||
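Replacing the old SetRouter setter, the gateway now receives its router through a functional option so the same mux.Router instance can be shared by the RPC service and the gRPC gateway (see the node.go hunk above). A sketch of what such an option plausibly looks like, following the package's existing Option pattern; the actual implementation in this changeset may differ:

```go
// WithRouter is a hypothetical reconstruction of the new gateway option: it
// injects a caller-owned *mux.Router instead of letting New() build one.
func WithRouter(r *mux.Router) Option {
	return func(g *Gateway) error {
		g.cfg.router = r
		return nil
	}
}
```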
@@ -24,6 +24,7 @@ go_library(
|
||||
"options.go",
|
||||
"pubsub.go",
|
||||
"pubsub_filter.go",
|
||||
"pubsub_tracer.go",
|
||||
"rpc_topic_mappings.go",
|
||||
"sender.go",
|
||||
"service.go",
|
||||
@@ -84,6 +85,7 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/muxer/mplex:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
type Config struct {
|
||||
NoDiscovery bool
|
||||
EnableUPnP bool
|
||||
StaticPeerID bool
|
||||
StaticPeers []string
|
||||
BootstrapNodeAddr []string
|
||||
Discv5BootStrapAddr []string
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
@@ -63,6 +64,80 @@ var (
|
||||
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
|
||||
Help: "The number of sync committee that were attempted to be broadcast.",
|
||||
})
|
||||
|
||||
// Gossip Tracer Metrics
|
||||
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "p2p_pubsub_topic_active",
|
||||
Help: "The topics that the peer is participating in gossipsub.",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubTopicsGraft = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_graft_total",
|
||||
Help: "The number of graft messages sent for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubTopicsPrune = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_prune_total",
|
||||
Help: "The number of prune messages sent for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubMessageDeliver = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_deliver_total",
|
||||
Help: "The number of messages received for delivery of a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubMessageUndeliverable = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_undeliverable_total",
|
||||
Help: "The number of messages received which weren't able to be delivered of a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubMessageValidate = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_validate_total",
|
||||
Help: "The number of messages received for validation of a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubMessageDuplicate = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_duplicate_total",
|
||||
Help: "The number of duplicate messages sent for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubMessageReject = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_reject_total",
|
||||
Help: "The number of messages rejected of a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubPeerThrottle = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_throttle_total",
|
||||
Help: "The number of times a peer has been throttled for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubRPCRecv = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_recv_total",
|
||||
Help: "The number of messages received via rpc for a particular topic",
|
||||
},
|
||||
[]string{"control_message"})
|
||||
pubsubRPCSubRecv = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_recv_sub_total",
|
||||
Help: "The number of subscription messages received via rpc",
|
||||
})
|
||||
pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_drop_total",
|
||||
Help: "The number of messages dropped via rpc for a particular topic",
|
||||
},
|
||||
[]string{"control_message"})
|
||||
pubsubRPCSubDrop = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_drop_sub_total",
|
||||
Help: "The number of subscription messages dropped via rpc",
|
||||
})
|
||||
pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_sent_total",
|
||||
Help: "The number of messages sent via rpc for a particular topic",
|
||||
},
|
||||
[]string{"control_message"})
|
||||
pubsubRPCSubSent = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_sent_sub_total",
|
||||
Help: "The number of subscription messages sent via rpc",
|
||||
})
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
@@ -84,20 +159,7 @@ func (s *Service) updateMetrics() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the agent data.
|
||||
rawAgent, err := store.Get(pid, "AgentVersion")
|
||||
agent, ok := rawAgent.(string)
|
||||
if err != nil || !ok {
|
||||
agent = "unknown"
|
||||
}
|
||||
foundName := "unknown"
|
||||
for _, knownAgent := range knownAgentVersions {
|
||||
// If the agent string matches one of our known agents, we set
|
||||
// the value to our own, sanitized string.
|
||||
if strings.Contains(strings.ToLower(agent), knownAgent) {
|
||||
foundName = knownAgent
|
||||
}
|
||||
}
|
||||
foundName := agentFromPid(pid, store)
|
||||
numConnectedPeersByClient[foundName] += 1
|
||||
|
||||
// Get peer scoring data.
|
||||
@@ -123,3 +185,21 @@ func average(xs []float64) float64 {
|
||||
}
|
||||
return total / float64(len(xs))
|
||||
}
|
||||
|
||||
func agentFromPid(pid peer.ID, store peerstore.Peerstore) string {
|
||||
// Get the agent data.
|
||||
rawAgent, err := store.Get(pid, "AgentVersion")
|
||||
agent, ok := rawAgent.(string)
|
||||
if err != nil || !ok {
|
||||
return "unknown"
|
||||
}
|
||||
foundName := "unknown"
|
||||
for _, knownAgent := range knownAgentVersions {
|
||||
// If the agent string matches one of our known agents, we set
|
||||
// the value to our own, sanitized string.
|
||||
if strings.Contains(strings.ToLower(agent), knownAgent) {
|
||||
foundName = knownAgent
|
||||
}
|
||||
}
|
||||
return foundName
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/hex"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
gethCrypto "github.com/ethereum/go-ethereum/crypto"
|
||||
@@ -50,6 +51,32 @@ func TestPrivateKeyLoading(t *testing.T) {
|
||||
assert.DeepEqual(t, rawBytes, newRaw, "Private keys do not match")
|
||||
}
|
||||
|
||||
func TestPrivateKeyLoading_StaticPrivateKey(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tempDir := t.TempDir()
|
||||
|
||||
cfg := &Config{
|
||||
StaticPeerID: true,
|
||||
DataDir: tempDir,
|
||||
}
|
||||
pKey, err := privKey(cfg)
|
||||
require.NoError(t, err, "Could not apply option")
|
||||
|
||||
newPkey, err := ecdsaprysm.ConvertToInterfacePrivkey(pKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
retrievedKey, err := privKeyFromFile(path.Join(tempDir, keyPath))
|
||||
require.NoError(t, err)
|
||||
retrievedPKey, err := ecdsaprysm.ConvertToInterfacePrivkey(retrievedKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
rawBytes, err := retrievedPKey.Raw()
|
||||
require.NoError(t, err)
|
||||
newRaw, err := newPkey.Raw()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, rawBytes, newRaw, "Private keys do not match")
|
||||
}
|
||||
|
||||
func TestIPV6Support(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
key, err := gethCrypto.GenerateKey()
|
||||
|
||||
@@ -145,6 +145,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
pubsub.WithPeerScore(peerScoringParams()),
|
||||
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
|
||||
pubsub.WithGossipSubParams(pubsubGossipParam()),
|
||||
pubsub.WithRawTracer(gossipTracer{host: s.host}),
|
||||
}
|
||||
return psOpts
|
||||
}
|
||||
|
||||
103
beacon-chain/p2p/pubsub_tracer.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var _ = pubsub.RawTracer(gossipTracer{})
|
||||
|
||||
// This tracer is used to implement metrics collection for messages received
// and broadcast through gossipsub.
|
||||
type gossipTracer struct {
|
||||
host host.Host
|
||||
}
|
||||
|
||||
// AddPeer .
|
||||
func (g gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// RemovePeer .
|
||||
func (g gossipTracer) RemovePeer(p peer.ID) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// Join .
|
||||
func (g gossipTracer) Join(topic string) {
|
||||
pubsubTopicsActive.WithLabelValues(topic).Set(1)
|
||||
}
|
||||
|
||||
// Leave .
|
||||
func (g gossipTracer) Leave(topic string) {
|
||||
pubsubTopicsActive.WithLabelValues(topic).Set(0)
|
||||
}
|
||||
|
||||
// Graft .
|
||||
func (g gossipTracer) Graft(p peer.ID, topic string) {
|
||||
pubsubTopicsGraft.WithLabelValues(topic).Inc()
|
||||
}
|
||||
|
||||
// Prune .
|
||||
func (g gossipTracer) Prune(p peer.ID, topic string) {
|
||||
pubsubTopicsPrune.WithLabelValues(topic).Inc()
|
||||
}
|
||||
|
||||
// ValidateMessage .
|
||||
func (g gossipTracer) ValidateMessage(msg *pubsub.Message) {
|
||||
pubsubMessageValidate.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// DeliverMessage .
|
||||
func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
|
||||
pubsubMessageDeliver.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// RejectMessage .
|
||||
func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
|
||||
pubsubMessageReject.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// DuplicateMessage .
|
||||
func (g gossipTracer) DuplicateMessage(msg *pubsub.Message) {
|
||||
pubsubMessageDuplicate.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// UndeliverableMessage .
|
||||
func (g gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
|
||||
pubsubMessageUndeliverable.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// ThrottlePeer .
|
||||
func (g gossipTracer) ThrottlePeer(p peer.ID) {
|
||||
agent := agentFromPid(p, g.host.Peerstore())
|
||||
pubsubPeerThrottle.WithLabelValues(agent).Inc()
|
||||
}
|
||||
|
||||
// RecvRPC .
|
||||
func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
|
||||
setMetricFromRPC(pubsubRPCSubRecv, pubsubRPCRecv, rpc)
|
||||
}
|
||||
|
||||
// SendRPC .
|
||||
func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
setMetricFromRPC(pubsubRPCSubSent, pubsubRPCSent, rpc)
|
||||
}
|
||||
|
||||
// DropRPC .
|
||||
func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
setMetricFromRPC(pubsubRPCSubDrop, pubsubRPCDrop, rpc)
|
||||
}
|
||||
|
||||
func setMetricFromRPC(ctr prometheus.Counter, gauge *prometheus.CounterVec, rpc *pubsub.RPC) {
|
||||
ctr.Add(float64(len(rpc.Subscriptions)))
|
||||
if rpc.Control != nil {
|
||||
gauge.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
|
||||
gauge.WithLabelValues("prune").Add(float64(len(rpc.Control.Prune)))
|
||||
gauge.WithLabelValues("ihave").Add(float64(len(rpc.Control.Ihave)))
|
||||
gauge.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
|
||||
}
|
||||
}
|
||||
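For context on how the tracer is hooked up: gossipsub accepts a RawTracer at construction time, and the pubsubOptions hunk above passes gossipTracer in via pubsub.WithRawTracer. A compact in-package sketch of that wiring (the direct NewGossipSub call here is illustrative; Prysm assembles its options elsewhere):

```go
// Once attached, every join/graft/prune/deliver/reject/RPC event on this pubsub
// instance increments the corresponding p2p_pubsub_* metric defined above.
gs, err := pubsub.NewGossipSub(s.ctx, s.host, pubsub.WithRawTracer(gossipTracer{host: s.host}))
if err != nil {
	return err
}
_ = gs
```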
@@ -49,23 +49,43 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
|
||||
defaultKeyPath := path.Join(cfg.DataDir, keyPath)
|
||||
privateKeyPath := cfg.PrivateKey
|
||||
|
||||
// PrivateKey cli flag takes highest precedence.
|
||||
if privateKeyPath != "" {
|
||||
return privKeyFromFile(cfg.PrivateKey)
|
||||
}
|
||||
|
||||
_, err := os.Stat(defaultKeyPath)
|
||||
defaultKeysExist := !os.IsNotExist(err)
|
||||
if err != nil && defaultKeysExist {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if privateKeyPath == "" && !defaultKeysExist {
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
// Default keys have the next highest precedence, if they exist.
|
||||
if defaultKeysExist {
|
||||
return privKeyFromFile(defaultKeyPath)
|
||||
}
|
||||
// There are no keys on the filesystem, so we need to generate one.
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If the StaticPeerID flag is set, save the generated key as the default
|
||||
// key, so that it will be used by default on the next node start.
|
||||
if cfg.StaticPeerID {
|
||||
rawbytes, err := priv.Raw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
|
||||
dst := make([]byte, hex.EncodedLen(len(rawbytes)))
|
||||
hex.Encode(dst, rawbytes)
|
||||
if err := file.WriteFile(defaultKeyPath, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Infof("Wrote network key to file")
|
||||
// Read the key from the defaultKeyPath file just written
|
||||
// for the strongest guarantee that the next start will be the same as this one.
|
||||
return privKeyFromFile(defaultKeyPath)
|
||||
}
|
||||
if defaultKeysExist && privateKeyPath == "" {
|
||||
privateKeyPath = defaultKeyPath
|
||||
}
|
||||
return privKeyFromFile(privateKeyPath)
|
||||
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
|
||||
}
|
||||
|
||||
// Retrieves a p2p networking private key from a file path.
|
||||
|
||||
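The rewritten privKey resolves the node's network key in a fixed order. A condensed restatement of that precedence (illustration only; it adds no behavior beyond the hunk above):

```go
// Precedence, highest first:
//  1. cfg.PrivateKey set: load the key from that file.
//  2. A key file already exists at the default path under cfg.DataDir: reuse it.
//  3. Otherwise generate a fresh secp256k1 key; when cfg.StaticPeerID is set, the
//     key is hex-encoded to the default path and re-read, so the next start uses
//     the exact same identity.
switch {
case cfg.PrivateKey != "":
	return privKeyFromFile(cfg.PrivateKey)
case defaultKeysExist:
	return privKeyFromFile(defaultKeyPath)
default:
	// generate; persist first when cfg.StaticPeerID is set
}
```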
@@ -28,12 +28,13 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/debug:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//beacon-chain/rpc/eth/node:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards:go_default_library",
|
||||
"//beacon-chain/rpc/eth/validator:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
"//beacon-chain/slasher:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
@@ -43,6 +44,7 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_grpc_ecosystem_go_grpc_middleware//:go_default_library",
|
||||
"@com_github_grpc_ecosystem_go_grpc_middleware//recovery:go_default_library",
|
||||
"@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library",
|
||||
@@ -68,6 +70,7 @@ go_test(
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
|
||||
@@ -1185,7 +1185,7 @@ type EventPayloadAttributeV2Json struct {
|
||||
ParentBlockNumber string `json:"parent_block_number"`
|
||||
ParentBlockRoot string `json:"parent_block_root" hex:"true"`
|
||||
ParentBlockHash string `json:"parent_block_hash" hex:"true"`
|
||||
PayloadAttributes *PayloadAttributesV2Json `json:"payload_attributes_v2"`
|
||||
PayloadAttributes *PayloadAttributesV2Json `json:"payload_attributes"`
|
||||
}
|
||||
|
||||
type PayloadAttributesV1Json struct {
|
||||
|
||||
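With the corrected tag, v2 payload-attribute events serialize their attributes under "payload_attributes" (matching the v1 struct) instead of "payload_attributes_v2". An illustrative marshaling check; field values are placeholders, imports (encoding/json, fmt, strings) are elided, and the structs may carry more fields than shown in this hunk:

```go
ev := &EventPayloadAttributeV2Json{
	ParentBlockNumber: "1",
	ParentBlockRoot:   "0x…",
	ParentBlockHash:   "0x…",
	PayloadAttributes: &PayloadAttributesV2Json{},
}
b, err := json.Marshal(ev)
if err != nil {
	panic(err)
}
fmt.Println(strings.Contains(string(b), `"payload_attributes":`)) // true
```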
@@ -34,8 +34,8 @@ go_library(
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -92,7 +92,6 @@ go_test(
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec/mock:go_default_library",
|
||||
@@ -101,12 +100,11 @@ go_test(
|
||||
"//beacon-chain/operations/voluntaryexits/mock:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -25,7 +25,7 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -80,7 +80,7 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -2,17 +2,15 @@ package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
builderTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder/testing"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
executionTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -21,7 +19,6 @@ import (
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
@@ -29,774 +26,284 @@ import (
)

func TestServer_GetBlindedBlock(t *testing.T) {
ctx := context.Background()

t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetPhase0Block().Block.Slot + 1
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
b := util.NewBeaconBlock()
blk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
FinalizedRoots: map[[32]byte]bool{},
}

bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
FinalizationFetcher: &mock.ChainService{},
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
}

root, err := genBlk.Block.HashTreeRoot()
expected, err := migration.V1Alpha1ToV1SignedBlock(b)
require.NoError(t, err)

tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBeaconBlock
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)

v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
require.NoError(t, err)

phase0Block, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
require.Equal(t, true, ok)
if !reflect.DeepEqual(phase0Block.Phase0Block, v1Block.Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_PHASE0, blk.Version)
})
}
resp, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{})
require.NoError(t, err)
phase0Block, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
require.Equal(t, true, ok)
assert.DeepEqual(t, expected.Block, phase0Block.Phase0Block)
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
})

t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetAltairBlock().Block.Slot + 1
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
|
||||
b := util.NewBeaconBlockAltair()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
FinalizationFetcher: &mock.ChainService{},
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
expected, err := migration.V1Alpha1BeaconBlockAltairToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBeaconBlockAltair
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].GetAltairBlock(),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
altairBlock, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(altairBlock.AltairBlock, v2Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, blk.Version)
|
||||
})
|
||||
}
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
altairBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected, altairBlock.AltairBlock)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetBlindedBellatrixBlock().Block.Slot + 1
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
ExecutionPayloadReconstructor: &executionTest.EngineClient{
|
||||
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
|
||||
},
|
||||
FinalizationFetcher: mockChainService,
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
expected, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBlindedBeaconBlockBellatrix
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].GetBlindedBellatrixBlock(),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v2Block, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(tt.want.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(b.BellatrixBlock, v2Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, blk.Version)
|
||||
})
|
||||
}
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
bellatrixBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected, bellatrixBlock.BellatrixBlock)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksCapellaBlinded(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetBlindedCapellaBlock().Block.Slot + 1
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedCapellaBlock())
|
||||
b := util.NewBlindedBeaconBlockCapella()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
ExecutionPayloadReconstructor: &executionTest.EngineClient{
|
||||
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
|
||||
},
|
||||
FinalizationFetcher: mockChainService,
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
expected, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(b.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBlindedBeaconBlockCapella
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].GetBlindedCapellaBlock(),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: tt.blockID,
|
||||
})
|
||||
if tt.wantErr {
|
||||
require.NotEqual(t, err, nil)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
v2Block, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(tt.want.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(b.CapellaBlock, v2Block) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, blk.Version)
|
||||
})
|
||||
}
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
capellaBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected, capellaBlock.CapellaBlock)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})

t.Run("execution optimistic", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

_, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBellatrixBlock())
b := util.NewBlindedBeaconBlockBellatrix()
blk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)

mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
OptimisticRoots: map[[32]byte]bool{r: true},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
OptimisticModeFetcher: mockChainService,
}

blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
})
resp, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{})
require.NoError(t, err)
assert.Equal(t, true, blk.ExecutionOptimistic)
assert.Equal(t, true, resp.ExecutionOptimistic)
})

t.Run("finalized", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

_, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetPhase0Block())
b := util.NewBeaconBlock()
blk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := blk.Block().HashTreeRoot()
require.NoError(t, err)

mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{
bytesutil.ToBytes32(blkContainers[32].BlockRoot): true,
bytesutil.ToBytes32(blkContainers[64].BlockRoot): false,
},
FinalizedRoots: map[[32]byte]bool{root: true},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
}

t.Run("true", func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("32"),
})
require.NoError(t, err)
assert.Equal(t, true, blk.Finalized)
})
t.Run("false", func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("64"),
})
require.NoError(t, err)
assert.Equal(t, false, blk.Finalized)
})
resp, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{BlockId: root[:]})
require.NoError(t, err)
assert.Equal(t, true, resp.Finalized)
})
t.Run("not finalized", func(t *testing.T) {
b := util.NewBeaconBlock()
blk, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := blk.Block().HashTreeRoot()
require.NoError(t, err)

mockChainService := &mock.ChainService{
FinalizedRoots: map[[32]byte]bool{root: false},
}
bs := &Server{
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
}

resp, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{BlockId: root[:]})
require.NoError(t, err)
assert.Equal(t, false, resp.Finalized)
})
}

func TestServer_GetBlindedBlockSSZ(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
FinalizationFetcher: &mock.ChainService{},
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
|
||||
b := util.NewBeaconBlockAltair()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
FinalizationFetcher: &mock.ChainService{},
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksCapellaBlinded(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedCapellaBlock())
|
||||
b := util.NewBlindedBeaconBlockCapella()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
blks, err := beaconDB.BlocksBySlot(ctx, 30)
|
||||
require.Equal(t, true, len(blks) > 0)
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
sszBlock, err := blks[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: []byte("30")})
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})
|
||||
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBellatrixBlock())
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
OptimisticRoots: map[[32]byte]bool{
|
||||
bytesutil.ToBytes32(headBlock.BlockRoot): true,
|
||||
},
|
||||
OptimisticRoots: map[[32]byte]bool{r: true},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: []byte("head"),
|
||||
})
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, blk.ExecutionOptimistic)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetPhase0Block())
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: chainBlk,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
Optimistic: true,
|
||||
FinalizedRoots: map[[32]byte]bool{
|
||||
bytesutil.ToBytes32(blkContainers[32].BlockRoot): true,
|
||||
bytesutil.ToBytes32(blkContainers[64].BlockRoot): false,
|
||||
},
|
||||
FinalizedRoots: map[[32]byte]bool{root: true},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
t.Run("true", func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: []byte("32"),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, blk.Finalized)
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
blk, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: []byte("64"),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, blk.Finalized)
|
||||
})
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: root[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.Finalized)
|
||||
})
|
||||
t.Run("not finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{root: false},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: root[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
}

@@ -882,14 +389,6 @@ func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
})

t.Run("Bellatrix", func(t *testing.T) {
// INFO: This code block can be removed once Bellatrix
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
params.OverrideBeaconConfig(cfg)

beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

@@ -938,16 +437,6 @@ func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
})

t.Run("Capella", func(t *testing.T) {
t.Skip("This test needs Capella fork version configured properly")

// INFO: This code block can be removed once Capella
// fork epoch is set to a value other than math.MaxUint64
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch + 1000
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.CapellaForkVersion)] = cfg.BellatrixForkEpoch + 1000
params.OverrideBeaconConfig(cfg)

beaconDB := dbTest.SetupDB(t)
ctx := context.Background()


@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -37,23 +38,6 @@ var (
errNilBlock = errors.New("nil block")
)

// blockIdParseError represents an error scenario where a block ID could not be parsed.
type blockIdParseError struct {
message string
}

// newBlockIdParseError creates a new error instance.
func newBlockIdParseError(reason error) blockIdParseError {
return blockIdParseError{
message: errors.Wrapf(reason, "could not parse block ID").Error(),
}
}

// Error returns the underlying error message.
func (e *blockIdParseError) Error() string {
return e.message
}

// GetWeakSubjectivity computes the starting epoch of the current weak subjectivity period, and then also
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
// DEPRECATED: GetWeakSubjectivity endpoint will no longer be supported
@@ -101,7 +85,7 @@ func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest)
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockHeader")
defer span.End()

blk, err := bs.blockFromBlockID(ctx, req.BlockId)
blk, err := bs.Blocker.Block(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
@@ -290,7 +274,7 @@ func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*eth
ctx, span := trace.StartSpan(ctx, "beacon.GetBlock")
defer span.End()

blk, err := bs.blockFromBlockID(ctx, req.BlockId)
blk, err := bs.Blocker.Block(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
@@ -314,7 +298,7 @@ func (bs *Server) GetBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZ")
defer span.End()

blk, err := bs.blockFromBlockID(ctx, req.BlockId)
blk, err := bs.Blocker.Block(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
@@ -336,7 +320,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockV2")
defer span.End()

blk, err := bs.blockFromBlockID(ctx, req.BlockId)
blk, err := bs.Blocker.Block(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
@@ -390,7 +374,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZV2")
defer span.End()

blk, err := bs.blockFromBlockID(ctx, req.BlockId)
blk, err := bs.Blocker.Block(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
@@ -532,7 +516,7 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
ctx, span := trace.StartSpan(ctx, "beacon.ListBlockAttestations")
defer span.End()

blk, err := bs.blockFromBlockID(ctx, req.BlockId)
blk, err := bs.Blocker.Block(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
@@ -559,68 +543,8 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
}, nil
}

func (bs *Server) blockFromBlockID(ctx context.Context, blockId []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
var err error
var blk interfaces.ReadOnlySignedBeaconBlock
switch string(blockId) {
case "head":
blk, err = bs.ChainInfoFetcher.HeadBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve head block")
}
case "finalized":
finalized := bs.ChainInfoFetcher.FinalizedCheckpt()
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
blk, err = bs.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return nil, errors.New("could not get finalized block from db")
}
case "genesis":
blk, err = bs.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve blocks for genesis slot")
}
default:
if len(blockId) == 32 {
blk, err = bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(blockId))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve block")
}
} else {
slot, err := strconv.ParseUint(string(blockId), 10, 64)
if err != nil {
e := newBlockIdParseError(err)
return nil, &e
}
blks, err := bs.BeaconDB.BlocksBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, errors.Wrapf(err, "could not retrieve blocks for slot %d", slot)
}
_, roots, err := bs.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, errors.Wrapf(err, "could not retrieve block roots for slot %d", slot)
}
numBlks := len(blks)
if numBlks == 0 {
return nil, nil
}
for i, b := range blks {
canonical, err := bs.ChainInfoFetcher.IsCanonical(ctx, roots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
if canonical {
blk = b
break
}
}
}
}
return blk, nil
}

func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) error {
if invalidBlockIdErr, ok := err.(*blockIdParseError); ok {
if invalidBlockIdErr, ok := err.(*lookup.BlockIdParseError); ok {
return status.Errorf(codes.InvalidArgument, "Invalid block ID: %v", invalidBlockIdErr)
}
if err != nil {

File diff suppressed because it is too large
@@ -186,7 +186,7 @@ func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpbv1.Attes
return nil, status.Errorf(codes.Internal, "Could not insert attester slashing into pool: %v", err)
}
if !features.Get().DisableBroadcastSlashings {
if err := bs.Broadcaster.Broadcast(ctx, req); err != nil {
if err := bs.Broadcaster.Broadcast(ctx, alphaSlashing); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast slashing object: %v", err)
}
}
@@ -242,7 +242,7 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.Propo
return nil, status.Errorf(codes.Internal, "Could not insert proposer slashing into pool: %v", err)
}
if !features.Get().DisableBroadcastSlashings {
if err := bs.Broadcaster.Broadcast(ctx, req); err != nil {
if err := bs.Broadcaster.Broadcast(ctx, alphaSlashing); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast slashing object: %v", err)
}
}

@@ -452,6 +452,9 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
require.Equal(t, 1, len(broadcaster.BroadcastMessages))
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
assert.Equal(t, true, ok)
}

func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
@@ -526,6 +529,9 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
require.Equal(t, 1, len(broadcaster.BroadcastMessages))
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
assert.Equal(t, true, ok)
}

func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
@@ -633,6 +639,9 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1ProposerSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
require.Equal(t, 1, len(broadcaster.BroadcastMessages))
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
assert.Equal(t, true, ok)
}

func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
@@ -695,6 +704,13 @@ func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {

_, err = s.SubmitProposerSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1ProposerSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
require.Equal(t, 1, len(broadcaster.BroadcastMessages))
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
assert.Equal(t, true, ok)
}

func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {

@@ -14,8 +14,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
v1alpha1validator "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)
@@ -34,7 +34,8 @@ type Server struct {
SlashingsPool slashings.PoolManager
VoluntaryExitsPool voluntaryexits.PoolManager
StateGenService stategen.StateManager
StateFetcher statefetcher.Fetcher
Stater lookup.Stater
Blocker lookup.Blocker
HeadFetcher blockchain.HeadFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
V1Alpha1ValidatorServer *v1alpha1validator.Server

@@ -6,7 +6,7 @@ import (
"strconv"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -58,20 +58,20 @@ func (bs *Server) GetStateRoot(ctx context.Context, req *ethpb.StateRequest) (*e
ctx, span := trace.StartSpan(ctx, "beacon.GetStateRoot")
defer span.End()

stateRoot, err := bs.StateFetcher.StateRoot(ctx, req.StateId)
stateRoot, err := bs.Stater.StateRoot(ctx, req.StateId)
if err != nil {
if rootNotFoundErr, ok := err.(*statefetcher.StateRootNotFoundError); ok {
if rootNotFoundErr, ok := err.(*lookup.StateRootNotFoundError); ok {
return nil, status.Errorf(codes.NotFound, "State root not found: %v", rootNotFoundErr)
} else if parseErr, ok := err.(*statefetcher.StateIdParseError); ok {
} else if parseErr, ok := err.(*lookup.StateIdParseError); ok {
return nil, status.Errorf(codes.InvalidArgument, "Invalid state ID: %v", parseErr)
}
return nil, status.Errorf(codes.Internal, "Could not get state root: %v", err)
}
st, err := bs.StateFetcher.State(ctx, req.StateId)
st, err := bs.Stater.State(ctx, req.StateId)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -95,12 +95,12 @@ func (bs *Server) GetStateFork(ctx context.Context, req *ethpb.StateRequest) (*e
ctx, span := trace.StartSpan(ctx, "beacon.GetStateFork")
defer span.End()

st, err := bs.StateFetcher.State(ctx, req.StateId)
st, err := bs.Stater.State(ctx, req.StateId)
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
fork := st.Fork()
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -127,11 +127,11 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe
ctx, span := trace.StartSpan(ctx, "beacon.GetFinalityCheckpoints")
defer span.End()

st, err := bs.StateFetcher.State(ctx, req.StateId)
st, err := bs.Stater.State(ctx, req.StateId)
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -160,7 +160,7 @@ func (bs *Server) GetRandao(ctx context.Context, req *eth2.RandaoRequest) (*eth2
ctx, span := trace.StartSpan(ctx, "beacon.GetRandao")
defer span.End()

st, err := bs.StateFetcher.State(ctx, req.StateId)
st, err := bs.Stater.State(ctx, req.StateId)
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
@@ -186,7 +186,7 @@ func (bs *Server) GetRandao(ctx context.Context, req *eth2.RandaoRequest) (*eth2
return nil, status.Errorf(codes.Internal, "Could not get randao mix at index %d", idx)
}

isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -215,14 +215,14 @@ func (bs *Server) stateFromRequest(ctx context.Context, req *stateRequest) (stat
err,
)
}
st, err := bs.StateFetcher.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
st, err := bs.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
return st, nil
}
var err error
st, err := bs.StateFetcher.State(ctx, req.stateId)
st, err := bs.Stater.State(ctx, req.stateId)
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}

@@ -83,7 +83,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconStateRoot: stateRoot[:],
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
@@ -111,7 +111,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconStateRoot: stateRoot[:],
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
@@ -145,7 +145,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
},
|
||||
}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconStateRoot: stateRoot[:],
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
@@ -179,7 +179,7 @@ func TestGetStateFork(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -209,7 +209,7 @@ func TestGetStateFork(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -242,7 +242,7 @@ func TestGetStateFork(t *testing.T) {
|
||||
},
|
||||
}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -282,7 +282,7 @@ func TestGetFinalityCheckpoints(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -314,7 +314,7 @@ func TestGetFinalityCheckpoints(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -347,7 +347,7 @@ func TestGetFinalityCheckpoints(t *testing.T) {
|
||||
},
|
||||
}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -388,7 +388,7 @@ func TestGetRandao(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
chainService := &chainMock.ChainService{}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -413,7 +413,7 @@ func TestGetRandao(t *testing.T) {
|
||||
assert.DeepEqual(t, mixOld, resp.Data.Randao)
|
||||
})
|
||||
t.Run("head state below `EpochsPerHistoricalVector`", func(t *testing.T) {
|
||||
server.StateFetcher = &testutil.MockFetcher{
|
||||
server.Stater = &testutil.MockStater{
|
||||
BeaconState: headSt,
|
||||
}
|
||||
resp, err := server.GetRandao(ctx, &eth2.RandaoRequest{StateId: []byte("head")})
|
||||
@@ -441,7 +441,7 @@ func TestGetRandao(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -473,7 +473,7 @@ func TestGetRandao(t *testing.T) {
|
||||
},
|
||||
}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
|
||||
@@ -91,7 +91,7 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
|
||||
return nil, status.Errorf(codes.Internal, "Could not extract sync subcommittees: %v", err)
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
|
||||
@@ -167,7 +167,7 @@ func TestListSyncCommittees(t *testing.T) {
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -213,7 +213,7 @@ func TestListSyncCommittees(t *testing.T) {
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -249,7 +249,7 @@ func TestListSyncCommittees(t *testing.T) {
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -309,7 +309,7 @@ func TestListSyncCommitteesFuture(t *testing.T) {
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
StateFetcher: &futureSyncMockFetcher{
|
||||
Stater: &futureSyncMockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
|
||||
@@ -42,7 +42,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetValidator")
|
||||
defer span.End()
|
||||
|
||||
st, err := bs.StateFetcher.State(ctx, req.StateId)
|
||||
st, err := bs.Stater.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
@@ -57,7 +57,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
|
||||
return nil, status.Error(codes.NotFound, "Could not find validator")
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -76,7 +76,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListValidators")
|
||||
defer span.End()
|
||||
|
||||
st, err := bs.StateFetcher.State(ctx, req.StateId)
|
||||
st, err := bs.Stater.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
@@ -86,7 +86,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
|
||||
return nil, handleValContainerErr(err)
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -138,7 +138,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListValidatorBalances")
|
||||
defer span.End()
|
||||
|
||||
st, err := bs.StateFetcher.State(ctx, req.StateId)
|
||||
st, err := bs.Stater.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
@@ -155,7 +155,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
|
||||
}
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -175,7 +175,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListCommittees")
|
||||
defer span.End()
|
||||
|
||||
st, err := bs.StateFetcher.State(ctx, req.StateId)
|
||||
st, err := bs.Stater.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
@@ -220,7 +220,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
|
||||
}
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.Stater, bs.ChainInfoFetcher, bs.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
chainMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
@@ -34,7 +34,7 @@ func TestGetValidator(t *testing.T) {
|
||||
t.Run("Head Get Validator by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -54,7 +54,7 @@ func TestGetValidator(t *testing.T) {
|
||||
t.Run("Head Get Validator by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -75,7 +75,7 @@ func TestGetValidator(t *testing.T) {
|
||||
|
||||
t.Run("Validator ID required", func(t *testing.T) {
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
@@ -98,7 +98,7 @@ func TestGetValidator(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -131,7 +131,7 @@ func TestGetValidator(t *testing.T) {
|
||||
},
|
||||
}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -157,7 +157,7 @@ func TestListValidators(t *testing.T) {
|
||||
t.Run("Head List All Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -179,7 +179,7 @@ func TestListValidators(t *testing.T) {
|
||||
t.Run("Head List Validators by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -204,7 +204,7 @@ func TestListValidators(t *testing.T) {
|
||||
t.Run("Head List Validators by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -233,7 +233,7 @@ func TestListValidators(t *testing.T) {
|
||||
t.Run("Head List Validators by both index and pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -264,7 +264,7 @@ func TestListValidators(t *testing.T) {
|
||||
t.Run("Unknown public key is ignored", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -287,7 +287,7 @@ func TestListValidators(t *testing.T) {
|
||||
t.Run("Unknown index is ignored", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -317,7 +317,7 @@ func TestListValidators(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -349,7 +349,7 @@ func TestListValidators(t *testing.T) {
|
||||
},
|
||||
}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -440,7 +440,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
t.Run("Head List All ACTIVE Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
Stater: &lookup.BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -478,7 +478,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
t.Run("Head List All ACTIVE_ONGOING Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
Stater: &lookup.BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -515,7 +515,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
t.Run("Head List All EXITED Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
Stater: &lookup.BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -551,7 +551,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
t.Run("Head List All PENDING_INITIALIZED and EXITED_UNSLASHED Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
Stater: &lookup.BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -587,7 +587,7 @@ func TestListValidators_Status(t *testing.T) {
|
||||
t.Run("Head List All PENDING and EXITED Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
Stater: &lookup.BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -638,7 +638,7 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
t.Run("Head List Validators Balance by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -663,7 +663,7 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
t.Run("Head List Validators Balance by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -691,7 +691,7 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
t.Run("Head List Validators Balance by both index and pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -726,7 +726,7 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -761,7 +761,7 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
},
|
||||
}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -791,7 +791,7 @@ func TestListCommittees(t *testing.T) {
|
||||
t.Run("Head All Committees", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -814,7 +814,7 @@ func TestListCommittees(t *testing.T) {
|
||||
t.Run("Head All Committees of Epoch 10", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -836,7 +836,7 @@ func TestListCommittees(t *testing.T) {
|
||||
t.Run("Head All Committees of Slot 4", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -864,7 +864,7 @@ func TestListCommittees(t *testing.T) {
|
||||
t.Run("Head All Committees of Index 1", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -892,7 +892,7 @@ func TestListCommittees(t *testing.T) {
|
||||
t.Run("Head All Committees of Slot 2, Index 1", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -928,7 +928,7 @@ func TestListCommittees(t *testing.T) {
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -961,7 +961,7 @@ func TestListCommittees(t *testing.T) {
|
||||
},
|
||||
}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
|
||||
@@ -12,7 +12,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
|
||||
@@ -19,7 +19,7 @@ func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateReque
|
||||
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZ")
|
||||
defer span.End()
|
||||
|
||||
state, err := ds.StateFetcher.State(ctx, req.StateId)
|
||||
state, err := ds.Stater.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
@@ -37,11 +37,11 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
|
||||
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateV2")
|
||||
defer span.End()
|
||||
|
||||
beaconSt, err := ds.StateFetcher.State(ctx, req.StateId)
|
||||
beaconSt, err := ds.Stater.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, ds.OptimisticModeFetcher, ds.StateFetcher, ds.ChainInfoFetcher, ds.BeaconDB)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, ds.OptimisticModeFetcher, ds.Stater, ds.ChainInfoFetcher, ds.BeaconDB)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -114,7 +114,7 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
|
||||
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZV2")
|
||||
defer span.End()
|
||||
|
||||
st, err := ds.StateFetcher.State(ctx, req.StateId)
|
||||
st, err := ds.Stater.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
@@ -166,7 +166,5 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
|
||||
|
||||
// GetForkChoice returns a dump of the fork choice store.
|
||||
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceDump, error) {
|
||||
ds.ForkFetcher.ForkChoicer().RLock()
|
||||
defer ds.ForkFetcher.ForkChoicer().RUnlock()
|
||||
return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
|
||||
return ds.ForkchoiceFetcher.ForkChoiceDump(ctx)
|
||||
}
|
||||
|
||||
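This hunk drops the explicit RLock/RUnlock around ForkFetcher.ForkChoicer() in favour of a single ForkchoiceFetcher.ForkChoiceDump call, which is expected to manage the lock itself. A minimal, self-contained sketch of that pattern follows; the types are illustrative assumptions, not Prysm's real blockchain service.

package main

import (
	"context"
	"fmt"
	"sync"
)

// dump and lockedStore are stand-ins for the fork choice dump and store types.
type dump struct{ FinalizedEpoch uint64 }

type lockedStore struct {
	mu             sync.RWMutex
	finalizedEpoch uint64
}

// ForkChoiceDump takes and releases the read lock internally before reading
// the store, mirroring the locking responsibility moved out of GetForkChoice.
func (s *lockedStore) ForkChoiceDump(_ context.Context) (*dump, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return &dump{FinalizedEpoch: s.finalizedEpoch}, nil
}

func main() {
	s := &lockedStore{finalizedEpoch: 2}
	d, _ := s.ForkChoiceDump(context.Background())
	fmt.Println(d.FinalizedEpoch) // prints 2, like the finalized epoch asserted in the test below
}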
@@ -28,7 +28,7 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
fakeState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
@@ -46,7 +46,7 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
fakeState, _ := util.DeterministicGenesisStateAltair(t, 1)
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
@@ -64,7 +64,7 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
@@ -82,7 +82,7 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
fakeState, _ := util.DeterministicGenesisStateCapella(t, 1)
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
@@ -108,7 +108,7 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
|
||||
fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
@@ -141,7 +141,7 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
},
|
||||
}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
@@ -165,7 +165,7 @@ func TestGetBeaconStateSSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
@@ -186,7 +186,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
@@ -205,7 +205,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
@@ -224,7 +224,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
@@ -243,7 +243,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
@@ -326,7 +326,7 @@ func TestServer_GetForkChoice(t *testing.T) {
|
||||
fRoot := [32]byte{'a'}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
|
||||
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
|
||||
bs := &Server{ForkFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
|
||||
bs := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
|
||||
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(2), res.FinalizedCheckpoint.Epoch, "Did not get wanted finalized epoch")
|
||||
|
||||
@@ -6,7 +6,7 @@ package debug
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
)
|
||||
|
||||
// Server defines a server implementation of the gRPC Beacon Chain service,
|
||||
@@ -14,9 +14,10 @@ import (
|
||||
type Server struct {
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
StateFetcher statefetcher.Fetcher
|
||||
Stater lookup.Stater
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
ForkFetcher blockchain.ForkFetcher
|
||||
ForkchoiceFetcher blockchain.ForkchoiceFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
}
|
||||
|
||||
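Throughout this diff the statefetcher.Fetcher dependency is renamed to lookup.Stater. The interface itself is not shown in the diff, so the sketch below is inferred purely from its call sites here (Stater.State and Stater.StateBySlot); the real definition in beacon-chain/rpc/lookup may carry additional methods.

package lookup

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// Stater, as inferred from its usage in this diff.
type Stater interface {
	// State resolves a Beacon API state ID (a named checkpoint such as
	// "finalized", a decimal slot number, or a 32-byte state root) into a state.
	State(ctx context.Context, stateId []byte) (state.BeaconState, error)
	// StateBySlot returns the state advanced to the given slot.
	StateBySlot(ctx context.Context, slot primitives.Slot) (state.BeaconState, error)
}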
@@ -331,7 +331,7 @@ func (s *Server) streamPayloadAttributes(stream ethpbservice.Events_StreamEvents
|
||||
ParentBlockNumber: headPayload.BlockNumber(),
|
||||
ParentBlockRoot: headRoot,
|
||||
ParentBlockHash: headPayload.BlockHash(),
|
||||
PayloadAttributesV2: &enginev1.PayloadAttributesV2{
|
||||
PayloadAttributes: &enginev1.PayloadAttributesV2{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
PrevRandao: prevRando,
|
||||
SuggestedFeeRecipient: headPayload.FeeRecipient(),
|
||||
|
||||
@@ -494,7 +494,7 @@ func TestStreamEvents_StateEvents(t *testing.T) {
|
||||
ParentBlockNumber: 1,
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
ParentBlockHash: make([]byte, 32),
|
||||
PayloadAttributesV2: &enginev1.PayloadAttributesV2{
|
||||
PayloadAttributes: &enginev1.PayloadAttributesV2{
|
||||
Timestamp: 24,
|
||||
PrevRandao: prevRando,
|
||||
SuggestedFeeRecipient: make([]byte, 20),
|
||||
|
||||
@@ -13,7 +13,7 @@ go_library(
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
|
||||
@@ -3,7 +3,7 @@ package helpers
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -15,10 +15,10 @@ func PrepareStateFetchGRPCError(err error) error {
|
||||
if errors.Is(err, stategen.ErrNoDataForSlot) {
|
||||
return status.Errorf(codes.NotFound, "lacking historical data needed to fulfill request")
|
||||
}
|
||||
if stateNotFoundErr, ok := err.(*statefetcher.StateNotFoundError); ok {
|
||||
if stateNotFoundErr, ok := err.(*lookup.StateNotFoundError); ok {
|
||||
return status.Errorf(codes.NotFound, "State not found: %v", stateNotFoundErr)
|
||||
}
|
||||
if parseErr, ok := err.(*statefetcher.StateIdParseError); ok {
|
||||
if parseErr, ok := err.(*lookup.StateIdParseError); ok {
|
||||
return status.Errorf(codes.InvalidArgument, "Invalid state ID: %v", parseErr)
|
||||
}
|
||||
return status.Errorf(codes.Internal, "Invalid state ID: %v", err)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -9,8 +10,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
@@ -61,7 +63,7 @@ func IsOptimistic(
|
||||
ctx context.Context,
|
||||
stateId []byte,
|
||||
optimisticModeFetcher blockchain.OptimisticModeFetcher,
|
||||
stateFetcher statefetcher.Fetcher,
|
||||
stateFetcher lookup.Stater,
|
||||
chainInfo blockchain.ChainInfoFetcher,
|
||||
database db.ReadOnlyDatabase,
|
||||
) (bool, error) {
|
||||
@@ -76,12 +78,20 @@ func IsOptimistic(
|
||||
if fcp == nil {
|
||||
return true, errors.New("received nil finalized checkpoint")
|
||||
}
|
||||
// Special genesis case in the event our checkpoint root is a zerohash.
|
||||
if bytes.Equal(fcp.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return false, nil
|
||||
}
|
||||
return optimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(fcp.Root))
|
||||
case "justified":
|
||||
jcp := chainInfo.CurrentJustifiedCheckpt()
|
||||
if jcp == nil {
|
||||
return true, errors.New("received nil justified checkpoint")
|
||||
}
|
||||
// Special genesis case in the event our checkpoint root is a zerohash.
|
||||
if bytes.Equal(jcp.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return false, nil
|
||||
}
|
||||
return optimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(jcp.Root))
|
||||
default:
|
||||
if len(stateId) == 32 {
|
||||
@@ -97,7 +107,7 @@ func IsOptimistic(
|
||||
slotNumber, parseErr := strconv.ParseUint(stateIdString, 10, 64)
|
||||
if parseErr != nil {
|
||||
// ID format does not match any valid options.
|
||||
e := statefetcher.NewStateIdParseError(parseErr)
|
||||
e := lookup.NewStateIdParseError(parseErr)
|
||||
return true, &e
|
||||
}
|
||||
fcp := chainInfo.FinalizedCheckpt()
|
||||
@@ -146,7 +156,7 @@ func isStateRootOptimistic(
|
||||
ctx context.Context,
|
||||
stateId []byte,
|
||||
optimisticModeFetcher blockchain.OptimisticModeFetcher,
|
||||
stateFetcher statefetcher.Fetcher,
|
||||
stateFetcher lookup.Stater,
|
||||
chainInfo blockchain.ChainInfoFetcher,
|
||||
database db.ReadOnlyDatabase,
|
||||
) (bool, error) {
|
||||
|
||||
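After the nil and zerohash genesis checks above, the helper falls through to classifying the raw state ID: a 32-byte value is treated as a state root and anything else must parse as a decimal slot, otherwise a lookup.StateIdParseError is returned. A self-contained sketch of just that classification step (the hunks above only show the "justified" case and the default branch; other named IDs the real helper accepts, and the forkchoice and database lookups that follow, are omitted):

package main

import (
	"fmt"
	"strconv"
)

// classify mirrors the branching visible in IsOptimistic above: named
// checkpoints first, then a 32-byte state root, then a decimal slot number.
func classify(stateId []byte) string {
	switch string(stateId) {
	case "finalized", "justified":
		return "named checkpoint: " + string(stateId)
	default:
		if len(stateId) == 32 {
			return "32-byte state root"
		}
		if _, err := strconv.ParseUint(string(stateId), 10, 64); err != nil {
			return "invalid ID (becomes a lookup.StateIdParseError)"
		}
		return "slot number"
	}
}

func main() {
	fmt.Println(classify([]byte("finalized"))) // named checkpoint: finalized
	fmt.Println(classify(make([]byte, 32)))    // 32-byte state root
	fmt.Println(classify([]byte("64")))        // slot number
	fmt.Println(classify([]byte("not-a-slot"))) // invalid ID (becomes a lookup.StateIdParseError)
}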
@@ -97,7 +97,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{Optimistic: true, FinalizedCheckPoint: ð.Checkpoint{}, OptimisticRoots: map[[32]byte]bool{[32]byte{}: true}}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, []byte("finalized"), cs, mf, cs, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
@@ -106,7 +106,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{Optimistic: true, FinalizedCheckPoint: ð.Checkpoint{}}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, []byte("finalized"), cs, mf, cs, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, o)
|
||||
@@ -117,7 +117,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{Optimistic: true, CurrentJustifiedCheckPoint: ð.Checkpoint{}, OptimisticRoots: map[[32]byte]bool{[32]byte{}: true}}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, []byte("justified"), cs, mf, cs, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
@@ -126,7 +126,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{Optimistic: true, CurrentJustifiedCheckPoint: ð.Checkpoint{}}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, []byte("justified"), cs, mf, cs, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, o)
|
||||
@@ -137,7 +137,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{Optimistic: true}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
@@ -146,7 +146,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{Optimistic: false}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, o)
|
||||
@@ -165,7 +165,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
|
||||
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
|
||||
mf := &testutil.MockStater{BeaconState: fetcherSt}
|
||||
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, db)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
@@ -182,7 +182,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
cs := &chainmock.ChainService{State: chainSt}
|
||||
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
|
||||
mf := &testutil.MockStater{BeaconState: fetcherSt}
|
||||
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, db)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, o)
|
||||
@@ -198,7 +198,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
cs := &chainmock.ChainService{Optimistic: false, State: chainSt, CanonicalRoots: map[[32]byte]bool{}}
|
||||
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
|
||||
mf := &testutil.MockStater{BeaconState: fetcherSt}
|
||||
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), nil, mf, cs, db)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
@@ -248,7 +248,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch*2))
|
||||
cs := &chainmock.ChainService{Optimistic: true, State: chainSt, FinalizedCheckPoint: ð.Checkpoint{Epoch: 0}}
|
||||
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
|
||||
mf := &testutil.MockStater{BeaconState: fetcherSt}
|
||||
o, err := IsOptimistic(ctx, []byte(strconv.Itoa(fieldparams.SlotsPerEpoch*2)), cs, mf, cs, db)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
@@ -268,7 +268,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, root))
|
||||
cs := &chainmock.ChainService{Root: headRoot[:], Optimistic: true, ForkChoiceStore: fcs, OptimisticRoots: map[[32]byte]bool{r: true}, FinalizedCheckPoint: finalizedCheckpt}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, []byte(strconv.Itoa(fieldparams.SlotsPerEpoch*2)), cs, mf, cs, db)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
@@ -288,7 +288,7 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, root))
|
||||
cs := &chainmock.ChainService{Root: headRoot[:], Optimistic: true, ForkChoiceStore: fcs, OptimisticRoots: map[[32]byte]bool{r: false}, FinalizedCheckPoint: finalizedCheckpt}
|
||||
mf := &testutil.MockFetcher{BeaconState: st}
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
o, err := IsOptimistic(ctx, []byte(strconv.Itoa(fieldparams.SlotsPerEpoch*2)), cs, mf, cs, db)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, o)
|
||||
|
||||
beacon-chain/rpc/eth/rewards/BUILD.bazel (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/rewards",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["handlers_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state/stategen/mock:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/rpc/eth/rewards/handlers.go (new file, 189 lines)
@@ -0,0 +1,189 @@
|
||||
package rewards
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
coreblocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
|
||||
// BlockRewards is an HTTP handler for Beacon API getBlockRewards.
|
||||
func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
blockId := segments[len(segments)-1]
|
||||
|
||||
blk, err := s.Blocker.Block(r.Context(), []byte(blockId))
|
||||
if errJson := handleGetBlockError(blk, err); errJson != nil {
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
if blk.Version() == version.Phase0 {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "block rewards are not supported for Phase 0 blocks",
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
// We want to run several block processing functions that update the proposer's balance.
|
||||
// This will allow us to calculate proposer rewards for each operation (atts, slashings etc).
|
||||
// To do this, we replay the state up to the block's slot, but before processing the block.
|
||||
st, err := s.ReplayerBuilder.ReplayerForSlot(blk.Block().Slot()-1).ReplayToSlot(r.Context(), blk.Block().Slot())
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get state").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
proposerIndex := blk.Block().ProposerIndex()
|
||||
initBalance, err := st.BalanceAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
st, err = altair.ProcessAttestationsNoVerifySignature(r.Context(), st, blk)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get attestation rewards").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
attBalance, err := st.BalanceAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
st, err = coreblocks.ProcessAttesterSlashings(r.Context(), st, blk.Block().Body().AttesterSlashings(), validators.SlashValidator)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get attester slashing rewards").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
attSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
st, err = coreblocks.ProcessProposerSlashings(r.Context(), st, blk.Block().Body().ProposerSlashings(), validators.SlashValidator)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get proposer slashing rewards").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
proposerSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get proposer's balance").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
sa, err := blk.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get sync aggregate").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
var syncCommitteeReward uint64
|
||||
_, syncCommitteeReward, err = altair.ProcessSyncAggregate(r.Context(), st, sa)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get sync aggregate rewards").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get optimistic mode info").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get block root").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
response := &BlockRewardsResponse{
|
||||
Data: &BlockRewards{
|
||||
ProposerIndex: strconv.FormatUint(uint64(proposerIndex), 10),
|
||||
Total: strconv.FormatUint(proposerSlashingsBalance-initBalance+syncCommitteeReward, 10),
|
||||
Attestations: strconv.FormatUint(attBalance-initBalance, 10),
|
||||
SyncAggregate: strconv.FormatUint(syncCommitteeReward, 10),
|
||||
ProposerSlashings: strconv.FormatUint(proposerSlashingsBalance-attSlashingsBalance, 10),
|
||||
AttesterSlashings: strconv.FormatUint(attSlashingsBalance-attBalance, 10),
|
||||
},
|
||||
ExecutionOptimistic: optimistic,
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
|
||||
}
|
||||
network.WriteJson(w, response)
|
||||
}
|
||||
|
||||
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *network.DefaultErrorJson {
|
||||
if errors.Is(err, lookup.BlockIdParseError{}) {
|
||||
return &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "invalid block ID").Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not get block from block ID").Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
return &network.DefaultErrorJson{
|
||||
Message: errors.Wrapf(err, "could not find requested block").Error(),
|
||||
Code: http.StatusNotFound,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
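The handler above never computes a reward directly: it snapshots the proposer's balance before processing and after each operation, so every component is a balance delta, while the sync aggregate reward is returned by ProcessSyncAggregate itself. Using the figures asserted in the test that follows, the components add up exactly to the reported total; a tiny self-contained check of that bookkeeping:

package main

import "fmt"

func main() {
	// Per-component proposer rewards as asserted in handlers_test.go below.
	attestations := uint64(89442)     // attBalance - initBalance
	attSlashings := uint64(62500000)  // attSlashingsBalance - attBalance
	propSlashings := uint64(62500000) // proposerSlashingsBalance - attSlashingsBalance
	syncAggregate := uint64(48)       // syncCommitteeReward from ProcessSyncAggregate

	// Total in the handler is proposerSlashingsBalance - initBalance + syncCommitteeReward,
	// which telescopes to the sum of the individual components.
	total := attestations + attSlashings + propSlashings + syncAggregate
	fmt.Println(total) // 125089490, matching resp.Data.Total
}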
beacon-chain/rpc/eth/rewards/handlers_test.go (new file, 198 lines)
@@ -0,0 +1,198 @@
|
||||
package rewards
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
mockstategen "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen/mock"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls/blst"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
func TestBlockRewards(t *testing.T) {
|
||||
valCount := 64
|
||||
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
require.NoError(t, err)
|
||||
validators := make([]*eth.Validator, 0, valCount)
|
||||
balances := make([]uint64, 0, valCount)
|
||||
secretKeys := make([]bls.SecretKey, 0, valCount)
|
||||
for i := 0; i < valCount; i++ {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
secretKeys = append(secretKeys, blsKey)
|
||||
validators = append(validators, ð.Validator{
|
||||
PublicKey: blsKey.PublicKey().Marshal(),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
})
|
||||
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
require.NoError(t, st.SetCurrentParticipationBits(make([]byte, valCount)))
|
||||
syncCommittee, err := altair.NextSyncCommittee(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetCurrentSyncCommittee(syncCommittee))
|
||||
slot0bRoot := bytesutil.PadTo([]byte("slot0root"), 32)
|
||||
bRoots := make([][]byte, fieldparams.BlockRootsLength)
|
||||
bRoots[0] = slot0bRoot
|
||||
require.NoError(t, st.SetBlockRoots(bRoots))
|
||||
|
||||
b := util.HydrateSignedBeaconBlockAltair(util.NewBeaconBlockAltair())
|
||||
b.Block.Slot = 2
|
||||
// we have to set the proposer index to the value that will be randomly chosen (fortunately it's deterministic)
|
||||
b.Block.ProposerIndex = 12
|
||||
b.Block.Body.Attestations = []*eth.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0b00000111},
|
||||
Data: util.HydrateAttestationData(ð.AttestationData{}),
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0b00000111},
|
||||
Data: util.HydrateAttestationData(ð.AttestationData{}),
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
}
|
||||
attData1 := util.HydrateAttestationData(ð.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32)})
|
||||
attData2 := util.HydrateAttestationData(ð.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32)})
|
||||
domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
sigRoot1, err := signing.ComputeSigningRoot(attData1, domain)
|
||||
require.NoError(t, err)
|
||||
sigRoot2, err := signing.ComputeSigningRoot(attData2, domain)
|
||||
require.NoError(t, err)
|
||||
b.Block.Body.AttesterSlashings = []*eth.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: attData1,
|
||||
Signature: secretKeys[0].Sign(sigRoot1[:]).Marshal(),
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0},
|
||||
Data: attData2,
|
||||
Signature: secretKeys[0].Sign(sigRoot2[:]).Marshal(),
|
||||
},
|
||||
},
|
||||
}
|
||||
header1 := ð.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: bytesutil.PadTo([]byte("root1"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("root1"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("root1"), 32),
|
||||
}
|
||||
header2 := ð.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: bytesutil.PadTo([]byte("root2"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("root2"), 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte("root2"), 32),
|
||||
}
|
||||
domain, err = signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
sigRoot1, err = signing.ComputeSigningRoot(header1, domain)
|
||||
require.NoError(t, err)
|
||||
sigRoot2, err = signing.ComputeSigningRoot(header2, domain)
|
||||
require.NoError(t, err)
|
||||
b.Block.Body.ProposerSlashings = []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: header1,
|
||||
Signature: secretKeys[1].Sign(sigRoot1[:]).Marshal(),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: header2,
|
||||
Signature: secretKeys[1].Sign(sigRoot2[:]).Marshal(),
|
||||
},
|
||||
},
|
||||
}
|
||||
scBits := bitfield.NewBitvector512()
|
||||
scBits.SetBitAt(10, true)
|
||||
scBits.SetBitAt(100, true)
|
||||
domain, err = signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainSyncCommittee, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
sszBytes := primitives.SSZBytes(slot0bRoot)
|
||||
r, err := signing.ComputeSigningRoot(&sszBytes, domain)
|
||||
require.NoError(t, err)
|
||||
// Bits set in the sync committee bits determine which validators will be treated as participating in the sync committee.
|
||||
// These validators have to sign the message.
|
||||
sig1, err := blst.SignatureFromBytes(secretKeys[47].Sign(r[:]).Marshal())
|
||||
require.NoError(t, err)
|
||||
sig2, err := blst.SignatureFromBytes(secretKeys[19].Sign(r[:]).Marshal())
|
||||
require.NoError(t, err)
|
||||
aggSig := bls.AggregateSignatures([]bls.Signature{sig1, sig2}).Marshal()
|
||||
b.Block.Body.SyncAggregate = ð.SyncAggregate{SyncCommitteeBits: scBits, SyncCommitteeSignature: aggSig}
|
||||
|
||||
sbb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
phase0block, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
mockChainService := &mock.ChainService{Optimistic: true}
|
||||
s := &Server{
|
||||
Blocker: &testutil.MockBlocker{SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
|
||||
0: phase0block,
|
||||
2: sbb,
|
||||
}},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(st)),
|
||||
}
|
||||
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/2"
|
||||
request := httptest.NewRequest("GET", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.BlockRewards(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &BlockRewardsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, "12", resp.Data.ProposerIndex)
|
||||
assert.Equal(t, "125089490", resp.Data.Total)
|
||||
assert.Equal(t, "89442", resp.Data.Attestations)
|
||||
assert.Equal(t, "48", resp.Data.SyncAggregate)
|
||||
assert.Equal(t, "62500000", resp.Data.AttesterSlashings)
|
||||
assert.Equal(t, "62500000", resp.Data.ProposerSlashings)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("phase 0", func(t *testing.T) {
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/0"
|
||||
request := httptest.NewRequest("GET", url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.BlockRewards(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &network.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, "block rewards are not supported for Phase 0 blocks", e.Message)
|
||||
})
|
||||
}
|
||||
beacon-chain/rpc/eth/rewards/server.go (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
package rewards
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
Blocker lookup.Blocker
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
ReplayerBuilder stategen.ReplayerBuilder
|
||||
}
|
||||
beacon-chain/rpc/eth/rewards/structs.go (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
package rewards
|
||||
|
||||
type BlockRewardsResponse struct {
|
||||
Data *BlockRewards `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlockRewards struct {
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
Total string `json:"total"`
|
||||
Attestations string `json:"attestations"`
|
||||
SyncAggregate string `json:"sync_aggregate"`
|
||||
ProposerSlashings string `json:"proposer_slashings"`
|
||||
AttesterSlashings string `json:"attester_slashings"`
|
||||
}
|
||||
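All numeric fields are serialized as decimal strings, matching the Beacon API convention. The sketch below re-declares the two structs locally so it compiles on its own and prints the JSON body the "ok" test case above effectively receives:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the response structs above, so this sketch is self-contained.
type BlockRewards struct {
	ProposerIndex     string `json:"proposer_index"`
	Total             string `json:"total"`
	Attestations      string `json:"attestations"`
	SyncAggregate     string `json:"sync_aggregate"`
	ProposerSlashings string `json:"proposer_slashings"`
	AttesterSlashings string `json:"attester_slashings"`
}

type BlockRewardsResponse struct {
	Data                *BlockRewards `json:"data"`
	ExecutionOptimistic bool          `json:"execution_optimistic"`
	Finalized           bool          `json:"finalized"`
}

func main() {
	// Values mirror the "ok" case in handlers_test.go above.
	resp := BlockRewardsResponse{
		Data: &BlockRewards{
			ProposerIndex:     "12",
			Total:             "125089490",
			Attestations:      "89442",
			SyncAggregate:     "48",
			ProposerSlashings: "62500000",
			AttesterSlashings: "62500000",
		},
		ExecutionOptimistic: true,
	}
	out, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
}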
@@ -19,8 +19,8 @@ go_library(
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
v1alpha1validator "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
)
|
||||
|
||||
@@ -16,13 +16,12 @@ import (
|
||||
// providing RPC endpoints intended for validator clients.
|
||||
type Server struct {
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
HeadUpdater blockchain.HeadUpdater
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker sync.Checker
|
||||
AttestationsPool attestations.Pool
|
||||
PeerManager p2p.PeerManager
|
||||
Broadcaster p2p.Broadcaster
|
||||
StateFetcher statefetcher.Fetcher
|
||||
Stater lookup.Stater
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
V1Alpha1Server *v1alpha1validator.Server
|
||||
|
||||
@@ -70,7 +70,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
|
||||
return nil, status.Errorf(codes.Internal, "Could not get start slot from epoch %d: %v", req.Epoch, err)
|
||||
}
|
||||
|
||||
s, err := vs.StateFetcher.StateBySlot(ctx, startSlot)
|
||||
s, err := vs.Stater.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
|
||||
}
|
||||
@@ -159,7 +159,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get start slot from epoch %d: %v", req.Epoch, err)
|
||||
}
|
||||
s, err := vs.StateFetcher.StateBySlot(ctx, startSlot)
|
||||
s, err := vs.Stater.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
|
||||
}
|
||||
@@ -243,7 +243,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get sync committee slot: %v", err)
|
||||
}
|
||||
st, err := vs.StateFetcher.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
|
||||
st, err := vs.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get sync committee state: %v", err)
|
||||
}
|
||||
@@ -282,7 +282,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
|
||||
ctx,
|
||||
[]byte(strconv.FormatUint(uint64(slot), 10)),
|
||||
vs.OptimisticModeFetcher,
|
||||
vs.StateFetcher,
|
||||
vs.Stater,
|
||||
vs.ChainInfoFetcher,
|
||||
vs.BeaconDB,
|
||||
)
|
||||
@@ -1081,7 +1081,7 @@ func (vs *Server) GetLiveness(ctx context.Context, req *ethpbv2.GetLivenessReque
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "Could not get requested epoch's end slot")
|
||||
}
|
||||
st, err = vs.StateFetcher.StateBySlot(ctx, epochEnd)
|
||||
st, err = vs.Stater.StateBySlot(ctx, epochEnd)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "Could not get slot for requested epoch")
|
||||
}
|
||||
|
||||
@@ -93,7 +93,7 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
0: bs,
|
||||
params.BeaconConfig().SlotsPerEpoch: nextEpochState,
|
||||
@@ -198,7 +198,7 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot, Optimistic: true,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -258,7 +258,7 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
HeadFetcher: chain,
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
@@ -297,7 +297,7 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
HeadFetcher: chain,
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
@@ -337,7 +337,7 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{StatesBySlot: map[primitives.Slot]state.BeaconState{params.BeaconConfig().SlotsPerEpoch: bs}},
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{params.BeaconConfig().SlotsPerEpoch: bs}},
|
||||
HeadFetcher: chain,
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
@@ -377,7 +377,7 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
HeadFetcher: chain,
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
@@ -414,7 +414,7 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot, Optimistic: true,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
HeadFetcher: chain,
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
@@ -469,7 +469,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{BeaconState: st},
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -611,7 +611,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
|
||||
Stater: &testutil.MockStater{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -665,7 +665,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
State: state,
|
||||
}
|
||||
vs := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{BeaconState: st},
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -723,8 +723,8 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
@@ -835,8 +835,8 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
@@ -1033,10 +1033,11 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
@@ -1278,8 +1279,9 @@ func TestProduceBlockV2(t *testing.T) {
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
ChainStartFetcher: mockExecutionChain,
|
||||
Eth1InfoFetcher: mockExecutionChain,
|
||||
Eth1BlockFetcher: mockExecutionChain,
|
||||
@@ -1413,19 +1415,21 @@ func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
|
||||
mockChainService := &mockChain.ChainService{State: bs, Root: parentRoot[:], ForkChoiceStore: doublylinkedtree.New()}
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, 1)
|
||||
@@ -1579,8 +1583,9 @@ func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -1785,8 +1790,8 @@ func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -2029,8 +2034,9 @@ func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -2260,8 +2266,9 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -2366,8 +2373,9 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -2557,12 +2565,12 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
BeaconDB: db,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{GenesisState: beaconState},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -2780,12 +2788,12 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
ExecutionEngineCaller: &mockExecution.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &enginev1.ExecutionPayloadCapella{BlockNumber: 1, Withdrawals: wds}, BlockValue: big.NewInt(0)},
|
||||
BeaconDB: db,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -2930,8 +2938,9 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -3093,8 +3102,9 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
|
||||
HeadFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mockChain.ChainService{},
|
||||
HeadUpdater: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -3299,8 +3309,8 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -3537,8 +3547,9 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
|
||||
OptimisticModeFetcher: &mockChain.ChainService{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: mockChainService,
|
||||
HeadUpdater: mockChainService,
|
||||
ForkFetcher: mockChainService,
|
||||
ForkchoiceFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
@@ -5015,7 +5026,7 @@ func TestGetLiveness(t *testing.T) {
|
||||
|
||||
server := &Server{
|
||||
HeadFetcher: &mockChain.ChainService{State: headSt},
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
Stater: &testutil.MockStater{
|
||||
// We configure states for last slots of an epoch
|
||||
StatesBySlot: map[primitives.Slot]state.BeaconState{
|
||||
params.BeaconConfig().SlotsPerEpoch - 1: oldSt,
|
||||
|
||||
@@ -2,9 +2,12 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["fetcher.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
srcs = [
|
||||
"blocker.go",
|
||||
"stater.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -12,6 +15,7 @@ go_library(
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
@@ -22,11 +26,15 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["fetcher_test.go"],
|
||||
srcs = [
|
||||
"blocker_test.go",
|
||||
"stater_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/state/stategen/mock:go_default_library",
|
||||
108
beacon-chain/rpc/lookup/blocker.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
)
|
||||
|
||||
// BlockIdParseError represents an error scenario where a block ID could not be parsed.
|
||||
type BlockIdParseError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
// NewBlockIdParseError creates a new error instance.
|
||||
func NewBlockIdParseError(reason error) BlockIdParseError {
|
||||
return BlockIdParseError{
|
||||
message: errors.Wrapf(reason, "could not parse block ID").Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns the underlying error message.
|
||||
func (e BlockIdParseError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// Blocker is responsible for retrieving blocks.
|
||||
type Blocker interface {
|
||||
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
}
|
||||
|
||||
// BeaconDbBlocker is an implementation of Blocker. It retrieves blocks from the beacon chain database.
|
||||
type BeaconDbBlocker struct {
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
}
|
||||
|
||||
// Block returns the beacon block for a given identifier. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
// - "finalized"
// - "justified"
// - <slot>
// - <hex encoded block root with '0x' prefix>
func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
var err error
|
||||
var blk interfaces.ReadOnlySignedBeaconBlock
|
||||
switch string(id) {
|
||||
case "head":
|
||||
blk, err = p.ChainInfoFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve head block")
|
||||
}
|
||||
case "finalized":
|
||||
finalized := p.ChainInfoFetcher.FinalizedCheckpt()
|
||||
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
|
||||
blk, err = p.BeaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
return nil, errors.New("could not get finalized block from db")
|
||||
}
|
||||
case "genesis":
|
||||
blk, err = p.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve genesis block")
|
||||
}
|
||||
default:
|
||||
if len(id) == 32 {
|
||||
blk, err = p.BeaconDB.Block(ctx, bytesutil.ToBytes32(id))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve block")
|
||||
}
|
||||
} else {
|
||||
slot, err := strconv.ParseUint(string(id), 10, 64)
|
||||
if err != nil {
|
||||
e := NewBlockIdParseError(err)
|
||||
return nil, &e
|
||||
}
|
||||
blks, err := p.BeaconDB.BlocksBySlot(ctx, primitives.Slot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not retrieve blocks for slot %d", slot)
|
||||
}
|
||||
_, roots, err := p.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not retrieve block roots for slot %d", slot)
|
||||
}
|
||||
numBlks := len(blks)
|
||||
if numBlks == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
for i, b := range blks {
|
||||
canonical, err := p.ChainInfoFetcher.IsCanonical(ctx, roots[i])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not determine if block root is canonical")
|
||||
}
|
||||
if canonical {
|
||||
blk = b
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return blk, nil
|
||||
}
|
||||
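A minimal usage sketch of the Blocker interface added above, assuming a BeaconDbBlocker is already wired with a database and a chain info fetcher. The helper name and printed output are illustrative only; the identifier formats come from the doc comment on Block.

package example

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
)

// resolveBlocks is a hypothetical caller that exercises the identifier formats
// accepted by Blocker.Block: named checkpoints, a decimal slot, or a 32-byte root.
func resolveBlocks(ctx context.Context, b lookup.Blocker, root [32]byte) error {
	ids := [][]byte{
		[]byte("head"),      // canonical head in the node's view
		[]byte("finalized"), // latest finalized block
		[]byte("genesis"),   // genesis block
		[]byte("12345"),     // canonical block at slot 12345 (nil if none is canonical)
		root[:],             // lookup by block root
	}
	for _, id := range ids {
		blk, err := b.Block(ctx, id)
		if err != nil {
			return err
		}
		if blk == nil {
			fmt.Printf("no block found for id %q\n", id)
			continue
		}
		fmt.Printf("id %q -> slot %d\n", id, blk.Block().Slot())
	}
	return nil
}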
144
beacon-chain/rpc/lookup/blocker_test.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
dbtesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
)
|
||||
|
||||
func TestGetBlock(t *testing.T) {
|
||||
beaconDB := dbtesting.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
nextSlot := headBlock.GetPhase0Block().Block.Slot + 1
|
||||
|
||||
b2 := util.NewBeaconBlock()
|
||||
b2.Block.Slot = 30
|
||||
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b2)
|
||||
b3 := util.NewBeaconBlock()
|
||||
b3.Block.Slot = 30
|
||||
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b3)
|
||||
b4 := util.NewBeaconBlock()
|
||||
b4.Block.Slot = nextSlot
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
fetcher := &BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mock.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
},
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: root[:],
|
||||
want: genBlk,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
blockID: []byte("105"),
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := fetcher.Block(ctx, tt.blockID)
|
||||
if tt.wantErr {
|
||||
assert.NotEqual(t, err, nil, "no error has been returned")
|
||||
return
|
||||
}
|
||||
if tt.want == nil {
|
||||
assert.Equal(t, nil, result)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
pbBlock, err := result.PbPhase0Block()
|
||||
require.NoError(t, err)
|
||||
if !reflect.DeepEqual(pbBlock, tt.want) {
|
||||
t.Error("Expected blocks to equal")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package statefetcher
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -71,15 +71,15 @@ func (e *StateRootNotFoundError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// Fetcher is responsible for retrieving info related with the beacon chain.
type Fetcher interface {
State(ctx context.Context, stateId []byte) (state.BeaconState, error)
StateRoot(ctx context.Context, stateId []byte) ([]byte, error)
// Stater is responsible for retrieving states.
type Stater interface {
State(ctx context.Context, id []byte) (state.BeaconState, error)
StateRoot(ctx context.Context, id []byte) ([]byte, error)
StateBySlot(ctx context.Context, slot primitives.Slot) (state.BeaconState, error)
}

// StateProvider is a real implementation of Fetcher.
type StateProvider struct {
// BeaconDbStater is an implementation of Stater. It retrieves states from the beacon chain database.
type BeaconDbStater struct {
BeaconDB db.ReadOnlyDatabase
ChainInfoFetcher blockchain.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
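A short sketch of how the renamed Stater interface might be consumed, assuming a wired BeaconDbStater. The state IDs mirror the Beacon API ("head", "genesis", "finalized", "justified", a slot, or a hex state root); the helper below is illustrative, not part of this change.

package example

import (
	"context"
	"fmt"
	"strconv"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// printStateSlots is a hypothetical helper exercising the Stater interface.
func printStateSlots(ctx context.Context, s lookup.Stater, slot primitives.Slot) error {
	// Named identifier, as used by the duties handlers above.
	head, err := s.State(ctx, []byte("head"))
	if err != nil {
		return err
	}
	fmt.Println("head state slot:", head.Slot())

	// Slot identifier encoded as a decimal string, matching the handlers above.
	st, err := s.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
	if err != nil {
		return err
	}
	fmt.Println("state by slot id:", st.Slot())

	// Replay directly to a target slot.
	replayed, err := s.StateBySlot(ctx, slot)
	if err != nil {
		return err
	}
	fmt.Println("replayed state slot:", replayed.Slot())
	return nil
}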
@@ -94,7 +94,7 @@ type StateProvider struct {
|
||||
// - "justified"
|
||||
// - <slot>
|
||||
// - <hex encoded state root with '0x' prefix>
|
||||
func (p *StateProvider) State(ctx context.Context, stateId []byte) (state.BeaconState, error) {
|
||||
func (p *BeaconDbStater) State(ctx context.Context, stateId []byte) (state.BeaconState, error) {
|
||||
var (
|
||||
s state.BeaconState
|
||||
err error
|
||||
@@ -164,7 +164,7 @@ func (p *StateProvider) State(ctx context.Context, stateId []byte) (state.Beacon
|
||||
// - "justified"
|
||||
// - <slot>
|
||||
// - <hex encoded state root with '0x' prefix>
|
||||
func (p *StateProvider) StateRoot(ctx context.Context, stateId []byte) (root []byte, err error) {
|
||||
func (p *BeaconDbStater) StateRoot(ctx context.Context, stateId []byte) (root []byte, err error) {
|
||||
stateIdString := strings.ToLower(string(stateId))
|
||||
switch stateIdString {
|
||||
case "head":
|
||||
@@ -192,7 +192,7 @@ func (p *StateProvider) StateRoot(ctx context.Context, stateId []byte) (root []b
|
||||
return root, err
|
||||
}
|
||||
|
||||
func (p *StateProvider) stateByRoot(ctx context.Context, stateRoot []byte) (state.BeaconState, error) {
|
||||
func (p *BeaconDbStater) stateByRoot(ctx context.Context, stateRoot []byte) (state.BeaconState, error) {
|
||||
headState, err := p.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state")
|
||||
@@ -213,7 +213,7 @@ func (p *StateProvider) stateByRoot(ctx context.Context, stateRoot []byte) (stat
|
||||
// between the found state's slot and the target slot.
|
||||
// process_blocks is applied for all canonical blocks, and process_slots is called for any skipped
|
||||
// slots, or slots following the most recent canonical block up to and including the target slot.
|
||||
func (p *StateProvider) StateBySlot(ctx context.Context, target primitives.Slot) (state.BeaconState, error) {
|
||||
func (p *BeaconDbStater) StateBySlot(ctx context.Context, target primitives.Slot) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "statefetcher.StateBySlot")
|
||||
defer span.End()
|
||||
|
||||
@@ -229,7 +229,7 @@ func (p *StateProvider) StateBySlot(ctx context.Context, target primitives.Slot)
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (p *StateProvider) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
func (p *BeaconDbStater) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
b, err := p.ChainInfoFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head block")
|
||||
@@ -241,7 +241,7 @@ func (p *StateProvider) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
return stateRoot[:], nil
|
||||
}
|
||||
|
||||
func (p *StateProvider) genesisStateRoot(ctx context.Context) ([]byte, error) {
|
||||
func (p *BeaconDbStater) genesisStateRoot(ctx context.Context) ([]byte, error) {
|
||||
b, err := p.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get genesis block")
|
||||
@@ -253,7 +253,7 @@ func (p *StateProvider) genesisStateRoot(ctx context.Context) ([]byte, error) {
|
||||
return stateRoot[:], nil
|
||||
}
|
||||
|
||||
func (p *StateProvider) finalizedStateRoot(ctx context.Context) ([]byte, error) {
|
||||
func (p *BeaconDbStater) finalizedStateRoot(ctx context.Context) ([]byte, error) {
|
||||
cp, err := p.BeaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get finalized checkpoint")
|
||||
@@ -269,7 +269,7 @@ func (p *StateProvider) finalizedStateRoot(ctx context.Context) ([]byte, error)
|
||||
return stateRoot[:], nil
|
||||
}
|
||||
|
||||
func (p *StateProvider) justifiedStateRoot(ctx context.Context) ([]byte, error) {
|
||||
func (p *BeaconDbStater) justifiedStateRoot(ctx context.Context) ([]byte, error) {
|
||||
cp, err := p.BeaconDB.JustifiedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get justified checkpoint")
|
||||
@@ -285,7 +285,7 @@ func (p *StateProvider) justifiedStateRoot(ctx context.Context) ([]byte, error)
|
||||
return stateRoot[:], nil
|
||||
}
|
||||
|
||||
func (p *StateProvider) stateRootByRoot(ctx context.Context, stateRoot []byte) ([]byte, error) {
|
||||
func (p *BeaconDbStater) stateRootByRoot(ctx context.Context, stateRoot []byte) ([]byte, error) {
|
||||
var r [32]byte
|
||||
copy(r[:], stateRoot)
|
||||
headState, err := p.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
@@ -302,7 +302,7 @@ func (p *StateProvider) stateRootByRoot(ctx context.Context, stateRoot []byte) (
|
||||
return nil, &rootNotFoundErr
|
||||
}
|
||||
|
||||
func (p *StateProvider) stateRootBySlot(ctx context.Context, slot primitives.Slot) ([]byte, error) {
|
||||
func (p *BeaconDbStater) stateRootBySlot(ctx context.Context, slot primitives.Slot) ([]byte, error) {
|
||||
currentSlot := p.GenesisTimeFetcher.CurrentSlot()
|
||||
if slot > currentSlot {
|
||||
return nil, errors.New("slot cannot be in the future")
|
||||
@@ -1,4 +1,4 @@
|
||||
package statefetcher
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -38,7 +38,7 @@ func TestGetState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("head", func(t *testing.T) {
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: newBeaconState},
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func TestGetState(t *testing.T) {
|
||||
cs := &mockstategen.MockCurrentSlotter{Slot: bs.Slot() + 1}
|
||||
ch := stategen.NewCanonicalHistory(db, cc, cs)
|
||||
currentSlot := primitives.Slot(0)
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
BeaconDB: db,
|
||||
ReplayerBuilder: ch,
|
||||
GenesisTimeFetcher: &chainMock.ChainService{Slot: &currentSlot},
|
||||
@@ -98,7 +98,7 @@ func TestGetState(t *testing.T) {
|
||||
replayer.SetMockStateForSlot(newBeaconState, params.BeaconConfig().SlotsPerEpoch*10)
|
||||
stateGen.StatesByRoot[stateRoot] = newBeaconState
|
||||
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
FinalizedCheckPoint: &ethpb.Checkpoint{
|
||||
Root: stateRoot[:],
|
||||
@@ -122,7 +122,7 @@ func TestGetState(t *testing.T) {
|
||||
replayer.SetMockStateForSlot(newBeaconState, params.BeaconConfig().SlotsPerEpoch*10)
|
||||
stateGen.StatesByRoot[stateRoot] = newBeaconState
|
||||
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
CurrentJustifiedCheckPoint: &ethpb.Checkpoint{
|
||||
Root: stateRoot[:],
|
||||
@@ -146,7 +146,7 @@ func TestGetState(t *testing.T) {
|
||||
stateGen := mockstategen.NewMockService()
|
||||
stateGen.StatesByRoot[bytesutil.ToBytes32(stateId)] = newBeaconState
|
||||
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: newBeaconState},
|
||||
StateGenService: stateGen,
|
||||
}
|
||||
@@ -159,7 +159,7 @@ func TestGetState(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("hex_root_not_found", func(t *testing.T) {
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: newBeaconState},
|
||||
}
|
||||
stateId, err := hexutil.Decode("0x" + strings.Repeat("f", 64))
|
||||
@@ -169,7 +169,7 @@ func TestGetState(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
GenesisTimeFetcher: &chainMock.ChainService{Slot: &headSlot},
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
CanonicalRoots: map[[32]byte]bool{
|
||||
@@ -188,7 +188,7 @@ func TestGetState(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("invalid_state", func(t *testing.T) {
|
||||
p := StateProvider{}
|
||||
p := BeaconDbStater{}
|
||||
_, err := p.State(ctx, []byte("foo"))
|
||||
require.ErrorContains(t, "could not parse state ID", err)
|
||||
})
|
||||
@@ -212,7 +212,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
b.Block.StateRoot = stateRoot[:]
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
State: newBeaconState,
|
||||
Block: wsb,
|
||||
@@ -241,7 +241,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, r))
|
||||
require.NoError(t, db.SaveState(ctx, bs, r))
|
||||
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -275,7 +275,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -306,7 +306,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
|
||||
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -319,7 +319,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
stateId, err := hexutil.Decode("0x" + strings.Repeat("0", 63) + "1")
|
||||
require.NoError(t, err)
|
||||
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: newBeaconState},
|
||||
}
|
||||
|
||||
@@ -329,7 +329,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("hex_root_not_found", func(t *testing.T) {
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: newBeaconState},
|
||||
}
|
||||
stateId, err := hexutil.Decode("0x" + strings.Repeat("f", 64))
|
||||
@@ -355,7 +355,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
|
||||
slot := primitives.Slot(40)
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
GenesisTimeFetcher: &chainMock.ChainService{Slot: &slot},
|
||||
BeaconDB: db,
|
||||
}
|
||||
@@ -366,7 +366,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("slot_too_big", func(t *testing.T) {
|
||||
p := StateProvider{
|
||||
p := BeaconDbStater{
|
||||
GenesisTimeFetcher: &chainMock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
@@ -376,7 +376,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("invalid_state", func(t *testing.T) {
|
||||
p := StateProvider{}
|
||||
p := BeaconDbStater{}
|
||||
_, err := p.StateRoot(ctx, []byte("foo"))
|
||||
require.ErrorContains(t, "could not parse state ID", err)
|
||||
})
|
||||
@@ -389,7 +389,7 @@ func TestNewStateNotFoundError(t *testing.T) {
|
||||
|
||||
func TestStateBySlot_FutureSlot(t *testing.T) {
|
||||
slot := primitives.Slot(100)
|
||||
p := StateProvider{GenesisTimeFetcher: &chainMock.ChainService{Slot: &slot}}
|
||||
p := BeaconDbStater{GenesisTimeFetcher: &chainMock.ChainService{Slot: &slot}}
|
||||
_, err := p.StateBySlot(context.Background(), 101)
|
||||
assert.ErrorContains(t, "requested slot is in the future", err)
|
||||
}
|
||||
@@ -403,7 +403,7 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) {
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: &currentSlot}
|
||||
mockReplayer := mockstategen.NewMockReplayerBuilder()
|
||||
mockReplayer.SetMockStateForSlot(slotSt, 101)
|
||||
p := StateProvider{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
st, err := p.StateBySlot(context.Background(), 101)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(101), st.Slot())
|
||||
@@ -46,6 +46,5 @@ type Server struct {
|
||||
StateGen stategen.StateManager
|
||||
SyncChecker sync.Checker
|
||||
ReplayerBuilder stategen.ReplayerBuilder
|
||||
HeadUpdater blockchain.HeadUpdater
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -39,6 +41,11 @@ const eth1dataTimeout = 2 * time.Second
|
||||
// GetBeaconBlock is called by a proposer during its assigned slot to request a block to sign
|
||||
// by passing in the slot and the signed randao reveal of the slot.
|
||||
func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
|
||||
startTime, err := slots.ToTime(uint64(vs.TimeFetcher.GenesisTime().Unix()), req.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get start time: %v", err)
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.GetBeaconBlock")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))
|
||||
@@ -49,14 +56,12 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
|
||||
// process attestations and update head in forkchoice
|
||||
vs.ForkFetcher.ForkChoicer().Lock()
|
||||
vs.HeadUpdater.UpdateHead(ctx, vs.ForkFetcher.CurrentSlot())
|
||||
headRoot := vs.ForkFetcher.ForkChoicer().CachedHeadRoot()
|
||||
parentRoot := vs.ForkFetcher.ForkChoicer().GetProposerHead()
|
||||
vs.ForkchoiceFetcher.UpdateHead(ctx, vs.TimeFetcher.CurrentSlot())
|
||||
headRoot := vs.ForkchoiceFetcher.CachedHeadRoot()
|
||||
parentRoot := vs.ForkchoiceFetcher.GetProposerHead()
|
||||
if parentRoot != headRoot {
|
||||
blockchain.LateBlockAttemptedReorgCount.Inc()
|
||||
}
|
||||
vs.ForkFetcher.ForkChoicer().Unlock()
|
||||
|
||||
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
@@ -84,50 +89,92 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
sBlk.SetRandaoReveal(req.RandaoReveal)
|
||||
sBlk.SetParentRoot(parentRoot[:])
|
||||
|
||||
// Set eth1 data.
|
||||
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
|
||||
if err != nil {
|
||||
eth1Data = &ethpb.Eth1Data{DepositRoot: params.BeaconConfig().ZeroHash[:], BlockHash: params.BeaconConfig().ZeroHash[:]}
|
||||
log.WithError(err).Error("Could not get eth1data")
|
||||
}
|
||||
sBlk.SetEth1Data(eth1Data)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Set deposit and attestation.
|
||||
deposits, atts, err := vs.packDepositsAndAttestations(ctx, head, eth1Data) // TODO: split attestations and deposits
|
||||
if err != nil {
|
||||
sBlk.SetDeposits([]*ethpb.Deposit{})
|
||||
sBlk.SetAttestations([]*ethpb.Attestation{})
|
||||
log.WithError(err).Error("Could not pack deposits and attestations")
|
||||
} else {
|
||||
sBlk.SetDeposits(deposits)
|
||||
sBlk.SetAttestations(atts)
|
||||
}
|
||||
wg.Add(1)
|
||||
var eth1Data *ethpb.Eth1Data
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
eth1Data, err := vs.eth1DataMajorityVote(ctx, head)
|
||||
if err != nil {
|
||||
eth1Data = &ethpb.Eth1Data{DepositRoot: params.BeaconConfig().ZeroHash[:], BlockHash: params.BeaconConfig().ZeroHash[:]}
|
||||
log.WithError(err).Error("Could not get eth1data")
|
||||
}
|
||||
sBlk.SetEth1Data(eth1Data)
|
||||
}()
|
||||
|
||||
// Set proposer index.
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, head)
|
||||
if err != nil {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
atts, err := vs.packAttestations(ctx, head)
|
||||
if err != nil {
|
||||
sBlk.SetAttestations([]*ethpb.Attestation{})
|
||||
log.WithError(err).Error("Could not pack attestations")
|
||||
} else {
|
||||
sBlk.SetAttestations(atts)
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
deposits, err := vs.packDeposit(ctx, head, eth1Data)
|
||||
if err != nil {
|
||||
sBlk.SetDeposits([]*ethpb.Deposit{})
|
||||
log.WithError(err).Error("Could not pack deposits")
|
||||
} else {
|
||||
sBlk.SetDeposits(deposits)
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
if err := func() error {
|
||||
defer wg.Done()
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, head)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sBlk.SetProposerIndex(idx)
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return nil, fmt.Errorf("could not calculate proposer index %v", err)
|
||||
}
|
||||
sBlk.SetProposerIndex(idx)
|
||||
|
||||
// Set slashings.
|
||||
validProposerSlashings, validAttSlashings := vs.getSlashings(ctx, head)
|
||||
sBlk.SetProposerSlashings(validProposerSlashings)
|
||||
sBlk.SetAttesterSlashings(validAttSlashings)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
validProposerSlashings, validAttSlashings := vs.getSlashings(ctx, head)
|
||||
sBlk.SetProposerSlashings(validProposerSlashings)
|
||||
sBlk.SetAttesterSlashings(validAttSlashings)
|
||||
}()
|
||||
|
||||
// Set exits.
|
||||
sBlk.SetVoluntaryExits(vs.getExits(head, req.Slot))
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Done()
|
||||
sBlk.SetVoluntaryExits(vs.getExits(head, req.Slot))
|
||||
}()
|
||||
|
||||
// Set sync aggregate. New in Altair.
|
||||
vs.setSyncAggregate(ctx, sBlk)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Done()
|
||||
vs.setSyncAggregate(ctx, sBlk)
|
||||
}()
|
||||
|
||||
// Set execution data. New in Bellatrix.
|
||||
if err := vs.setExecutionData(ctx, sBlk, head); err != nil {
|
||||
wg.Add(1)
|
||||
if err := func() error {
|
||||
defer wg.Done()
|
||||
return vs.setExecutionData(ctx, sBlk, head)
|
||||
}(); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||
}
|
||||
|
||||
// Set bls to execution change. New in Capella.
|
||||
vs.setBlsToExecData(sBlk, head)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Done()
|
||||
vs.setBlsToExecData(sBlk, head)
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
sr, err := vs.computeStateRoot(ctx, sBlk)
|
||||
if err != nil {
|
||||
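The hunk above fans the block-field assembly out across goroutines coordinated by a single sync.WaitGroup and then blocks on wg.Wait() before computing the state root. A self-contained sketch of that fan-out/fan-in shape is below; the field names are placeholders, and it uses defer wg.Done() so the counter is decremented even if a branch returns early.

package main

import (
	"fmt"
	"sync"
)

// assembleFields shows the fan-out/fan-in shape used above: each independent
// piece of work runs in its own goroutine and the caller blocks on wg.Wait().
// The field names are illustrative, not the actual block setters.
func assembleFields() map[string]string {
	var (
		mu     sync.Mutex
		wg     sync.WaitGroup
		fields = make(map[string]string)
	)
	set := func(k, v string) {
		mu.Lock()
		defer mu.Unlock()
		fields[k] = v
	}
	for _, name := range []string{"eth1Data", "attestations", "deposits", "slashings", "exits"} {
		name := name
		wg.Add(1)
		go func() {
			defer wg.Done() // always decrement, even if the work returns early
			set(name, "done")
		}()
	}
	wg.Wait()
	return fields
}

func main() {
	fmt.Println(assembleFields())
}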
@@ -154,6 +201,12 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Altair{Altair: pb.(*ethpb.BeaconBlockAltair)}}, nil
|
||||
}
|
||||
|
||||
currentSlotStartTime := vs.TimeFetcher.GenesisTime().Add(time.Duration(int64(uint64(vs.TimeFetcher.CurrentSlot()) * params.BeaconConfig().SecondsPerSlot)))
|
||||
|
||||
log.Info("Returned block to proposer in #1", prysmTime.Since(currentSlotStartTime).Milliseconds())
|
||||
log.Info("Returned block to proposer in #2", prysmTime.Since(startTime).Milliseconds())
|
||||
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Phase0{Phase0: pb.(*ethpb.BeaconBlock)}}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -60,43 +61,56 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
|
||||
return errors.Wrap(err, "failed to get execution payload")
|
||||
}
|
||||
// Compare payload values between local and builder. Default to the local value if it is higher.
|
||||
localValue, err := localPayload.Value()
|
||||
v, err := localPayload.Value()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get local payload value")
|
||||
}
|
||||
builderValue, err := builderPayload.Value()
|
||||
localValue := v.Uint64()
|
||||
v, err = builderPayload.Value()
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to get builder payload value") // Default to local if can't get builder value.
|
||||
v = big.NewInt(0) // Default to local if can't get builder value.
|
||||
}
|
||||
builderValue := v.Uint64()
|
||||
|
||||
withdrawalsMatched, err := matchingWithdrawalsRoot(localPayload, builderPayload)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to match withdrawals root")
|
||||
}
|
||||
|
||||
// Use the builder payload if the following is true:
// builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)
|
||||
boost := params.BeaconConfig().LocalBlockValueBoost
|
||||
higherValueBuilder := builderValue*100 > localValue*(100+boost)
|
||||
|
||||
// If we can't get the builder value, just use local block.
|
||||
if builderValue.Cmp(localValue) > 0 && withdrawalsMatched { // Builder value is higher and withdrawals match.
|
||||
if higherValueBuilder && withdrawalsMatched { // Builder value is higher and withdrawals match.
|
||||
blk.SetBlinded(true)
|
||||
if err := blk.SetExecution(builderPayload); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||
blk.SetBlinded(false)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"localValue": localValue,
|
||||
"builderValue": builderValue,
|
||||
}).Warn("Proposer: using local execution payload because higher value")
|
||||
if !higherValueBuilder {
|
||||
log.WithFields(logrus.Fields{
|
||||
"localGweiValue": localValue,
|
||||
"localBoostPercentage": 100 + boost,
|
||||
"builderGweiValue": builderValue,
|
||||
}).Warn("Proposer: using local execution payload because higher value")
|
||||
}
|
||||
return blk.SetExecution(localPayload)
|
||||
default: // Bellatrix case.
|
||||
blk.SetBlinded(true)
|
||||
if err := blk.SetExecution(builderPayload); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||
blk.SetBlinded(false)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
|
||||
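The local-versus-builder choice above reduces to one integer comparison in Gwei. A tiny worked sketch of the same inequality (the values and boost percentage are made-up numbers):

package main

import "fmt"

// useBuilder applies the rule from setExecutionData:
// builder_bid_value * 100 > local_block_value * (100 + boost).
func useBuilder(builderGwei, localGwei, boostPercent uint64) bool {
	return builderGwei*100 > localGwei*(100+boostPercent)
}

func main() {
	// With no boost, a 1.5 Gwei builder bid beats a 1 Gwei local block.
	fmt.Println(useBuilder(15e8, 1e9, 0)) // true
	// With a 100% boost the same bid loses: 1.5e9*100 <= 1e9*200.
	fmt.Println(useBuilder(15e8, 1e9, 100)) // false
}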
@@ -137,6 +151,9 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv
|
||||
if signedBid.IsNil() {
|
||||
return nil, errors.New("builder returned nil bid")
|
||||
}
|
||||
if signedBid.Version() != b.Version() {
|
||||
return nil, fmt.Errorf("builder bid response version: %d is different from head block version: %d", signedBid.Version(), b.Version())
|
||||
}
|
||||
bid, err := signedBid.Message()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get bid")
|
||||
|
||||
@@ -29,9 +29,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestServer_setExecutionData(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
ctx := context.Background()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
@@ -63,6 +66,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
vs := &Server{
|
||||
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 1, Withdrawals: withdrawals}, BlockValue: big.NewInt(0)},
|
||||
HeadFetcher: &blockchainTest.ChainService{State: capellaTransitionState},
|
||||
FinalizationFetcher: &blockchainTest.ChainService{},
|
||||
BeaconDB: beaconDB,
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
}
|
||||
@@ -117,8 +121,8 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
require.NoError(t, err)
|
||||
chain := &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New(), Genesis: time.Now(), Block: wb}
|
||||
vs.ForkFetcher = chain
|
||||
vs.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
|
||||
vs.ForkchoiceFetcher = chain
|
||||
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
|
||||
vs.TimeFetcher = chain
|
||||
vs.HeadFetcher = chain
|
||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
||||
@@ -137,6 +141,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wr, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
builderValue := bytesutil.ReverseByteOrder(big.NewInt(1e9).Bytes())
|
||||
bid := &ethpb.BuilderBidCapella{
|
||||
Header: &v1.ExecutionPayloadHeaderCapella{
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
@@ -153,7 +158,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
WithdrawalsRoot: wr[:],
|
||||
},
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
Value: bytesutil.PadTo([]byte{1}, 32),
|
||||
Value: bytesutil.PadTo(builderValue, 32),
|
||||
}
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
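The test above builds the bid value by reversing the big-endian bytes of a big.Int and padding to 32 bytes, i.e. the Value field is populated as a little-endian 256-bit integer here. A small round-trip sketch using the same bytesutil helpers (the 1,000,000,000 figure is just an example):

package main

import (
	"fmt"
	"math/big"

	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)

func main() {
	v := big.NewInt(1e9)

	// big.Int.Bytes() is big-endian; the test constructs the bid value as
	// little-endian bytes padded to 32, so reverse then pad.
	le := bytesutil.PadTo(bytesutil.ReverseByteOrder(v.Bytes()), 32)
	fmt.Printf("encoded: %x\n", le)

	// Decoding reverses the steps.
	back := new(big.Int).SetBytes(bytesutil.ReverseByteOrder(le))
	fmt.Println("decoded:", back) // 1000000000
}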
@@ -167,11 +172,11 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{
|
||||
BidCapella: sBid,
|
||||
}
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
chain := &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New(), Genesis: time.Now(), Block: wb}
|
||||
vs.ForkFetcher = chain
|
||||
vs.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
|
||||
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
|
||||
vs.TimeFetcher = chain
|
||||
vs.HeadFetcher = chain
|
||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
||||
@@ -182,11 +187,28 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
t.Run("Builder configured. Local block has higher value", func(t *testing.T) {
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: big.NewInt(3)}
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: big.NewInt(2 * 1e9)}
|
||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
||||
e, err := blk.Block().Body().Execution()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
|
||||
|
||||
require.LogsContain(t, hook, "builderGweiValue=1000000000 localBoostPercentage=100 localGweiValue=2000000000")
|
||||
})
|
||||
t.Run("Builder configured. Local block and boost has higher value", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.LocalBlockValueBoost = 1 // Boost 1%.
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: big.NewInt(1e9)}
|
||||
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
|
||||
e, err := blk.Block().Body().Execution()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
|
||||
|
||||
require.LogsContain(t, hook, "builderGweiValue=1000000000 localBoostPercentage=101 localGweiValue=1000000000")
|
||||
})
|
||||
t.Run("Builder configured. Builder returns fault. Use local block", func(t *testing.T) {
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
|
||||
@@ -41,15 +41,12 @@ func (vs *Server) validatorRegistered(ctx context.Context, id primitives.Validat
|
||||
|
||||
// circuitBreakBuilder returns true if the builder is not allowed to be used due to circuit breaker conditions.
|
||||
func (vs *Server) circuitBreakBuilder(s primitives.Slot) (bool, error) {
|
||||
if vs.ForkFetcher == nil || vs.ForkFetcher.ForkChoicer() == nil {
|
||||
if vs.ForkchoiceFetcher == nil {
|
||||
return true, errors.New("no fork choicer configured")
|
||||
}
|
||||
|
||||
vs.ForkFetcher.ForkChoicer().RLock()
|
||||
defer vs.ForkFetcher.ForkChoicer().RUnlock()
|
||||
|
||||
// Circuit breaker is active if the number of missing consecutive slots is greater than `MaxBuilderConsecutiveMissedSlots`.
|
||||
highestReceivedSlot := vs.ForkFetcher.ForkChoicer().HighestReceivedBlockSlot()
|
||||
highestReceivedSlot := vs.ForkchoiceFetcher.HighestReceivedBlockSlot()
|
||||
maxConsecutiveSkipSlotsAllowed := params.BeaconConfig().MaxBuilderConsecutiveMissedSlots
|
||||
diff, err := s.SafeSubSlot(highestReceivedSlot)
|
||||
if err != nil {
|
||||
@@ -71,7 +68,7 @@ func (vs *Server) circuitBreakBuilder(s primitives.Slot) (bool, error) {
|
||||
}
|
||||
|
||||
// Circuit breaker is active if the number of missing slots per epoch (rolling window) is greater than `MaxBuilderEpochMissedSlots`.
|
||||
receivedCount, err := vs.ForkFetcher.ForkChoicer().ReceivedBlocksLastEpoch()
|
||||
receivedCount, err := vs.ForkchoiceFetcher.ReceivedBlocksLastEpoch()
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
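A condensed, forkchoice-free sketch of the two circuit-breaker conditions described above. The parameter names mirror the config fields, but the function itself is illustrative and assumes the proposal slot is not lower than the highest received slot:

package main

import "fmt"

// builderCircuitBreak mirrors the two checks above: too many consecutive
// missed slots, or too many missed slots in the last epoch (rolling window).
// Assumes proposalSlot >= highestReceivedSlot (the real code uses SafeSubSlot).
func builderCircuitBreak(proposalSlot, highestReceivedSlot, receivedLastEpoch,
	slotsPerEpoch, maxConsecutiveMissed, maxEpochMissed uint64) bool {
	if proposalSlot-highestReceivedSlot > maxConsecutiveMissed {
		return true // builder disabled: consecutive skipped slots exceeded
	}
	if proposalSlot > slotsPerEpoch && slotsPerEpoch-receivedLastEpoch > maxEpochMissed {
		return true // builder disabled: too many missed slots in the rolling epoch window
	}
	return false
}

func main() {
	// 3 consecutive missed slots with a threshold of 3 keeps the builder enabled.
	fmt.Println(builderCircuitBreak(100, 97, 30, 32, 3, 5)) // false
	// 10 consecutive missed slots trips the breaker.
	fmt.Println(builderCircuitBreak(100, 90, 30, 32, 3, 5)) // true
}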
@@ -26,8 +26,8 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
    _, err := s.circuitBreakBuilder(0)
    require.ErrorContains(t, "no fork choicer configured", err)

    s.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
    s.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
    s.ForkchoiceFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
    s.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
    b, err := s.circuitBreakBuilder(params.BeaconConfig().MaxBuilderConsecutiveMissedSlots + 1)
    require.NoError(
        t,
@@ -41,7 +41,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
    ctx := context.Background()
    st, blkRoot, err := createState(1, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, s.ForkFetcher.ForkChoicer().InsertNode(ctx, st, blkRoot))
    require.NoError(t, s.ForkchoiceFetcher.InsertNode(ctx, st, blkRoot))
    b, err = s.circuitBreakBuilder(params.BeaconConfig().MaxBuilderConsecutiveMissedSlots)
    require.NoError(t, err)
    require.Equal(t, false, b)
@@ -52,7 +52,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
    params.OverrideBeaconConfig(cfg)
    st, blkRoot, err = createState(params.BeaconConfig().SlotsPerEpoch, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, s.ForkFetcher.ForkChoicer().InsertNode(ctx, st, blkRoot))
    require.NoError(t, s.ForkchoiceFetcher.InsertNode(ctx, st, blkRoot))
    b, err = s.circuitBreakBuilder(params.BeaconConfig().SlotsPerEpoch + 1)
    require.NoError(t, err)
    require.Equal(t, true, b)
@@ -62,7 +62,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
    for i := primitives.Slot(2); i <= want+2; i++ {
        st, blkRoot, err = createState(i, [32]byte{byte(i)}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, s.ForkFetcher.ForkChoicer().InsertNode(ctx, st, blkRoot))
        require.NoError(t, s.ForkchoiceFetcher.InsertNode(ctx, st, blkRoot))
    }
    b, err = s.circuitBreakBuilder(params.BeaconConfig().SlotsPerEpoch + 1)
    require.NoError(t, err)
@@ -99,8 +99,8 @@ func TestServer_canUseBuilder(t *testing.T) {
    proposerServer := &Server{}
    ctx := context.Background()

    proposerServer.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
    proposerServer.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
    proposerServer.ForkchoiceFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
    proposerServer.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
    reg, err := proposerServer.canUseBuilder(ctx, params.BeaconConfig().MaxBuilderConsecutiveMissedSlots+1, 0)
    require.NoError(t, err)
    require.Equal(t, false, reg)

@@ -12,49 +12,16 @@ import (
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
    "golang.org/x/sync/errgroup"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func (vs *Server) packDepositsAndAttestations(ctx context.Context, head state.BeaconState, eth1Data *ethpb.Eth1Data) ([]*ethpb.Deposit, []*ethpb.Attestation, error) {
    eg, egctx := errgroup.WithContext(ctx)
    var deposits []*ethpb.Deposit
    var atts []*ethpb.Attestation

    eg.Go(func() error {
        // Pack ETH1 deposits which have not been included in the beacon chain.
        localDeposits, err := vs.deposits(egctx, head, eth1Data)
        if err != nil {
            return status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
        }
        // if the original context is cancelled, then cancel this routine too
        select {
        case <-egctx.Done():
            return egctx.Err()
        default:
        }
        deposits = localDeposits
        return nil
    })

    eg.Go(func() error {
        // Pack aggregated attestations which have not been included in the beacon chain.
        localAtts, err := vs.packAttestations(egctx, head)
        if err != nil {
            return status.Errorf(codes.Internal, "Could not get attestations to pack into block: %v", err)
        }
        // if the original context is cancelled, then cancel this routine too
        select {
        case <-egctx.Done():
            return egctx.Err()
        default:
        }
        atts = localAtts
        return nil
    })

    return deposits, atts, eg.Wait()
func (vs *Server) packDeposit(ctx context.Context, head state.BeaconState, eth1Data *ethpb.Eth1Data) ([]*ethpb.Deposit, error) {
    d, err := vs.deposits(ctx, head, eth1Data)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
    }
    return d, nil
}

// deposits returns a list of pending deposits that are ready for inclusion in the next beacon

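The removed packDepositsAndAttestations relied on golang.org/x/sync/errgroup to fetch deposits and attestations in parallel and to surface the first error. A generic sketch of that pattern follows, with placeholder fetchers standing in for vs.deposits and vs.packAttestations; the function and type names are illustrative only.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// packConcurrently runs two independent fetches in parallel and returns the
// first error encountered; both goroutines share egctx, so a failure in one
// cancels the other.
func packConcurrently(ctx context.Context,
	fetchDeposits func(context.Context) ([]string, error),
	fetchAtts func(context.Context) ([]string, error),
) ([]string, []string, error) {
	eg, egctx := errgroup.WithContext(ctx)
	var deposits, atts []string

	eg.Go(func() error {
		d, err := fetchDeposits(egctx)
		if err != nil {
			return err
		}
		deposits = d
		return nil
	})
	eg.Go(func() error {
		a, err := fetchAtts(egctx)
		if err != nil {
			return err
		}
		atts = a
		return nil
	})
	// Wait returns the first non-nil error from either goroutine.
	return deposits, atts, eg.Wait()
}

func main() {
	d, a, err := packConcurrently(context.Background(),
		func(context.Context) ([]string, error) { return []string{"deposit-0"}, nil },
		func(context.Context) ([]string, error) { return []string{"att-0", "att-1"}, nil },
	)
	fmt.Println(d, a, err)
}
```
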
@@ -115,31 +115,23 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
    if err != nil {
        return nil, err
    }
    finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
    finalizedRoot := bytesutil.ToBytes32(st.FinalizedCheckpoint().Root)
    if finalizedRoot != [32]byte{} { // finalized root could be zeros before the first finalized block.
        finalizedBlock, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(st.FinalizedCheckpoint().Root))

    finalizedBlockHash := [32]byte{}
    justifiedBlockHash := [32]byte{}
    // Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
    if st.Version() >= version.Altair {
        finalizedBlockHash = vs.FinalizationFetcher.FinalizedBlockHash()
        justifiedBlockHash, err = vs.FinalizationFetcher.UnrealizedJustifiedPayloadBlockHash()
        if err != nil {
            return nil, err
        }
        if err := consensusblocks.BeaconBlockIsNil(finalizedBlock); err != nil {
            return nil, err
        }
        switch finalizedBlock.Version() {
        case version.Phase0, version.Altair: // Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
        default:
            finalizedPayload, err := finalizedBlock.Block().Body().Execution()
            if err != nil {
                return nil, err
            }
            finalizedBlockHash = finalizedPayload.BlockHash()
            log.WithError(err).Error("Could not get unrealized justified payload block hash")
            justifiedBlockHash = finalizedBlockHash // Don't fail block proposal if we can't get the justified block hash.
        }
    }

    f := &enginev1.ForkchoiceState{
        HeadBlockHash: parentHash,
        SafeBlockHash: finalizedBlockHash,
        FinalizedBlockHash: finalizedBlockHash,
        SafeBlockHash: justifiedBlockHash[:],
        FinalizedBlockHash: finalizedBlockHash[:],
    }
    var attr payloadattribute.Attributer
    switch st.Version() {

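The reworked hunk assembles the forkchoice state from the head (parent) hash, the unrealized justified payload hash, and the finalized payload hash, and degrades to the finalized hash if the justified lookup fails rather than aborting the proposal. A simplified sketch of that selection, using a local struct in place of enginev1.ForkchoiceState and plain byte slices for the hashes (all names here are stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

// forkchoiceState mirrors the three hashes sent to the execution engine via
// engine_forkchoiceUpdated; it is a local stand-in, not the engine API type.
type forkchoiceState struct {
	HeadBlockHash      []byte
	SafeBlockHash      []byte
	FinalizedBlockHash []byte
}

// buildForkchoiceState uses the justified hash as the safe block when it is
// available and otherwise falls back to the finalized hash, so a transient
// lookup failure does not fail block proposal.
func buildForkchoiceState(parentHash, finalizedHash []byte, justified func() ([]byte, error)) forkchoiceState {
	safe := finalizedHash
	if j, err := justified(); err == nil {
		safe = j
	}
	return forkchoiceState{
		HeadBlockHash:      parentHash,
		SafeBlockHash:      safe,
		FinalizedBlockHash: finalizedHash,
	}
}

func main() {
	parent := []byte{0xaa}
	finalized := []byte{0xbb}
	ok := func() ([]byte, error) { return []byte{0xcc}, nil }
	fail := func() ([]byte, error) { return nil, errors.New("unavailable") }
	fmt.Printf("%x\n", buildForkchoiceState(parent, finalized, ok).SafeBlockHash)   // cc
	fmt.Printf("%x\n", buildForkchoiceState(parent, finalized, fail).SafeBlockHash) // bb
}
```
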
@@ -139,6 +139,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
    vs := &Server{
        ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr, ExecutionPayload: &pb.ExecutionPayload{}},
        HeadFetcher: &chainMock.ChainService{State: tt.st},
        FinalizationFetcher: &chainMock.ChainService{},
        BeaconDB: beaconDB,
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }
@@ -221,6 +222,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
            ExecutionPayload: payload,
        },
        HeadFetcher: &chainMock.ChainService{State: transitionSt},
        FinalizationFetcher: &chainMock.ChainService{},
        BeaconDB: beaconDB,
        ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
    }

@@ -443,8 +443,8 @@ func TestServer_GetBeaconBlock_Optimistic(t *testing.T) {
    proposerServer := &Server{
        OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
        SyncChecker: &mockSync.Sync{},
        HeadUpdater: mockChainService,
        ForkFetcher: mockChainService,
        ForkchoiceFetcher: mockChainService,
        TimeFetcher: &mock.ChainService{}}
    req := &ethpb.BlockRequest{
        Slot: bellatrixSlot + 1,
@@ -462,11 +462,12 @@ func getProposerServer(db db.HeadAccessDatabase, headState state.BeaconState, he
        HeadFetcher: mockChainService,
        SyncChecker: &mockSync.Sync{IsSyncing: false},
        BlockReceiver: mockChainService,
        HeadUpdater: mockChainService,
        ChainStartFetcher: &mockExecution.Chain{},
        Eth1InfoFetcher: &mockExecution.Chain{},
        Eth1BlockFetcher: &mockExecution.Chain{},
        FinalizationFetcher: mockChainService,
        ForkFetcher: mockChainService,
        ForkchoiceFetcher: mockChainService,
        MockEth1Votes: true,
        AttPool: attestations.NewPool(),
        SlashingsPool: slashings.NewPool(),

@@ -45,8 +45,8 @@ type Server struct {
    AttestationCache       *cache.AttestationCache
    ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
    HeadFetcher            blockchain.HeadFetcher
    HeadUpdater            blockchain.HeadUpdater
    ForkFetcher            blockchain.ForkFetcher
    ForkchoiceFetcher      blockchain.ForkchoiceFetcher
    GenesisFetcher         blockchain.GenesisFetcher
    FinalizationFetcher    blockchain.FinalizationFetcher
    TimeFetcher            blockchain.TimeFetcher

@@ -8,6 +8,7 @@ import (
    "net"
    "sync"

    "github.com/gorilla/mux"
    middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
    grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
@@ -32,12 +33,13 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/debug"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/node"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/rewards"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
    beaconv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/beacon"
    debugv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/debug"
    nodev1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/node"
    validatorv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
    slasherservice "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
    chainSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
@@ -81,10 +83,10 @@ type Config struct {
    BeaconMonitoringPort int
    BeaconDB             db.HeadAccessDatabase
    ChainInfoFetcher     blockchain.ChainInfoFetcher
    HeadUpdater          blockchain.HeadUpdater
    HeadFetcher          blockchain.HeadFetcher
    CanonicalFetcher     blockchain.CanonicalFetcher
    ForkFetcher          blockchain.ForkFetcher
    ForkchoiceFetcher    blockchain.ForkchoiceFetcher
    FinalizationFetcher  blockchain.FinalizationFetcher
    AttestationReceiver  blockchain.AttestationReceiver
    BlockReceiver        blockchain.BlockReceiver
@@ -117,6 +119,7 @@ type Config struct {
    ProposerIdsCache      *cache.ProposerPayloadIDsCache
    OptimisticModeFetcher blockchain.OptimisticModeFetcher
    BlockBuilder          builder.BlockBuilder
    Router                *mux.Router
}

// NewService instantiates a new RPC service instance that will
@@ -189,6 +192,25 @@ func (s *Service) Start() {
    }
    withCache := stategen.WithCache(stateCache)
    ch := stategen.NewCanonicalHistory(s.cfg.BeaconDB, s.cfg.ChainInfoFetcher, s.cfg.ChainInfoFetcher, withCache)
    stater := &lookup.BeaconDbStater{
        BeaconDB: s.cfg.BeaconDB,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
        GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
        StateGenService: s.cfg.StateGen,
        ReplayerBuilder: ch,
    }
    blocker := &lookup.BeaconDbBlocker{
        BeaconDB: s.cfg.BeaconDB,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
    }

    rewardsServer := &rewards.Server{
        Blocker: blocker,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        FinalizationFetcher: s.cfg.FinalizationFetcher,
        ReplayerBuilder: ch,
    }
    s.cfg.Router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", rewardsServer.BlockRewards)

    validatorServer := &validatorv1alpha1.Server{
        Ctx: s.ctx,
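The rewards endpoint above is registered directly on the shared gorilla/mux router that Config now carries. A minimal sketch of that wiring, with a placeholder handler in place of rewards.Server.BlockRewards (the handler body and addresses here are illustrative only):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// blockRewardsHandler is a placeholder for rewards.Server.BlockRewards; it
// only demonstrates reading the {block_id} path variable registered below.
func blockRewardsHandler(w http.ResponseWriter, r *http.Request) {
	blockID := mux.Vars(r)["block_id"]
	fmt.Fprintf(w, "rewards for block %s\n", blockID)
}

func main() {
	// A single router can be shared between services (for example, passed in
	// via a Config field) so each of them can register its HTTP routes on it.
	router := mux.NewRouter()
	router.HandleFunc("/eth/v1/beacon/rewards/blocks/{block_id}", blockRewardsHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", router))
}
```
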
@@ -196,8 +218,8 @@ func (s *Service) Start() {
        AttPool: s.cfg.AttestationsPool,
        ExitPool: s.cfg.ExitPool,
        HeadFetcher: s.cfg.HeadFetcher,
        HeadUpdater: s.cfg.HeadUpdater,
        ForkFetcher: s.cfg.ForkFetcher,
        ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
        GenesisFetcher: s.cfg.GenesisFetcher,
        FinalizationFetcher: s.cfg.FinalizationFetcher,
        TimeFetcher: s.cfg.GenesisTimeFetcher,
@@ -226,22 +248,15 @@ func (s *Service) Start() {
        BLSChangesPool: s.cfg.BLSChangesPool,
    }
    validatorServerV1 := &validator.Server{
        HeadFetcher: s.cfg.HeadFetcher,
        HeadUpdater: s.cfg.HeadUpdater,
        TimeFetcher: s.cfg.GenesisTimeFetcher,
        SyncChecker: s.cfg.SyncService,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        AttestationsPool: s.cfg.AttestationsPool,
        PeerManager: s.cfg.PeerManager,
        Broadcaster: s.cfg.Broadcaster,
        V1Alpha1Server: validatorServer,
        StateFetcher: &statefetcher.StateProvider{
            BeaconDB: s.cfg.BeaconDB,
            ChainInfoFetcher: s.cfg.ChainInfoFetcher,
            GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
            StateGenService: s.cfg.StateGen,
            ReplayerBuilder: ch,
        },
        HeadFetcher: s.cfg.HeadFetcher,
        TimeFetcher: s.cfg.GenesisTimeFetcher,
        SyncChecker: s.cfg.SyncService,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        AttestationsPool: s.cfg.AttestationsPool,
        PeerManager: s.cfg.PeerManager,
        Broadcaster: s.cfg.Broadcaster,
        V1Alpha1Server: validatorServer,
        Stater: stater,
        SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
        ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
@@ -280,7 +295,6 @@ func (s *Service) Start() {
        BeaconDB: s.cfg.BeaconDB,
        AttestationsPool: s.cfg.AttestationsPool,
        SlashingsPool: s.cfg.SlashingsPool,
        HeadUpdater: s.cfg.HeadUpdater,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        HeadFetcher: s.cfg.HeadFetcher,
        FinalizationFetcher: s.cfg.FinalizationFetcher,
@@ -300,24 +314,19 @@ func (s *Service) Start() {
        ReplayerBuilder: ch,
    }
    beaconChainServerV1 := &beacon.Server{
        CanonicalHistory: ch,
        BeaconDB: s.cfg.BeaconDB,
        AttestationsPool: s.cfg.AttestationsPool,
        SlashingsPool: s.cfg.SlashingsPool,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
        GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
        BlockNotifier: s.cfg.BlockNotifier,
        OperationNotifier: s.cfg.OperationNotifier,
        Broadcaster: s.cfg.Broadcaster,
        BlockReceiver: s.cfg.BlockReceiver,
        StateGenService: s.cfg.StateGen,
        StateFetcher: &statefetcher.StateProvider{
            BeaconDB: s.cfg.BeaconDB,
            ChainInfoFetcher: s.cfg.ChainInfoFetcher,
            GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
            StateGenService: s.cfg.StateGen,
            ReplayerBuilder: ch,
        },
        CanonicalHistory: ch,
        BeaconDB: s.cfg.BeaconDB,
        AttestationsPool: s.cfg.AttestationsPool,
        SlashingsPool: s.cfg.SlashingsPool,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
        GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
        BlockNotifier: s.cfg.BlockNotifier,
        OperationNotifier: s.cfg.OperationNotifier,
        Broadcaster: s.cfg.Broadcaster,
        BlockReceiver: s.cfg.BlockReceiver,
        StateGenService: s.cfg.StateGen,
        Stater: stater,
        Blocker: blocker,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        HeadFetcher: s.cfg.HeadFetcher,
        VoluntaryExitsPool: s.cfg.ExitPool,
@@ -352,17 +361,12 @@ func (s *Service) Start() {
        ReplayerBuilder: ch,
    }
    debugServerV1 := &debug.Server{
        BeaconDB: s.cfg.BeaconDB,
        HeadFetcher: s.cfg.HeadFetcher,
        StateFetcher: &statefetcher.StateProvider{
            BeaconDB: s.cfg.BeaconDB,
            ChainInfoFetcher: s.cfg.ChainInfoFetcher,
            GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
            StateGenService: s.cfg.StateGen,
            ReplayerBuilder: ch,
        },
        BeaconDB: s.cfg.BeaconDB,
        HeadFetcher: s.cfg.HeadFetcher,
        Stater: stater,
        OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
        ForkFetcher: s.cfg.ForkFetcher,
        ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
        FinalizationFetcher: s.cfg.FinalizationFetcher,
        ChainInfoFetcher: s.cfg.ChainInfoFetcher,
    }

@@ -7,6 +7,7 @@ import (
    "testing"
    "time"

    "github.com/gorilla/mux"
    mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
    mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
@@ -35,6 +36,7 @@ func TestLifecycle_OK(t *testing.T) {
        GenesisTimeFetcher: chainService,
        ExecutionChainService: &mockExecution.Chain{},
        StateNotifier: chainService.StateNotifier(),
        Router: mux.NewRouter(),
    })

    rpcService.Start()
@@ -75,6 +77,7 @@ func TestRPC_InsecureEndpoint(t *testing.T) {
        HeadFetcher: chainService,
        ExecutionChainService: &mockExecution.Chain{},
        StateNotifier: chainService.StateNotifier(),
        Router: mux.NewRouter(),
    })

    rpcService.Start()

@@ -4,15 +4,24 @@ go_library(
    name = "go_default_library",
    testonly = True,
    srcs = [
        "db.go",
        "mock_blocker.go",
        "mock_exec_chain_info_fetcher.go",
        "mock_genesis_timefetcher.go",
        "mock_state_fetcher.go",
        "mock_stater.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
    ],
)

beacon-chain/rpc/testutil/db.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package testutil

import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
    "github.com/prysmaticlabs/prysm/v4/testing/util"
)

// FillDBWithBlocks saves a genesis block and 100 subsequent blocks to the database,
// marks the last one as head, and returns the genesis block together with the saved block containers.
func FillDBWithBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBeaconBlock, []*ethpbalpha.BeaconBlockContainer) {
    parentRoot := [32]byte{1, 2, 3}
    genBlk := util.NewBeaconBlock()
    genBlk.Block.ParentRoot = parentRoot[:]
    root, err := genBlk.Block.HashTreeRoot()
    require.NoError(t, err)
    util.SaveBlock(t, ctx, beaconDB, genBlk)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))

    count := primitives.Slot(100)
    blks := make([]interfaces.ReadOnlySignedBeaconBlock, count)
    blkContainers := make([]*ethpbalpha.BeaconBlockContainer, count)
    for i := primitives.Slot(0); i < count; i++ {
        b := util.NewBeaconBlock()
        b.Block.Slot = i
        b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
        root, err := b.Block.HashTreeRoot()
        require.NoError(t, err)
        blks[i], err = blocks.NewSignedBeaconBlock(b)
        require.NoError(t, err)
        blkContainers[i] = &ethpbalpha.BeaconBlockContainer{
            Block: &ethpbalpha.BeaconBlockContainer_Phase0Block{Phase0Block: b},
            BlockRoot: root[:],
        }
    }
    require.NoError(t, beaconDB.SaveBlocks(ctx, blks))
    headRoot := bytesutil.ToBytes32(blkContainers[len(blks)-1].BlockRoot)
    summary := &ethpbalpha.StateSummary{
        Root: headRoot[:],
        Slot: blkContainers[len(blks)-1].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block.Block.Slot,
    }
    require.NoError(t, beaconDB.SaveStateSummary(ctx, summary))
    require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
    return genBlk, blkContainers
}
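A sketch of how a test might consume the new helper. It assumes the usual Prysm test database helper (SetupDB from beacon-chain/db/testing, imported here as dbtest) is available; the package name and test name are placeholders, so adjust the setup to whatever your test already uses.

```go
package rpctest

import (
	"context"
	"testing"

	dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestFillDBWithBlocks_Sketch(t *testing.T) {
	ctx := context.Background()
	beaconDB := dbtest.SetupDB(t) // assumed test helper; swap in your own DB setup

	// Seed the database with a genesis block plus 100 blocks and get back the
	// genesis block and the per-slot containers for assertions.
	genesis, containers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
	if genesis == nil {
		t.Fatal("expected a genesis block")
	}
	require.Equal(t, 100, len(containers))
}
```
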
beacon-chain/rpc/testutil/mock_blocker.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package testutil

import (
    "context"
    "strconv"

    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// MockBlocker is a fake implementation of lookup.Blocker.
type MockBlocker struct {
    BlockToReturn interfaces.ReadOnlySignedBeaconBlock
    ErrorToReturn error
    SlotBlockMap  map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
}

// Block --
func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
    if m.ErrorToReturn != nil {
        return nil, m.ErrorToReturn
    }
    slotNumber, parseErr := strconv.ParseUint(string(b), 10, 64)
    if parseErr != nil {
        return m.BlockToReturn, nil
    }
    return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
}
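A usage sketch for the mock: a block identifier that parses as a slot number is served from SlotBlockMap, anything else falls back to BlockToReturn. Only the surrounding test scaffolding is new here, and the assertion on got.Block().Slot() assumes the usual read-only block interface; the package and test names are placeholders.

```go
package rpctest

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestMockBlocker_Sketch(t *testing.T) {
	// Build one signed block to serve for slot 7.
	b := util.NewBeaconBlock()
	b.Block.Slot = 7
	sb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)

	m := &testutil.MockBlocker{
		SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{7: sb},
	}

	// "7" parses as a slot, so the mapped block is returned.
	got, err := m.Block(context.Background(), []byte("7"))
	require.NoError(t, err)
	require.Equal(t, primitives.Slot(7), got.Block().Slot())
}
```
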
@@ -7,23 +7,24 @@ import (
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// MockFetcher is a fake implementation of statefetcher.Fetcher.
type MockFetcher struct {
// MockStater is a fake implementation of lookup.Stater.
type MockStater struct {
    BeaconState     state.BeaconState
    BeaconStateRoot []byte
    StatesBySlot    map[primitives.Slot]state.BeaconState
}

// State --
func (m *MockFetcher) State(context.Context, []byte) (state.BeaconState, error) {
func (m *MockStater) State(context.Context, []byte) (state.BeaconState, error) {
    return m.BeaconState, nil
}

// StateRoot --
func (m *MockFetcher) StateRoot(context.Context, []byte) ([]byte, error) {
func (m *MockStater) StateRoot(context.Context, []byte) ([]byte, error) {
    return m.BeaconStateRoot, nil
}

func (m *MockFetcher) StateBySlot(_ context.Context, s primitives.Slot) (state.BeaconState, error) {
// StateBySlot --
func (m *MockStater) StateBySlot(_ context.Context, s primitives.Slot) (state.BeaconState, error) {
    return m.StatesBySlot[s], nil
}
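And a matching sketch for the renamed stater mock, exercising only the fields and methods shown above; again the package and test names are placeholders.

```go
package rpctest

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestMockStater_Sketch(t *testing.T) {
	m := &testutil.MockStater{BeaconStateRoot: []byte{0x01, 0x02}}

	// StateRoot ignores the identifier and hands back the canned root.
	root, err := m.StateRoot(context.Background(), []byte("head"))
	require.NoError(t, err)
	require.Equal(t, 2, len(root))
	require.Equal(t, byte(0x01), root[0])
}
```
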
@@ -1,28 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "log.go",
        "main.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/server",
    visibility = ["//visibility:private"],
    deps = [
        "//api/gateway:go_default_library",
        "//beacon-chain/gateway:go_default_library",
        "//beacon-chain/rpc/apimiddleware:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//runtime/maxprocs:go_default_library",
        "@com_github_gorilla_mux//:go_default_library",
        "@com_github_joonix_log//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_binary(
    name = "server",
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
)

Some files were not shown because too many files have changed in this diff.