Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00

Compare commits: prune-log ... debugE2EFa
44 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 8d4505c367 |  |
|  | f4ab2ca79f |  |
|  | dbcf5c29cd |  |
|  | c9fe53bc32 |  |
|  | 8522febd88 |  |
|  | 75a28310c2 |  |
|  | 1c9a3b43b4 |  |
|  | b7aaf2813a |  |
|  | 1df173e701 |  |
|  | 3187a05a76 |  |
|  | 4e24102237 |  |
|  | 8dd5e96b29 |  |
|  | 4afb379f8d |  |
|  | 5a2453ac9c |  |
|  | e610d2a5de |  |
|  | 233aaf2f9e |  |
|  | a49bdcaa1f |  |
|  | bdd7b2caa9 |  |
|  | 8de0e3804b |  |
|  | bfb648067b |  |
|  | 852db1f3eb |  |
|  | 5d3663ef8d |  |
|  | a608630727 |  |
|  | 37739b4193 |  |
|  | 4d2067dbae |  |
|  | fc05e306dd |  |
|  | 204de13c86 |  |
|  | f3ef1b64d6 |  |
|  | c3dbfa66d0 |  |
|  | 93aba997f4 |  |
|  | 79bb7efbf8 |  |
|  | 87b53db3b4 |  |
|  | fe431b9201 |  |
|  | 790a09f9b1 |  |
|  | 46387a903a |  |
|  | 6a65e07684 |  |
|  | abef94d7ad |  |
|  | 99a8d0bac6 |  |
|  | b585ff77f5 |  |
|  | 1ff5a43385 |  |
|  | 0cfbddc980 |  |
|  | 22a484c45e |  |
|  | 6ddafe1159 |  |
|  | b8c5af665f |  |
@@ -349,9 +349,9 @@ filegroup(
         visibility = ["//visibility:public"],
     )
     """,
-    sha256 = "9f66d8d5644982d3d0d2e3d2b9ebe77a5f96638a5d7fcd715599c32818195cb3",
-    strip_prefix = "holesky-ea39b9006210848e13f28d92e12a30548cecd41d",
-    url = "https://github.com/eth-clients/holesky/archive/ea39b9006210848e13f28d92e12a30548cecd41d.tar.gz", # 2023-09-21
+    sha256 = "5f4be6fd088683ea9db45c863b9c5a1884422449e5b59fd2d561d3ba0f73ffd9",
+    strip_prefix = "holesky-9d9aabf2d4de51334ee5fed6c79a4d55097d1a43",
+    url = "https://github.com/eth-clients/holesky/archive/9d9aabf2d4de51334ee5fed6c79a4d55097d1a43.tar.gz", # 2024-01-22
 )

 http_archive(
@@ -7,4 +7,6 @@ const (
 	ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
 	JsonMediaType             = "application/json"
 	OctetStreamMediaType      = "application/octet-stream"
+	EventStreamMediaType      = "text/event-stream"
+	KeepAlive                 = "keep-alive"
 )
@@ -6,6 +6,7 @@ go_library(
         "chain_info.go",
         "chain_info_forkchoice.go",
         "currently_syncing_block.go",
+        "defragment.go",
         "error.go",
         "execution_engine.go",
         "forkchoice_update_execution.go",
@@ -6,7 +6,10 @@ import (
 	"time"

+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
 	f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
 	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -18,7 +21,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/time/slots"
-	"go.opencensus.io/trace"
 )

 // ChainInfoFetcher defines a common interface for methods in blockchain service which
@@ -334,6 +336,11 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
 	return v.PublicKey(), nil
 }

+// ForkChoicer returns the forkchoice interface.
+func (s *Service) ForkChoicer() f.ForkChoicer {
+	return s.cfg.ForkChoiceStore
+}
+
 // IsOptimistic returns true if the current head is optimistic.
 func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
 	if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
@@ -549,3 +556,18 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
 	defer s.cfg.ForkChoiceStore.RUnlock()
 	return s.cfg.ForkChoiceStore.Slot(root)
 }
+
+// inRegularSync applies the following heuristics to decide if the node is in
+// regular sync mode vs init sync mode, using only forkchoice.
+// It checks that the highest received block is behind the current time by at least 2 epochs
+// and that it was imported at least one epoch late. If both of these
+// tests pass, then the node is in init sync. The caller of this function MUST
+// hold a lock on forkchoice.
+func (s *Service) inRegularSync() bool {
+	currentSlot := s.CurrentSlot()
+	fc := s.cfg.ForkChoiceStore
+	if currentSlot-fc.HighestReceivedBlockSlot() < 2*params.BeaconConfig().SlotsPerEpoch {
+		return true
+	}
+	return fc.HighestReceivedBlockDelay() < params.BeaconConfig().SlotsPerEpoch
+}
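To make the two thresholds concrete, here is a standalone sketch of the heuristic using mainnet's `SlotsPerEpoch` of 32; the constant and function below are illustrative stand-ins, not Prysm's actual code:

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH

// inRegularSync mirrors the two checks above: the node is syncing normally if
// its newest known block is less than two epochs old, or if that block was
// imported less than one epoch after its slot started.
func inRegularSync(currentSlot, highestReceivedSlot, highestReceivedDelay uint64) bool {
	if currentSlot-highestReceivedSlot < 2*slotsPerEpoch {
		return true // newest known block is fresh enough
	}
	return highestReceivedDelay < slotsPerEpoch // stale head, but it arrived promptly
}

func main() {
	fmt.Println(inRegularSync(100, 90, 0))   // true: head only 10 slots behind
	fmt.Println(inRegularSync(200, 100, 5))  // true: stale head, imported on time
	fmt.Println(inRegularSync(200, 100, 40)) // false: stale head imported over an epoch late
}
```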
@@ -593,3 +593,26 @@ func TestService_IsFinalized(t *testing.T) {
 	require.Equal(t, true, c.IsFinalized(ctx, br))
 	require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
 }
+
+func TestService_inRegularSync(t *testing.T) {
+	ctx := context.Background()
+	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, err)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.Equal(t, false, c.inRegularSync())
+	c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 128, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, err)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.Equal(t, false, c.inRegularSync())
+
+	c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-5*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
+	require.Equal(t, true, c.inRegularSync())
+
+	c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot))))
+	c.cfg.ForkChoiceStore.SetGenesisTime(uint64(time.Now().Unix()))
+	require.Equal(t, true, c.inRegularSync())
+}
beacon-chain/blockchain/defragment.go (new file, 27 lines)
@@ -0,0 +1,27 @@
+package blockchain
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v4/config/features"
+	"github.com/prysmaticlabs/prysm/v4/time"
+)
+
+var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{
+	Name: "head_state_defragmentation_milliseconds",
+	Help: "Milliseconds it takes to defragment the head state",
+})
+
+// This method defragments our state, so that any specific fields which have
+// a higher number of fragmented indexes are reallocated to a new separate slice for
+// that field.
+func (s *Service) defragmentState(st state.BeaconState) {
+	if !features.Get().EnableExperimentalState {
+		return
+	}
+	startTime := time.Now()
+	st.Defragment()
+	elapsedTime := time.Since(startTime)
+	stateDefragmentationTime.Observe(float64(elapsedTime.Milliseconds()))
+}
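For intuition about what such a pass does, here is a minimal sketch assuming the experimental state keeps fields in shared, copy-on-write backing slices; copying a field into a freshly allocated slice releases references into the old, fragmented backing array. This is illustrative only, not the actual `Defragment` implementation:

```go
package main

// compact reallocates a field into a slice sized exactly to its live data,
// dropping any reference to a larger shared backing array (assumption: the
// real state fields behave like slice views over shared storage).
func compact(field [][32]byte) [][32]byte {
	fresh := make([][32]byte, len(field))
	copy(fresh, field)
	return fresh
}

func main() {
	fragmented := make([][32]byte, 4, 1024) // small view over a large backing array
	_ = compact(fragmented)                 // fresh slice; old array can be collected
}
```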
@@ -387,7 +387,10 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
 			// This is an irreparable condition, it would mean a justified or finalized block has become invalid.
 			return err
 		}
-		// TODO: Remove blob here
+		if err := s.blobStorage.Remove(root); err != nil {
+			// Blobs may not exist for some blocks, leading to deletion failures. Log such errors at debug level.
+			log.WithError(err).Debug("Could not remove blob from blob storage")
+		}
 	}
 	return nil
 }
@@ -6,6 +6,8 @@ import (
 	"time"

 	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
 	statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -29,7 +31,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
 	"github.com/prysmaticlabs/prysm/v4/runtime/version"
 	"github.com/prysmaticlabs/prysm/v4/time/slots"
-	"go.opencensus.io/trace"
 )

 // A custom slot deadline for processing state slots in our cache.
@@ -66,7 +67,10 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 	startTime := time.Now()
 	fcuArgs := &fcuConfig{}

-	defer s.handleSecondFCUCall(cfg, fcuArgs)
+	if s.inRegularSync() {
+		defer s.handleSecondFCUCall(cfg, fcuArgs)
+	}
+	defer s.sendLightClientFeeds(cfg)
 	defer s.sendStateFeedOnBlock(cfg)
 	defer reportProcessingTime(startTime)
 	defer reportAttestationInclusion(cfg.signed.Block())
@@ -102,6 +106,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 	if err := s.sendFCU(cfg, fcuArgs); err != nil {
 		return errors.Wrap(err, "could not send FCU to engine")
 	}
+
 	return nil
 }
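The ordering of these `defer` statements matters because Go runs deferred calls in last-in-first-out order: the second FCU call is registered first, so it runs after the feeds and reports. A minimal demonstration of that language rule:

```go
package main

import "fmt"

func main() {
	defer fmt.Println("3: second FCU call") // registered first, runs last
	defer fmt.Println("2: light client feeds")
	defer fmt.Println("1: state feed on block")
	fmt.Println("0: process block")
	// Output:
	// 0: process block
	// 1: state feed on block
	// 2: light client feeds
	// 3: second FCU call
}
```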
@@ -577,10 +582,15 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if s.CurrentSlot() == s.HeadSlot() {
 		return
 	}
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	// return early if we are in init sync
+	if !s.inRegularSync() {
+		return
+	}
 	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
 		Type: statefeed.MissedSlot,
 	})
-
 	s.headLock.RLock()
 	headRoot := s.headRoot()
 	headState := s.headState(ctx)
@@ -595,18 +605,22 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
 		log.WithError(err).Debug("could not update next slot state cache")
 	}
-	s.cfg.ForkChoiceStore.RLock()
+	// handleEpochBoundary requires a forkchoice lock to obtain the target root.
 	if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
 		log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
 	}
-	s.cfg.ForkChoiceStore.RUnlock()
 	// return early if we already started building a block for the current
 	// head root
 	_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
 	if has {
 		return
 	}
+	attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
+	// return early if we are not proposing next slot
+	if attribute.IsEmpty() {
+		return
+	}
 	s.headLock.RLock()
 	headBlock, err := s.headBlock()
 	if err != nil {
@@ -617,18 +631,12 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	s.headLock.RUnlock()

 	fcuArgs := &fcuConfig{
-		headState: headState,
-		headRoot:  headRoot,
-		headBlock: headBlock,
+		headState:  headState,
+		headRoot:   headRoot,
+		headBlock:  headBlock,
+		attributes: attribute,
 	}
-	fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
-	// return early if we are not proposing next slot
-	if fcuArgs.attributes.IsEmpty() {
-		return
-	}
-	s.cfg.ForkChoiceStore.RLock()
 	_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
-	s.cfg.ForkChoiceStore.RUnlock()
 	if err != nil {
 		log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
 	}
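The shape of the change is a chain of cheap early exits under a single read-lock scope, rather than repeated lock/unlock windows. A self-contained sketch of that pattern with stand-in names (not Prysm's types):

```go
package main

import (
	"fmt"
	"sync"
)

type svc struct {
	fc            sync.RWMutex // stand-in for the forkchoice store lock
	current, head uint64
	regularSync   bool
}

func (s *svc) lateTasks() {
	if s.current == s.head {
		return // a block arrived for this slot after all
	}
	s.fc.RLock()
	defer s.fc.RUnlock() // one read-lock scope for the whole task
	if !s.regularSync {
		return // still in init sync: skip missed-slot work entirely
	}
	fmt.Println("missed slot: refresh caches, maybe pre-build a payload")
}

func main() {
	(&svc{current: 10, head: 9, regularSync: true}).lateTasks()
}
```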
@@ -7,21 +7,24 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"

 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
 	statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
 	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/v4/config/features"
 	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	mathutil "github.com/prysmaticlabs/prysm/v4/math"
 	ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/time/slots"
-	"github.com/sirupsen/logrus"
-	"go.opencensus.io/trace"
 )

 // CurrentSlot returns the current slot based on time.
@@ -34,6 +37,9 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
 	if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
 		return err
 	}
+	if !s.inRegularSync() {
+		return nil
+	}
 	slot := cfg.signed.Block().Slot()
 	if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
 		return nil
@@ -106,6 +112,128 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
 	})
 }

+// sendLightClientFeeds sends the light client feeds when feature flag is enabled.
+func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
+	if features.Get().EnableLightClient {
+		if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil {
+			log.WithError(err).Error("Failed to send light client optimistic update")
+		}
+
+		// Get the finalized checkpoint
+		finalized := s.ForkChoicer().FinalizedCheckpoint()
+
+		// LightClientFinalityUpdate needs super majority
+		s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState)
+	}
+}
+
+func (s *Service) tryPublishLightClientFinalityUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, finalized *forkchoicetypes.Checkpoint, postState state.BeaconState) {
+	if finalized.Epoch <= s.lastPublishedLightClientEpoch {
+		return
+	}
+
+	config := params.BeaconConfig()
+	if finalized.Epoch < config.AltairForkEpoch {
+		return
+	}
+
+	syncAggregate, err := signed.Block().Body().SyncAggregate()
+	if err != nil || syncAggregate == nil {
+		return
+	}
+
+	// LightClientFinalityUpdate needs super majority
+	if syncAggregate.SyncCommitteeBits.Count()*3 < config.SyncCommitteeSize*2 {
+		return
+	}
+
+	_, err = s.sendLightClientFinalityUpdate(ctx, signed, postState)
+	if err != nil {
+		log.WithError(err).Error("Failed to send light client finality update")
+	} else {
+		s.lastPublishedLightClientEpoch = finalized.Epoch
+	}
+}
+
+// sendLightClientFinalityUpdate sends a light client finality update notification to the state feed.
+func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
+	postState state.BeaconState) (int, error) {
+	// Get attested state
+	attestedRoot := signed.Block().ParentRoot()
+	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
+	if err != nil {
+		return 0, errors.Wrap(err, "could not get attested state")
+	}
+
+	// Get finalized block
+	var finalizedBlock interfaces.ReadOnlySignedBeaconBlock
+	finalizedCheckPoint := attestedState.FinalizedCheckpoint()
+	if finalizedCheckPoint != nil {
+		finalizedRoot := bytesutil.ToBytes32(finalizedCheckPoint.Root)
+		finalizedBlock, err = s.cfg.BeaconDB.Block(ctx, finalizedRoot)
+		if err != nil {
+			finalizedBlock = nil
+		}
+	}
+
+	update, err := NewLightClientFinalityUpdateFromBeaconState(
+		ctx,
+		postState,
+		signed,
+		attestedState,
+		finalizedBlock,
+	)
+
+	if err != nil {
+		return 0, errors.Wrap(err, "could not create light client update")
+	}
+
+	// Return the result
+	result := &ethpbv2.LightClientFinalityUpdateWithVersion{
+		Version: ethpbv2.Version(signed.Version()),
+		Data:    CreateLightClientFinalityUpdate(update),
+	}
+
+	// Send event
+	return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
+		Type: statefeed.LightClientFinalityUpdate,
+		Data: result,
+	}), nil
+}
+
+// sendLightClientOptimisticUpdate sends a light client optimistic update notification to the state feed.
+func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
+	postState state.BeaconState) (int, error) {
+	// Get attested state
+	attestedRoot := signed.Block().ParentRoot()
+	attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
+	if err != nil {
+		return 0, errors.Wrap(err, "could not get attested state")
+	}
+
+	update, err := NewLightClientOptimisticUpdateFromBeaconState(
+		ctx,
+		postState,
+		signed,
+		attestedState,
+	)
+
+	if err != nil {
+		return 0, errors.Wrap(err, "could not create light client update")
+	}
+
+	// Return the result
+	result := &ethpbv2.LightClientOptimisticUpdateWithVersion{
+		Version: ethpbv2.Version(signed.Version()),
+		Data:    CreateLightClientOptimisticUpdate(update),
+	}
+
+	return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
+		Type: statefeed.LightClientOptimisticUpdate,
+		Data: result,
+	}), nil
+}
+
 // updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
 // boundary in order to compute the right proposer indices after processing
 // state transition. This function is called on late blocks while still locked,
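The supermajority gate in `tryPublishLightClientFinalityUpdate` compares `count*3` against `size*2`, which with mainnet's `SYNC_COMMITTEE_SIZE` of 512 requires at least 342 participating bits, i.e. more than two thirds of the committee. A quick check of the boundary:

```go
package main

import "fmt"

// supermajority mirrors the gate above: the update proceeds only when
// count*3 >= size*2 (the code returns early when count*3 < size*2).
func supermajority(count, size uint64) bool { return count*3 >= size*2 }

func main() {
	const size = 512                      // mainnet SYNC_COMMITTEE_SIZE
	fmt.Println(supermajority(341, size)) // false: 1023 < 1024
	fmt.Println(supermajority(342, size)) // true:  1026 >= 1024
}
```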
@@ -150,7 +150,9 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
 		headBlock:     headBlock,
 		proposingSlot: proposingSlot,
 	}
-	fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
+	if s.inRegularSync() {
+		fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
+	}
 	if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
 		return
 	}
@@ -123,6 +123,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	}
 	daWaitedTime := time.Since(daStartTime)

+	// Defragment the state before continuing block processing.
+	s.defragmentState(postState)
+
 	// The rest of block processing takes a lock on forkchoice.
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
@@ -11,6 +11,8 @@ import (
 	"time"

 	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+
 	"github.com/prysmaticlabs/prysm/v4/async/event"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
@@ -37,34 +39,35 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	prysmTime "github.com/prysmaticlabs/prysm/v4/time"
 	"github.com/prysmaticlabs/prysm/v4/time/slots"
-	"go.opencensus.io/trace"
 )

 // Service represents a service that handles the internal
 // logic of managing the full PoS beacon chain.
 type Service struct {
-	cfg                  *config
-	ctx                  context.Context
-	cancel               context.CancelFunc
-	genesisTime          time.Time
-	head                 *head
-	headLock             sync.RWMutex
-	originBlockRoot      [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
-	boundaryRoots        [][32]byte
-	checkpointStateCache *cache.CheckpointStateCache
-	initSyncBlocks       map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
-	initSyncBlocksLock   sync.RWMutex
-	wsVerifier           *WeakSubjectivityVerifier
-	clockSetter          startup.ClockSetter
-	clockWaiter          startup.ClockWaiter
-	syncComplete         chan struct{}
-	blobNotifiers        *blobNotifierMap
-	blockBeingSynced     *currentlySyncingBlock
-	blobStorage          *filesystem.BlobStorage
+	cfg                           *config
+	ctx                           context.Context
+	cancel                        context.CancelFunc
+	genesisTime                   time.Time
+	head                          *head
+	headLock                      sync.RWMutex
+	originBlockRoot               [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
+	boundaryRoots                 [][32]byte
+	checkpointStateCache          *cache.CheckpointStateCache
+	initSyncBlocks                map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
+	initSyncBlocksLock            sync.RWMutex
+	wsVerifier                    *WeakSubjectivityVerifier
+	clockSetter                   startup.ClockSetter
+	clockWaiter                   startup.ClockWaiter
+	syncComplete                  chan struct{}
+	blobNotifiers                 *blobNotifierMap
+	blockBeingSynced              *currentlySyncingBlock
+	blobStorage                   *filesystem.BlobStorage
+	lastPublishedLightClientEpoch primitives.Epoch
 }

 // config options for the service.
@@ -10,6 +10,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
 	statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
 	testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
@@ -116,6 +117,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
 		WithBLSToExecPool(req.blsPool),
 		WithDepositCache(dc),
 		WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
+		WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
 	}
 	// append the variadic opts so they override the defaults by being processed afterwards
 	opts = append(defOpts, opts...)
@@ -15,11 +15,12 @@ import (

 // AttDelta contains rewards and penalties for a single attestation.
 type AttDelta struct {
-	HeadReward    uint64
-	SourceReward  uint64
-	SourcePenalty uint64
-	TargetReward  uint64
-	TargetPenalty uint64
+	HeadReward        uint64
+	SourceReward      uint64
+	SourcePenalty     uint64
+	TargetReward      uint64
+	TargetPenalty     uint64
+	InactivityPenalty uint64
 }

 // InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
@@ -251,7 +252,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
 		if err != nil {
 			return nil, err
 		}
-		balances[i] = helpers.DecreaseBalanceWithVal(balances[i], delta.SourcePenalty+delta.TargetPenalty)
+		balances[i] = helpers.DecreaseBalanceWithVal(balances[i], delta.SourcePenalty+delta.TargetPenalty+delta.InactivityPenalty)

 		vals[i].AfterEpochTransitionBalance = balances[i]
 	}
@@ -351,7 +352,7 @@ func attestationDelta(
 		if err != nil {
 			return &AttDelta{}, err
 		}
-		attDelta.TargetPenalty += n / inactivityDenominator
+		attDelta.InactivityPenalty = n / inactivityDenominator
 	}

 	return attDelta, nil
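For scale, here is a worked example of the quantity the new `InactivityPenalty` field tracks. The formula is the spec-style inactivity leak that `n / inactivityDenominator` computes in the surrounding code (treat the exact definitions of `n` and the denominator as assumptions inferred from context); the constants are the mainnet Bellatrix values:

```go
package main

import "fmt"

func main() {
	// Assumed formula: penalty = effective_balance * inactivity_score
	//                            / (INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT)
	const (
		effectiveBalance = uint64(32_000_000_000) // 32 ETH in Gwei
		inactivityScore  = uint64(4)
		scoreBias        = uint64(4)       // INACTIVITY_SCORE_BIAS
		penaltyQuotient  = uint64(1 << 24) // INACTIVITY_PENALTY_QUOTIENT_BELLATRIX
	)
	n := effectiveBalance * inactivityScore
	fmt.Println(n / (scoreBias * penaltyQuotient)) // 1907 Gwei per epoch of leaking
}
```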
@@ -220,7 +220,7 @@ func TestAttestationsDelta(t *testing.T) {
 	penalties := make([]uint64, len(deltas))
 	for i, d := range deltas {
 		rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
-		penalties[i] = d.SourcePenalty + d.TargetPenalty
+		penalties[i] = d.SourcePenalty + d.TargetPenalty + d.InactivityPenalty
 	}

 	// Reward amount should increase as validator index increases due to setup.
@@ -258,7 +258,7 @@ func TestAttestationsDeltaBellatrix(t *testing.T) {
 	penalties := make([]uint64, len(deltas))
 	for i, d := range deltas {
 		rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
-		penalties[i] = d.SourcePenalty + d.TargetPenalty
+		penalties[i] = d.SourcePenalty + d.TargetPenalty + d.InactivityPenalty
 	}

 	// Reward amount should increase as validator index increases due to setup.
@@ -306,7 +306,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
 	penalties := make([]uint64, len(deltas))
 	for i, d := range deltas {
 		rewards[i] = d.HeadReward + d.SourceReward + d.TargetReward
-		penalties[i] = d.SourcePenalty + d.TargetPenalty
+		penalties[i] = d.SourcePenalty + d.TargetPenalty + d.InactivityPenalty
 	}
 	for i := range rewards {
 		wanted[i] += rewards[i]
@@ -57,6 +57,10 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
 	if err != nil {
 		return nil, err
 	}
+	historicalRoots, err := state.HistoricalRoots()
+	if err != nil {
+		return nil, err
+	}

 	s := &ethpb.BeaconStateDeneb{
 		GenesisTime: state.GenesisTime(),
@@ -70,7 +74,7 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
 		LatestBlockHeader: state.LatestBlockHeader(),
 		BlockRoots:        state.BlockRoots(),
 		StateRoots:        state.StateRoots(),
-		HistoricalRoots:   [][]byte{},
+		HistoricalRoots:   historicalRoots,
 		Eth1Data:          state.Eth1Data(),
 		Eth1DataVotes:     state.Eth1DataVotes(),
 		Eth1DepositIndex:  state.Eth1DepositIndex(),
@@ -14,6 +14,7 @@ import (

 func TestUpgradeToDeneb(t *testing.T) {
 	st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
+	require.NoError(t, st.SetHistoricalRoots([][]byte{{1}}))
 	preForkState := st.Copy()
 	mSt, err := deneb.UpgradeToDeneb(st)
 	require.NoError(t, err)
@@ -46,6 +47,12 @@ func TestUpgradeToDeneb(t *testing.T) {
 	require.NoError(t, err)
 	require.DeepSSZEqual(t, make([]uint64, numValidators), s)

+	hr1, err := preForkState.HistoricalRoots()
+	require.NoError(t, err)
+	hr2, err := mSt.HistoricalRoots()
+	require.NoError(t, err)
+	require.DeepEqual(t, hr1, hr2)
+
 	f := mSt.Fork()
 	require.DeepSSZEqual(t, &ethpb.Fork{
 		PreviousVersion: st.Fork().CurrentVersion,
@@ -27,6 +27,10 @@ const (
 	NewHead
 	// MissedSlot is sent when we need to notify users that a slot was missed.
 	MissedSlot
+	// LightClientFinalityUpdate event
+	LightClientFinalityUpdate
+	// LightClientOptimisticUpdate event
+	LightClientOptimisticUpdate
 )

 // BlockProcessedData is the data sent with BlockProcessed events.
@@ -202,3 +202,14 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
 		Root:  bRoot,
 	}, nil
 }
+
+// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
+// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
+// value defined:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
+// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
+// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
+func MinEpochsForBlockRequests() primitives.Epoch {
+	return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
+		primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
+}
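Plugging in the mainnet constants makes the value in the comment easy to verify:

```go
package main

import "fmt"

func main() {
	// Mainnet: MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs,
	// CHURN_LIMIT_QUOTIENT = 65536.
	const minWithdrawabilityDelay, churnLimitQuotient = 256, 65536
	fmt.Println(minWithdrawabilityDelay + churnLimitQuotient/2) // 33024 (~5 months)
}
```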
@@ -281,3 +281,19 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {

 	return beaconState
 }
+
+func TestMinEpochsForBlockRequests(t *testing.T) {
+	params.SetActiveTestCleanup(t, params.MainnetConfig())
+	var expected primitives.Epoch = 33024
+	// expected value of 33024 via spec commentary:
+	// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
+	// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
+	//
+	// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
+	//     MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+	//     + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
+	// )
+	//
+	// Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months).
+	require.Equal(t, expected, helpers.MinEpochsForBlockRequests())
+}
@@ -22,9 +22,6 @@ var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot

-// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
-var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
-
 // ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
 var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot

 // IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
 // as equivalent to db.ErrNotFound.
 func IsNotFound(err error) bool {
@@ -156,7 +156,7 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 	}
 	partialMoved = true
 	blobsWrittenCounter.Inc()
-	blobSaveLatency.Observe(time.Since(startTime).Seconds())
+	blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
 	return nil
 }

@@ -180,11 +180,17 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
 		return blocks.VerifiedROBlob{}, err
 	}
 	defer func() {
-		blobFetchLatency.Observe(time.Since(startTime).Seconds())
+		blobFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
 	}()
 	return verification.BlobSidecarNoop(ro)
 }

+// Remove removes all blobs for a given root.
+func (bs *BlobStorage) Remove(root [32]byte) error {
+	rootDir := blobNamer{root: root}.dir()
+	return bs.fs.RemoveAll(rootDir)
+}
+
 // Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
 // This value can be compared to the commitments observed in a block to determine which indices need to be found
 // on the network to confirm data availability.
@@ -73,6 +73,21 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
 		require.NoError(t, err)
 		require.DeepSSZEqual(t, expected, actual)
 	})
+
+	t.Run("round trip write, read and delete", func(t *testing.T) {
+		bs := NewEphemeralBlobStorage(t)
+		err := bs.Save(testSidecars[0])
+		require.NoError(t, err)
+
+		expected := testSidecars[0]
+		actual, err := bs.Get(expected.BlockRoot(), expected.Index)
+		require.NoError(t, err)
+		require.DeepSSZEqual(t, expected, actual)
+
+		require.NoError(t, bs.Remove(expected.BlockRoot()))
+		_, err = bs.Get(expected.BlockRoot(), expected.Index)
+		require.ErrorContains(t, "file does not exist", err)
+	})
 }

 // pollUntil polls a condition function until it returns true or a timeout is reached.
@@ -6,15 +6,15 @@ import (
 )

 var (
-	blobBuckets     = []float64{0.00003, 0.00005, 0.00007, 0.00009, 0.00011, 0.00013, 0.00015}
+	blobBuckets     = []float64{3, 5, 7, 9, 11, 13}
 	blobSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
 		Name:    "blob_storage_save_latency",
-		Help:    "Latency of BlobSidecar storage save operations in seconds",
+		Help:    "Latency of BlobSidecar storage save operations in milliseconds",
 		Buckets: blobBuckets,
 	})
 	blobFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
 		Name:    "blob_storage_get_latency",
-		Help:    "Latency of BlobSidecar storage get operations in seconds",
+		Help:    "Latency of BlobSidecar storage get operations in milliseconds",
 		Buckets: blobBuckets,
 	})
 	blobsPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
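The bucket change only makes sense together with the `Observe` changes earlier in this diff: the recorded value and the histogram bucket boundaries must share a unit, so second-scale buckets were swapped for millisecond-scale ones. A quick illustration of the two representations of the same duration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	d := 7 * time.Millisecond
	fmt.Println(d.Seconds())               // 0.007 (old unit, fell into 0.00003..0.00015 buckets poorly)
	fmt.Println(float64(d.Milliseconds())) // 7     (new unit, lands in the 3..13 buckets)
}
```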
@@ -13,9 +13,11 @@ go_library(
         "//beacon-chain/db/filters:go_default_library",
         "//beacon-chain/slasher/types:go_default_library",
         "//beacon-chain/state:go_default_library",
+        "//consensus-types/blocks:go_default_library",
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//monitoring/backup:go_default_library",
+        "//proto/dbval:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
     ],
@@ -11,9 +11,11 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
 	slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v4/monitoring/backup"
+	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 )
@@ -57,7 +59,7 @@ type ReadOnlyDatabase interface {

 	// origin checkpoint sync support
 	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
-	BackfillBlockRoot(ctx context.Context) ([32]byte, error)
+	BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
 }

 // NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -68,6 +70,7 @@ type NoHeadAccessDatabase interface {
 	DeleteBlock(ctx context.Context, root [32]byte) error
 	SaveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
 	SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
+	SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
 	SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
 	// State related methods.
 	SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
@@ -106,9 +109,10 @@ type HeadAccessDatabase interface {
 	SaveGenesisData(ctx context.Context, state state.BeaconState) error
 	EnsureEmbeddedGenesis(ctx context.Context) error

-	// initialization method needed for origin checkpoint sync
+	// Support for checkpoint sync and backfill.
 	SaveOrigin(ctx context.Context, serState, serBlock []byte) error
-	SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
+	SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
+	BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
 }

 // SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
@@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "archived_point.go",
+        "backfill.go",
         "backup.go",
         "blocks.go",
         "checkpoint.go",
@@ -48,6 +49,7 @@ go_library(
        "//io/file:go_default_library",
         "//monitoring/progress:go_default_library",
         "//monitoring/tracing:go_default_library",
+        "//proto/dbval:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//time:go_default_library",
@@ -73,6 +75,7 @@ go_test(
     name = "go_default_test",
     srcs = [
         "archived_point_test.go",
+        "backfill_test.go",
         "backup_test.go",
         "blocks_test.go",
         "checkpoint_test.go",
@@ -107,6 +110,7 @@ go_test(
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
+        "//proto/dbval:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/testing:go_default_library",
beacon-chain/db/kv/backfill.go (new file, 44 lines)
@@ -0,0 +1,44 @@
+package kv
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
+	bolt "go.etcd.io/bbolt"
+	"go.opencensus.io/trace"
+	"google.golang.org/protobuf/proto"
+)
+
+// SaveBackfillStatus encodes the given BackfillStatus protobuf struct and writes it to a single key in the db.
+// This value is used by the backfill service to keep track of the range of blocks that need to be synced. It is also used by the
+// code that serves blocks or regenerates states to keep track of what range of blocks are available.
+func (s *Store) SaveBackfillStatus(ctx context.Context, bf *dbval.BackfillStatus) error {
+	_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
+	defer span.End()
+	bfb, err := proto.Marshal(bf)
+	if err != nil {
+		return err
+	}
+	return s.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(blocksBucket)
+		return bucket.Put(backfillStatusKey, bfb)
+	})
+}
+
+// BackfillStatus retrieves the most recently saved version of the BackfillStatus protobuf struct.
+// This is used to persist information about backfill status across restarts.
+func (s *Store) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
+	_, span := trace.StartSpan(ctx, "BeaconDB.BackfillStatus")
+	defer span.End()
+	bf := &dbval.BackfillStatus{}
+	err := s.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(blocksBucket)
+		bs := bucket.Get(backfillStatusKey)
+		if len(bs) == 0 {
+			return errors.Wrap(ErrNotFound, "BackfillStatus not found")
+		}
+		return proto.Unmarshal(bs, bf)
+	})
+	return bf, err
+}
beacon-chain/db/kv/backfill_test.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package kv
+
+import (
+	"context"
+	"testing"
+
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
+	"google.golang.org/protobuf/proto"
+)
+
+func TestBackfillRoundtrip(t *testing.T) {
+	db := setupDB(t)
+	b := &dbval.BackfillStatus{}
+	b.LowSlot = 23
+	b.LowRoot = bytesutil.PadTo([]byte("low"), 32)
+	b.LowParentRoot = bytesutil.PadTo([]byte("parent"), 32)
+	m, err := proto.Marshal(b)
+	require.NoError(t, err)
+	ub := &dbval.BackfillStatus{}
+	require.NoError(t, proto.Unmarshal(m, ub))
+	require.Equal(t, b.LowSlot, ub.LowSlot)
+	require.DeepEqual(t, b.LowRoot, ub.LowRoot)
+	require.DeepEqual(t, b.LowParentRoot, ub.LowParentRoot)
+
+	ctx := context.Background()
+	require.NoError(t, db.SaveBackfillStatus(ctx, b))
+	dbub, err := db.BackfillStatus(ctx)
+	require.NoError(t, err)
+
+	require.Equal(t, b.LowSlot, dbub.LowSlot)
+	require.DeepEqual(t, b.LowRoot, dbub.LowRoot)
+	require.DeepEqual(t, b.LowParentRoot, dbub.LowParentRoot)
+}
@@ -70,25 +70,6 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
 	return root, err
 }

-// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
-func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
-	_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
-	defer span.End()
-
-	var root [32]byte
-	err := s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(blocksBucket)
-		rootSlice := bkt.Get(backfillBlockRootKey)
-		if len(rootSlice) == 0 {
-			return ErrNotFoundBackfillBlockRoot
-		}
-		root = bytesutil.ToBytes32(rootSlice)
-		return nil
-	})
-
-	return root, err
-}
-
 // HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
 func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -292,55 +273,95 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlocks")
 	defer span.End()

-	// Performing marshaling, hashing, and indexing outside the bolt transaction
-	// to minimize the time we hold the DB lock.
-	blockRoots := make([][]byte, len(blks))
-	encodedBlocks := make([][]byte, len(blks))
-	indicesForBlocks := make([]map[string][]byte, len(blks))
-	for i, blk := range blks {
-		blockRoot, err := blk.Block().HashTreeRoot()
+	robs := make([]blocks.ROBlock, len(blks))
+	for i := range blks {
+		rb, err := blocks.NewROBlock(blks[i])
 		if err != nil {
-			return err
+			return errors.Wrapf(err, "failed to make an ROBlock for a block in SaveBlocks")
 		}
-		enc, err := s.marshalBlock(ctx, blk)
-		if err != nil {
-			return err
-		}
-		blockRoots[i] = blockRoot[:]
-		encodedBlocks[i] = enc
-		indicesByBucket := createBlockIndicesFromBlock(ctx, blk.Block())
-		indicesForBlocks[i] = indicesByBucket
+		robs[i] = rb
 	}
-	saveBlinded, err := s.shouldSaveBlinded(ctx)
+	return s.SaveROBlocks(ctx, robs, true)
+}
+
+type blockBatchEntry struct {
+	root    []byte
+	block   interfaces.ReadOnlySignedBeaconBlock
+	enc     []byte
+	updated bool
+	indices map[string][]byte
+}
+
+func prepareBlockBatch(blks []blocks.ROBlock, shouldBlind bool) ([]blockBatchEntry, error) {
+	batch := make([]blockBatchEntry, len(blks))
+	for i := range blks {
+		batch[i].root, batch[i].block = blks[i].RootSlice(), blks[i].ReadOnlySignedBeaconBlock
+		batch[i].indices = blockIndices(batch[i].block.Block().Slot(), batch[i].block.Block().ParentRoot())
+		if shouldBlind {
+			blinded, err := batch[i].block.ToBlinded()
+			if err != nil {
+				if !errors.Is(err, blocks.ErrUnsupportedVersion) {
+					return nil, errors.Wrapf(err, "could not convert block to blinded format for root %#x", batch[i].root)
+				}
+				// Pre-deneb blocks give ErrUnsupportedVersion; use the full block already in the batch entry.
+			} else {
+				batch[i].block = blinded
+			}
+		}
+		enc, err := encodeBlock(batch[i].block)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to encode block for root %#x", batch[i].root)
+		}
+		batch[i].enc = enc
+	}
+	return batch, nil
+}
+
+func (s *Store) SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error {
+	shouldBlind, err := s.shouldSaveBlinded(ctx)
 	if err != nil {
 		return err
 	}
-	return s.db.Update(func(tx *bolt.Tx) error {
+	// Precompute expensive values outside the db transaction.
+	batch, err := prepareBlockBatch(blks, shouldBlind)
+	if err != nil {
+		return errors.Wrap(err, "failed to encode all blocks in batch for saving to the db")
+	}
+	err = s.db.Update(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(blocksBucket)
-		for i, blk := range blks {
-			if existingBlock := bkt.Get(blockRoots[i]); existingBlock != nil {
+		for i := range batch {
+			if exists := bkt.Get(batch[i].root); exists != nil {
 				continue
 			}
-			if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
-				return errors.Wrap(err, "could not update DB indices")
+			if err := bkt.Put(batch[i].root, batch[i].enc); err != nil {
+				return errors.Wrapf(err, "could write block to db with root %#x", batch[i].root)
 			}
-			if saveBlinded {
-				blindedBlock, err := blk.ToBlinded()
-				if err != nil {
-					if !errors.Is(err, blocks.ErrUnsupportedVersion) {
-						return err
-					}
-				} else {
-					blk = blindedBlock
-				}
-			}
-			s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
-			if err := bkt.Put(blockRoots[i], encodedBlocks[i]); err != nil {
-				return err
+			if err := updateValueForIndices(ctx, batch[i].indices, batch[i].root, tx); err != nil {
+				return errors.Wrapf(err, "could not update DB indices for root %#x", batch[i].root)
 			}
+			batch[i].updated = true
 		}
 		return nil
 	})
+	if !cache {
+		return err
+	}
+	for i := range batch {
+		if batch[i].updated {
+			s.blockCache.Set(string(batch[i].root), batch[i].block, int64(len(batch[i].enc)))
+		}
+	}
+	return err
 }

+// blockIndices takes in a beacon block and returns
+// a map of bolt DB index buckets corresponding to each particular key for indices for
+// data, such as (shard indices bucket -> shard 5).
+func blockIndices(slot primitives.Slot, parentRoot [32]byte) map[string][]byte {
+	return map[string][]byte{
+		string(blockSlotIndicesBucket):       bytesutil.SlotToBytesBigEndian(slot),
+		string(blockParentRootIndicesBucket): parentRoot[:],
+	}
+}
+
 // SaveHeadBlockRoot to the db.
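The rewrite follows a common bolt pattern: do the expensive encoding before the write transaction, keep the transaction body minimal, and update the in-memory cache only for entries the transaction actually wrote. A generic, self-contained sketch of that shape (illustrative only, not Prysm's API):

```go
package main

import "fmt"

type entry struct {
	key, enc []byte // encoding happens before the transaction
	updated  bool   // set only when the write succeeds
}

// save writes all entries inside one short transaction, represented here by
// the put callback standing in for a bolt bucket Put.
func save(entries []entry, put func(k, v []byte) error) error {
	for i := range entries {
		if err := put(entries[i].key, entries[i].enc); err != nil {
			return err
		}
		entries[i].updated = true
	}
	return nil
}

func main() {
	store := map[string][]byte{}
	e := []entry{{key: []byte("root"), enc: []byte("block")}}
	if err := save(e, func(k, v []byte) error { store[string(k)] = v; return nil }); err == nil {
		for i := range e {
			if e[i].updated {
				fmt.Println("cache set after commit:", string(e[i].key))
			}
		}
	}
}
```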
@@ -417,17 +438,6 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
 	})
 }

-// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
-// the node was initialized via checkpoint sync.
-func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
-	_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
-	defer span.End()
-	return s.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket(blocksBucket)
-		return bucket.Put(backfillBlockRootKey, blockRoot[:])
-	})
-}
-
 // HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
 // The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
 // calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance
@@ -726,31 +736,6 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([
 	return [][32]byte{}, nil
 }

-// createBlockIndicesFromBlock takes in a beacon block and returns
-// a map of bolt DB index buckets corresponding to each particular key for indices for
-// data, such as (shard indices bucket -> shard 5).
-func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyBeaconBlock) map[string][]byte {
-	_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
-	defer span.End()
-	indicesByBucket := make(map[string][]byte)
-	// Every index has a unique bucket for fast, binary-search
-	// range scans for filtering across keys.
-	buckets := [][]byte{
-		blockSlotIndicesBucket,
-	}
-	indices := [][]byte{
-		bytesutil.SlotToBytesBigEndian(block.Slot()),
-	}
-	buckets = append(buckets, blockParentRootIndicesBucket)
-	parentRoot := block.ParentRoot()
-	indices = append(indices, parentRoot[:])
-
-	for i := 0; i < len(buckets); i++ {
-		indicesByBucket[string(buckets[i])] = indices[i]
-	}
-	return indicesByBucket
-}
-
 // createBlockFiltersFromIndices takes in filter criteria and returns
 // a map with a single key-value pair: "block-parent-root-indices" -> parentRoot (array of bytes).
 //
@@ -838,74 +823,44 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
 	return blocks.NewSignedBeaconBlock(rawBlock)
 }

-func (s *Store) marshalBlock(
-	ctx context.Context,
-	blk interfaces.ReadOnlySignedBeaconBlock,
-) ([]byte, error) {
-	shouldBlind, err := s.shouldSaveBlinded(ctx)
+func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
+	key, err := keyForBlock(blk)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "could not determine version encoding key for block")
 	}
-	if shouldBlind {
-		return marshalBlockBlinded(ctx, blk)
+	enc, err := blk.MarshalSSZ()
+	if err != nil {
+		return nil, errors.Wrap(err, "could not marshal block")
 	}
-	return marshalBlockFull(ctx, blk)
+	dbfmt := make([]byte, len(key)+len(enc))
+	if len(key) > 0 {
+		copy(dbfmt, key)
+	}
+	copy(dbfmt[len(key):], enc)
+	return snappy.Encode(nil, dbfmt), nil
 }

-// Encodes a full beacon block to the DB with its associated key.
-func marshalBlockFull(
-	_ context.Context,
-	blk interfaces.ReadOnlySignedBeaconBlock,
-) ([]byte, error) {
-	var encodedBlock []byte
-	var err error
-	encodedBlock, err = blk.MarshalSSZ()
-	if err != nil {
-		return nil, err
-	}
+func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
 	switch blk.Version() {
 	case version.Deneb:
-		return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
-	case version.Capella:
-		return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
-	case version.Bellatrix:
-		return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
-	case version.Altair:
-		return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
-	case version.Phase0:
-		return snappy.Encode(nil, encodedBlock), nil
-	default:
-		return nil, errors.New("unknown block version")
-	}
-}
-
-// Encodes a blinded beacon block with its associated key.
-// If the block does not support blinding, we then encode it as a full
-// block with its associated key by calling marshalBlockFull.
-func marshalBlockBlinded(
-	ctx context.Context,
-	blk interfaces.ReadOnlySignedBeaconBlock,
-) ([]byte, error) {
-	blindedBlock, err := blk.ToBlinded()
-	if err != nil {
-		switch {
-		case errors.Is(err, blocks.ErrUnsupportedVersion):
-			return marshalBlockFull(ctx, blk)
-		default:
-			return nil, errors.Wrap(err, "could not convert block to blinded format")
+		if blk.IsBlinded() {
+			return denebBlindKey, nil
 		}
-	}
-	encodedBlock, err := blindedBlock.MarshalSSZ()
-	if err != nil {
-		return nil, errors.Wrap(err, "could not marshal blinded block")
-	}
-	switch blk.Version() {
-	case version.Deneb:
-		return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
+		return denebKey, nil
 	case version.Capella:
-		return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
+		if blk.IsBlinded() {
+			return capellaBlindKey, nil
+		}
+		return capellaKey, nil
 	case version.Bellatrix:
-		return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
+		if blk.IsBlinded() {
+			return bellatrixBlindKey, nil
+		}
+		return bellatrixKey, nil
 	case version.Altair:
+		return altairKey, nil
 	case version.Phase0:
+		return nil, nil
 	default:
+		return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
 	}
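The resulting storage format is a per-version key prepended to the SSZ bytes (no key for phase0), with the whole payload snappy-compressed so readers can sniff the version on decode. A minimal roundtrip illustration with a stand-in key value; the real key byte strings live in this package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

var denebKey = []byte("deneb") // stand-in; not the actual key bytes

// encode mirrors the dbfmt construction above: key prefix, then SSZ bytes,
// then snappy compression over the concatenation.
func encode(key, ssz []byte) []byte {
	dbfmt := make([]byte, 0, len(key)+len(ssz))
	dbfmt = append(dbfmt, key...)
	dbfmt = append(dbfmt, ssz...)
	return snappy.Encode(nil, dbfmt)
}

func main() {
	enc := encode(denebKey, []byte{0x01, 0x02})
	dec, _ := snappy.Decode(nil, enc)
	fmt.Println(bytes.HasPrefix(dec, denebKey)) // true: version key recovered on read
}
```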
@@ -126,23 +126,6 @@ var blockTests = []struct {
 	},
 }

-func TestStore_SaveBackfillBlockRoot(t *testing.T) {
-	db := setupDB(t)
-	ctx := context.Background()
-
-	_, err := db.BackfillBlockRoot(ctx)
-	require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
-
-	var expected [32]byte
-	copy(expected[:], []byte{0x23})
-	err = db.SaveBackfillBlockRoot(ctx, expected)
-	require.NoError(t, err)
-	actual, err := db.BackfillBlockRoot(ctx)
-	require.NoError(t, err)
-	require.Equal(t, expected, actual)
-
-}
-
 func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
 	BlockCacheSize = 1
 	slot := primitives.Slot(20)
@@ -21,3 +21,8 @@ var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")

 // ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
 var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
+
+var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
+var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
+var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
+var errNotConnectedToFinalized = errors.New("unable to finalize backfill blocks, finalized parent_root does not match")
@@ -4,6 +4,7 @@ import (
"bytes"
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -163,6 +164,83 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
return bkt.Put(previousFinalizedCheckpointKey, enc)
}

// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
// given finalized child root. This is needed to update the finalized index during backfill, because the usual
// updateFinalizedBlockRoots has assumptions that are incompatible with backfill processing.
func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillFinalizedIndex")
defer span.End()
if len(blocks) == 0 {
return errEmptyBlockSlice
}

fbrs := make([]*ethpb.FinalizedBlockRootContainer, len(blocks))
encs := make([][]byte, len(blocks))
for i := range blocks {
pr := blocks[i].Block().ParentRoot()
fbrs[i] = &ethpb.FinalizedBlockRootContainer{
ParentRoot: pr[:],
// ChildRoot: will be filled in on the next iteration when we look at the descendent block.
}
if i == 0 {
continue
}
if blocks[i-1].Root() != blocks[i].Block().ParentRoot() {
return errors.Wrapf(errIncorrectBlockParent, "previous root=%#x, slot=%d; child parent_root=%#x, root=%#x, slot=%d",
blocks[i-1].Root(), blocks[i-1].Block().Slot(), blocks[i].Block().ParentRoot(), blocks[i].Root(), blocks[i].Block().Slot())
}

// We know the previous index is the parent of this one thanks to the assertion above,
// so we can set the ChildRoot of the previous value to the root of the current value.
fbrs[i-1].ChildRoot = blocks[i].RootSlice()
// Now that the value for fbrs[i-1] is complete, perform encoding here to minimize time in Update,
// which holds the global db lock.
penc, err := encode(ctx, fbrs[i-1])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i-1] = penc

// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}

return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
child := bkt.Get(finalizedChildRoot[:])
if len(child) == 0 {
return errFinalizedChildNotFound
}
fcc := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, child, fcc); err != nil {
return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", finalizedChildRoot)
}
// Ensure that the existing finalized chain descends from the new segment.
if !bytes.Equal(fcc.ParentRoot, blocks[len(blocks)-1].RootSlice()) {
return errors.Wrapf(errNotConnectedToFinalized, "finalized block root container for root=%#x has parent_root=%#x, not %#x",
finalizedChildRoot, fcc.ParentRoot, blocks[len(blocks)-1].RootSlice())
}
// Update the finalized index with entries for each block in the new segment.
for i := range fbrs {
if err := bkt.Put(blocks[i].RootSlice(), encs[i]); err != nil {
return err
}
}
return nil
})
}

// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
// A beacon block root exists in this index if it is considered finalized and canonical.
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are

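The contiguity requirement described in the doc comment is the heart of BackfillFinalizedIndex. A minimal sketch of that invariant (simplified stand-in types, not the Prysm API): block i's parent root must equal block i-1's root, so the batch forms one unbroken chain ending at the parent of finalizedChildRoot.

package main

import "fmt"

// blockInfo is a simplified stand-in for blocks.ROBlock, carrying only the
// fields the contiguity check needs.
type blockInfo struct {
	root       [32]byte
	parentRoot [32]byte
}

// isContiguous reports whether each block's parent root matches the root of
// the block immediately before it.
func isContiguous(batch []blockInfo) bool {
	for i := 1; i < len(batch); i++ {
		if batch[i].parentRoot != batch[i-1].root {
			return false
		}
	}
	return true
}

func main() {
	a := blockInfo{root: [32]byte{1}}
	b := blockInfo{root: [32]byte{2}, parentRoot: [32]byte{1}}
	c := blockInfo{root: [32]byte{3}, parentRoot: [32]byte{9}} // broken link
	fmt.Println(isContiguous([]blockInfo{a, b}))    // true
	fmt.Println(isContiguous([]blockInfo{a, b, c})) // false
}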
@@ -1,6 +1,7 @@
package kv

import (
"bytes"
"context"
"testing"

@@ -14,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
bolt "go.etcd.io/bbolt"
)

var genesisBlockRoot = bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'})
@@ -234,3 +236,64 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
}
return ifaceBlocks
}

func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{}, [32]byte{}), errEmptyBlockSlice)
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 66, [32]byte{}))
require.NoError(t, err)

// set up existing finalized block
ebpr := blks[64].Block().ParentRoot()
ebr := blks[64].Root()
chldr := blks[65].Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})

// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendent root that really doesn't exist
require.NoError(t, err)
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))

// use the real root so that it succeeds
require.NoError(t, db.BackfillFinalizedIndex(ctx, blks, ebr))
for i := range blks {
require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
encfr := bkt.Get(blks[i].RootSlice())
require.Equal(t, true, len(encfr) > 0)
fr := &ethpb.FinalizedBlockRootContainer{}
require.NoError(t, decode(ctx, encfr, fr))
require.Equal(t, 32, len(fr.ParentRoot))
require.Equal(t, 32, len(fr.ChildRoot))
pr := blks[i].Block().ParentRoot()
require.Equal(t, true, bytes.Equal(fr.ParentRoot, pr[:]))
if i > 0 {
require.Equal(t, true, bytes.Equal(fr.ParentRoot, blks[i-1].RootSlice()))
}
if i < len(blks)-1 {
require.DeepEqual(t, fr.ChildRoot, blks[i+1].RootSlice())
}
if i == len(blks)-1 {
require.DeepEqual(t, fr.ChildRoot, ebr[:])
}
return nil
}))
}
}

@@ -1,6 +1,7 @@
package kv

import (
"bytes"
"context"
"fmt"
"testing"
@@ -116,13 +117,25 @@ func Test_setupBlockStorageType(t *testing.T) {
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)

require.Equal(t, true, retrievedBlk.IsBlinded())
wrappedBlinded, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wrappedBlinded, retrievedBlk)

retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())

// Compare retrieved value by root, and marshaled bytes.
mSrc, err := wrappedBlinded.MarshalSSZ()
require.NoError(t, err)
mTgt, err := retrievedBlk.MarshalSSZ()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(mSrc, mTgt))

rSrc, err := wrappedBlinded.Block().HashTreeRoot()
require.NoError(t, err)
rTgt, err := retrievedBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, rSrc, rTgt)
})
t.Run("existing database with full blocks type should continue storing full blocks", func(t *testing.T) {
store := setupDB(t)
@@ -155,10 +168,21 @@ func Test_setupBlockStorageType(t *testing.T) {
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))

retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)

// Compare retrieved value by root, and marshaled bytes.
mSrc, err := wrappedBlock.MarshalSSZ()
require.NoError(t, err)
mTgt, err := retrievedBlk.MarshalSSZ()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(mSrc, mTgt))

rTgt, err := retrievedBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, root, rTgt)
})
t.Run("existing database with blinded blocks type should error if user enables full blocks feature flag", func(t *testing.T) {
store := setupDB(t)

@@ -61,8 +61,8 @@ var (

// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// tracking data about an ongoing backfill
backfillStatusKey = []byte("backfill-status")

// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -17,18 +18,6 @@ import (
// syncing, using the provided values as their point of origin. This is an alternative
// to syncing from genesis, and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
genesisRoot, err := s.GenesisBlockRoot(ctx)
if err != nil {
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}

cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
@@ -50,11 +39,24 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
blk := wblk.Block()

// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}

pr := blk.ParentRoot()
bf := &dbval.BackfillStatus{
LowSlot: uint64(wblk.Block().Slot()),
LowRoot: blockRoot[:],
LowParentRoot: pr[:],
OriginRoot: blockRoot[:],
OriginSlot: uint64(wblk.Block().Slot()),
}

if err = s.SaveBackfillStatus(ctx, bf); err != nil {
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
}

log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")

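A rough sketch (hypothetical helper and numbers; the real record is the proto-generated dbval.BackfillStatus shown above) of what this seeding accomplishes: the Origin fields pin the checkpoint block, while the Low fields track the lowest block imported so far and move toward genesis as backfill batches land.

package main

import "fmt"

// backfillStatus mirrors the fields of dbval.BackfillStatus for illustration.
type backfillStatus struct {
	LowSlot, OriginSlot    uint64
	LowRoot, LowParentRoot [32]byte
	OriginRoot             [32]byte
}

// advance is a hypothetical helper: after importing a verified batch whose
// lowest block is (slot, root, parentRoot), the window's low end moves down.
func advance(bs *backfillStatus, slot uint64, root, parentRoot [32]byte) {
	bs.LowSlot, bs.LowRoot, bs.LowParentRoot = slot, root, parentRoot
}

func main() {
	// Immediately after SaveOrigin the window is empty: Low == Origin.
	bs := &backfillStatus{LowSlot: 1000, OriginSlot: 1000}
	advance(bs, 936, [32]byte{0xaa}, [32]byte{0xbb})
	fmt.Printf("still to backfill: slots 0-%d\n", bs.LowSlot-1)
}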
@@ -2,4 +2,4 @@ package execution

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "powchain")
var log = logrus.WithField("prefix", "execution")

@@ -240,6 +240,20 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
return f.store.highestReceivedNode.slot
}

// HighestReceivedBlockDelay returns the number of slots that the highest
// received block was late when it was received.
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
n := f.store.highestReceivedNode
if n == nil {
return 0
}
secs, err := slots.SecondsSinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
if err != nil {
return 0
}
return primitives.Slot(secs / params.BeaconConfig().SecondsPerSlot)
}

// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
count := uint64(0)

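The division on the last line of HighestReceivedBlockDelay truncates deliberately, so only whole slots count as delay. A worked sketch with mainnet's 12-second slots:

package main

import "fmt"

// slotDelay mirrors the arithmetic above: seconds elapsed since the block's
// slot started, divided by the slot duration, truncated to whole slots.
func slotDelay(secondsSinceSlotStart, secondsPerSlot uint64) uint64 {
	return secondsSinceSlotStart / secondsPerSlot
}

func main() {
	fmt.Println(slotDelay(30, 12)) // 2: arrived two full slots late
	fmt.Println(slotDelay(5, 12))  // 0: received within its own slot
}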
@@ -333,26 +333,29 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())

// 64
// Received block last epoch is 1
_, err = s.insert(context.Background(), 64, [32]byte{'A'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-64*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
s.genesisTime = uint64(time.Now().Add(time.Duration((-64*int64(params.BeaconConfig().SecondsPerSlot))-1) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())

// 64 65
// Received block last epoch is 2
_, err = s.insert(context.Background(), 65, [32]byte{'B'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-65*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
require.Equal(t, uint64(2), count)
require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())

// 64 65 66
// Received block last epoch is 3

@@ -64,6 +64,7 @@ type FastGetter interface {
FinalizedPayloadBlockHash() [32]byte
HasNode([32]byte) bool
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockDelay() primitives.Slot
IsCanonical(root [32]byte) bool
IsOptimistic(root [32]byte) (bool, error)
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)

@@ -114,6 +114,13 @@ func (ro *ROForkChoice) HighestReceivedBlockSlot() primitives.Slot {
return ro.getter.HighestReceivedBlockSlot()
}

// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.HighestReceivedBlockDelay()
}

// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.l.RLock()

@@ -29,6 +29,7 @@ const (
unrealizedJustifiedPayloadBlockHashCalled
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockDelayCalled
receivedBlocksLastEpochCalled
weightCalled
isOptimisticCalled
@@ -113,6 +114,11 @@ func TestROLocking(t *testing.T) {
call: highestReceivedBlockSlotCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
},
{
name: "highestReceivedBlockDelayCalled",
call: highestReceivedBlockDelayCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockDelay() },
},
{
name: "receivedBlocksLastEpochCalled",
call: receivedBlocksLastEpochCalled,
@@ -245,6 +251,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockSlot() primitives.Slot {
return 0
}

func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
return 0
}

func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
return 0, nil

@@ -6,6 +6,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/gateway",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//api:go_default_library",
"//api/gateway:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

@@ -2,6 +2,7 @@ package gateway

import (
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/gateway"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -40,7 +41,7 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
},
}),
gwruntime.WithMarshalerOption(
"text/event-stream", &gwruntime.EventSourceJSONPb{},
api.EventStreamMediaType, &gwruntime.EventSourceJSONPb{},
),
)
v1AlphaPbHandler = &gateway.PbMux{

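A minimal sketch of the grpc-gateway wiring this hunk touches: WithMarshalerOption binds a marshaler to a media type, so clients sending Accept: text/event-stream get SSE-framed JSON. Note that EventSourceJSONPb comes from the gateway fork Prysm depends on rather than upstream grpc-gateway, and the standalone mux below is otherwise hypothetical.

package main

import (
	"net/http"

	gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Requests whose media type matches "text/event-stream" are marshaled as
	// server-sent events; all other types fall back to the default marshaler.
	mux := gwruntime.NewServeMux(
		gwruntime.WithMarshalerOption(
			"text/event-stream", &gwruntime.EventSourceJSONPb{},
		),
	)
	_ = http.ListenAndServe(":8081", mux)
}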
@@ -40,6 +40,7 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
@@ -47,6 +48,7 @@ go_library(
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",

@@ -42,6 +42,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
@@ -49,6 +50,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
regularsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/checkpoint"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/genesis"
initialsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync"
@@ -113,6 +115,7 @@ type BeaconNode struct {
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
blobRetentionEpochs primitives.Epoch
@@ -215,10 +218,23 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, err
}

bfs := backfill.NewStatus(beacon.db)
if err := bfs.Reload(ctx); err != nil {
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}

bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
bf, err := backfill.NewService(ctx, bfs, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
@@ -233,11 +249,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)

log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}

log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
@@ -534,8 +545,8 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}

func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.Option{stategen.WithBackfillStatus(bfs)}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBlocker, fc forkchoice.ForkChoicer) error {
opts := []stategen.Option{stategen.WithAvailableBlocker(bfs)}
sg := stategen.New(b.db, fc, opts...)

cp, err := b.db.FinalizedCheckpoint(ctx)

@@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() {
switch slotInterval.Interval {
case 0:
duration := time.Since(t)
log.WithField("Duration", duration).Debug("aggregated unaggregated attestations")
log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations")
batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
case 1:
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))

@@ -83,7 +83,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
decodedData, err := encoder.DecodeSnappy(pmsg.Data, gossipPubSubSize)
if err != nil {
totalLength, err := math.AddInt(
len(params.BeaconConfig().MessageDomainValidSnappy),
len(params.BeaconConfig().MessageDomainInvalidSnappy),
len(topicLenBytes),
topicLen,
len(pmsg.Data),

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"assigner.go",
"log.go",
"status.go",
],
@@ -12,8 +13,10 @@ go_library(
"//cmd:__subpackages__",
],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -28,6 +31,7 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -36,6 +40,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"assigner_test.go",
"benchmark_test.go",
"peers_test.go",
"status_test.go",

78
beacon-chain/p2p/peers/assigner.go
Normal file
@@ -0,0 +1,78 @@
package peers

import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/sirupsen/logrus"
)

// FinalizedCheckpointer describes the minimum capability that Assigner needs from forkchoice.
// That is, the ability to retrieve the latest finalized checkpoint to help with peer evaluation.
type FinalizedCheckpointer interface {
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
}

// NewAssigner assists in the correct construction of an Assigner by code in other packages,
// assuring all the important private member fields are given values.
// The FinalizedCheckpointer is used to retrieve the latest finalized checkpoint each time peers are requested.
// Peers that report an older finalized checkpoint are filtered out.
func NewAssigner(s *Status, fc FinalizedCheckpointer) *Assigner {
return &Assigner{
ps: s,
fc: fc,
}
}

// Assigner uses the "BestFinalized" peer scoring method to pick the next-best peer to receive rpc requests.
type Assigner struct {
ps *Status
fc FinalizedCheckpointer
}

// ErrInsufficientSuitable is a sentinel error, signaling that a peer couldn't be assigned because there are currently
// not enough peers that match our selection criteria to serve rpc requests. It is the responsibility of the caller to
// look for this error and continue to try calling Assign with appropriate backoff logic.
var ErrInsufficientSuitable = errors.New("no suitable peers")

func (a *Assigner) freshPeers() ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Warn("Unable to assign peer while suitable peers < required ")
return nil, ErrInsufficientSuitable
}
return peers, nil
}

// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
best, err := a.freshPeers()
if err != nil {
return nil, err
}
return pickBest(busy, n, best), nil
}

func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
ps := make([]peer.ID, 0, n)
for _, p := range best {
if len(ps) == n {
return ps
}
if !busy[p] {
ps = append(ps, p)
}
}
return ps
}
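A usage sketch of the Assign contract described above (hypothetical wiring; peer IDs reduced to strings and the backoff interval chosen arbitrarily): per the ErrInsufficientSuitable doc comment, the caller treats "no suitable peers" as retryable rather than fatal.

package main

import (
	"errors"
	"fmt"
	"time"
)

// errInsufficientSuitable stands in for peers.ErrInsufficientSuitable.
var errInsufficientSuitable = errors.New("no suitable peers")

// assigner is the narrow slice of *peers.Assigner this sketch needs.
type assigner interface {
	Assign(busy map[string]bool, n int) ([]string, error)
}

// nextPeers retries Assign with a fixed backoff while no suitable peers exist.
func nextPeers(a assigner, busy map[string]bool, n int) ([]string, error) {
	for attempt := 0; attempt < 5; attempt++ {
		pids, err := a.Assign(busy, n)
		if errors.Is(err, errInsufficientSuitable) {
			time.Sleep(100 * time.Millisecond) // arbitrary backoff for the sketch
			continue
		}
		return pids, err
	}
	return nil, errInsufficientSuitable
}

// flaky fails once with errInsufficientSuitable, then succeeds.
type flaky struct{ calls int }

func (f *flaky) Assign(_ map[string]bool, n int) ([]string, error) {
	f.calls++
	if f.calls == 1 {
		return nil, errInsufficientSuitable
	}
	return []string{"peerA"}, nil
}

func main() {
	pids, err := nextPeers(&flaky{}, nil, 1)
	fmt.Println(pids, err) // [peerA] <nil>
}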
114
beacon-chain/p2p/peers/assigner_test.go
Normal file
@@ -0,0 +1,114 @@
package peers

import (
"fmt"
"testing"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestPickBest(t *testing.T) {
best := testPeerIds(10)
cases := []struct {
name string
busy map[peer.ID]bool
n int
best []peer.ID
expected []peer.ID
}{
{
name: "",
n: 0,
},
{
name: "none busy",
n: 1,
expected: best[0:1],
},
{
name: "all busy except last",
n: 1,
busy: testBusyMap(best[0 : len(best)-1]),
expected: best[len(best)-1:],
},
{
name: "all busy except i=5",
n: 1,
busy: testBusyMap(append(append([]peer.ID{}, best[0:5]...), best[6:]...)),
expected: []peer.ID{best[5]},
},
{
name: "all busy - 0 results",
n: 1,
busy: testBusyMap(best),
},
{
name: "first half busy",
n: 5,
busy: testBusyMap(best[0:5]),
expected: best[5:],
},
{
name: "back half busy",
n: 5,
busy: testBusyMap(best[5:]),
expected: best[0:5],
},
{
name: "pick all ",
n: 10,
expected: best,
},
{
name: "none available",
n: 10,
best: []peer.ID{},
},
{
name: "not enough",
n: 10,
best: best[0:1],
expected: best[0:1],
},
{
name: "not enough, some busy",
n: 10,
best: best[0:6],
busy: testBusyMap(best[0:5]),
expected: best[5:6],
},
}
for _, c := range cases {
name := fmt.Sprintf("n=%d", c.n)
if c.name != "" {
name += " " + c.name
}
t.Run(name, func(t *testing.T) {
if c.best == nil {
c.best = best
}
pb := pickBest(c.busy, c.n, c.best)
require.Equal(t, len(c.expected), len(pb))
for i := range c.expected {
require.Equal(t, c.expected[i], pb[i])
}
})
}
}

func testBusyMap(b []peer.ID) map[peer.ID]bool {
m := make(map[peer.ID]bool)
for i := range b {
m[b[i]] = true
}
return m
}

func testPeerIds(n int) []peer.ID {
pids := make([]peer.ID, n)
for i := range pids {
pids[i] = peer.ID(fmt.Sprintf("%d", i))
}
return pids
}
@@ -38,6 +38,7 @@ go_library(
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],

@@ -11,14 +11,15 @@ import (
)

type Service struct {
HeadFetcher blockchain.HeadFetcher
FinalizedFetcher blockchain.FinalizationFetcher
GenesisTimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
Broadcaster p2p.Broadcaster
SyncCommitteePool synccommittee.Pool
OperationNotifier opfeed.Notifier
AttestationCache *cache.AttestationCache
StateGen stategen.StateManager
P2P p2p.Broadcaster
HeadFetcher blockchain.HeadFetcher
FinalizedFetcher blockchain.FinalizationFetcher
GenesisTimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
Broadcaster p2p.Broadcaster
SyncCommitteePool synccommittee.Pool
OperationNotifier opfeed.Notifier
AttestationCache *cache.AttestationCache
StateGen stategen.StateManager
P2P p2p.Broadcaster
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}

@@ -29,9 +29,12 @@ import (
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
)

var errOptimisticMode = errors.New("the node is currently optimistic and cannot serve validators")

// AggregateBroadcastFailedError represents an error scenario where
// broadcasting an aggregate selection proof failed.
type AggregateBroadcastFailedError struct {
@@ -56,6 +59,9 @@ func (s *Service) ComputeValidatorPerformance(
ctx context.Context,
req *ethpb.ValidatorPerformanceRequest,
) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
ctx, span := trace.StartSpan(ctx, "coreService.ComputeValidatorPerformance")
defer span.End()

if s.SyncChecker.Syncing() {
return nil, &RpcError{Reason: Unavailable, Err: errors.New("Syncing to latest head, not ready to respond")}
}
@@ -211,6 +217,9 @@ func (s *Service) SubmitSignedContributionAndProof(
ctx context.Context,
req *ethpb.SignedContributionAndProof,
) *RpcError {
ctx, span := trace.StartSpan(ctx, "coreService.SubmitSignedContributionAndProof")
defer span.End()

errs, ctx := errgroup.WithContext(ctx)

// Broadcasting and saving contribution into the pool in parallel. As one fail should not affect another.
@@ -243,6 +252,9 @@ func (s *Service) SubmitSignedAggregateSelectionProof(
ctx context.Context,
req *ethpb.SignedAggregateSubmitRequest,
) *RpcError {
ctx, span := trace.StartSpan(ctx, "coreService.SubmitSignedAggregateSelectionProof")
defer span.End()

if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil ||
req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil {
return &RpcError{Err: errors.New("signed aggregate request can't be nil"), Reason: BadRequest}
@@ -315,6 +327,9 @@ func (s *Service) AggregatedSigAndAggregationBits(
func (s *Service) GetAttestationData(
ctx context.Context, req *ethpb.AttestationDataRequest,
) (*ethpb.AttestationData, *RpcError) {
ctx, span := trace.StartSpan(ctx, "coreService.GetAttestationData")
defer span.End()

if req.Slot != s.GenesisTimeFetcher.CurrentSlot() {
return nil, &RpcError{Reason: BadRequest, Err: errors.Errorf("invalid request: slot %d is not the current slot %d", req.Slot, s.GenesisTimeFetcher.CurrentSlot())}
}
@@ -368,6 +383,14 @@ func (s *Service) GetAttestationData(
},
}, nil
}
// cache miss, we need to check for optimistic status before proceeding
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: err}
}
if optimistic {
return nil, &RpcError{Reason: Unavailable, Err: errOptimisticMode}
}

headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
@@ -412,6 +435,9 @@ func (s *Service) GetAttestationData(
// SubmitSyncMessage submits the sync committee message to the network.
// It also saves the sync committee message into the pending pool for block inclusion.
func (s *Service) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) *RpcError {
ctx, span := trace.StartSpan(ctx, "coreService.SubmitSyncMessage")
defer span.End()

errs, ctx := errgroup.WithContext(ctx)

headSyncCommitteeIndices, err := s.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, msg.Slot)

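A minimal sketch (hypothetical tasks) of the errgroup pattern used in SubmitSignedContributionAndProof above: broadcast and pool-save are started concurrently, and Wait surfaces the first non-nil error from either task.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	errs, ctx := errgroup.WithContext(context.Background())

	errs.Go(func() error {
		_ = ctx // a real task would respect ctx cancellation
		fmt.Println("broadcasting contribution")
		return nil
	})
	errs.Go(func() error {
		fmt.Println("saving contribution to pool")
		return nil
	})

	// Wait blocks until both goroutines finish and returns the first error.
	if err := errs.Wait(); err != nil {
		fmt.Println("submit failed:", err)
	}
}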
@@ -62,7 +62,7 @@ func (s *Server) GetBlock(w http.ResponseWriter, r *http.Request) {
return
}

if httputil.SszRequested(r) {
if httputil.RespondWithSsz(r) {
s.getBlockSSZ(ctx, w, blk)
} else {
s.getBlock(ctx, w, blk)
@@ -105,7 +105,7 @@ func (s *Server) GetBlockV2(w http.ResponseWriter, r *http.Request) {
return
}

if httputil.SszRequested(r) {
if httputil.RespondWithSsz(r) {
s.getBlockSSZV2(ctx, w, blk)
} else {
s.getBlockV2(ctx, w, blk)
@@ -205,7 +205,7 @@ func (s *Server) GetBlindedBlock(w http.ResponseWriter, r *http.Request) {
return
}

if httputil.SszRequested(r) {
if httputil.RespondWithSsz(r) {
s.getBlindedBlockSSZ(ctx, w, blk)
} else {
s.getBlindedBlock(ctx, w, blk)
@@ -953,8 +953,7 @@ func (s *Server) PublishBlindedBlock(w http.ResponseWriter, r *http.Request) {
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
isSSZ := httputil.SszRequested(r)
if isSSZ {
if httputil.IsRequestSsz(r) {
s.publishBlindedBlockSSZ(ctx, w, r, false)
} else {
s.publishBlindedBlock(ctx, w, r, false)
@@ -978,8 +977,7 @@ func (s *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
isSSZ := httputil.SszRequested(r)
if isSSZ {
if httputil.IsRequestSsz(r) {
s.publishBlindedBlockSSZ(ctx, w, r, true)
} else {
s.publishBlindedBlock(ctx, w, r, true)
@@ -1250,8 +1248,7 @@ func (s *Server) PublishBlock(w http.ResponseWriter, r *http.Request) {
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
isSSZ := httputil.SszRequested(r)
if isSSZ {
if httputil.IsRequestSsz(r) {
s.publishBlockSSZ(ctx, w, r, false)
} else {
s.publishBlock(ctx, w, r, false)
@@ -1273,8 +1270,7 @@ func (s *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
isSSZ := httputil.SszRequested(r)
if isSSZ {
if httputil.IsRequestSsz(r) {
s.publishBlockSSZ(ctx, w, r, true)
} else {
s.publishBlock(ctx, w, r, true)

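These renames make the two sides of content negotiation explicit: the Accept header governs the response encoding, while Content-Type governs the request body. A minimal sketch of that split (local stand-in helpers, not the httputil API, and without Accept q-value parsing):

package main

import (
	"fmt"
	"net/http"
)

const octetStream = "application/octet-stream"

// respondWithSsz checks what the client wants back (GET-style endpoints).
func respondWithSsz(r *http.Request) bool {
	return r.Header.Get("Accept") == octetStream
}

// isRequestSsz checks what the client is sending (POST-style endpoints).
func isRequestSsz(r *http.Request) bool {
	return r.Header.Get("Content-Type") == octetStream
}

func publishBlock(w http.ResponseWriter, r *http.Request) {
	if isRequestSsz(r) {
		fmt.Fprintln(w, "decoding SSZ request body")
		return
	}
	fmt.Fprintln(w, "decoding JSON request body")
}

func main() {
	http.HandleFunc("/eth/v1/beacon/blocks", publishBlock)
	_ = http.ListenAndServe(":8080", nil)
}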
@@ -1169,7 +1169,7 @@ func TestPublishBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetPhase0().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1199,7 +1199,7 @@ func TestPublishBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetAltair().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Altair))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1224,7 +1224,7 @@ func TestPublishBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Bellatrix))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1250,7 +1250,7 @@ func TestPublishBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetCapella().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1276,7 +1276,7 @@ func TestPublishBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetDeneb().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Deneb))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1296,7 +1296,7 @@ func TestPublishBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Bellatrix))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1317,7 +1317,7 @@ func TestPublishBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1335,7 +1335,7 @@ func TestPublishBlockSSZ(t *testing.T) {
}

request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlock(writer, request)
@@ -1531,7 +1531,7 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetPhase0().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1561,7 +1561,7 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetAltair().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Altair))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1587,7 +1587,7 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Bellatrix))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1613,7 +1613,7 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedCapella().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1639,7 +1639,7 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedDeneb().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Deneb))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1672,7 +1672,7 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1690,7 +1690,7 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
}

request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlock(writer, request)
@@ -1899,7 +1899,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetPhase0().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1929,7 +1929,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetAltair().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Altair))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1954,7 +1954,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Bellatrix))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -1980,7 +1980,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetCapella().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2006,7 +2006,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetDeneb().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Deneb))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2026,7 +2026,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Bellatrix))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2047,7 +2047,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2061,7 +2061,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
}

request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.CapellaBlock)))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -2078,7 +2078,7 @@ func TestPublishBlockV2SSZ(t *testing.T) {
}

request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -2286,7 +2286,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetPhase0().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2316,7 +2316,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetAltair().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Altair))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2342,7 +2342,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Bellatrix))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2368,7 +2368,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedCapella().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2394,7 +2394,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedDeneb().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Deneb))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2427,7 +2427,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
ssz, err := genericBlock.GetBlindedBellatrix().MarshalSSZ()
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
request.Header.Set(api.VersionHeader, version.String(version.Capella))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -2441,7 +2441,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
}

request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.BlindedCapellaBlock)))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlockV2(writer, request)
@@ -2458,7 +2458,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
}

request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte("foo")))
request.Header.Set("Accept", api.OctetStreamMediaType)
request.Header.Set("Content-Type", api.OctetStreamMediaType)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlindedBlockV2(writer, request)

@@ -46,8 +46,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
for i := range verifiedBlobs {
|
||||
sidecars = append(sidecars, verifiedBlobs[i].BlobSidecar)
|
||||
}
|
||||
ssz := httputil.SszRequested(r)
|
||||
if ssz {
|
||||
if httputil.RespondWithSsz(r) {
|
||||
sidecarResp := ð.BlobSidecars{
|
||||
Sidecars: sidecars,
|
||||
}
|
||||
|
||||
@@ -12,5 +12,5 @@ type Sidecar struct {
|
||||
SignedBeaconBlockHeader *shared.SignedBeaconBlockHeader `json:"signed_block_header"`
|
||||
KzgCommitment string `json:"kzg_commitment"`
|
||||
KzgProof string `json:"kzg_proof"`
|
||||
CommitmentInclusionProof []string `json:"commitment_inclusion_proof"`
|
||||
CommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"`
|
||||
}
|
||||
|
||||
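Only the struct tag changes in the hunk above, but that alone changes the wire format: the Go field keeps its name while the serialized key becomes kzg_commitment_inclusion_proof, aligning with the beacon-API spelling. A minimal sketch of the effect, using a hypothetical trimmed-down stand-in for the real Sidecar type:

package main

import (
	"encoding/json"
	"fmt"
)

// sidecar is a hypothetical stand-in showing only the renamed field.
type sidecar struct {
	CommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"`
}

func main() {
	b, _ := json.Marshal(sidecar{CommitmentInclusionProof: []string{"0x01"}})
	fmt.Println(string(b)) // {"kzg_commitment_inclusion_proof":["0x01"]}
}
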
@@ -54,7 +54,7 @@ func (s *Server) GetBeaconStateV2(w http.ResponseWriter, r *http.Request) {
return
}

if httputil.SszRequested(r) {
if httputil.RespondWithSsz(r) {
s.getBeaconStateSSZV2(ctx, w, []byte(stateId))
} else {
s.getBeaconStateV2(ctx, w, []byte(stateId))

@@ -471,7 +471,7 @@ func TestGetForkChoice(t *testing.T) {
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
s := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/fork_choice", nil)
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/fork_choice", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}


@@ -8,8 +8,9 @@ go_library(
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events",
visibility = ["//beacon-chain:__subpackages__"],
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
@@ -18,8 +19,10 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//network/httputil:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

@@ -5,8 +5,10 @@ import (
"encoding/json"
"fmt"
"net/http"
time2 "time"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
@@ -15,8 +17,10 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/network/httputil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
@@ -48,6 +52,10 @@ const (
ProposerSlashingTopic = "proposer_slashing"
// AttesterSlashingTopic represents a new attester slashing event topic
AttesterSlashingTopic = "attester_slashing"
// LightClientFinalityUpdateTopic represents a new light client finality update event topic.
LightClientFinalityUpdateTopic = "light_client_finality_update"
// LightClientOptimisticUpdateTopic represents a new light client optimistic update event topic.
LightClientOptimisticUpdateTopic = "light_client_optimistic_update"
)

const topicDataMismatch = "Event data type %T does not correspond to event topic %s"
@@ -55,18 +63,20 @@ const topicDataMismatch = "Event data type %T does not correspond to event topic
const chanBuffer = 1000

var casesHandled = map[string]bool{
HeadTopic: true,
BlockTopic: true,
AttestationTopic: true,
VoluntaryExitTopic: true,
FinalizedCheckpointTopic: true,
ChainReorgTopic: true,
SyncCommitteeContributionTopic: true,
BLSToExecutionChangeTopic: true,
PayloadAttributesTopic: true,
BlobSidecarTopic: true,
ProposerSlashingTopic: true,
AttesterSlashingTopic: true,
HeadTopic: true,
BlockTopic: true,
AttestationTopic: true,
VoluntaryExitTopic: true,
FinalizedCheckpointTopic: true,
ChainReorgTopic: true,
SyncCommitteeContributionTopic: true,
BLSToExecutionChangeTopic: true,
PayloadAttributesTopic: true,
BlobSidecarTopic: true,
ProposerSlashingTopic: true,
AttesterSlashingTopic: true,
LightClientFinalityUpdateTopic: true,
LightClientOptimisticUpdateTopic: true,
}

// StreamEvents provides an endpoint to subscribe to the beacon node Server-Sent-Events stream.
@@ -106,16 +116,24 @@ func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
defer stateSub.Unsubscribe()

// Set up SSE response headers
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Content-Type", api.EventStreamMediaType)
w.Header().Set("Connection", api.KeepAlive)

// Handle each event received and context cancellation.
// We send a keepalive dummy message immediately to prevent clients
// stalling while waiting for the first response chunk.
// After that we send a keepalive dummy message every SECONDS_PER_SLOT
// to prevent anyone (e.g. proxy servers) from closing connections.
sendKeepalive(w, flusher)
keepaliveTicker := time2.NewTicker(time2.Duration(params.BeaconConfig().SecondsPerSlot) * time2.Second)
for {
select {
case event := <-opsChan:
handleBlockOperationEvents(w, flusher, topicsMap, event)
case event := <-stateChan:
s.handleStateEvents(ctx, w, flusher, topicsMap, event)
case <-keepaliveTicker.C:
sendKeepalive(w, flusher)
case <-ctx.Done():
return
}
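For readers following the keepalive logic: in the Server-Sent Events format a line beginning with ':' is a comment that conforming clients discard, so the frames written by sendKeepalive keep proxies from timing out the connection without polluting the event stream. A minimal consumer sketch under stated assumptions (a beacon node HTTP API on localhost:3500; the port and topic list are assumptions, not taken from this diff):

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Subscribe to the head topic plus the two topics added in this change.
	url := "http://localhost:3500/eth/v1/events?topics=head,light_client_finality_update,light_client_optimistic_update"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := sc.Text()
		// Skip SSE keepalive comments and blank event separators.
		if line == "" || strings.HasPrefix(line, ":") {
			continue
		}
		fmt.Println(line) // "event: ..." and "data: ..." lines
	}
}
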
@@ -262,6 +280,72 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
ExecutionOptimistic: checkpointData.ExecutionOptimistic,
}
send(w, flusher, FinalizedCheckpointTopic, checkpoint)
case statefeed.LightClientFinalityUpdate:
if _, ok := requestedTopics[LightClientFinalityUpdateTopic]; !ok {
return
}
updateData, ok := event.Data.(*ethpbv2.LightClientFinalityUpdateWithVersion)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic)
return
}

var finalityBranch []string
for _, b := range updateData.Data.FinalityBranch {
finalityBranch = append(finalityBranch, hexutil.Encode(b))
}
update := &LightClientFinalityUpdateEvent{
Version: version.String(int(updateData.Version)),
Data: &LightClientFinalityUpdate{
AttestedHeader: &shared.BeaconBlockHeader{
Slot: fmt.Sprintf("%d", updateData.Data.AttestedHeader.Slot),
ProposerIndex: fmt.Sprintf("%d", updateData.Data.AttestedHeader.ProposerIndex),
ParentRoot: hexutil.Encode(updateData.Data.AttestedHeader.ParentRoot),
StateRoot: hexutil.Encode(updateData.Data.AttestedHeader.StateRoot),
BodyRoot: hexutil.Encode(updateData.Data.AttestedHeader.BodyRoot),
},
FinalizedHeader: &shared.BeaconBlockHeader{
Slot: fmt.Sprintf("%d", updateData.Data.FinalizedHeader.Slot),
ProposerIndex: fmt.Sprintf("%d", updateData.Data.FinalizedHeader.ProposerIndex),
ParentRoot: hexutil.Encode(updateData.Data.FinalizedHeader.ParentRoot),
StateRoot: hexutil.Encode(updateData.Data.FinalizedHeader.StateRoot),
},
FinalityBranch: finalityBranch,
SyncAggregate: &shared.SyncAggregate{
SyncCommitteeBits: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeBits),
SyncCommitteeSignature: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeSignature),
},
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
send(w, flusher, LightClientFinalityUpdateTopic, update)
case statefeed.LightClientOptimisticUpdate:
if _, ok := requestedTopics[LightClientOptimisticUpdateTopic]; !ok {
return
}
updateData, ok := event.Data.(*ethpbv2.LightClientOptimisticUpdateWithVersion)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic)
return
}
update := &LightClientOptimisticUpdateEvent{
Version: version.String(int(updateData.Version)),
Data: &LightClientOptimisticUpdate{
AttestedHeader: &shared.BeaconBlockHeader{
Slot: fmt.Sprintf("%d", updateData.Data.AttestedHeader.Slot),
ProposerIndex: fmt.Sprintf("%d", updateData.Data.AttestedHeader.ProposerIndex),
ParentRoot: hexutil.Encode(updateData.Data.AttestedHeader.ParentRoot),
StateRoot: hexutil.Encode(updateData.Data.AttestedHeader.StateRoot),
BodyRoot: hexutil.Encode(updateData.Data.AttestedHeader.BodyRoot),
},
SyncAggregate: &shared.SyncAggregate{
SyncCommitteeBits: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeBits),
SyncCommitteeSignature: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeSignature),
},
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
send(w, flusher, LightClientOptimisticUpdateTopic, update)
case statefeed.Reorg:
if _, ok := requestedTopics[ChainReorgTopic]; !ok {
return
@@ -431,6 +515,10 @@ func send(w http.ResponseWriter, flusher http.Flusher, name string, data interfa
write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j))
}

func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) {
write(w, flusher, ":\n\n")
}

func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) {
_, err := fmt.Fprintf(w, format, a...)
if err != nil {

@@ -375,7 +375,9 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
})
}

const operationsResult = `event: attestation
const operationsResult = `:

event: attestation
data: {"aggregation_bits":"0x00","data":{"slot":"0","index":"0","beacon_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","source":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"target":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}

event: attestation
@@ -401,7 +403,9 @@ data: {"signed_header_1":{"message":{"slot":"0","proposer_index":"0","parent_roo

`

const stateResult = `event: head
const stateResult = `:

event: head
data: {"slot":"0","block":"0x0000000000000000000000000000000000000000000000000000000000000000","state":"0x0000000000000000000000000000000000000000000000000000000000000000","epoch_transition":true,"execution_optimistic":false,"previous_duty_dependent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","current_duty_dependent_root":"0x0000000000000000000000000000000000000000000000000000000000000000"}

event: finalized_checkpoint
@@ -415,17 +419,23 @@ data: {"slot":"0","block":"0xeade62f0457b2fdf48e7d3fc4b60736688286be7c7a3ac4c9a1

`

const payloadAttributesBellatrixResult = `event: payload_attributes
const payloadAttributesBellatrixResult = `:

event: payload_attributes
data: {"version":"bellatrix","data":{"proposer_index":"0","proposal_slot":"1","parent_block_number":"0","parent_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","parent_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000","payload_attributes":{"timestamp":"12","prev_randao":"0x0000000000000000000000000000000000000000000000000000000000000000","suggested_fee_recipient":"0x0000000000000000000000000000000000000000"}}}

`

const payloadAttributesCapellaResult = `event: payload_attributes
const payloadAttributesCapellaResult = `:

event: payload_attributes
data: {"version":"capella","data":{"proposer_index":"0","proposal_slot":"1","parent_block_number":"0","parent_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","parent_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000","payload_attributes":{"timestamp":"12","prev_randao":"0x0000000000000000000000000000000000000000000000000000000000000000","suggested_fee_recipient":"0x0000000000000000000000000000000000000000","withdrawals":[]}}}

`

const payloadAttributesDenebResult = `event: payload_attributes
const payloadAttributesDenebResult = `:

event: payload_attributes
data: {"version":"deneb","data":{"proposer_index":"0","proposal_slot":"1","parent_block_number":"0","parent_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","parent_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000","payload_attributes":{"timestamp":"12","prev_randao":"0x0000000000000000000000000000000000000000000000000000000000000000","suggested_fee_recipient":"0x0000000000000000000000000000000000000000","withdrawals":[],"parent_beacon_block_root":"0xbef96cb938fd48b2403d3e662664325abb0102ed12737cbb80d717520e50cf4a"}}}

`

@@ -92,3 +92,27 @@ type BlobSidecarEvent struct {
KzgCommitment string `json:"kzg_commitment"`
VersionedHash string `json:"versioned_hash"`
}

type LightClientFinalityUpdateEvent struct {
Version string `json:"version"`
Data *LightClientFinalityUpdate `json:"data"`
}

type LightClientFinalityUpdate struct {
AttestedHeader *shared.BeaconBlockHeader `json:"attested_header"`
FinalizedHeader *shared.BeaconBlockHeader `json:"finalized_header"`
FinalityBranch []string `json:"finality_branch"`
SyncAggregate *shared.SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}

type LightClientOptimisticUpdateEvent struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}

type LightClientOptimisticUpdate struct {
AttestedHeader *shared.BeaconBlockHeader `json:"attested_header"`
SyncAggregate *shared.SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}

@@ -8,9 +8,10 @@ import (

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/wealdtech/go-bytesutil"
"go.opencensus.io/trace"

"github.com/wealdtech/go-bytesutil"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -219,11 +220,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
return
}

response := &LightClientUpdatesByRangeResponse{
Updates: updates,
}

httputil.WriteJson(w, response)
httputil.WriteJson(w, updates)
}

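The handler above now serializes the slice directly, so the endpoint returns a bare JSON array of versioned updates rather than an object wrapping them (the tests below decode it exactly this way). A hedged sketch of client-side decoding, using a hypothetical subset of the response element type (the real one in this package is LightClientUpdateWithVersion):

package main

import (
	"encoding/json"
	"fmt"
)

// minimalUpdate is a hypothetical stand-in carrying only the version field.
type minimalUpdate struct {
	Version string `json:"version"`
}

func main() {
	body := []byte(`[{"version":"capella","data":{}}]`)
	var updates []minimalUpdate
	if err := json.Unmarshal(body, &updates); err != nil {
		panic(err)
	}
	fmt.Println(len(updates), updates[0].Version) // 1 capella
}
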
// GetLightClientFinalityUpdate - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/finality_update.yaml

@@ -171,12 +171,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange(t *testing.T) {
s.GetLightClientUpdatesByRange(writer, request)

require.Equal(t, http.StatusOK, writer.Code)
resp := &LightClientUpdatesByRangeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Updates))
require.Equal(t, "capella", resp.Updates[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp.Updates)
var resp []LightClientUpdateWithVersion
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
require.Equal(t, 1, len(resp))
require.Equal(t, "capella", resp[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp)
}

func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigInputCount(t *testing.T) {
@@ -274,12 +274,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigInputCount(t *tes
s.GetLightClientUpdatesByRange(writer, request)

require.Equal(t, http.StatusOK, writer.Code)
resp := &LightClientUpdatesByRangeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Updates)) // Even with big count input, the response is still the max available period, which is 1 in test case.
require.Equal(t, "capella", resp.Updates[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp.Updates)
var resp []LightClientUpdateWithVersion
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
require.Equal(t, 1, len(resp)) // Even with big count input, the response is still the max available period, which is 1 in test case.
require.Equal(t, "capella", resp[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp)
}

func TestLightClientHandler_GetLightClientUpdatesByRange_TooEarlyPeriod(t *testing.T) {
@@ -377,12 +377,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_TooEarlyPeriod(t *testi
s.GetLightClientUpdatesByRange(writer, request)

require.Equal(t, http.StatusOK, writer.Code)
resp := &LightClientUpdatesByRangeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Updates))
require.Equal(t, "capella", resp.Updates[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp.Updates)
var resp []LightClientUpdateWithVersion
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
require.Equal(t, 1, len(resp))
require.Equal(t, "capella", resp[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp)
}

func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigCount(t *testing.T) {
@@ -480,12 +480,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigCount(t *testing.
s.GetLightClientUpdatesByRange(writer, request)

require.Equal(t, http.StatusOK, writer.Code)
resp := &LightClientUpdatesByRangeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Updates))
require.Equal(t, "capella", resp.Updates[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp.Updates)
var resp []LightClientUpdateWithVersion
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
require.Equal(t, 1, len(resp))
require.Equal(t, "capella", resp[0].Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot)
require.NotNil(t, resp)
}

func TestLightClientHandler_GetLightClientUpdatesByRange_BeforeAltair(t *testing.T) {
@@ -583,9 +583,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_BeforeAltair(t *testing
s.GetLightClientUpdatesByRange(writer, request)

require.Equal(t, http.StatusNotFound, writer.Code)
resp := &LightClientUpdatesByRangeResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 0, len(resp.Updates))
}

func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {

@@ -296,11 +296,16 @@ func newLightClientUpdateToJSON(input *v2.LightClientUpdate) *LightClientUpdate
nextSyncCommittee = shared.SyncCommitteeFromConsensus(migration.V2SyncCommitteeToV1Alpha1(input.NextSyncCommittee))
}

var finalizedHeader *shared.BeaconBlockHeader
if input.FinalizedHeader != nil {
finalizedHeader = shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.FinalizedHeader))
}

return &LightClientUpdate{
AttestedHeader: shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.AttestedHeader)),
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: branchToJSON(input.NextSyncCommitteeBranch),
FinalizedHeader: shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.FinalizedHeader)),
FinalizedHeader: finalizedHeader,
FinalityBranch: branchToJSON(input.FinalityBranch),
SyncAggregate: syncAggregateToJSON(input.SyncAggregate),
SignatureSlot: strconv.FormatUint(uint64(input.SignatureSlot), 10),

@@ -17,11 +17,11 @@ type LightClientBootstrap struct {

type LightClientUpdate struct {
AttestedHeader *shared.BeaconBlockHeader `json:"attested_header"`
NextSyncCommittee *shared.SyncCommittee `json:"next_sync_committee"`
FinalizedHeader *shared.BeaconBlockHeader `json:"finalized_header"`
NextSyncCommittee *shared.SyncCommittee `json:"next_sync_committee,omitempty"`
FinalizedHeader *shared.BeaconBlockHeader `json:"finalized_header,omitempty"`
SyncAggregate *shared.SyncAggregate `json:"sync_aggregate"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch"`
FinalityBranch []string `json:"finality_branch"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
FinalityBranch []string `json:"finality_branch,omitempty"`
SignatureSlot string `json:"signature_slot"`
}


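The omitempty tags matter because some updates carry no sync-committee or finality data: with omitempty, a nil pointer or empty slice is dropped from the serialized object instead of appearing as a null key. A minimal sketch, assuming a stripped-down version of the struct:

package main

import (
	"encoding/json"
	"fmt"
)

type header struct {
	Slot string `json:"slot"`
}

// update mirrors the omitempty behavior of LightClientUpdate in miniature.
type update struct {
	FinalizedHeader *header  `json:"finalized_header,omitempty"`
	FinalityBranch  []string `json:"finality_branch,omitempty"`
	SignatureSlot   string   `json:"signature_slot"`
}

func main() {
	b, _ := json.Marshal(update{SignatureSlot: "1"})
	fmt.Println(string(b)) // {"signature_slot":"1"}; omitted fields leave no null keys
}
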
@@ -62,7 +62,6 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {

// AttestationRewards retrieves attestation reward info for validators specified by array of public keys or validator index.
// If no array is provided, return reward info for every validator.
// TODO: Inclusion delay
func (s *Server) AttestationRewards(w http.ResponseWriter, r *http.Request) {
st, ok := s.attRewardsState(w, r)
if !ok {
@@ -277,7 +276,10 @@ func idealAttRewards(
IsPrevEpochTargetAttester: true,
IsPrevEpochHeadAttester: true,
})
idealRewards = append(idealRewards, IdealAttestationReward{EffectiveBalance: strconv.FormatUint(effectiveBalance, 10)})
idealRewards = append(idealRewards, IdealAttestationReward{
EffectiveBalance: strconv.FormatUint(effectiveBalance, 10),
Inactivity: strconv.FormatUint(0, 10),
})
break
}
}
@@ -299,6 +301,11 @@ func idealAttRewards(
} else {
idealRewards[i].Target = strconv.FormatUint(d.TargetReward, 10)
}
if d.InactivityPenalty > 0 {
idealRewards[i].Inactivity = fmt.Sprintf("-%s", strconv.FormatUint(d.InactivityPenalty, 10))
} else {
idealRewards[i].Inactivity = strconv.FormatUint(d.InactivityPenalty, 10)
}
}
return idealRewards, true
}
@@ -331,6 +338,11 @@ func totalAttRewards(
} else {
totalRewards[i].Target = strconv.FormatUint(d.TargetReward, 10)
}
if d.InactivityPenalty > 0 {
totalRewards[i].Inactivity = fmt.Sprintf("-%s", strconv.FormatUint(d.InactivityPenalty, 10))
} else {
totalRewards[i].Inactivity = strconv.FormatUint(d.InactivityPenalty, 10)
}
}
return totalRewards, true
}

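The sign handling above exists because InactivityPenalty is an unsigned magnitude while the API reports rewards as signed decimal strings, so a penalty of 4768 must surface as "-4768" (exactly what the inactivity test below asserts). A small worked sketch of the same formatting:

package main

import (
	"fmt"
	"strconv"
)

// formatInactivity mirrors the branch in idealAttRewards/totalAttRewards:
// positive penalties are emitted with a leading minus sign.
func formatInactivity(penalty uint64) string {
	if penalty > 0 {
		return fmt.Sprintf("-%s", strconv.FormatUint(penalty, 10))
	}
	return strconv.FormatUint(penalty, 10)
}

func main() {
	fmt.Println(formatInactivity(4768), formatInactivity(0)) // -4768 0
}
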
@@ -396,7 +396,7 @@ func TestAttestationRewards(t *testing.T) {
FinalizationFetcher: mockChainService,
}

t.Run("ok - ideal rewards", func(t *testing.T) {
t.Run("ideal rewards", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
@@ -419,7 +419,7 @@ func TestAttestationRewards(t *testing.T) {
}
assert.Equal(t, uint64(20756849), sum)
})
t.Run("ok - filtered vals", func(t *testing.T) {
t.Run("filtered vals", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
pubkey := fmt.Sprintf("%#x", secretKeys[10].PublicKey().Marshal())
@@ -448,7 +448,7 @@ func TestAttestationRewards(t *testing.T) {
}
assert.Equal(t, uint64(794265), sum)
})
t.Run("ok - all vals", func(t *testing.T) {
t.Run("all vals", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"
request := httptest.NewRequest("POST", url, nil)
writer := httptest.NewRecorder()
@@ -471,36 +471,12 @@ func TestAttestationRewards(t *testing.T) {
}
assert.Equal(t, uint64(54221955), sum)
})
t.Run("ok - penalty", func(t *testing.T) {
st, err := util.NewBeaconStateCapella()
require.NoError(t, err)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*3-1))
validators := make([]*eth.Validator, 0, valCount)
balances := make([]uint64, 0, valCount)
for i := 0; i < valCount; i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
validators = append(validators, &eth.Validator{
PublicKey: blsKey.PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance / 64 * uint64(i),
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance/64*uint64(i))
}
t.Run("penalty", func(t *testing.T) {
st := st.Copy()
validators := st.Validators()
validators[63].Slashed = true
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetBalances(balances))
require.NoError(t, st.SetInactivityScores(make([]uint64, len(validators))))
participation := make([]byte, len(validators))
for i := range participation {
participation[i] = 0b111
}
require.NoError(t, st.SetCurrentParticipationBits(participation))
require.NoError(t, st.SetPreviousParticipationBits(participation))

currentSlot := params.BeaconConfig().SlotsPerEpoch * 3
mockChainService := &mock.ChainService{Optimistic: true, Slot: &currentSlot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
params.BeaconConfig().SlotsPerEpoch*3 - 1: st,
@@ -525,8 +501,44 @@ func TestAttestationRewards(t *testing.T) {
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "0", resp.Data.TotalRewards[0].Head)
assert.Equal(t, "-432270", resp.Data.TotalRewards[0].Source)
assert.Equal(t, "-802788", resp.Data.TotalRewards[0].Target)
assert.Equal(t, "-439299", resp.Data.TotalRewards[0].Source)
assert.Equal(t, "-815841", resp.Data.TotalRewards[0].Target)
assert.Equal(t, "0", resp.Data.TotalRewards[0].Inactivity)
})
t.Run("inactivity", func(t *testing.T) {
st := st.Copy()
validators := st.Validators()
validators[63].Slashed = true
require.NoError(t, st.SetValidators(validators))
inactivityScores, err := st.InactivityScores()
require.NoError(t, err)
inactivityScores[63] = 10
require.NoError(t, st.SetInactivityScores(inactivityScores))

s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
params.BeaconConfig().SlotsPerEpoch*3 - 1: st,
}},
TimeFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
}

url := "http://only.the.epoch.number.at.the.end.is.important/1"
var body bytes.Buffer
valIds, err := json.Marshal([]string{"63"})
require.NoError(t, err)
_, err = body.Write(valIds)
require.NoError(t, err)
request := httptest.NewRequest("POST", url, &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &AttestationRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "-4768", resp.Data.TotalRewards[0].Inactivity)
})
t.Run("invalid validator index/pubkey", func(t *testing.T) {
url := "http://only.the.epoch.number.at.the.end.is.important/1"

@@ -31,6 +31,7 @@ type IdealAttestationReward struct {
Head string `json:"head"`
Target string `json:"target"`
Source string `json:"source"`
Inactivity string `json:"inactivity"`
}

type TotalAttestationReward struct {
@@ -38,7 +39,7 @@ type TotalAttestationReward struct {
Head string `json:"head"`
Target string `json:"target"`
Source string `json:"source"`
InclusionDelay string `json:"inclusion_delay"`
Inactivity string `json:"inactivity"`
}

type SyncCommitteeRewardsResponse struct {

@@ -388,10 +388,6 @@ func (s *Server) GetAttestationData(w http.ResponseWriter, r *http.Request) {
return
}

if isOptimistic, err := shared.IsOptimistic(ctx, w, s.OptimisticModeFetcher); isOptimistic || err != nil {
return
}

_, slot, ok := shared.UintFromQuery(w, r, "slot", true)
if !ok {
return

@@ -201,7 +201,7 @@ func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
}

func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *http.Request, v1alpha1req *eth.BlockRequest, requiredType blockType) {
isSSZ := httputil.SszRequested(r)
isSSZ := httputil.RespondWithSsz(r)
v1alpha1resp, err := s.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)

@@ -831,10 +831,11 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: chain,
},
}

@@ -914,10 +915,11 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
GenesisTimeFetcher: chain,
HeadFetcher: chain,
FinalizedFetcher: chain,
AttestationCache: cache.NewAttestationCache(),
GenesisTimeFetcher: chain,
HeadFetcher: chain,
FinalizedFetcher: chain,
OptimisticModeFetcher: chain,
},
}

@@ -959,8 +961,9 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
GenesisTimeFetcher: chain,
OptimisticModeFetcher: chain,
FinalizedFetcher: chain,
},
}

@@ -1017,10 +1020,11 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
HeadFetcher: chain,
GenesisTimeFetcher: chain,
StateGen: stategen.New(db, doublylinkedtree.New()),
FinalizedFetcher: chain,
HeadFetcher: chain,
GenesisTimeFetcher: chain,
OptimisticModeFetcher: chain,
StateGen: stategen.New(db, doublylinkedtree.New()),
FinalizedFetcher: chain,
},
}

@@ -1065,10 +1069,11 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: chain,
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
},
}

@@ -1151,10 +1156,11 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: chain,
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
},
}

@@ -1192,14 +1198,14 @@ func TestGetAttestationData(t *testing.T) {
func TestProduceSyncCommitteeContribution(t *testing.T) {
root := bytesutil.PadTo([]byte("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), 32)
sig := bls.NewAggregateSignature().Marshal()
messsage := &ethpbalpha.SyncCommitteeMessage{
message := &ethpbalpha.SyncCommitteeMessage{
Slot: 1,
BlockRoot: root,
ValidatorIndex: 0,
Signature: sig,
}
syncCommitteePool := synccommittee.NewStore()
require.NoError(t, syncCommitteePool.SaveSyncCommitteeMessage(messsage))
require.NoError(t, syncCommitteePool.SaveSyncCommitteeMessage(message))
server := Server{
CoreService: &core.Service{
HeadFetcher: &mockChain.ChainService{

@@ -24,6 +24,7 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

@@ -19,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
)

// BlockIdParseError represents an error scenario where a block ID could not be parsed.
@@ -210,7 +211,10 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
if len(indices) == 0 {
m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root))
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not retrieve blob indices for root %#x", root), Reason: core.Internal}
log.WithFields(log.Fields{
"block root": hexutil.Encode(root),
}).Error(errors.Wrapf(err, "could not retrieve blob indices for root %#x", root))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob indices for root %#x", root), Reason: core.Internal}
}
for k, v := range m {
if v {
@@ -223,7 +227,11 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
for i, index := range indices {
vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), index)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
log.WithFields(log.Fields{
"block root": hexutil.Encode(root),
"blob index": index,
}).Error(errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
}
blobs[i] = &vblob
}

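The pattern in the hunks above is worth naming: the wrapped error, with the underlying cause attached, goes to the node's logs, while the RpcError returned to the HTTP layer carries only a freshly built message, so storage internals stop leaking into API responses. A minimal sketch of the same split, with hypothetical names:

package main

import (
	"errors"
	"fmt"

	log "github.com/sirupsen/logrus"
)

// fetchBlob is a hypothetical stand-in for a storage read that fails.
func fetchBlob(index uint64) error {
	// Pretend the storage layer surfaced an internal cause.
	cause := errors.New("leveldb: corrupted block")
	// Full detail goes to the operator-facing log.
	log.WithField("blob index", index).Error(fmt.Errorf("could not retrieve blob at index %d: %w", index, cause))
	// Only a sanitized message is returned toward the API layer.
	return fmt.Errorf("could not retrieve blob at index %d", index)
}

func main() {
	fmt.Println(fetchBlob(3))
}
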
@@ -4,7 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"aggregator.go",
"assignments.go",
"duties.go",
"attester.go",
"blocks.go",
"construct_generic_block.go",
@@ -38,7 +38,6 @@ go_library(
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
@@ -66,7 +65,6 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
@@ -91,13 +89,13 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//ptypes/empty",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
@@ -141,6 +139,7 @@ common_deps = [
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -179,7 +178,7 @@ go_test(
timeout = "moderate",
srcs = [
"aggregator_test.go",
"assignments_test.go",
"duties_test.go",
"attester_test.go",
"blocks_test.go",
"construct_generic_block_test.go",

@@ -31,12 +31,6 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

// An optimistic validator MUST NOT participate in attestation. (i.e., sign across the DOMAIN_BEACON_ATTESTER, DOMAIN_SELECTION_PROOF or DOMAIN_AGGREGATE_AND_PROOF domains).
if err := vs.optimisticStatus(ctx); err != nil {
return nil, err
}

res, err := vs.CoreService.GetAttestationData(ctx, req)
if err != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not get attestation data: %v", err.Err)

@@ -61,10 +61,11 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}


@@ -116,8 +116,9 @@ func TestGetAttestationData_OK(t *testing.T) {
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
AttestationCache: cache.NewAttestationCache(),
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}

@@ -176,7 +177,8 @@ func BenchmarkGetAttestationDataConcurrent(b *testing.B) {
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
},
}

@@ -222,9 +224,10 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
AttestationCache: cache.NewAttestationCache(),
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
},
}
_, err := as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
@@ -240,10 +243,11 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{}},
AttestationCache: cache.NewAttestationCache(),
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{}},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}
_, err = as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
@@ -260,7 +264,8 @@ func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}

@@ -301,10 +306,11 @@ func TestServer_GetAttestationData_RequestSlotIsDifferentThanCurrentSlot(t *test
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot2, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
StateGen: stategen.New(db, doublylinkedtree.New()),
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot2, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
StateGen: stategen.New(db, doublylinkedtree.New()),
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}
util.SaveBlock(t, ctx, db, block)
@@ -346,8 +352,9 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
HeadFetcher: &mock.ChainService{
TargetRoot: targetRoot, Root: blockRoot[:],
},
GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}


@@ -296,7 +296,7 @@ func (s *Service) initializeDebugServerRoutes(debugServer *debug.Server) {
s.cfg.Router.HandleFunc("/eth/v1/debug/beacon/states/{state_id}", debugServer.GetBeaconStateSSZ).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/states/{state_id}", debugServer.GetBeaconStateV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/heads", debugServer.GetForkChoiceHeadsV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/debug/fork_choice", debugServer.GetForkChoice).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/debug/fork_choice", debugServer.GetForkChoice).Methods(http.MethodGet)
}

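With the registration above, the fork choice dump now lives at /eth/v1/debug/fork_choice and the /eth/v2 path is gone. A minimal sketch of fetching it, assuming a node whose HTTP API listens on localhost:3500 (the port is an assumption):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:3500/eth/v1/debug/fork_choice")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}
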
// prysm internal routes
@@ -359,16 +359,17 @@ func (s *Service) Start() {
})

coreService := &core.Service{
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
Broadcaster: s.cfg.Broadcaster,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
OperationNotifier: s.cfg.OperationNotifier,
AttestationCache: cache.NewAttestationCache(),
StateGen: s.cfg.StateGen,
P2P: s.cfg.Broadcaster,
FinalizedFetcher: s.cfg.FinalizationFetcher,
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
Broadcaster: s.cfg.Broadcaster,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
OperationNotifier: s.cfg.OperationNotifier,
AttestationCache: cache.NewAttestationCache(),
StateGen: s.cfg.StateGen,
P2P: s.cfg.Broadcaster,
FinalizedFetcher: s.cfg.FinalizationFetcher,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}

validatorServer := &validatorv1alpha1.Server{

@@ -36,6 +36,18 @@ func init() {
logrus.SetOutput(io.Discard)
}

func combineMaps(maps ...map[string][]string) map[string][]string {
combinedMap := make(map[string][]string)

for _, m := range maps {
for k, v := range m {
combinedMap[k] = v
}
}

return combinedMap
}

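One property of combineMaps to keep in mind: it is a last-writer-wins shallow merge, so a path registered in two groups would keep only the later group's methods. The route groups below are disjoint, so that is harmless here; a quick self-contained illustration:

package main

import (
	"fmt"
	"net/http"
)

// combineMaps is copied from the test helper above for illustration.
func combineMaps(maps ...map[string][]string) map[string][]string {
	combinedMap := make(map[string][]string)
	for _, m := range maps {
		for k, v := range m {
			combinedMap[k] = v
		}
	}
	return combinedMap
}

func main() {
	a := map[string][]string{"/route": {http.MethodGet}}
	b := map[string][]string{"/route": {http.MethodPost}}
	fmt.Println(combineMaps(a, b)) // map[/route:[POST]]; the later map wins
}
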
func TestServer_InitializeRoutes(t *testing.T) {
|
||||
s := Service{
|
||||
cfg: &Config{
|
||||
@@ -58,80 +70,106 @@ func TestServer_InitializeRoutes(t *testing.T) {
|
||||
s.initializePrysmNodeServerRoutes(&nodeprysm.Server{})
|
||||
s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{})
|
||||
|
||||
wantRouteList := map[string][]string{
|
||||
"/eth/v1/beacon/rewards/blocks/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/rewards/attestations/{epoch}": {http.MethodPost},
|
||||
"/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
|
||||
"/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
|
||||
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/validator/aggregate_attestation": {http.MethodGet},
|
||||
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
|
||||
"/eth/v1/validator/aggregate_and_proofs": {http.MethodPost},
|
||||
"/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
|
||||
"/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
|
||||
"/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
|
||||
"/eth/v1/validator/attestation_data": {http.MethodGet},
|
||||
"/eth/v1/validator/register_validator": {http.MethodPost},
|
||||
"/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
|
||||
"/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
|
||||
"/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
|
||||
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
|
||||
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
|
||||
"/eth/v2/validator/blocks/{slot}": {http.MethodGet},
|
||||
"/eth/v1/validator/blinded_blocks/{slot}": {http.MethodGet},
|
||||
"/eth/v3/validator/blocks/{slot}": {http.MethodGet},
|
||||
"/eth/v1/node/syncing": {http.MethodGet},
|
||||
"/eth/v1/node/identity": {http.MethodGet},
|
||||
"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
|
||||
"/eth/v1/node/peers": {http.MethodGet},
|
||||
"/eth/v1/node/peer_count": {http.MethodGet},
|
||||
"/eth/v1/node/version": {http.MethodGet},
|
||||
"/eth/v1/node/health": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/committees": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/fork": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/root": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/sync_committees": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/randao": {http.MethodGet},
|
||||
"/eth/v1/beacon/blocks": {http.MethodPost},
|
||||
"/eth/v1/beacon/blinded_blocks": {http.MethodPost},
|
||||
"/eth/v2/beacon/blocks": {http.MethodPost},
|
||||
"/eth/v2/beacon/blinded_blocks": {http.MethodPost},
|
||||
"/eth/v1/beacon/blocks/{block_id}": {http.MethodGet},
|
||||
"/eth/v2/beacon/blocks/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/blocks/{block_id}/attestations": {http.MethodGet},
|
||||
"/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/blocks/{block_id}/root": {http.MethodGet},
|
||||
"/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/voluntary_exits": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/sync_committees": {http.MethodPost},
|
||||
"/eth/v1/beacon/pool/bls_to_execution_changes": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/headers": {http.MethodGet},
|
||||
"/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
|
||||
beaconRoutes := map[string][]string{
|
||||
"/eth/v1/beacon/genesis": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/root": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/fork": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/finality_checkpoints": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/validators": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/states/{state_id}/validators/{validator_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/validator_balances": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/config/deposit_contract": {http.MethodGet},
|
||||
"/eth/v1/config/fork_schedule": {http.MethodGet},
|
||||
"/eth/v1/config/spec": {http.MethodGet},
|
||||
"/eth/v1/events": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/updates": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/finality_update": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/optimistic_update": {http.MethodGet},
|
||||
"/eth/v1/debug/beacon/states/{state_id}": {http.MethodGet},
|
||||
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
|
||||
"/eth/v2/debug/beacon/heads": {http.MethodGet},
|
||||
"/eth/v2/debug/fork_choice": {http.MethodGet},
|
||||
"/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
|
||||
"/prysm/node/trusted_peers": {http.MethodGet, http.MethodPost},
|
||||
"/prysm/node/trusted_peers/{peer_id}": {http.MethodDelete},
|
||||
"/prysm/validators/performance": {http.MethodPost},
|
||||
"/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/committees": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/sync_committees": {http.MethodGet},
|
||||
"/eth/v1/beacon/states/{state_id}/randao": {http.MethodGet},
|
||||
"/eth/v1/beacon/headers": {http.MethodGet},
|
||||
"/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/blinded_blocks": {http.MethodPost},
|
||||
"/eth/v2/beacon/blinded_blocks": {http.MethodPost},
|
||||
"/eth/v1/beacon/blocks": {http.MethodPost},
|
||||
"/eth/v2/beacon/blocks": {http.MethodPost},
|
||||
"/eth/v1/beacon/blocks/{block_id}": {http.MethodGet}, //deprecated
|
||||
"/eth/v2/beacon/blocks/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/blocks/{block_id}/root": {http.MethodGet},
|
||||
"/eth/v1/beacon/blocks/{block_id}/attestations": {http.MethodGet},
|
||||
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
|
||||
//"/eth/v1/beacon/deposit_snapshot": {http.MethodGet}, not implemented
|
||||
"/eth/v1/beacon/rewards/blocks/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/rewards/attestations/{epoch}": {http.MethodPost},
|
||||
"/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/updates": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/finality_update": {http.MethodGet},
|
||||
"/eth/v1/beacon/light_client/optimistic_update": {http.MethodGet},
|
||||
"/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/sync_committees": {http.MethodPost},
|
||||
"/eth/v1/beacon/pool/voluntary_exits": {http.MethodGet, http.MethodPost},
|
||||
"/eth/v1/beacon/pool/bls_to_execution_changes": {http.MethodGet, http.MethodPost},
|
||||
}
|
||||
builderRoutes := map[string][]string{
"/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
}

configRoutes := map[string][]string{
"/eth/v1/config/fork_schedule": {http.MethodGet},
"/eth/v1/config/spec": {http.MethodGet},
"/eth/v1/config/deposit_contract": {http.MethodGet},
}

debugRoutes := map[string][]string{
"/eth/v1/debug/beacon/states/{state_id}": {http.MethodGet}, // deprecated
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/heads": {http.MethodGet},
"/eth/v1/debug/fork_choice": {http.MethodGet},
}

eventsRoutes := map[string][]string{
"/eth/v1/events": {http.MethodGet},
}

nodeRoutes := map[string][]string{
"/eth/v1/node/identity": {http.MethodGet},
"/eth/v1/node/peers": {http.MethodGet},
"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
"/eth/v1/node/peer_count": {http.MethodGet},
"/eth/v1/node/version": {http.MethodGet},
"/eth/v1/node/syncing": {http.MethodGet},
"/eth/v1/node/health": {http.MethodGet},
}

validatorRoutes := map[string][]string{
"/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
"/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
"/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
"/eth/v2/validator/blocks/{slot}": {http.MethodGet}, // deprecated
"/eth/v3/validator/blocks/{slot}": {http.MethodGet},
"/eth/v1/validator/blinded_blocks/{slot}": {http.MethodGet}, // deprecated
"/eth/v1/validator/attestation_data": {http.MethodGet},
"/eth/v1/validator/aggregate_attestation": {http.MethodGet},
"/eth/v1/validator/aggregate_and_proofs": {http.MethodPost},
"/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
//"/eth/v1/validator/beacon_committee_selections": {http.MethodPost}, // not implemented
"/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
//"/eth/v1/validator/sync_committee_selections": {http.MethodPost}, // not implemented
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
"/eth/v1/validator/register_validator": {http.MethodPost},
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
}

prysmCustomRoutes := map[string][]string{
"/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
"/prysm/node/trusted_peers": {http.MethodGet, http.MethodPost},
"/prysm/node/trusted_peers/{peer_id}": {http.MethodDelete},
"/prysm/validators/performance": {http.MethodPost},
"/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
}

wantRouteList := combineMaps(beaconRoutes, builderRoutes, configRoutes, debugRoutes, eventsRoutes, nodeRoutes, validatorRoutes, prysmCustomRoutes)
gotRouteList := make(map[string][]string)
err := s.cfg.Router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
tpl, err1 := route.GetPathTemplate()
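The hunk is cut off here. For readability, this is how the walk plausibly finishes, together with a minimal sketch of the combineMaps helper referenced above; both are reconstructions under assumptions, not the PR's exact code:

    if err1 != nil {
        return err1
    }
    methods, err2 := route.GetMethods()
    if err2 != nil {
        // mux returns an error for routes registered without explicit methods; skip them.
        return nil
    }
    gotRouteList[tpl] = methods
    return nil
})
require.NoError(t, err)
require.DeepEqual(t, wantRouteList, gotRouteList)

// combineMaps merges the per-category route maps into the single expected map (sketch).
func combineMaps(maps ...map[string][]string) map[string][]string {
    combined := make(map[string][]string)
    for _, m := range maps {
        for k, v := range m {
            combined[k] = append(combined[k], v...)
        }
    }
    return combined
}
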
@@ -22,6 +22,7 @@ type BeaconState interface {
WriteOnlyBeaconState
Copy() BeaconState
CopyAllTries()
Defragment()
HashTreeRoot(ctx context.Context) ([32]byte, error)
Prover
json.Marshaler

@@ -120,6 +121,7 @@ type ReadOnlyValidators interface {
ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error)
ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (ReadOnlyValidator, error)
ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
PublicKeys() ([][fieldparams.BLSPubkeyLength]byte, error)
PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.BLSPubkeyLength]byte
NumValidators() int
ReadFromEveryValidator(f func(idx int, val ReadOnlyValidator) error) error

@@ -181,6 +181,27 @@ func (b *BeaconState) PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.
return bytesutil.ToBytes48(v.PublicKey)
}

// PublicKeys builds a list of all validator public keys, with each key's index aligned to its validator index.
func (b *BeaconState) PublicKeys() ([][fieldparams.BLSPubkeyLength]byte, error) {
b.lock.RLock()
defer b.lock.RUnlock()

l := b.validatorsLen()
res := make([][fieldparams.BLSPubkeyLength]byte, l)
for i := 0; i < l; i++ {
if features.Get().EnableExperimentalState {
val, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return nil, err
}
copy(res[i][:], val.PublicKey)
} else {
copy(res[i][:], b.validators[i].PublicKey)
}
}
return res, nil
}

// NumValidators returns the size of the validator registry.
func (b *BeaconState) NumValidators() int {
b.lock.RLock()

@@ -123,6 +123,55 @@ func NewMultiValueValidators(vals []*ethpb.Validator) *MultiValueValidators {
return mv
}

// Defragment checks whether each individual multi-value field in our state is fragmented
// and, if it is, 'resets' the field to create a new multi-value object.
func (b *BeaconState) Defragment() {
b.lock.Lock()
defer b.lock.Unlock()
if b.blockRootsMultiValue != nil && b.blockRootsMultiValue.IsFragmented() {
initialMVslice := b.blockRootsMultiValue
b.blockRootsMultiValue = b.blockRootsMultiValue.Reset(b)
initialMVslice.Detach(b)
multiValueCountGauge.WithLabelValues(types.BlockRoots.String()).Inc()
runtime.SetFinalizer(b.blockRootsMultiValue, blockRootsFinalizer)
}
if b.stateRootsMultiValue != nil && b.stateRootsMultiValue.IsFragmented() {
initialMVslice := b.stateRootsMultiValue
b.stateRootsMultiValue = b.stateRootsMultiValue.Reset(b)
initialMVslice.Detach(b)
multiValueCountGauge.WithLabelValues(types.StateRoots.String()).Inc()
runtime.SetFinalizer(b.stateRootsMultiValue, stateRootsFinalizer)
}
if b.randaoMixesMultiValue != nil && b.randaoMixesMultiValue.IsFragmented() {
initialMVslice := b.randaoMixesMultiValue
b.randaoMixesMultiValue = b.randaoMixesMultiValue.Reset(b)
initialMVslice.Detach(b)
multiValueCountGauge.WithLabelValues(types.RandaoMixes.String()).Inc()
runtime.SetFinalizer(b.randaoMixesMultiValue, randaoMixesFinalizer)
}
if b.balancesMultiValue != nil && b.balancesMultiValue.IsFragmented() {
initialMVslice := b.balancesMultiValue
b.balancesMultiValue = b.balancesMultiValue.Reset(b)
initialMVslice.Detach(b)
multiValueCountGauge.WithLabelValues(types.Balances.String()).Inc()
runtime.SetFinalizer(b.balancesMultiValue, balancesFinalizer)
}
if b.validatorsMultiValue != nil && b.validatorsMultiValue.IsFragmented() {
initialMVslice := b.validatorsMultiValue
b.validatorsMultiValue = b.validatorsMultiValue.Reset(b)
initialMVslice.Detach(b)
multiValueCountGauge.WithLabelValues(types.Validators.String()).Inc()
runtime.SetFinalizer(b.validatorsMultiValue, validatorsFinalizer)
}
if b.inactivityScoresMultiValue != nil && b.inactivityScoresMultiValue.IsFragmented() {
initialMVslice := b.inactivityScoresMultiValue
b.inactivityScoresMultiValue = b.inactivityScoresMultiValue.Reset(b)
initialMVslice.Detach(b)
multiValueCountGauge.WithLabelValues(types.InactivityScores.String()).Inc()
runtime.SetFinalizer(b.inactivityScoresMultiValue, inactivityScoresFinalizer)
}
}

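The six blocks above repeat one reset-and-detach pattern. For a single concrete field the pattern reads like this hypothetical helper (sketch only; the real fields have distinct concrete types, which is why the PR spells each block out; the caller must already hold b.lock):

func defragBalances(b *BeaconState) {
    mv := b.balancesMultiValue
    if mv == nil || !mv.IsFragmented() {
        return
    }
    b.balancesMultiValue = mv.Reset(b) // fresh, compacted multi-value slice
    mv.Detach(b)                       // drop this state's reference to the old slice
    multiValueCountGauge.WithLabelValues(types.Balances.String()).Inc()
    runtime.SetFinalizer(b.balancesMultiValue, balancesFinalizer)
}
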
func randaoMixesFinalizer(m *MultiValueRandaoMixes) {
multiValueCountGauge.WithLabelValues(types.RandaoMixes.String()).Dec()
}

@@ -31,7 +31,7 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//cache/lru:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -332,9 +332,8 @@ func (s *State) CombinedCache() *CombinedCache {
}

func (s *State) slotAvailable(slot primitives.Slot) bool {
// default to assuming node was initialized from genesis - backfill only needs to be specified for checkpoint sync
if s.backfillStatus == nil {
if s.avb == nil {
return true
}
return s.backfillStatus.SlotCovered(slot)
return s.avb.AvailableBlock(slot)
}

@@ -13,7 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"

@@ -51,7 +51,7 @@ type State struct {
finalizedInfo *finalizedInfo
epochBoundaryStateCache *epochBoundaryState
saveHotStateDB *saveHotStateDbConfig
backfillStatus *backfill.Status
avb coverage.AvailableBlocker
migrationLock *sync.Mutex
fc forkchoice.ForkChoicer
}

@@ -78,9 +78,11 @@ type finalizedInfo struct {
// Option is a functional option for controlling the initialization of a *State value
type Option func(*State)

func WithBackfillStatus(bfs *backfill.Status) Option {
// WithAvailableBlocker gives stategen an AvailableBlocker, which is used to determine if a given
// block is available. This is necessary because backfill creates a hole in the block history.
func WithAvailableBlocker(avb coverage.AvailableBlocker) Option {
return func(sg *State) {
sg.backfillStatus = bfs
sg.avb = avb
}
}

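A hypothetical call site for the new option (sketch; the stategen constructor arguments shown here are assumptions for illustration):

// bfStore is a backfill store satisfying coverage.AvailableBlocker.
sg := stategen.New(beaconDB, forkchoicer, stategen.WithAvailableBlocker(bfStore))
// Without the option, slotAvailable treats every slot as available,
// which matches a node that synced from genesis.
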
@@ -2,30 +2,78 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["status.go"],
srcs = [
"batch.go",
"batcher.go",
"metrics.go",
"pool.go",
"service.go",
"status.go",
"verify.go",
"worker.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
"//time/slots:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["status_test.go"],
srcs = [
"batcher_test.go",
"pool_test.go",
"service_test.go",
"status_test.go",
"verify_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/dbval:go_default_library",
"//runtime/interop:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

beacon-chain/sync/backfill/batch.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package backfill

import (
"fmt"
"time"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)

// ErrChainBroken indicates a backfill batch can't be imported to the db because it is not known to be the ancestor
// of the canonical chain.
var ErrChainBroken = errors.New("batch is not the ancestor of a known finalized root")

type batchState int

func (s batchState) String() string {
switch s {
case batchNil:
return "nil"
case batchInit:
return "init"
case batchSequenced:
return "sequenced"
case batchErrRetryable:
return "error_retryable"
case batchImportable:
return "importable"
case batchImportComplete:
return "import_complete"
case batchEndSequence:
return "end_sequence"
default:
return "unknown"
}
}

const (
batchNil batchState = iota
batchInit
batchSequenced
batchErrRetryable
batchImportable
batchImportComplete
batchEndSequence
)

type batchId string

type batch struct {
firstScheduled time.Time
scheduled time.Time
seq int // sequence identifier, i.e. how many times the sequence() method has served this batch
retries int
begin primitives.Slot
end primitives.Slot // half-open interval, [begin, end), i.e. >= begin, < end.
results VerifiedROBlocks
err error
state batchState
pid peer.ID
}

func (b batch) logFields() log.Fields {
return map[string]interface{}{
"batch_id": b.id(),
"state": b.state.String(),
"scheduled": b.scheduled.String(),
"seq": b.seq,
"retries": b.retries,
"begin": b.begin,
"end": b.end,
"pid": b.pid,
}
}

func (b batch) replaces(r batch) bool {
if r.state == batchImportComplete {
return false
}
if b.begin != r.begin {
return false
}
if b.end != r.end {
return false
}
return b.seq >= r.seq
}

func (b batch) id() batchId {
return batchId(fmt.Sprintf("%d:%d", b.begin, b.end))
}

func (b batch) ensureParent(expected [32]byte) error {
tail := b.results[len(b.results)-1]
if tail.Root() != expected {
return errors.Wrapf(ErrChainBroken, "last parent_root=%#x, tail root=%#x", expected, tail.Root())
}
return nil
}

func (b batch) request() *eth.BeaconBlocksByRangeRequest {
return &eth.BeaconBlocksByRangeRequest{
StartSlot: b.begin,
Count: uint64(b.end - b.begin),
Step: 1,
}
}

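For concreteness, a batch over the half-open range [100, 164) maps to a by-range request like this (the values follow directly from request() above):

b := batch{begin: 100, end: 164}
req := b.request()
// req.StartSlot == 100, req.Count == 64, req.Step == 1;
// slot 164 itself is not requested because end is exclusive.
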
func (b batch) withState(s batchState) batch {
if s == batchSequenced {
b.scheduled = time.Now()
switch b.state {
case batchErrRetryable:
b.retries += 1
log.WithFields(b.logFields()).Info("sequencing batch for retry")
case batchInit, batchNil:
b.firstScheduled = b.scheduled
}
}
if s == batchImportComplete {
backfillBatchTimeRoundtrip.Observe(float64(time.Since(b.firstScheduled).Milliseconds()))
log.WithFields(b.logFields()).Debug("Backfill batch imported.")
}
b.state = s
b.seq += 1
return b
}

func (b batch) withPeer(p peer.ID) batch {
b.pid = p
backfillBatchTimeWaiting.Observe(float64(time.Since(b.scheduled).Milliseconds()))
return b
}

func (b batch) withRetryableError(err error) batch {
b.err = err
return b.withState(batchErrRetryable)
}

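Putting the transition helpers together, a typical retry cycle looks like this (an illustration grounded in the methods above):

b := batch{begin: 100, end: 164, state: batchInit}
b = b.withState(batchSequenced)                 // stamps scheduled and firstScheduled, seq -> 1
b = b.withRetryableError(errors.New("timeout")) // records err, state -> batchErrRetryable, seq -> 2
b = b.withState(batchSequenced)                 // retries -> 1, logs "sequencing batch for retry", seq -> 3
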
beacon-chain/sync/backfill/batcher.go (new file, 197 lines)
@@ -0,0 +1,197 @@
package backfill

import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

var errMaxBatches = errors.New("backfill batch requested in excess of max outstanding batches")
var errEndSequence = errors.New("sequence has terminated, no more backfill batches will be produced")
var errCannotDecreaseMinimum = errors.New("the minimum backfill slot can only be increased, not decreased")

type batchSequencer struct {
batcher batcher
seq []batch
}

// sequence() is meant as a verb, "arrange in a particular order".
// sequence determines the next set of batches that should be worked on based on the state of the batches
// in its internal view. sequence relies on update() for updates to its view of the
// batches it has previously sequenced.
func (c *batchSequencer) sequence() ([]batch, error) {
s := make([]batch, 0)
// batch start slots are in descending order, c.seq[n].begin == c.seq[n+1].end
for i := range c.seq {
switch c.seq[i].state {
case batchInit, batchErrRetryable:
c.seq[i] = c.seq[i].withState(batchSequenced)
s = append(s, c.seq[i])
case batchNil:
// batchNil is the zero value of the batch type.
// This case means that we are initializing a batch that was created by the
// initial allocation of the seq slice, so the batcher needs to compute its bounds.
var b batch
if i == 0 {
// The first item in the list is a special case; subsequent items are initialized
// relative to the preceding batches.
b = c.batcher.before(c.batcher.max)
} else {
b = c.batcher.beforeBatch(c.seq[i-1])
}
c.seq[i] = b.withState(batchSequenced)
s = append(s, c.seq[i])
case batchEndSequence:
if len(s) == 0 {
s = append(s, c.seq[i])
}
default:
continue
}
}
if len(s) == 0 {
return nil, errMaxBatches
}

return s, nil
}

// update serves 2 roles:
//   - updating batchSequencer's copy of the given batch.
//   - removing batches that are completely imported from the sequence,
//     so that they are not returned the next time import() is called, and populating
//     seq with new batches that are ready to be worked on.
func (c *batchSequencer) update(b batch) {
done := 0
for i := 0; i < len(c.seq); i++ {
if b.replaces(c.seq[i]) {
c.seq[i] = b
}
// Assumes the invariant that batches complete and update is called in order.
// This should be true because the code using the sequencer doesn't know the expected parent
// for a batch until it imports the previous batch.
if c.seq[i].state == batchImportComplete {
done += 1
continue
}
// Move the unfinished batches to overwrite the finished ones.
// E.g. consider [a,b,c,d,e] where a,b are done.
// When i==2, done==2 (since done was incremented for a and b),
// so we want to copy c to a, then on i=3, d to b, then on i=4, e to c.
c.seq[i-done] = c.seq[i]
}
// Overwrite the moved batches with the next ones in the sequence.
// Continuing the example in the comment above, len(c.seq)==5, done=2, so i=3.
// We want to replace index 3 with the batch that should be processed after index 2,
// which was previously the earliest known batch, and index 4 with the batch that should
// be processed after index 3, the new earliest batch.
for i := len(c.seq) - done; i < len(c.seq); i++ {
c.seq[i] = c.batcher.beforeBatch(c.seq[i-1])
}
}

// importable returns all batches that are ready to be imported. This means they satisfy 2 conditions:
//   - They are in state batchImportable, which means their data has been downloaded and proposer signatures have been verified.
//   - There are no batches that are not in state batchImportable between them and the start of the slice. This ensures that they
//     can be connected to the canonical chain, either because the root of the last block in the batch matches the parent_root of
//     the oldest block in the canonical chain, or because the root of the last block in the batch matches the parent_root of the
//     new block preceding them in the slice (which must connect to the batch before it, or to the canonical chain if it is first).
func (c *batchSequencer) importable() []batch {
imp := make([]batch, 0)
for i := range c.seq {
if c.seq[i].state == batchImportable {
imp = append(imp, c.seq[i])
continue
}
// As soon as we hit a batch with a different state, we return everything leading to it.
// If the first element isn't importable, we'll return an empty slice.
break
}
return imp
}

// moveMinimum enables the backfill service to change the slot where the batcher will start replying with
// batch state batchEndSequence (signaling that no new batches will be produced). This is done in response to
// epochs advancing, which shrinks the gap between <checkpoint slot> and <current slot>-MIN_EPOCHS_FOR_BLOCK_REQUESTS,
// allowing the node to download a smaller number of blocks.
func (c *batchSequencer) moveMinimum(min primitives.Slot) error {
if min < c.batcher.min {
return errCannotDecreaseMinimum
}
c.batcher.min = min
return nil
}

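A caller would recompute the floor and call moveMinimum as epochs advance; a sketch (the config accessor and currentEpoch here are assumptions for illustration):

minEpochs := primitives.Epoch(params.BeaconNetworkConfig().MinEpochsForBlockRequests)
if currentEpoch > minEpochs {
    floor, err := slots.EpochStart(currentEpoch - minEpochs)
    if err == nil {
        // Returns errCannotDecreaseMinimum if floor is below the current minimum.
        _ = seq.moveMinimum(floor)
    }
}
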
// countWithState provides a view into how many batches are in a particular state,
// to be used for logging or metrics purposes.
func (c *batchSequencer) countWithState(s batchState) int {
n := 0
for i := 0; i < len(c.seq); i++ {
if c.seq[i].state == s {
n += 1
}
}
return n
}

// numTodo computes the number of remaining batches for metrics and logging purposes.
func (c *batchSequencer) numTodo() int {
if len(c.seq) == 0 {
return 0
}
lowest := c.seq[len(c.seq)-1]
todo := 0
if lowest.state != batchEndSequence {
todo = c.batcher.remaining(lowest.begin)
}
for _, b := range c.seq {
switch b.state {
case batchEndSequence, batchImportComplete, batchNil:
continue
default:
todo += 1
}
}
return todo
}

func newBatchSequencer(seqLen int, min, max, size primitives.Slot) *batchSequencer {
b := batcher{min: min, max: max, size: size}
seq := make([]batch, seqLen)
return &batchSequencer{batcher: b, seq: seq}
}

type batcher struct {
min primitives.Slot
max primitives.Slot
size primitives.Slot
}

func (r batcher) remaining(upTo primitives.Slot) int {
if r.min >= upTo {
return 0
}
delta := upTo - r.min
if delta%r.size != 0 {
return int(delta/r.size) + 1
}
return int(delta / r.size)
}

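remaining is a ceiling division over the half-open span between min and upTo; for example:

r := batcher{min: 100, size: 64}
r.remaining(333) // delta = 233; 233/64 = 3 remainder 41, so 4 batches
r.remaining(292) // delta = 192 = 3*64 exactly, so 3 batches
r.remaining(90)  // upTo is at or below min, so 0
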
func (r batcher) beforeBatch(upTo batch) batch {
return r.before(upTo.begin)
}

func (r batcher) before(upTo primitives.Slot) batch {
// upTo is an exclusive upper bound. Requesting a batch before the lower bound of backfill signals the end of the
// backfill process.
if upTo <= r.min {
return batch{begin: upTo, end: upTo, state: batchEndSequence}
}
begin := r.min
if upTo > r.size+r.min {
begin = upTo - r.size
}

// batch.end is exclusive, .begin is inclusive, so prev.end == next.begin
return batch{begin: begin, end: upTo, state: batchInit}
}

beacon-chain/sync/backfill/batcher_test.go (new file, 208 lines)
@@ -0,0 +1,208 @@
package backfill

import (
"fmt"
"testing"

"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestBatcherBefore(t *testing.T) {
cases := []struct {
name string
b batcher
upTo []primitives.Slot
expect []batch
}{
{
name: "size 10",
b: batcher{min: 0, size: 10},
upTo: []primitives.Slot{33, 30, 10, 6},
expect: []batch{
{begin: 23, end: 33, state: batchInit},
{begin: 20, end: 30, state: batchInit},
{begin: 0, end: 10, state: batchInit},
{begin: 0, end: 6, state: batchInit},
},
},
{
name: "size 4",
b: batcher{min: 0, size: 4},
upTo: []primitives.Slot{33, 6, 4},
expect: []batch{
{begin: 29, end: 33, state: batchInit},
{begin: 2, end: 6, state: batchInit},
{begin: 0, end: 4, state: batchInit},
},
},
{
name: "trigger end",
b: batcher{min: 20, size: 10},
upTo: []primitives.Slot{33, 30, 25, 21, 20, 19},
expect: []batch{
{begin: 23, end: 33, state: batchInit},
{begin: 20, end: 30, state: batchInit},
{begin: 20, end: 25, state: batchInit},
{begin: 20, end: 21, state: batchInit},
{begin: 20, end: 20, state: batchEndSequence},
{begin: 19, end: 19, state: batchEndSequence},
},
},
}
for _, c := range cases {
for i := range c.upTo {
upTo := c.upTo[i]
expect := c.expect[i]
t.Run(fmt.Sprintf("%s upTo %d", c.name, upTo), func(t *testing.T) {
got := c.b.before(upTo)
require.Equal(t, expect.begin, got.begin)
require.Equal(t, expect.end, got.end)
require.Equal(t, expect.state, got.state)
})
}
}
}

func TestBatchSequencer(t *testing.T) {
var min, max, size primitives.Slot
seqLen := 8
min = 0
max = 11235
size = 64
seq := newBatchSequencer(seqLen, min, max, size)
expected := []batch{
{begin: 11171, end: 11235},
{begin: 11107, end: 11171},
{begin: 11043, end: 11107},
{begin: 10979, end: 11043},
{begin: 10915, end: 10979},
{begin: 10851, end: 10915},
{begin: 10787, end: 10851},
{begin: 10723, end: 10787},
}
got, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, seqLen, len(got))
for i := 0; i < seqLen; i++ {
g := got[i]
exp := expected[i]
require.Equal(t, exp.begin, g.begin)
require.Equal(t, exp.end, g.end)
require.Equal(t, batchSequenced, g.state)
}
// This should give us the error indicating there are too many outstanding batches.
_, err = seq.sequence()
require.ErrorIs(t, err, errMaxBatches)

// Mark the last batch retryable so we can call sequence again.
last := seq.seq[len(seq.seq)-1]
// With this state, the batch should get served back to us as the next batch.
last.state = batchErrRetryable
seq.update(last)
nextS, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(nextS))
next := nextS[0]
require.Equal(t, last.begin, next.begin)
require.Equal(t, last.end, next.end)
// sequence() should replace the batchErrRetryable state with batchSequenced.
require.Equal(t, batchSequenced, next.state)

// No batches have been marked importable.
require.Equal(t, 0, len(seq.importable()))

// Mark our batch importable; it is the last in the sequence, so it still cannot connect to the chain.
next.state = batchImportable
seq.update(next)
require.Equal(t, 0, len(seq.importable()))
first := seq.seq[0]
first.state = batchImportable
seq.update(first)
require.Equal(t, 1, len(seq.importable()))
require.Equal(t, len(seq.seq), seqLen)
// Change the last element back to batchInit so that the importable test stays simple.
last = seq.seq[len(seq.seq)-1]
last.state = batchInit
seq.update(last)
// Ensure that the number of importable elements grows as the list is marked importable.
for i := 0; i < len(seq.seq); i++ {
seq.seq[i].state = batchImportable
require.Equal(t, i+1, len(seq.importable()))
}
// Reset everything to init.
for i := 0; i < len(seq.seq); i++ {
seq.seq[i].state = batchInit
require.Equal(t, 0, len(seq.importable()))
}
// Loop backwards and make sure importable is zero until the first element is importable.
for i := len(seq.seq) - 1; i > 0; i-- {
seq.seq[i].state = batchImportable
require.Equal(t, 0, len(seq.importable()))
}
seq.seq[0].state = batchImportable
require.Equal(t, len(seq.seq), len(seq.importable()))

// Reset everything to init again.
for i := 0; i < len(seq.seq); i++ {
seq.seq[i].state = batchInit
require.Equal(t, 0, len(seq.importable()))
}
// Set the first 3 elements to importable. We should see them in the result for importable()
// and be able to use update to cycle them away.
seq.seq[0].state, seq.seq[1].state, seq.seq[2].state = batchImportable, batchImportable, batchImportable
require.Equal(t, 3, len(seq.importable()))
a, b, c, z := seq.seq[0], seq.seq[1], seq.seq[2], seq.seq[3]
require.NotEqual(t, z.begin, seq.seq[2].begin)
require.NotEqual(t, z.begin, seq.seq[1].begin)
require.NotEqual(t, z.begin, seq.seq[0].begin)
a.state, b.state, c.state = batchImportComplete, batchImportComplete, batchImportComplete
seq.update(a)

// Follow z as it moves down the chain to the first spot.
require.Equal(t, z.begin, seq.seq[2].begin)
require.NotEqual(t, z.begin, seq.seq[1].begin)
require.NotEqual(t, z.begin, seq.seq[0].begin)
seq.update(b)
require.NotEqual(t, z.begin, seq.seq[2].begin)
require.Equal(t, z.begin, seq.seq[1].begin)
require.NotEqual(t, z.begin, seq.seq[0].begin)
seq.update(c)
require.NotEqual(t, z.begin, seq.seq[2].begin)
require.NotEqual(t, z.begin, seq.seq[1].begin)
require.Equal(t, z.begin, seq.seq[0].begin)

// Check integrity of begin/end alignment across the sequence.
// Also update all the states to sequenced for the convenience of the next test.
for i := 1; i < len(seq.seq); i++ {
require.Equal(t, seq.seq[i].end, seq.seq[i-1].begin)
// Won't touch the first element, which is fine because it is marked complete below.
seq.seq[i].state = batchSequenced
}

// Set the min for the batcher close to the lowest slot. This will force the next batch to be partial and the batch
// after that to be the final batch.
newMin := seq.seq[len(seq.seq)-1].begin - 30
seq.batcher.min = newMin
first = seq.seq[0]
first.state = batchImportComplete
// update() with a complete state will cause the sequence to be extended with an additional batch.
seq.update(first)
lastS, err := seq.sequence()
require.NoError(t, err)
last = lastS[0]
require.Equal(t, newMin, last.begin)
require.Equal(t, seq.seq[len(seq.seq)-2].begin, last.end)

// Mark the first batch done again; this time check that sequence() yields the end-of-sequence batch.
first = seq.seq[0]
first.state = batchImportComplete
// update() with a complete state will cause the sequence to be extended with an additional batch.
seq.update(first)
endExp, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(endExp))
end := endExp[0]
require.Equal(t, batchEndSequence, end.state)
}

beacon-chain/sync/backfill/coverage/BUILD.bazel (new file, 9 lines)
@@ -0,0 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["coverage.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage",
visibility = ["//visibility:public"],
deps = ["//consensus-types/primitives:go_default_library"],
)

beacon-chain/sync/backfill/coverage/coverage.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package coverage

import "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"

// AvailableBlocker can be used to check whether there is a finalized block in the db for the given slot.
// This interface is typically fulfilled by backfill.Store.
type AvailableBlocker interface {
AvailableBlock(primitives.Slot) bool
}

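Any type with a matching method satisfies the interface; a hypothetical in-memory implementation for tests (not part of this PR):

type staticAvailability struct{ oldest primitives.Slot }

func (s staticAvailability) AvailableBlock(sl primitives.Slot) bool {
    // Every finalized block at or after the oldest backfilled slot is available.
    return sl >= s.oldest
}
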
beacon-chain/sync/backfill/metrics.go (new file, 67 lines)
@@ -0,0 +1,67 @@
package backfill

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
oldestBatch = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "backfill_earliest_wip_slot",
Help: "Earliest slot that has been assigned to a worker.",
},
)
batchesWaiting = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "backfill_importable_batches_waiting",
Help: "Number of batches that are ready to be imported once they can be connected to the existing chain.",
},
)
backfillRemainingBatches = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "backfill_remaining_batches",
Help: "Backfill remaining batches.",
},
)
backfillBatchesImported = promauto.NewCounter(
prometheus.CounterOpts{
Name: "backfill_batches_imported",
Help: "Number of backfill batches downloaded and imported.",
},
)
backfillBatchApproximateBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "backfill_batch_bytes_downloaded",
Help: "Count of bytes downloaded from peers.",
},
)
backfillBatchTimeRoundtrip = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_roundtrip",
Help: "Total time to import batch, from first scheduled to imported.",
Buckets: []float64{400, 800, 1600, 3200, 6400, 12800},
},
)
backfillBatchTimeWaiting = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_waiting",
Help: "Time batch waited for a suitable peer.",
Buckets: []float64{50, 100, 300, 1000, 2000},
},
)
backfillBatchTimeDownloading = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_download",
Help: "Time batch spent downloading blocks from peer.",
Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
},
)
backfillBatchTimeVerifying = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_verify",
Help: "Time batch spent verifying blocks downloaded from peer.",
Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
},
)
)

beacon-chain/sync/backfill/pool.go (new file, 152 lines)
@@ -0,0 +1,152 @@
package backfill

import (
"context"
"math"
"time"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
log "github.com/sirupsen/logrus"
)

type batchWorkerPool interface {
spawn(ctx context.Context, n int, clock *startup.Clock, a PeerAssigner, v *verifier)
todo(b batch)
complete() (batch, error)
}

type worker interface {
run(context.Context)
}

type newWorker func(id workerId, in, out chan batch, c *startup.Clock, v *verifier) worker

func defaultNewWorker(p p2p.P2P) newWorker {
return func(id workerId, in, out chan batch, c *startup.Clock, v *verifier) worker {
return newP2pWorker(id, p, in, out, c, v)
}
}

type p2pBatchWorkerPool struct {
maxBatches int
newWorker newWorker
toWorkers chan batch
fromWorkers chan batch
toRouter chan batch
fromRouter chan batch
shutdownErr chan error
endSeq []batch
ctx context.Context
cancel func()
}

var _ batchWorkerPool = &p2pBatchWorkerPool{}

func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int) *p2pBatchWorkerPool {
nw := defaultNewWorker(p)
return &p2pBatchWorkerPool{
newWorker: nw,
toRouter: make(chan batch, maxBatches),
fromRouter: make(chan batch, maxBatches),
toWorkers: make(chan batch),
fromWorkers: make(chan batch),
maxBatches: maxBatches,
shutdownErr: make(chan error),
}
}

func (p *p2pBatchWorkerPool) spawn(ctx context.Context, n int, c *startup.Clock, a PeerAssigner, v *verifier) {
p.ctx, p.cancel = context.WithCancel(ctx)
go p.batchRouter(a)
for i := 0; i < n; i++ {
go p.newWorker(workerId(i), p.toWorkers, p.fromWorkers, c, v).run(p.ctx)
}
}

func (p *p2pBatchWorkerPool) todo(b batch) {
// Intercept batchEndSequence batches so workers can remain unaware of this state.
// Workers don't know what to do with batchEndSequence batches. They are a signal to the pool that the batcher
// has stopped producing things for the workers to do and the pool is close to winding down. See complete()
// to understand how the pool manages the state where all workers are idle
// and all incoming batches signal end of sequence.
if b.state == batchEndSequence {
p.endSeq = append(p.endSeq, b)
return
}
p.toRouter <- b
}

func (p *p2pBatchWorkerPool) complete() (batch, error) {
if len(p.endSeq) == p.maxBatches {
return p.endSeq[0], errEndSequence
}

select {
case b := <-p.fromRouter:
return b, nil
case err := <-p.shutdownErr:
return batch{}, errors.Wrap(err, "fatal error from backfill worker pool")
case <-p.ctx.Done():
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
return batch{}, p.ctx.Err()
}
}

func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
busy := make(map[peer.ID]bool)
todo := make([]batch, 0)
rt := time.NewTicker(time.Second)
earliest := primitives.Slot(math.MaxUint64)
for {
select {
case b := <-p.toRouter:
todo = append(todo, b)
case <-rt.C:
// Worker assignments can fail if assignBatch can't find a suitable peer.
// This ticker exists to periodically break out of the channel select
// to retry failed assignments.
case b := <-p.fromWorkers:
pid := b.pid
busy[pid] = false
p.fromRouter <- b
case <-p.ctx.Done():
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
p.shutdown(p.ctx.Err())
return
}
if len(todo) == 0 {
continue
}
// Try to assign as many outstanding batches as possible to peers and feed the assigned batches to workers.
assigned, err := pa.Assign(busy, len(todo))
if err != nil {
if errors.Is(err, peers.ErrInsufficientSuitable) {
// Transient error resulting from insufficient number of connected peers. Leave batches in
// queue and get to them whenever the peer situation is resolved.
continue
}
p.shutdown(err)
return
}
for _, pid := range assigned {
busy[pid] = true
todo[0].pid = pid
p.toWorkers <- todo[0].withPeer(pid)
if todo[0].begin < earliest {
earliest = todo[0].begin
oldestBatch.Set(float64(earliest))
}
todo = todo[1:]
}
}
}

func (p *p2pBatchWorkerPool) shutdown(err error) {
p.cancel()
p.shutdownErr <- err
}

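PeerAssigner is referenced throughout the pool but defined in service.go, which is not shown in this diff; its shape can be inferred from batchRouter above and the test's mockAssigner below (sketch of the inferred definition):

type PeerAssigner interface {
    // Assign picks up to n peers that are not currently busy serving a batch.
    Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error)
}
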
beacon-chain/sync/backfill/pool_test.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package backfill

import (
"context"
"testing"
"time"

"github.com/libp2p/go-libp2p/core/peer"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)

type mockAssigner struct {
err error
assign []peer.ID
}

// Assign satisfies the PeerAssigner interface so that mockAssigner can be used in tests
// in place of the concrete p2p implementation of PeerAssigner.
func (m mockAssigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
if m.err != nil {
return nil, m.err
}
return m.assign, nil
}

var _ PeerAssigner = &mockAssigner{}

func TestPoolDetectAllEnded(t *testing.T) {
nw := 5
p2p := p2ptest.NewTestP2P(t)
ctx := context.Background()
ma := &mockAssigner{}
pool := newP2PBatchWorkerPool(p2p, nw)
st, err := util.NewBeaconState()
require.NoError(t, err)
keys, err := st.PublicKeys()
require.NoError(t, err)
v, err := newBackfillVerifier(st.GenesisValidatorsRoot(), keys)
require.NoError(t, err)
pool.spawn(ctx, nw, startup.NewClock(time.Now(), [32]byte{}), ma, v)
br := batcher{min: 10, size: 10}
endSeq := br.before(0)
require.Equal(t, batchEndSequence, endSeq.state)
for i := 0; i < nw; i++ {
pool.todo(endSeq)
}
b, err := pool.complete()
require.ErrorIs(t, err, errEndSequence)
require.Equal(t, b.end, endSeq.end)
}

type mockPool struct {
spawnCalled []int
finishedChan chan batch
finishedErr chan error
todoChan chan batch
}

func (m *mockPool) spawn(_ context.Context, _ int, _ *startup.Clock, _ PeerAssigner, _ *verifier) {
}

func (m *mockPool) todo(b batch) {
m.todoChan <- b
}

func (m *mockPool) complete() (batch, error) {
select {
case b := <-m.finishedChan:
return b, nil
case err := <-m.finishedErr:
return batch{}, err
}
}

var _ batchWorkerPool = &mockPool{}