mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 22:07:59 -05:00

Compare commits: blockPropo ... backfill-w (39 commits)

1326f99a95, 3afb80a4f3, 78ea8c7375, 278d233cce, a79cb919e0,
2773071074, e9e66dfef5, b4d19d9394, 7d3a239ebc, c4bce0393d,
3b59ed2122, ec7ec271e6, 45f8249021, c9993d38e2, e105c09abc,
0213bdd80c, 28f7da8cbd, d4541ced79, 601ad67818, e72b8f1844,
58df1f1ba5, cec32cb996, d56a530c86, 0a68d2d302, 25ebd335cb,
6a0db800b3, 085f90a4f1, ecb26e9885, 7eb0091936, f8408b9ec1,
d6d5139d68, 2e0e29ecbe, e9b5e52ee2, 2a4441762e, 401fccc723,
c80f88fc07, faa0a2c4cf, c45cb7e188, 0b10263dd5
@@ -499,6 +499,13 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
     return ar[:], nil
 }
 
+// SetOptimisticToInvalid wraps the corresponding method in forkchoice
+func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
+    s.cfg.ForkChoiceStore.Lock()
+    defer s.cfg.ForkChoiceStore.Unlock()
+    return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
+}
+
 // SetGenesisTime sets the genesis time of beacon chain.
 func (s *Service) SetGenesisTime(t time.Time) {
     s.genesisTime = t
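The wrapper above lets callers outside the forkchoice package invalidate optimistic nodes without managing the store's mutex themselves. A minimal usage sketch — the caller function and log field names are illustrative, not part of this diff. Because the method locks internally, it must be invoked without already holding the forkchoice lock:

// Illustrative caller; markPayloadInvalid is not a name from this diff.
// The service method acquires and releases the forkchoice lock itself,
// so the caller must not hold it when calling.
func markPayloadInvalid(ctx context.Context, s *Service, root, parent, lvh [32]byte) error {
    invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parent, lvh)
    if err != nil {
        return err
    }
    log.WithField("invalidNodes", len(invalidRoots)).Info("Removed invalid optimistic nodes")
    return nil
}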
@@ -182,21 +182,24 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
 
 // notifyNewPayload signals execution engine on a new payload.
 // It returns true if the EL has returned VALID for the block
-func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
-    postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
+func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
+    preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
     ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
     defer span.End()
 
     // Execution payload is only supported in Bellatrix and beyond. Pre
     // merge blocks are never optimistic
-    if blocks.IsPreBellatrixVersion(postStateVersion) {
+    if blk == nil {
+        return false, errors.New("signed beacon block can't be nil")
+    }
+    if preStateVersion < version.Bellatrix {
         return true, nil
     }
     if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
         return false, err
     }
     body := blk.Block().Body()
-    enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
+    enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
     if err != nil {
         return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
     }
@@ -231,9 +234,9 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
 }
 
 // reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
-func (s *Service) reportInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
+func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
     newPayloadInvalidNodeCount.Inc()
-    invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
+    invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
     if err != nil {
         return err
     }
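The rename to pruneInvalidBlock reflects what the method actually does: it routes through the new locking wrapper and surfaces the invalidation as a typed error. A sketch of how a caller interrogates that error, using the helpers exercised by the test hunk below (IsInvalidBlock, InvalidBlockLVH, InvalidAncestorRoots); the log message is illustrative:

// Sketch: inspecting the error returned by pruneInvalidBlock.
err := s.pruneInvalidBlock(ctx, root, parentRoot, lvh)
if IsInvalidBlock(err) {
    // Last valid hash reported by the execution engine for this branch.
    lvhReported := InvalidBlockLVH(err)
    // Every optimistic root pruned from forkchoice as a consequence.
    pruned := InvalidAncestorRoots(err)
    log.Warnf("Pruned %d blocks above LVH %#x", len(pruned), lvhReported)
}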
@@ -525,11 +525,13 @@ func Test_NotifyNewPayload(t *testing.T) {
         {
             name:           "phase 0 post state",
             postState:      phase0State,
+            blk:            altairBlk, // same as phase 0 for this test
             isValidPayload: true,
         },
         {
             name:           "altair post state",
             postState:      altairState,
+            blk:            altairBlk,
             isValidPayload: true,
         },
         {
@@ -764,7 +766,7 @@ func Test_reportInvalidBlock(t *testing.T) {
     require.NoError(t, fcs.InsertNode(ctx, st, root))
 
     require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
-    err = service.reportInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
+    err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
     require.Equal(t, IsInvalidBlock(err), true)
     require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
     invalidRoots := InvalidAncestorRoots(err)
@@ -172,11 +172,15 @@ var (
     })
     onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
         Name: "on_block_processing_milliseconds",
-        Help: "Total time in milliseconds to complete a call to onBlock()",
+        Help: "Total time in milliseconds to complete a call to postBlockProcess()",
     })
     stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
         Name: "state_transition_processing_milliseconds",
-        Help: "Total time to call a state transition in onBlock()",
+        Help: "Total time to call a state transition in validateStateTransition()",
     })
+    chainServiceProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
+        Name: "chain_service_processing_milliseconds",
+        Help: "Total time to call a chain service in ReceiveBlock()",
+    })
     processAttsElapsedTime = promauto.NewHistogram(
         prometheus.HistogramOpts{
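chain_service_processing_milliseconds is new in this commit and, per its help text, covers the chain-service portion of ReceiveBlock. Its Observe call site is not shown in this diff; a sketch of the expected wiring, mirroring how stateTransitionProcessingTime is observed elsewhere in this same commit (treat the placement as an assumption):

// Assumed call site inside ReceiveBlock (not shown in this diff).
start := time.Now()
// ... postBlockProcess, checkpoint updates, event sending ...
chainServiceProcessingTime.Observe(float64(time.Since(start).Milliseconds()))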
@@ -22,7 +22,6 @@ import (
     "github.com/prysmaticlabs/prysm/v4/crypto/bls"
     "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
     "github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
-    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
     ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
     "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
     "github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -40,59 +39,11 @@ const depositDeadline = 20 * time.Second
 // This defines size of the upper bound for initial sync block cache.
 var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
 
-// onBlock is called when a gossip block is received. It runs regular state transition on the block.
-// The block's signing root should be computed before calling this method to avoid redundant
-// computation in this method and methods it calls into.
-//
-// Spec pseudocode definition:
-//
-//	def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
-//	  block = signed_block.message
-//	  # Parent block must be known
-//	  assert block.parent_root in store.block_states
-//	  # Make a copy of the state to avoid mutability issues
-//	  pre_state = copy(store.block_states[block.parent_root])
-//	  # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
-//	  assert get_current_slot(store) >= block.slot
-//
-//	  # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
-//	  finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-//	  assert block.slot > finalized_slot
-//	  # Check block is a descendant of the finalized block at the checkpoint finalized slot
-//	  assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
-//
-//	  # Check the block is valid and compute the post-state
-//	  state = pre_state.copy()
-//	  state_transition(state, signed_block, True)
-//	  # Add new block to the store
-//	  store.blocks[hash_tree_root(block)] = block
-//	  # Add new state for this block to the store
-//	  store.block_states[hash_tree_root(block)] = state
-//
-//	  # Update justified checkpoint
-//	  if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
-//	    if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
-//	      store.best_justified_checkpoint = state.current_justified_checkpoint
-//	    if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
-//	      store.justified_checkpoint = state.current_justified_checkpoint
-//
-//	  # Update finalized checkpoint
-//	  if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
-//	    store.finalized_checkpoint = state.finalized_checkpoint
-//
-//	    # Potentially update justified if different from store
-//	    if store.justified_checkpoint != state.current_justified_checkpoint:
-//	      # Update justified if new justified is later than store justified
-//	      if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
-//	        store.justified_checkpoint = state.current_justified_checkpoint
-//	        return
-//
-//	      # Update justified if store justified is not in chain with finalized checkpoint
-//	      finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-//	      ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
-//	      if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
-//	        store.justified_checkpoint = state.current_justified_checkpoint
-func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
+// postBlockProcess is called when a gossip block is received. This function performs
+// several duties most importantly informing the engine if head was updated,
+// saving the new head information to the blockchain package and database and
+// handling attestations, slashings and similar included in the block.
+func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error {
     ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
     defer span.End()
     if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
@@ -101,54 +52,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
     startTime := time.Now()
     b := signed.Block()
 
-    preState, err := s.getBlockPreState(ctx, b)
-    if err != nil {
-        return err
-    }
-
-    // Verify that the parent block is in forkchoice
-    parentRoot := b.ParentRoot()
-    if !s.cfg.ForkChoiceStore.HasNode(parentRoot) {
-        return ErrNotDescendantOfFinalized
-    }
-
-    // Save current justified and finalized epochs for future use.
-    currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
-    currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
-    preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
-    preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
-
-    preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
-    if err != nil {
-        return err
-    }
-    stateTransitionStartTime := time.Now()
-    postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
-    if err != nil {
-        return invalidBlock{error: err}
-    }
-    stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
-
-    postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
-    if err != nil {
-        return err
-    }
-    isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
-    if err != nil {
-        if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
-            return s.reportInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
-        }
-        return errors.Wrap(err, "could not validate new payload")
-    }
-    if signed.Version() < version.Capella && isValidPayload {
-        if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
-            return err
-        }
-    }
-
-    if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
-        return err
-    }
     if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
         return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
     }
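With the transition and engine calls stripped out, importing a block is now an explicit, caller-driven pipeline; the rewritten ReceiveBlock at the end of this diff and all of the updated tests follow the same order. A condensed sketch of that pipeline — importBlock is a hypothetical name used only to frame the sequence, the individual calls are the ones from this diff:

// Sketch of the caller-driven import pipeline assembled from this diff's
// call sites (ReceiveBlock and the updated tests). importBlock itself is
// not a function in this commit.
func (s *Service) importBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
    preState, err := s.getBlockPreState(ctx, signed.Block()) // parent + finalized-descendant checks
    if err != nil {
        return err
    }
    preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
    if err != nil {
        return err
    }
    postState, err := s.validateStateTransition(ctx, preState, signed) // consensus state transition
    if err != nil {
        return err
    }
    isValidPayload, err := s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, signed, blockRoot) // engine newPayload
    if err != nil {
        return err
    }
    if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil { // persist block + state
        return err
    }
    return s.postBlockProcess(ctx, signed, blockRoot, postState, isValidPayload) // forkchoice, head, events
}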
@@ -163,33 +66,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
         }
     }
 
-    // If slasher is configured, forward the attestations in the block via
-    // an event feed for processing.
-    if features.Get().EnableSlasher {
-        // Feed the indexed attestation to slasher if enabled. This action
-        // is done in the background to avoid adding more load to this critical code path.
-        go func() {
-            // Using a different context to prevent timeouts as this operation can be expensive
-            // and we want to avoid affecting the critical code path.
-            ctx := context.TODO()
-            for _, att := range signed.Block().Body().Attestations() {
-                committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
-                if err != nil {
-                    log.WithError(err).Error("Could not get attestation committee")
-                    tracing.AnnotateError(span, err)
-                    return
-                }
-                indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
-                if err != nil {
-                    log.WithError(err).Error("Could not convert to indexed attestation")
-                    tracing.AnnotateError(span, err)
-                    return
-                }
-                s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
-            }
-        }()
-    }
-    justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
     start := time.Now()
     headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
     if err != nil {
@@ -242,46 +118,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
         },
     })
 
-    // Save justified check point to db.
-    postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
-    if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
-        if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
-            Epoch: justified.Epoch, Root: justified.Root[:],
-        }); err != nil {
-            return err
-        }
-    }
-
-    // Save finalized check point to db and more.
-    postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
-    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
-    if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
-        if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
-            return err
-        }
-        go func() {
-            // Send an event regarding the new finalized checkpoint over a common event feed.
-            stateRoot := signed.Block().StateRoot()
-            s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
-                Type: statefeed.FinalizedCheckpoint,
-                Data: &ethpbv1.EventFinalizedCheckpoint{
-                    Epoch:               postState.FinalizedCheckpoint().Epoch,
-                    Block:               postState.FinalizedCheckpoint().Root,
-                    State:               stateRoot[:],
-                    ExecutionOptimistic: isValidPayload,
-                },
-            })
-
-            // Use a custom deadline here, since this method runs asynchronously.
-            // We ignore the parent method's context and instead create a new one
-            // with a custom deadline, therefore using the background context instead.
-            depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
-            defer cancel()
-            if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
-                log.WithError(err).Error("Could not insert finalized deposits.")
-            }
-        }()
-    }
     defer reportAttestationInclusion(b)
     if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
         return err
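The checkpoint bookkeeping deleted here reappears behind two helpers, updateJustificationOnBlock and updateFinalizationOnBlock, which the new ReceiveBlock calls (see the final hunk). Their bodies are not shown in this diff; the following is a plausible sketch reconstructed from the deleted logic above — an assumption, not the committed implementation:

// Sketch only: mirrors the onBlock logic deleted above. The real helpers
// live elsewhere in this commit and may differ in detail.
func (s *Service) updateJustificationOnBlock(ctx context.Context, preState, postState state.BeaconState, currJustifiedEpoch primitives.Epoch) error {
    justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
    postEpoch := postState.CurrentJustifiedCheckpoint().Epoch
    preEpoch := preState.CurrentJustifiedCheckpoint().Epoch
    if justified.Epoch > currJustifiedEpoch || (justified.Epoch == postEpoch && justified.Epoch > preEpoch) {
        return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: justified.Epoch, Root: justified.Root[:]})
    }
    return nil
}

// Sketch only: the returned bool ("newFinalized") is what gates the
// background event/deposit work in the new ReceiveBlock.
func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postState state.BeaconState, currFinalizedEpoch primitives.Epoch) (bool, error) {
    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
    postEpoch := postState.FinalizedCheckpoint().Epoch
    preEpoch := preState.FinalizedCheckpoint().Epoch
    if finalized.Epoch > currFinalizedEpoch || (finalized.Epoch == postEpoch && finalized.Epoch > preEpoch) {
        return true, s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]})
    }
    return false, nil
}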
@@ -407,7 +243,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
         postVersionAndHeaders[i].version,
         postVersionAndHeaders[i].header, b)
     if err != nil {
-        return err
+        return s.handleInvalidExecutionError(ctx, err, blockRoots[i], b.Block().ParentRoot())
     }
     if isValidPayload {
         if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -720,3 +556,10 @@ func (s *Service) waitForSync() error {
         return errors.New("context closed, exiting goroutine")
     }
 }
+
+func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
+    if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
+        return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
+    }
+    return err
+}
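Note the pass-through branch: an error without a usable last-valid-hash is returned unchanged rather than triggering pruning. A minimal sketch of how a caller distinguishes the outcomes (helper names are from this diff; the scenario is illustrative):

// engineErr came from notifyNewPayload and is non-nil here.
err := s.handleInvalidExecutionError(ctx, engineErr, root, parentRoot)
if IsInvalidBlock(err) {
    // Invalid payload: when an LVH was available the error now also carries
    // the pruned ancestor roots (see InvalidAncestorRoots above).
} else {
    // Transient engine/communication failure: bubbled up unchanged.
}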
@@ -14,6 +14,7 @@ import (
     "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
     mathutil "github.com/prysmaticlabs/prysm/v4/math"
     ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/time"
     "github.com/prysmaticlabs/prysm/v4/time/slots"
     "go.opencensus.io/trace"
 )
@@ -209,35 +210,44 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
     return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
 }
 
-// inserts finalized deposits into our finalized deposit trie.
-func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
+// inserts finalized deposits into our finalized deposit trie, needs to be
+// called in the background
+func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
     ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
     defer span.End()
+    startTime := time.Now()
 
     // Update deposit cache.
     finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
     if err != nil {
-        return errors.Wrap(err, "could not fetch finalized state")
+        log.WithError(err).Error("could not fetch finalized state")
+        return
     }
     // We update the cache up to the last deposit index in the finalized block's state.
     // We can be confident that these deposits will be included in some block
     // because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
     eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
    if err != nil {
-        return errors.Wrap(err, "could not cast eth1 deposit index")
+        log.WithError(err).Error("could not cast eth1 deposit index")
+        return
     }
     // The deposit index in the state is always the index of the next deposit
     // to be included(rather than the last one to be processed). This was most likely
     // done as the state cannot represent signed integers.
-    eth1DepositIndex -= 1
-    if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex)); err != nil {
-        return err
+    finalizedEth1DepIdx := eth1DepositIndex - 1
+    if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx)); err != nil {
+        log.WithError(err).Error("could not insert finalized deposits")
+        return
     }
     // Deposit proofs are only used during state transition and can be safely removed to save space.
-    if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
-        return errors.Wrap(err, "could not prune deposit proofs")
+    if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
+        log.WithError(err).Error("could not prune deposit proofs")
     }
-    return nil
+    // Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
+    // to the provided eth1 deposit index.
+    s.cfg.DepositCache.PrunePendingDeposits(ctx, int64(eth1DepositIndex)) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
+
+    log.WithField("duration", time.Since(startTime).String()).Debug("Finalized deposit insertion completed")
 }
 
 // This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling
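The index arithmetic is easy to misread: the state's Eth1DepositIndex is the index of the next deposit to include, so a state with Eth1DepositIndex() == 8 finalizes the trie through deposit 7 (finalizedEth1DepIdx), while pending-deposit pruning deliberately uses the unadjusted 8 — the new PrunePendingDeposits test below pins exactly this. And since failures are now logged rather than returned, the function is meant to run in the background under its own deadline; a sketch of the invocation matching the new ReceiveBlock (depositDeadline is the 20s constant shown earlier in this diff):

// Background invocation with a dedicated deadline; sketch of the
// ReceiveBlock wiring shown in the final hunk of this diff.
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
    defer cancel()
    s.insertFinalizedDeposits(depCtx, finalized.Root)
}()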
@@ -41,103 +41,6 @@ import (
     logTest "github.com/sirupsen/logrus/hooks/test"
 )
 
-func TestStore_OnBlock(t *testing.T) {
-    service, tr := minimalTestService(t)
-    ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
-
-    var genesisStateRoot [32]byte
-    genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
-    util.SaveBlock(t, ctx, beaconDB, genesis)
-    validGenesisRoot, err := genesis.Block.HashTreeRoot()
-    require.NoError(t, err)
-    st, err := util.NewBeaconState()
-    require.NoError(t, err)
-    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
-    ojc := &ethpb.Checkpoint{}
-    stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
-    require.NoError(t, err)
-    require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
-    roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
-    require.NoError(t, err)
-    random := util.NewBeaconBlock()
-    random.Block.Slot = 1
-    random.Block.ParentRoot = validGenesisRoot[:]
-    util.SaveBlock(t, ctx, beaconDB, random)
-    randomParentRoot, err := random.Block.HashTreeRoot()
-    assert.NoError(t, err)
-
-    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
-    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
-    randomParentRoot2 := roots[1]
-    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
-    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
-    stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
-        validGenesisRoot, [32]byte{'r'}, ojc, ojc)
-    require.NoError(t, err)
-    require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
-
-    tests := []struct {
-        name          string
-        blk           *ethpb.SignedBeaconBlock
-        s             state.BeaconState
-        time          uint64
-        wantErrString string
-    }{
-        {
-            name:          "parent block root does not have a state",
-            blk:           util.NewBeaconBlock(),
-            s:             st.Copy(),
-            wantErrString: "could not reconstruct parent state",
-        },
-        {
-            name: "block is from the future",
-            blk: func() *ethpb.SignedBeaconBlock {
-                b := util.NewBeaconBlock()
-                b.Block.ParentRoot = randomParentRoot2
-                b.Block.Slot = params.BeaconConfig().FarFutureSlot
-                return b
-            }(),
-            s:             st.Copy(),
-            wantErrString: "is in the far distant future",
-        },
-        {
-            name: "could not get finalized block",
-            blk: func() *ethpb.SignedBeaconBlock {
-                b := util.NewBeaconBlock()
-                b.Block.ParentRoot = randomParentRoot[:]
-                b.Block.Slot = 2
-                return b
-            }(),
-            s:             st.Copy(),
-            wantErrString: "not descendant of finalized checkpoint",
-        },
-        {
-            name: "same slot as finalized block",
-            blk: func() *ethpb.SignedBeaconBlock {
-                b := util.NewBeaconBlock()
-                b.Block.Slot = 0
-                b.Block.ParentRoot = randomParentRoot2
-                return b
-            }(),
-            s:             st.Copy(),
-            wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            fRoot := bytesutil.ToBytes32(roots[0])
-            require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
-            root, err := tt.blk.Block.HashTreeRoot()
-            assert.NoError(t, err)
-            wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)
-            require.NoError(t, err)
-            err = service.onBlock(ctx, wsb, root)
-            assert.ErrorContains(t, tt.wantErrString, err)
-        })
-    }
-}
-
 func TestStore_OnBlockBatch(t *testing.T) {
     service, tr := minimalTestService(t)
     ctx := tr.ctx
@@ -657,7 +560,20 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
         wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
         require.NoError(t, err)
         require.NoError(t, fcs.NewSlot(ctx, i))
-        require.NoError(t, service.onBlock(ctx, wsb, r))
+        // Save current justified and finalized epochs for future use.
+        currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
+        currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
+
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
+        require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
+        require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
+        _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
+        require.NoError(t, err)
 
         testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
         require.NoError(t, err)
     }
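This same import-a-block sequence is repeated nearly verbatim across the updated tests below. A hypothetical test helper — not part of this diff, shown only to make the recurring shape explicit — could capture it:

// Hypothetical helper (not in this commit): the five-step sequence the
// updated tests repeat after onBlock was removed.
func importBlockForTest(t *testing.T, ctx context.Context, service *Service, wsb interfaces.ReadOnlySignedBeaconBlock, root [32]byte, isValidPayload bool) state.BeaconState {
    preState, err := service.getBlockPreState(ctx, wsb.Block())
    require.NoError(t, err)
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, isValidPayload))
    return postState
}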
@@ -692,7 +608,20 @@ func TestOnBlock_CanFinalize(t *testing.T) {
         require.NoError(t, err)
         wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
         require.NoError(t, err)
-        require.NoError(t, service.onBlock(ctx, wsb, r))
+        // Save current justified and finalized epochs for future use.
+        currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
+        currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
+
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
+        require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
+        require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
+        _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
+        require.NoError(t, err)
 
         testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
         require.NoError(t, err)
     }
@@ -714,8 +643,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
 
 func TestOnBlock_NilBlock(t *testing.T) {
     service, tr := minimalTestService(t)
 
-
-    err := service.onBlock(tr.ctx, nil, [32]byte{})
+    err := service.postBlockProcess(tr.ctx, nil, [32]byte{}, nil, true)
     require.Equal(t, true, IsInvalidBlock(err))
 }
@@ -729,11 +657,11 @@ func TestOnBlock_InvalidSignature(t *testing.T) {
     blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
     require.NoError(t, err)
     blk.Signature = []byte{'a'} // Mutate the signature.
-    r, err := blk.Block.HashTreeRoot()
-    require.NoError(t, err)
     wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, r)
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    _, err = service.validateStateTransition(ctx, preState, wsb)
     require.Equal(t, true, IsInvalidBlock(err))
 }
@@ -757,7 +685,13 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
         require.NoError(t, err)
         wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
         require.NoError(t, err)
-        require.NoError(t, service.onBlock(ctx, wsb, r))
+
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
+        require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, false))
         testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
         require.NoError(t, err)
     }
@@ -783,7 +717,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
             Signature: zeroSig[:],
         }, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
     }
-    assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
+    service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
     fDeposits := depositCache.FinalizedDeposits(ctx)
     assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
     deps := depositCache.AllDeposits(ctx, big.NewInt(107))
@@ -792,6 +726,45 @@ func TestInsertFinalizedDeposits(t *testing.T) {
     }
 }
 
+func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
+    service, tr := minimalTestService(t)
+    ctx, depositCache := tr.ctx, tr.dc
+
+    gs, _ := util.DeterministicGenesisState(t, 32)
+    require.NoError(t, service.saveGenesisData(ctx, gs))
+    gs = gs.Copy()
+    assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
+    assert.NoError(t, gs.SetEth1DepositIndex(8))
+    assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
+    var zeroSig [96]byte
+    for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
+        root := []byte(strconv.Itoa(int(i)))
+        assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
+            PublicKey:             bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
+            WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
+            Amount:                0,
+            Signature:             zeroSig[:],
+        }, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
+        depositCache.InsertPendingDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
+            PublicKey:             bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
+            WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
+            Amount:                0,
+            Signature:             zeroSig[:],
+        }, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
+    }
+    service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
+    fDeposits := depositCache.FinalizedDeposits(ctx)
+    assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
+    deps := depositCache.AllDeposits(ctx, big.NewInt(107))
+    for _, d := range deps {
+        assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
+    }
+    pendingDeps := depositCache.PendingContainers(ctx, nil)
+    for _, d := range pendingDeps {
+        assert.DeepEqual(t, true, d.Index >= 8, "Pending deposits were not pruned")
+    }
+}
+
 func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
     service, tr := minimalTestService(t)
     ctx, depositCache := tr.ctx, tr.dc
@@ -819,7 +792,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
     // Insert 3 deposits before hand.
     require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2))
 
-    assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
+    service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
     fDeposits := depositCache.FinalizedDeposits(ctx)
     assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
@@ -829,7 +802,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
     }
 
     // Insert New Finalized State with higher deposit count.
-    assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
+    service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
     fDeposits = depositCache.FinalizedDeposits(ctx)
     assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
     deps = depositCache.AllDeposits(ctx, big.NewInt(112))
@@ -1131,19 +1104,35 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
     var wg sync.WaitGroup
     wg.Add(4)
     go func() {
-        require.NoError(t, service.onBlock(ctx, wsb1, r1))
+        preState, err := service.getBlockPreState(ctx, wsb1.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb1)
+        require.NoError(t, err)
+        require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
         wg.Done()
     }()
     go func() {
-        require.NoError(t, service.onBlock(ctx, wsb2, r2))
+        preState, err := service.getBlockPreState(ctx, wsb2.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb2)
+        require.NoError(t, err)
+        require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
         wg.Done()
     }()
     go func() {
-        require.NoError(t, service.onBlock(ctx, wsb3, r3))
+        preState, err := service.getBlockPreState(ctx, wsb3.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb3)
+        require.NoError(t, err)
+        require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
         wg.Done()
     }()
     go func() {
-        require.NoError(t, service.onBlock(ctx, wsb4, r4))
+        preState, err := service.getBlockPreState(ctx, wsb4.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb4)
+        require.NoError(t, err)
+        require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
         wg.Done()
     }()
     wg.Wait()
@@ -1211,7 +1200,13 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        require.NoError(t, service.onBlock(ctx, wsb, root))
+
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
     }
 
     for i := 6; i < 12; i++ {
@@ -1224,7 +1219,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        err = service.onBlock(ctx, wsb, root)
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        err = service.postBlockProcess(ctx, wsb, root, postState, false)
         require.NoError(t, err)
     }
@@ -1238,7 +1238,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        err = service.onBlock(ctx, wsb, root)
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        err = service.postBlockProcess(ctx, wsb, root, postState, false)
         require.NoError(t, err)
     }
     // Check that we haven't justified the second epoch yet
@@ -1255,7 +1260,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
     require.NoError(t, err)
     firstInvalidRoot, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, firstInvalidRoot)
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
     require.NoError(t, err)
     jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1278,7 +1288,12 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
     require.NoError(t, err)
     root, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err = service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, root, postState, false)
     require.ErrorContains(t, "received an INVALID payload from execution engine", err)
     // Check that forkchoice's head is the last invalid block imported. The
     // store's headroot is the previous head (since the invalid block did
@@ -1301,7 +1316,13 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
     require.NoError(t, err)
     root, err = b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err = service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, root, postState, true)
     require.NoError(t, err)
     // Check the newly imported block is head, it justified the right
     // checkpoint and the node is no longer optimistic
@@ -1358,7 +1379,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        require.NoError(t, service.onBlock(ctx, wsb, root))
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
     }
 
     for i := 6; i < 12; i++ {
@@ -1371,7 +1397,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        err = service.onBlock(ctx, wsb, root)
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        err = service.postBlockProcess(ctx, wsb, root, postState, false)
         require.NoError(t, err)
     }
@@ -1385,7 +1416,13 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
        require.NoError(t, err)
        root, err := b.Block.HashTreeRoot()
        require.NoError(t, err)
-        err = service.onBlock(ctx, wsb, root)
+
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        err = service.postBlockProcess(ctx, wsb, root, postState, false)
         require.NoError(t, err)
     }
     // Check that we haven't justified the second epoch yet
@@ -1402,7 +1439,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, err)
     firstInvalidRoot, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, firstInvalidRoot)
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
     require.NoError(t, err)
     jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1425,7 +1467,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, err)
     root, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
+    require.NoError(t, err)
+    _, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
     require.ErrorContains(t, "received an INVALID payload from execution engine", err)
     // Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
     // not finish importing and it was never imported to forkchoice). Check
@@ -1448,7 +1495,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, err)
     root, err = b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err = service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, root, postState, true)
     require.NoError(t, err)
     // Check the newly imported block is head, it justified the right
     // checkpoint and the node is no longer optimistic
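validateExecutionOnBlock is new in this commit but its body is not shown here. From its call sites — it takes the pre-state version and header plus the block and root, and returns the isValidPayload flag — it plausibly wraps notifyNewPayload together with handleInvalidExecutionError and the merge-transition check deleted from onBlock. A sketch under that assumption, not the committed body:

// Assumption: reconstructed from call sites in this diff, not the actual body.
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
    isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
    if err != nil {
        return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
    }
    if signed.Version() < version.Capella && isValidPayload {
        if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
            return isValidPayload, err
        }
    }
    return isValidPayload, nil
}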
@@ -1506,7 +1558,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        require.NoError(t, service.onBlock(ctx, wsb, root))
+
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
     }
 
     for i := 6; i < 12; i++ {
@@ -1519,7 +1577,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        err = service.onBlock(ctx, wsb, root)
+
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        err = service.postBlockProcess(ctx, wsb, root, postState, false)
         require.NoError(t, err)
     }
@@ -1533,7 +1597,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, err)
     lastValidRoot, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, lastValidRoot)
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
     require.NoError(t, err)
     // save the post state and the payload Hash of this block since it will
     // be the LVH
@@ -1555,7 +1624,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, err)
     invalidRoots[i-13], err = b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, invalidRoots[i-13])
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, invalidRoots[i-13], postState, false)
     require.NoError(t, err)
     }
     // Check that we have justified the second epoch
@@ -1576,7 +1650,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, err)
     root, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
+    require.NoError(t, err)
+    _, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
     require.ErrorContains(t, "received an INVALID payload from execution engine", err)
 
     // Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1610,7 +1689,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, err)
     root, err = b.Block.HashTreeRoot()
     require.NoError(t, err)
-    require.NoError(t, service.onBlock(ctx, wsb, root))
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err = service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, true))
     // Check that the head is still INVALID and the node is still optimistic
     require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
     optimistic, err = service.IsOptimistic(ctx)
@@ -1628,7 +1712,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, err)
     root, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, root, postState, true)
     require.NoError(t, err)
     st, err = service.cfg.StateGen.StateByRoot(ctx, root)
     require.NoError(t, err)
@@ -1648,7 +1737,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, err)
     root, err = b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err = service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, root, postState, true)
     require.NoError(t, err)
     require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
     sjc = service.CurrentJustifiedCheckpt()
@@ -1699,7 +1794,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        require.NoError(t, service.onBlock(ctx, wsb, root))
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
     }
 
     for i := 6; i < 12; i++ {
@@ -1712,7 +1812,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
         require.NoError(t, err)
         root, err := b.Block.HashTreeRoot()
         require.NoError(t, err)
-        err = service.onBlock(ctx, wsb, root)
+        preState, err := service.getBlockPreState(ctx, wsb.Block())
+        require.NoError(t, err)
+        postState, err := service.validateStateTransition(ctx, preState, wsb)
+        require.NoError(t, err)
+        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+        err = service.postBlockProcess(ctx, wsb, root, postState, false)
         require.NoError(t, err)
     }
@@ -1726,7 +1831,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, err)
     lastValidRoot, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, lastValidRoot)
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
+    err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
     require.NoError(t, err)
     // save the post state and the payload Hash of this block since it will
     // be the LVH
@@ -1747,7 +1857,18 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, err)
     root, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    require.NoError(t, service.onBlock(ctx, wsb, root))
+    // Save current justified and finalized epochs for future use.
+    currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
+    currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
+    require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
+    _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
+    require.NoError(t, err)
     }
     // Check that we have justified the second epoch
     jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1766,7 +1887,11 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, err)
     root, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    err = service.onBlock(ctx, wsb, root)
+    preState, err = service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
+    require.NoError(t, err)
+    _, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
     require.ErrorContains(t, "received an INVALID payload from execution engine", err)
 
     // Check that the headroot/state are not in DB and restart the node
@@ -1848,7 +1973,12 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
     require.NoError(t, err)
     root, err := b.Block.HashTreeRoot()
     require.NoError(t, err)
-    require.NoError(t, service.onBlock(ctx, wsb, root))
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
+    require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
 
     st, err = service.HeadState(ctx)
     require.NoError(t, err)
@@ -128,7 +128,13 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
     require.NoError(t, err)
     wsb, err := blocks.NewSignedBeaconBlock(blk)
     require.NoError(t, err)
-    require.NoError(t, service.onBlock(ctx, wsb, tRoot))
+
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
+    require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
     copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
     require.NoError(t, err)
     require.Equal(t, 2, fcs.NodeCount())
@@ -178,7 +184,13 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
     require.NoError(t, err)
     wsb, err := blocks.NewSignedBeaconBlock(blk)
     require.NoError(t, err)
-    require.NoError(t, service.onBlock(ctx, wsb, tRoot))
+
+    preState, err := service.getBlockPreState(ctx, wsb.Block())
+    require.NoError(t, err)
+    postState, err := service.validateStateTransition(ctx, preState, wsb)
+    require.NoError(t, err)
+    require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
+    require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
     require.Equal(t, 2, fcs.NodeCount())
     require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
     require.Equal(t, tRoot, service.head.root)
@@ -7,11 +7,18 @@ import (
     "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
+    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
     forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v4/config/features"
     "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
     "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
     "github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
+    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
     ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
     "github.com/prysmaticlabs/prysm/v4/runtime/version"
     "github.com/prysmaticlabs/prysm/v4/time"
     "github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -47,15 +54,65 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}

preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch

preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}

postState, err := s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
isValidPayload, err := s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
// The rest of block processing takes a lock on forkchoice.
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}

// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
}

if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}

newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(ctx, blockCopy, postState, finalized)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
cancel()
}()
}

// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
}

// Handle post block operations such as pruning exits and bls messages if incoming block is the head
if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
log.WithError(err).Error("Could not prune canonical objects from pool ")
@@ -86,6 +143,8 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
log.WithError(err).Error("Unable to log state transition data")
}

chainServiceProcessingTime.Observe(float64(time.Since(receivedTime).Milliseconds()))

return nil
}

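For orientation, a condensed sketch of the ordering this refactor establishes in ReceiveBlock (not the actual implementation): consensus validation and the engine call both happen before the forkchoice lock is taken, so a slow execution client cannot stall other forkchoice users. Names match the methods in this diff; error handling is deliberately elided.

// Hedged sketch only; the real ReceiveBlock handles errors, tracing, and metrics.
func receiveBlockOrder(ctx context.Context, s *Service, blk interfaces.ReadOnlySignedBeaconBlock, root [32]byte) error {
	preState, _ := s.getBlockPreState(ctx, blk.Block())                 // 1. load the block's prestate
	ver, header, _ := getStateVersionAndPayload(preState)               // 2. capture version + payload header
	postState, _ := s.validateStateTransition(ctx, preState, blk)       // 3. consensus rules, no lock held
	valid, _ := s.validateExecutionOnBlock(ctx, ver, header, blk, root) // 4. engine call, no lock held
	s.cfg.ForkChoiceStore.Lock()                                        // 5. only now take the forkchoice lock
	defer s.cfg.ForkChoiceStore.Unlock()
	if err := s.savePostStateInfo(ctx, root, blk, postState); err != nil {
		return err
	}
	return s.postBlockProcess(ctx, blk, root, postState, valid) // 6. insert into forkchoice, update head
}
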
@@ -226,3 +285,109 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {

return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}

// validateStateTransition performs the state transition function and returns the post state,
// or an error if the block fails to verify the consensus rules.
func (s *Service) validateStateTransition(ctx context.Context, preState state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
b := signed.Block()
// Verify that the parent block is in forkchoice
parentRoot := b.ParentRoot()
if !s.InForkchoice(parentRoot) {
return nil, ErrNotDescendantOfFinalized
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return nil, invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
return postState, nil
}

// updateJustificationOnBlock updates the justified checkpoint on DB if the
// incoming block has updated it on forkchoice.
func (s *Service) updateJustificationOnBlock(ctx context.Context, preState, postState state.BeaconState, preJustifiedEpoch primitives.Epoch) error {
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > preJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}
return nil
}

// updateFinalizationOnBlock performs some duties when the incoming block
// changes the finalized checkpoint. It returns true when this has happened.
func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postState state.BeaconState, preFinalizedEpoch primitives.Epoch) (bool, error) {
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized.Epoch > preFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return true, err
}
return true, nil
}
return false, nil
}

// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called in the background.
func (s *Service) sendNewFinalizedEvent(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState, finalized *forkchoicetypes.Checkpoint) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
isValidPayload = s.head.optimistic
}
s.headLock.RUnlock()

// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
Epoch: postState.FinalizedCheckpoint().Epoch,
Block: postState.FinalizedCheckpoint().Root,
State: stateRoot[:],
ExecutionOptimistic: isValidPayload,
},
})
}

// sendBlockAttestationsToSlasher sends the incoming block's attestations to the slasher.
func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySignedBeaconBlock, preState state.BeaconState) {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
}
s.cfg.SlasherAttestationsFeed.Send(indexedAtt)
}
}

// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
return isValidPayload, err
}
}
return isValidPayload, nil
}

@@ -195,6 +195,7 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}

// ValidateSyncMessageTime validates a sync message to ensure that the provided slot is valid.
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
return err
@@ -223,13 +224,12 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
syncErr := fmt.Errorf(
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
"sync message time %v (message slot %d) not within allowable range of %v to %v (current slot %d)",
messageTime,
slot,
lowerBound,
uint64(lowerBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
upperBound,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
currentSlot,
)
// Wrap error message if sync message is too late.
if messageTime.Before(lowerBound) {

@@ -311,7 +311,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 16,
genesisTime: prysmTime.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
},
wantedErr: "(slot 16) not within allowable range of",
wantedErr: "(message slot 16) not within allowable range of",
},
{
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
@@ -327,7 +327,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 100,
genesisTime: prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconNetworkConfig().MaximumGossipClockDisparity + 1000*time.Millisecond),
},
wantedErr: "(slot 100) not within allowable range of",
wantedErr: "(message slot 100) not within allowable range of",
},
{
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
@@ -343,7 +343,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
syncMessageSlot: 101,
genesisTime: prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "(slot 101) not within allowable range of",
wantedErr: "(message slot 101) not within allowable range of",
},
{
name: "sync_message.slot is well beyond current slot",

@@ -141,7 +141,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// # FillFwd sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index

@@ -205,3 +205,17 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
Root: bRoot,
}, nil
}

// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// The value is defined in the p2p spec configuration:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// TODO: ask around to understand why the FAQ section of the p2p spec shows
// multiplying and dividing the churn limit quotient by the max safety decay value of 100,
// but in the definition of the constant, the simpler equation below is used.
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
}

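To make the comment's arithmetic concrete, here is a worked check, assuming the mainnet parameters MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs and CHURN_LIMIT_QUOTIENT = 65536:

// Worked arithmetic for MinEpochsForBlockRequests under assumed mainnet values.
const (
	minValidatorWithdrawabilityDelay = 256   // assumed mainnet MIN_VALIDATOR_WITHDRAWABILITY_DELAY
	churnLimitQuotient               = 65536 // assumed mainnet CHURN_LIMIT_QUOTIENT
)

// 256 + 65536/2 = 33024 epochs; at 32 slots/epoch and 12 s/slot that is
// 33024 * 32 * 12 s ≈ 147 days, i.e. the "~5 months" cited in the comment above.
const minEpochsForBlockRequests = minValidatorWithdrawabilityDelay + churnLimitQuotient/2 // = 33024
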
@@ -281,3 +281,19 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {

return beaconState
}

func TestMinEpochsForBlockRequests(t *testing.T) {
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
// )
//
// Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months).
require.Equal(t, expected, helpers.MinEpochsForBlockRequests())
}

@@ -16,6 +16,7 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/backup:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/backup"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

@@ -56,7 +57,7 @@ type ReadOnlyDatabase interface {
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
}

// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -107,7 +108,7 @@ type HeadAccessDatabase interface {

// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
}

// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"archived_point.go",
"backfill.go",
"backup.go",
"blocks.go",
"checkpoint.go",
@@ -48,6 +49,7 @@ go_library(
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
@@ -73,6 +75,7 @@ go_test(
name = "go_default_test",
srcs = [
"archived_point_test.go",
"backfill_test.go",
"backup_test.go",
"blocks_test.go",
"checkpoint_test.go",
@@ -107,6 +110,7 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",

39
beacon-chain/db/kv/backfill.go
Normal file
@@ -0,0 +1,39 @@
package kv

import (
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)

// SaveBackfillStatus encodes the given BackfillStatus value and writes it to the blocks bucket.
func (s *Store) SaveBackfillStatus(ctx context.Context, bf *dbval.BackfillStatus) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bfb, err := proto.Marshal(bf)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillStatusKey, bfb)
})
}

// BackfillStatus retrieves the most recently saved BackfillStatus value,
// wrapping ErrNotFound if one has never been saved.
func (s *Store) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillStatus")
defer span.End()
bf := &dbval.BackfillStatus{}
err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
bs := bucket.Get(backfillStatusKey)
if len(bs) == 0 {
return errors.Wrap(ErrNotFound, "BackfillStatus not found")
}
return proto.Unmarshal(bs, bf)
})
return bf, err
}
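A hedged sketch of how a caller might consume these accessors at startup, treating an absent key as "backfill not initialized". The helper name and the fallback policy are assumptions for illustration, not code from this diff:

// loadOrInitBackfillStatus is a hypothetical helper: it returns the persisted
// BackfillStatus when one exists, and (nil, nil) when the key has never been
// written, relying on BackfillStatus wrapping ErrNotFound in that case.
func loadOrInitBackfillStatus(ctx context.Context, s *Store) (*dbval.BackfillStatus, error) {
	bf, err := s.BackfillStatus(ctx)
	if err != nil {
		if errors.Is(err, ErrNotFound) {
			return nil, nil // no checkpoint-sync origin saved; nothing to backfill
		}
		return nil, err
	}
	return bf, nil
}
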
35
beacon-chain/db/kv/backfill_test.go
Normal file
@@ -0,0 +1,35 @@
package kv

import (
"context"
"testing"

"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)

func TestBackfillRoundtrip(t *testing.T) {
db := setupDB(t)
b := &dbval.BackfillStatus{}
b.LowSlot = 23
b.LowRoot = bytesutil.PadTo([]byte("low"), 32)
b.LowParentRoot = bytesutil.PadTo([]byte("parent"), 32)
m, err := proto.Marshal(b)
require.NoError(t, err)
ub := &dbval.BackfillStatus{}
require.NoError(t, proto.Unmarshal(m, ub))
require.Equal(t, b.LowSlot, ub.LowSlot)
require.DeepEqual(t, b.LowRoot, ub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, ub.LowParentRoot)

ctx := context.Background()
require.NoError(t, db.SaveBackfillStatus(ctx, b))
dbub, err := db.BackfillStatus(ctx)
require.NoError(t, err)

require.Equal(t, b.LowSlot, dbub.LowSlot)
require.DeepEqual(t, b.LowRoot, dbub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, dbub.LowParentRoot)
}

@@ -70,25 +70,6 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
return root, err
}

// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()

var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})

return root, err
}

// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -417,17 +398,6 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
})
}

// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}

// HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
// The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance

@@ -92,23 +92,6 @@ var blockTests = []struct {
},
}

func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)

var expected [32]byte
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)

}

func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := primitives.Slot(20)

@@ -57,8 +57,8 @@ var (
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// tracking data about an ongoing backfill
backfillStatusKey = []byte("backfill-status")

// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -17,18 +18,6 @@ import (
// syncing, using the provided values as their point of origin. This is an alternative
// to syncing from genesis, and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
genesisRoot, err := s.GenesisBlockRoot(ctx)
if err != nil {
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}

cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
@@ -50,11 +39,24 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
blk := wblk.Block()

// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}

pr := blk.ParentRoot()
bf := &dbval.BackfillStatus{
LowSlot: uint64(wblk.Block().Slot()),
LowRoot: blockRoot[:],
LowParentRoot: pr[:],
OriginRoot: blockRoot[:],
OriginSlot: uint64(wblk.Block().Slot()),
}

if err = s.SaveBackfillStatus(ctx, bf); err != nil {
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
}

log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")

@@ -7,7 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
)

func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok {
@@ -16,7 +16,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
// return early if the parent is LVH
if node.payloadHash == payloadHash {
if node.payloadHash == lastValidHash {
return invalidRoots, nil
}
} else {
@@ -28,7 +28,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
}
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}

@@ -205,21 +205,28 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}

bfs := backfill.NewStatus(beacon.db)
if err := bfs.Reload(ctx); err != nil {
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}

bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}
bf, err := backfill.NewService(ctx, bfs, beacon.clockWaiter, beacon.fetchP2P())
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
return nil, err
}

log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}

log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
@@ -496,8 +503,8 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}

func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.StatusUpdater, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithAvailableBlocker(bfs)}
sg := stategen.New(b.db, fc, opts...)

cp, err := b.db.FinalizedCheckpoint(ctx)

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"addr_factory.go",
"assigner.go",
"broadcaster.go",
"config.go",
"connection_gater.go",

1
beacon-chain/p2p/assigner.go
Normal file
@@ -0,0 +1 @@
package p2p
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"assigner.go",
"log.go",
"status.go",
],
@@ -14,6 +15,7 @@ go_library(
deps = [
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -28,6 +30,7 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

72
beacon-chain/p2p/peers/assigner.go
Normal file
@@ -0,0 +1,72 @@
package peers

import (
"context"
"sync"
"time"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/sirupsen/logrus"
)

// handshakePollingInterval is a polling interval for checking the number of received handshakes.
var handshakePollingInterval = 5 * time.Second

// NewAssigner initializes an Assigner with the given peer Status, the maximum
// number of peers to hand out, and the target finalized epoch.
func NewAssigner(ctx context.Context, s *Status, max int, finalized primitives.Epoch) *Assigner {
return &Assigner{
ctx: ctx,
ps: s,
max: max,
finalized: finalized,
}
}

// Assigner hands out peers that agree on the canonical chain for the configured finalized epoch.
type Assigner struct {
sync.Mutex
ctx context.Context
ps *Status
max int
finalized primitives.Epoch
}

// ErrInsufficientSuitable is returned when the number of suitable peers is below the required minimum.
var ErrInsufficientSuitable = errors.New("no suitable peers")

// freshPeers returns the best peers that agree on a canonical block for the configured
// finalized epoch, or an error when fewer than the required minimum are suitable.
func (a *Assigner) freshPeers() ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.finalized)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Info("Unable to assign peer while suitable peers < required")
return nil, ErrInsufficientSuitable
}
return peers, nil
}

// Assign uses the BestFinalized method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
best, err := a.freshPeers()
ps := make([]peer.ID, 0, n)
if err != nil {
return nil, err
}
for _, p := range best {
if !busy[p] {
ps = append(ps, p)
if len(ps) == n {
return ps, nil
}
}
}
return ps, nil
}

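A hedged usage sketch for Assign: a syncing component asks for a couple of peers that are not already in use and marks them busy while requests are in flight. The helper name, the busy-tracking policy, and the request callback are assumptions for illustration, not part of this diff:

// requestFromFreshPeers is a hypothetical caller built on Assigner.Assign.
func requestFromFreshPeers(a *Assigner, busy map[peer.ID]bool, request func(peer.ID)) error {
	pids, err := a.Assign(busy, 2) // ask for at most 2 peers not already in use
	if err != nil {
		// ErrInsufficientSuitable means fewer suitable peers than required; the caller retries later.
		return err
	}
	for _, pid := range pids {
		busy[pid] = true // the caller is responsible for clearing this, under its own lock, when done
		go request(pid)
	}
	return nil
}
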
@@ -108,12 +108,31 @@ func (s *Store) DeletePeerData(pid peer.ID) {
}

// SetTrustedPeers sets our desired trusted peer set.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) SetTrustedPeers(peers []peer.ID) {
for _, p := range peers {
s.trustedPeers[p] = true
}
}

// GetTrustedPeers gets our desired trusted peer ids.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) GetTrustedPeers() []peer.ID {
peers := []peer.ID{}
for p := range s.trustedPeers {
peers = append(peers, p)
}
return peers
}

// DeleteTrustedPeers removes peers from the trusted peer set.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) DeleteTrustedPeers(peers []peer.ID) {
for _, p := range peers {
delete(s.trustedPeers, p)
}
}

// Peers returns map of peer data objects.
// Important: it is assumed that the store mutex is locked when calling this method.
func (s *Store) Peers() map[peer.ID]*PeerData {

@@ -96,4 +96,16 @@ func TestStore_TrustedPeers(t *testing.T) {
assert.Equal(t, true, store.IsTrustedPeer(pid1))
assert.Equal(t, true, store.IsTrustedPeer(pid2))
assert.Equal(t, true, store.IsTrustedPeer(pid3))

tPeers = store.GetTrustedPeers()
assert.Equal(t, 3, len(tPeers))

store.DeleteTrustedPeers(tPeers)
tPeers = store.GetTrustedPeers()
assert.Equal(t, 0, len(tPeers))

assert.Equal(t, false, store.IsTrustedPeer(pid1))
assert.Equal(t, false, store.IsTrustedPeer(pid2))
assert.Equal(t, false, store.IsTrustedPeer(pid3))

}

@@ -560,6 +560,9 @@ func (p *Status) Prune() {
notBadPeer := func(pid peer.ID) bool {
return !p.isBad(pid)
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
score float64
@@ -567,7 +570,8 @@ func (p *Status) Prune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) {
// Do not prune trusted peers; a peer only becomes prunable after it is deleted from the trusted set.
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.Scorers().ScoreNoLock(pid),
@@ -608,6 +612,9 @@ func (p *Status) deprecatedPrune() {
notBadPeer := func(peerData *peerdata.PeerData) bool {
return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
}
type peerResp struct {
pid peer.ID
badResp int
@@ -615,7 +622,8 @@ func (p *Status) deprecatedPrune() {
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
// Do not prune trusted peers; a peer only becomes prunable after it is deleted from the trusted set.
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,

@@ -912,6 +920,32 @@ func (p *Status) SetTrustedPeers(peers []peer.ID) {
p.store.SetTrustedPeers(peers)
}

// GetTrustedPeers returns a list of all trusted peers' ids.
func (p *Status) GetTrustedPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
return p.store.GetTrustedPeers()
}

// DeleteTrustedPeers removes peers from the trusted peer set.
func (p *Status) DeleteTrustedPeers(peers []peer.ID) {
p.store.Lock()
defer p.store.Unlock()
p.store.DeleteTrustedPeers(peers)
}

// IsTrustedPeers returns whether the given peer is a trusted peer.
func (p *Status) IsTrustedPeers(pid peer.ID) bool {
p.store.RLock()
defer p.store.RUnlock()
return p.isTrustedPeers(pid)
}

// isTrustedPeers is the lock-free version of IsTrustedPeers.
func (p *Status) isTrustedPeers(pid peer.ID) bool {
return p.store.IsTrustedPeer(pid)
}

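Taken together, a hedged end-to-end sketch of the trusted-peer API these hunks add; p is assumed to be a *peers.Status and pid1, pid2 are placeholder peer.IDs:

// Assumed setup, for illustration only.
p.SetTrustedPeers([]peer.ID{pid1, pid2}) // mark as trusted; exempt from pruning
if p.IsTrustedPeers(pid1) {
	// trusted peers are watched by ensurePeerConnections and skipped by Prune
}
p.DeleteTrustedPeers([]peer.ID{pid1, pid2}) // now prunable again like any other peer
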
// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {

@@ -802,6 +802,11 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
}
}
p.SetTrustedPeers(trustedPeers)

// Assert we have correct trusted peers
trustedPeers = p.GetTrustedPeers()
assert.Equal(t, 6, len(trustedPeers))

// Assert all peers more than max are prunable.
peersToPrune = p.PeersToPrune()
assert.Equal(t, 16, len(peersToPrune))
@@ -812,6 +817,34 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
assert.NotEqual(t, pid.String(), tPid.String())
}
}

// Add more peers to check if trusted peers can be pruned after they are deleted from trusted peer set.
for i := 0; i < 9; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}

// Delete trusted peers.
p.DeleteTrustedPeers(trustedPeers)

peersToPrune = p.PeersToPrune()
assert.Equal(t, 25, len(peersToPrune))

// Check that trusted peers are pruned.
for _, tPid := range trustedPeers {
pruned := false
for _, pid := range peersToPrune {
if pid.String() == tPid.String() {
pruned = true
}
}
assert.Equal(t, true, pruned)
}

// Assert we have zero trusted peers
trustedPeers = p.GetTrustedPeers()
assert.Equal(t, 0, len(trustedPeers))

for _, pid := range peersToPrune {
dir, err := p.Direction(pid)
require.NoError(t, err)
@@ -821,8 +854,8 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Ensure it is in the descending order.
currScore := p.Scorers().Score(peersToPrune[0])
for _, pid := range peersToPrune {
score := p.Scorers().BadResponsesScorer().Score(pid)
assert.Equal(t, true, currScore >= score)
score := p.Scorers().Score(pid)
assert.Equal(t, true, currScore <= score)
currScore = score
}
}

@@ -174,9 +174,9 @@ func (s *Service) Start() {
s.awaitStateInitialized()
s.isPreGenesis = false

var peersToWatch []string
var relayNodes []string
if s.cfg.RelayNodeAddr != "" {
peersToWatch = append(peersToWatch, s.cfg.RelayNodeAddr)
relayNodes = append(relayNodes, s.cfg.RelayNodeAddr)
if err := dialRelayNode(s.ctx, s.host, s.cfg.RelayNodeAddr); err != nil {
log.WithError(err).Errorf("Could not dial relay node")
}
@@ -213,8 +213,7 @@ func (s *Service) Start() {
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
s.peers.SetTrustedPeers(pids)
peersToWatch = append(peersToWatch, s.cfg.StaticPeers...)
s.connectWithAllPeers(addrs)
s.connectWithAllTrustedPeers(addrs)
}
// Initialize metadata according to the
// current epoch.
@@ -226,7 +225,7 @@ func (s *Service) Start() {

// Periodic functions.
async.RunEvery(s.ctx, params.BeaconNetworkConfig().TtfbTimeout, func() {
ensurePeerConnections(s.ctx, s.host, peersToWatch...)
ensurePeerConnections(s.ctx, s.host, s.peers, relayNodes...)
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
@@ -399,6 +398,24 @@ func (s *Service) awaitStateInitialized() {
}
}

func (s *Service) connectWithAllTrustedPeers(multiAddrs []multiaddr.Multiaddr) {
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {
log.WithError(err).Error("Could not convert to peer address infos from multiaddresses")
return
}
for _, info := range addrInfos {
// add peer into peer status
s.peers.Add(nil, info.ID, info.Addrs[0], network.DirUnknown)
// make each dial non-blocking
go func(info peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
}(info)
}
}

func (s *Service) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) {
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {

@@ -5,28 +5,52 @@ import (

"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
)

// ensurePeerConnections will attempt to reestablish connection to the peers
// if there are currently no connections to that peer.
func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
if len(peers) == 0 {
return
}
for _, p := range peers {
if p == "" {
func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status, relayNodes ...string) {
// Rebuild peersToWatch on every call: add relay nodes and trusted peers.
var peersToWatch []*peer.AddrInfo

// add relay nodes
for _, node := range relayNodes {
if node == "" {
continue
}
peerInfo, err := MakePeer(p)
peerInfo, err := MakePeer(node)
if err != nil {
log.WithError(err).Error("Could not make peer")
continue
}
peersToWatch = append(peersToWatch, peerInfo)
}

c := h.Network().ConnsToPeer(peerInfo.ID)
// add trusted peers
trustedPeers := peers.GetTrustedPeers()
for _, trustedPeer := range trustedPeers {
maddr, err := peers.Address(trustedPeer)

// avoid invalid trusted peers
if err != nil || maddr == nil {
log.WithField("peer", trustedPeer).WithError(err).Error("Could not get peer address")
continue
}
peerInfo := &peer.AddrInfo{ID: trustedPeer}
peerInfo.Addrs = []ma.Multiaddr{maddr}
peersToWatch = append(peersToWatch, peerInfo)
}

if len(peersToWatch) == 0 {
return
}
for _, p := range peersToWatch {
c := h.Network().ConnsToPeer(p.ID)
if len(c) == 0 {
if err := connectWithTimeout(ctx, h, peerInfo); err != nil {
log.WithField("peer", peerInfo.ID).WithField("addrs", peerInfo.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
if err := connectWithTimeout(ctx, h, p); err != nil {
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
continue
}
}

@@ -32,10 +32,12 @@ go_library(
"//beacon-chain/rpc/eth/rewards:go_default_library",
"//beacon-chain/rpc/eth/validator:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/prysm/validator:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

25
beacon-chain/rpc/core/BUILD.bazel
Normal file
@@ -0,0 +1,25 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"errors.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
],
)

49
beacon-chain/rpc/core/errors.go
Normal file
@@ -0,0 +1,49 @@
package core

import (
"net/http"

"google.golang.org/grpc/codes"
)

// ErrorReason describes the category of an RPC error, independent of transport.
type ErrorReason uint8

const (
Internal ErrorReason = iota
Unavailable
BadRequest
// Add more errors as needed
)

// RpcError pairs an error with a transport-agnostic reason that can be mapped
// to a gRPC code or an HTTP status.
type RpcError struct {
Err error
Reason ErrorReason
}

// ErrorReasonToGRPC maps an ErrorReason to the corresponding gRPC status code.
func ErrorReasonToGRPC(reason ErrorReason) codes.Code {
switch reason {
case Internal:
return codes.Internal
case Unavailable:
return codes.Unavailable
case BadRequest:
return codes.InvalidArgument
// Add more cases for other error reasons as needed
default:
return codes.Internal
}
}

// ErrorReasonToHTTP maps an ErrorReason to the corresponding HTTP status code.
func ErrorReasonToHTTP(reason ErrorReason) int {
switch reason {
case Internal:
return http.StatusInternalServerError
case Unavailable:
return http.StatusServiceUnavailable
case BadRequest:
return http.StatusBadRequest
// Add more cases for other error reasons as needed
default:
return http.StatusInternalServerError
}
}

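A hedged sketch of how an HTTP handler might surface an *RpcError using ErrorReasonToHTTP; the helper name is illustrative, not part of this diff:

// writeRPCError is a hypothetical helper mapping a core.RpcError onto an HTTP response.
func writeRPCError(w http.ResponseWriter, rpcErr *RpcError) {
	// The reason selects the status code; the wrapped error supplies the message body.
	http.Error(w, rpcErr.Err.Error(), ErrorReasonToHTTP(rpcErr.Reason))
}
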
168
beacon-chain/rpc/core/validator.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
|
||||
func ComputeValidatorPerformance(
|
||||
ctx context.Context,
|
||||
req *ethpb.ValidatorPerformanceRequest,
|
||||
headFetcher blockchain.HeadFetcher,
|
||||
currSlot primitives.Slot,
|
||||
) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
|
||||
headState, err := headFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: errors.Wrap(err, "could not get head state"), Reason: Internal}
|
||||
}
|
||||
if currSlot > headState.Slot() {
|
||||
headRoot, err := headFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: errors.Wrap(err, "could not get head root"), Reason: Internal}
|
||||
}
|
||||
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, currSlot)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: errors.Wrapf(err, "could not process slots up to %d", currSlot), Reason: Internal}
|
||||
}
|
||||
}
|
||||
var validatorSummary []*precompute.Validator
|
||||
if headState.Version() == version.Phase0 {
|
||||
vp, bp, err := precompute.New(ctx, headState)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: err, Reason: Internal}
|
||||
}
|
||||
vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: err, Reason: Internal}
|
||||
}
|
||||
headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: err, Reason: Internal}
|
||||
}
|
||||
validatorSummary = vp
|
||||
} else if headState.Version() >= version.Altair {
|
||||
vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: err, Reason: Internal}
|
||||
}
|
||||
vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: err, Reason: Internal}
|
||||
}
|
||||
headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: err, Reason: Internal}
|
||||
}
|
||||
headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
|
||||
if err != nil {
|
||||
return nil, &RpcError{Err: err, Reason: Internal}
|
||||
}
|
||||
validatorSummary = vp
|
||||
} else {
|
||||
return nil, &RpcError{Err: errors.Wrapf(err, "head state version %d not supported", headState.Version()), Reason: Internal}
|
||||
}
|
||||
|
||||
responseCap := len(req.Indices) + len(req.PublicKeys)
|
||||
validatorIndices := make([]primitives.ValidatorIndex, 0, responseCap)
|
||||
missingValidators := make([][]byte, 0, responseCap)
|
||||
|
||||
filtered := map[primitives.ValidatorIndex]bool{} // Track filtered validators to prevent duplication in the response.
|
||||
    // Convert the list of validator public keys to validator indices and add to the indices set.
    for _, pubKey := range req.PublicKeys {
        // Skip empty public key.
        if len(pubKey) == 0 {
            continue
        }
        pubkeyBytes := bytesutil.ToBytes48(pubKey)
        idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
        if !ok {
            // Validator index not found, track as missing.
            missingValidators = append(missingValidators, pubKey)
            continue
        }
        if !filtered[idx] {
            validatorIndices = append(validatorIndices, idx)
            filtered[idx] = true
        }
    }
    // Add provided indices to the indices set.
    for _, idx := range req.Indices {
        if !filtered[idx] {
            validatorIndices = append(validatorIndices, idx)
            filtered[idx] = true
        }
    }
    // Depending on the indices and public keys given, results might not be sorted.
    sort.Slice(validatorIndices, func(i, j int) bool {
        return validatorIndices[i] < validatorIndices[j]
    })

    currentEpoch := coreTime.CurrentEpoch(headState)
    responseCap = len(validatorIndices)
    pubKeys := make([][]byte, 0, responseCap)
    beforeTransitionBalances := make([]uint64, 0, responseCap)
    afterTransitionBalances := make([]uint64, 0, responseCap)
    effectiveBalances := make([]uint64, 0, responseCap)
    correctlyVotedSource := make([]bool, 0, responseCap)
    correctlyVotedTarget := make([]bool, 0, responseCap)
    correctlyVotedHead := make([]bool, 0, responseCap)
    inactivityScores := make([]uint64, 0, responseCap)
    // Append performance summaries.
    // Also track missing validators using public keys.
    for _, idx := range validatorIndices {
        val, err := headState.ValidatorAtIndexReadOnly(idx)
        if err != nil {
            return nil, &RpcError{Err: errors.Wrap(err, "could not get validator"), Reason: Internal}
        }
        pubKey := val.PublicKey()
        if uint64(idx) >= uint64(len(validatorSummary)) {
            // Not listed in validator summary yet; treat it as missing.
            missingValidators = append(missingValidators, pubKey[:])
            continue
        }
        if !helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
            // Inactive validator; treat it as missing.
            missingValidators = append(missingValidators, pubKey[:])
            continue
        }

        summary := validatorSummary[idx]
        pubKeys = append(pubKeys, pubKey[:])
        effectiveBalances = append(effectiveBalances, summary.CurrentEpochEffectiveBalance)
        beforeTransitionBalances = append(beforeTransitionBalances, summary.BeforeEpochTransitionBalance)
        afterTransitionBalances = append(afterTransitionBalances, summary.AfterEpochTransitionBalance)
        correctlyVotedTarget = append(correctlyVotedTarget, summary.IsPrevEpochTargetAttester)
        correctlyVotedHead = append(correctlyVotedHead, summary.IsPrevEpochHeadAttester)

        if headState.Version() == version.Phase0 {
            correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
        } else {
            correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
            inactivityScores = append(inactivityScores, summary.InactivityScore)
        }
    }

    return &ethpb.ValidatorPerformanceResponse{
        PublicKeys:                    pubKeys,
        CorrectlyVotedSource:          correctlyVotedSource,
        CorrectlyVotedTarget:          correctlyVotedTarget, // In altair, when this is true then the attestation was definitely included.
        CorrectlyVotedHead:            correctlyVotedHead,
        CurrentEffectiveBalances:      effectiveBalances,
        BalancesBeforeEpochTransition: beforeTransitionBalances,
        BalancesAfterEpochTransition:  afterTransitionBalances,
        MissingValidators:             missingValidators,
        InactivityScores:              inactivityScores, // Only populated in Altair
    }, nil
}
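The merge of request public keys and explicit indices above boils down to a set union followed by a sort. A minimal standalone sketch of the same dedupe-and-sort pattern (illustrative only; the dedup map and ordering mirror the handler above, the uint64 index type is a simplification):

package main

import (
    "fmt"
    "sort"
)

// mergeIndices unions two index lists, dropping duplicates, and returns them
// sorted, mirroring how the handler combines pubkey-derived indices with
// explicitly requested indices before building the response.
func mergeIndices(fromPubkeys, explicit []uint64) []uint64 {
    seen := map[uint64]bool{}
    out := make([]uint64, 0, len(fromPubkeys)+len(explicit))
    for _, idx := range append(fromPubkeys, explicit...) {
        if !seen[idx] {
            out = append(out, idx)
            seen[idx] = true
        }
    }
    sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
    return out
}

func main() {
    fmt.Println(mergeIndices([]uint64{7, 2}, []uint64{2, 5, 0})) // [0 2 5 7]
}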
beacon-chain/rpc/prysm/node/BUILD.bazel (new file, 49 lines)
@@ -0,0 +1,49 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/node",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"handlers_test.go",
|
||||
"server_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/host/peerstore/test:go_default_library",
|
||||
"@com_github_multiformats_go_multiaddr//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/rpc/prysm/node/handlers.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package node

import (
    "encoding/json"
    "io"
    "net/http"
    "strings"

    corenet "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
    "github.com/prysmaticlabs/prysm/v4/network"
    eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// ListTrustedPeer retrieves data about the node's trusted peers.
func (s *Server) ListTrustedPeer(w http.ResponseWriter, r *http.Request) {
    peerStatus := s.PeersFetcher.Peers()
    allIds := s.PeersFetcher.Peers().GetTrustedPeers()
    allPeers := make([]*Peer, 0, len(allIds))
    for _, id := range allIds {
        p, err := httpPeerInfo(peerStatus, id)
        if err != nil {
            errJson := &network.DefaultErrorJson{
                Message: errors.Wrapf(err, "Could not get peer info").Error(),
                Code:    http.StatusInternalServerError,
            }
            network.WriteError(w, errJson)
            return
        }
        // peers added into trusted set but never connected should also be listed
        if p == nil {
            p = &Peer{
                PeerID:             id.String(),
                Enr:                "",
                LastSeenP2PAddress: "",
                State:              eth.ConnectionState(corenet.NotConnected).String(),
                Direction:          eth.PeerDirection(corenet.DirUnknown).String(),
            }
        }
        allPeers = append(allPeers, p)
    }
    response := &PeersResponse{Peers: allPeers}
    network.WriteJson(w, response)
}

// AddTrustedPeer adds a new peer into node's trusted peer set by Multiaddr
func (s *Server) AddTrustedPeer(w http.ResponseWriter, r *http.Request) {
    body, err := io.ReadAll(r.Body)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not read request body").Error(),
            Code:    http.StatusInternalServerError,
        }
        network.WriteError(w, errJson)
        return
    }
    var addrRequest *AddrRequest
    err = json.Unmarshal(body, &addrRequest)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not decode request body into peer address").Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return
    }
    info, err := peer.AddrInfoFromString(addrRequest.Addr)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not derive peer info from multiaddress").Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return
    }

    // also add new peerdata to peers
    direction, err := s.PeersFetcher.Peers().Direction(info.ID)
    if err != nil {
        s.PeersFetcher.Peers().Add(nil, info.ID, info.Addrs[0], corenet.DirUnknown)
    } else {
        s.PeersFetcher.Peers().Add(nil, info.ID, info.Addrs[0], direction)
    }

    peers := []peer.ID{}
    peers = append(peers, info.ID)
    s.PeersFetcher.Peers().SetTrustedPeers(peers)
    w.WriteHeader(http.StatusOK)
}

// RemoveTrustedPeer removes peer from our trusted peer set but does not close connection.
func (s *Server) RemoveTrustedPeer(w http.ResponseWriter, r *http.Request) {
    segments := strings.Split(r.URL.Path, "/")
    id := segments[len(segments)-1]
    peerId, err := peer.Decode(id)
    if err != nil {
        errJson := &network.DefaultErrorJson{
            Message: errors.Wrapf(err, "Could not decode peer id").Error(),
            Code:    http.StatusBadRequest,
        }
        network.WriteError(w, errJson)
        return
    }

    // if the peer is not a trusted peer, do nothing but return 200
    if !s.PeersFetcher.Peers().IsTrustedPeers(peerId) {
        w.WriteHeader(http.StatusOK)
        return
    }

    peers := []peer.ID{}
    peers = append(peers, peerId)
    s.PeersFetcher.Peers().DeleteTrustedPeers(peers)
    w.WriteHeader(http.StatusOK)
}

// httpPeerInfo does the same thing as peerInfo function in node.go but returns the
// http peer response.
func httpPeerInfo(peerStatus *peers.Status, id peer.ID) (*Peer, error) {
    enr, err := peerStatus.ENR(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain ENR")
    }
    var serializedEnr string
    if enr != nil {
        serializedEnr, err = p2p.SerializeENR(enr)
        if err != nil {
            return nil, errors.Wrap(err, "could not serialize ENR")
        }
    }
    address, err := peerStatus.Address(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain address")
    }
    connectionState, err := peerStatus.ConnectionState(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain connection state")
    }
    direction, err := peerStatus.Direction(id)
    if err != nil {
        if errors.Is(err, peerdata.ErrPeerUnknown) {
            return nil, nil
        }
        return nil, errors.Wrap(err, "could not obtain direction")
    }
    if eth.PeerDirection(direction) == eth.PeerDirection_UNKNOWN {
        return nil, nil
    }
    v1ConnState := eth.ConnectionState(connectionState).String()
    v1PeerDirection := eth.PeerDirection(direction).String()
    p := Peer{
        PeerID:    id.String(),
        State:     v1ConnState,
        Direction: v1PeerDirection,
    }
    if address != nil {
        p.LastSeenP2PAddress = address.String()
    }
    if serializedEnr != "" {
        p.Enr = "enr:" + serializedEnr
    }

    return &p, nil
}
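For reference, a minimal client-side sketch of exercising these handlers once they are mounted (the /prysm/node/trusted_peers routes are registered later in this diff; the host and port are assumptions, and the multiaddr is the sample value used by the tests below):

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    base := "http://127.0.0.1:3500" // assumed HTTP gateway address

    // Add a trusted peer by multiaddr (AddrRequest JSON shape from structs.go).
    body := bytes.NewBufferString(`{"addr":"/ip4/127.0.0.1/tcp/30303/p2p/16Uiu2HAm1n583t4huDMMqEUUBuQs6bLts21mxCfX3tiqu9JfHvRJ"}`)
    resp, err := http.Post(base+"/prysm/node/trusted_peers", "application/json", body)
    if err != nil {
        panic(err)
    }
    fmt.Println("add:", resp.Status)

    // List the current trusted peer set.
    resp, err = http.Get(base + "/prysm/node/trusted_peers")
    if err != nil {
        panic(err)
    }
    fmt.Println("list:", resp.Status)
}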
beacon-chain/rpc/prysm/node/handlers_test.go (new file, 250 lines)
@@ -0,0 +1,250 @@
package node

import (
    "bytes"
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "strconv"
    "testing"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    corenet "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    libp2ptest "github.com/libp2p/go-libp2p/p2p/host/peerstore/test"
    ma "github.com/multiformats/go-multiaddr"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
    mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
    "github.com/prysmaticlabs/prysm/v4/network"
    "github.com/prysmaticlabs/prysm/v4/testing/assert"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

type testIdentity enode.ID

func (_ testIdentity) Verify(_ *enr.Record, _ []byte) error { return nil }
func (id testIdentity) NodeAddr(_ *enr.Record) []byte       { return id[:] }

func TestListTrustedPeer(t *testing.T) {
    ids := libp2ptest.GeneratePeerIDs(9)
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    peerStatus := peerFetcher.Peers()

    for i, id := range ids {
        if i == len(ids)-1 {
            var p2pAddr = "/ip4/127.0.0." + strconv.Itoa(i) + "/udp/12000/p2p/16Uiu2HAm7yD5fhhw1Kihg5pffaGbvKV3k7sqxRGHMZzkb7u9UUxQ"
            p2pMultiAddr, err := ma.NewMultiaddr(p2pAddr)
            require.NoError(t, err)
            peerStatus.Add(nil, id, p2pMultiAddr, corenet.DirUnknown)
            continue
        }
        enrRecord := &enr.Record{}
        err := enrRecord.SetSig(testIdentity{1}, []byte{42})
        require.NoError(t, err)
        enrRecord.Set(enr.IPv4{127, 0, 0, byte(i)})
        err = enrRecord.SetSig(testIdentity{}, []byte{})
        require.NoError(t, err)
        var p2pAddr = "/ip4/127.0.0." + strconv.Itoa(i) + "/udp/12000/p2p/16Uiu2HAm7yD5fhhw1Kihg5pffaGbvKV3k7sqxRGHMZzkb7u9UUxQ"
        p2pMultiAddr, err := ma.NewMultiaddr(p2pAddr)
        require.NoError(t, err)

        var direction corenet.Direction
        if i%2 == 0 {
            direction = corenet.DirInbound
        } else {
            direction = corenet.DirOutbound
        }
        peerStatus.Add(enrRecord, id, p2pMultiAddr, direction)

        switch i {
        case 0, 1:
            peerStatus.SetConnectionState(id, peers.PeerConnecting)
        case 2, 3:
            peerStatus.SetConnectionState(id, peers.PeerConnected)
        case 4, 5:
            peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
        case 6, 7:
            peerStatus.SetConnectionState(id, peers.PeerDisconnected)
        default:
            t.Fatalf("Failed to set connection state for peer")
        }
    }

    s := Server{PeersFetcher: peerFetcher}
    // set all peers as trusted peers
    s.PeersFetcher.Peers().SetTrustedPeers(ids)

    t.Run("Peer data OK", func(t *testing.T) {
        url := "http://anything.is.fine"
        request := httptest.NewRequest("GET", url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        s.ListTrustedPeer(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &PeersResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        peers := resp.Peers
        // assert number of trusted peer is right
        assert.Equal(t, 9, len(peers))

        for i := 0; i < 9; i++ {
            pid, err := peer.Decode(peers[i].PeerID)
            require.NoError(t, err)
            if pid == ids[8] {
                assert.Equal(t, "", peers[i].Enr)
                assert.Equal(t, "", peers[i].LastSeenP2PAddress)
                assert.Equal(t, "DISCONNECTED", peers[i].State)
                assert.Equal(t, "UNKNOWN", peers[i].Direction)
                continue
            }
            expectedEnr, err := peerStatus.ENR(pid)
            require.NoError(t, err)
            serializeENR, err := p2p.SerializeENR(expectedEnr)
            require.NoError(t, err)
            assert.Equal(t, "enr:"+serializeENR, peers[i].Enr)
            expectedP2PAddr, err := peerStatus.Address(pid)
            require.NoError(t, err)
            assert.Equal(t, expectedP2PAddr.String(), peers[i].LastSeenP2PAddress)
            switch pid {
            case ids[0]:
                assert.Equal(t, "CONNECTING", peers[i].State)
                assert.Equal(t, "INBOUND", peers[i].Direction)
            case ids[1]:
                assert.Equal(t, "CONNECTING", peers[i].State)
                assert.Equal(t, "OUTBOUND", peers[i].Direction)
            case ids[2]:
                assert.Equal(t, "CONNECTED", peers[i].State)
                assert.Equal(t, "INBOUND", peers[i].Direction)
            case ids[3]:
                assert.Equal(t, "CONNECTED", peers[i].State)
                assert.Equal(t, "OUTBOUND", peers[i].Direction)
            case ids[4]:
                assert.Equal(t, "DISCONNECTING", peers[i].State)
                assert.Equal(t, "INBOUND", peers[i].Direction)
            case ids[5]:
                assert.Equal(t, "DISCONNECTING", peers[i].State)
                assert.Equal(t, "OUTBOUND", peers[i].Direction)
            case ids[6]:
                assert.Equal(t, "DISCONNECTED", peers[i].State)
                assert.Equal(t, "INBOUND", peers[i].Direction)
            case ids[7]:
                assert.Equal(t, "DISCONNECTED", peers[i].State)
                assert.Equal(t, "OUTBOUND", peers[i].Direction)
            default:
                t.Fatalf("Failed to get connection state and direction for peer")
            }
        }
    })
}

func TestListTrustedPeers_NoPeersReturnsEmptyArray(t *testing.T) {
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    s := Server{PeersFetcher: peerFetcher}

    url := "http://anything.is.fine"
    request := httptest.NewRequest("GET", url, nil)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}
    s.ListTrustedPeer(writer, request)
    assert.Equal(t, http.StatusOK, writer.Code)
    resp := &PeersResponse{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
    peers := resp.Peers
    assert.Equal(t, 0, len(peers))
}

func TestAddTrustedPeer(t *testing.T) {
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    s := Server{PeersFetcher: peerFetcher}

    url := "http://anything.is.fine"
    addr := &AddrRequest{
        Addr: "/ip4/127.0.0.1/tcp/30303/p2p/16Uiu2HAm1n583t4huDMMqEUUBuQs6bLts21mxCfX3tiqu9JfHvRJ",
    }
    addrJson, err := json.Marshal(addr)
    require.NoError(t, err)
    var body bytes.Buffer
    _, err = body.Write(addrJson)
    require.NoError(t, err)
    request := httptest.NewRequest("POST", url, &body)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}
    s.AddTrustedPeer(writer, request)
    assert.Equal(t, http.StatusOK, writer.Code)
}

func TestAddTrustedPeer_EmptyBody(t *testing.T) {
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    s := Server{PeersFetcher: peerFetcher}

    url := "http://anything.is.fine"
    request := httptest.NewRequest("POST", url, nil)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}
    s.AddTrustedPeer(writer, request)
    e := &network.DefaultErrorJson{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
    assert.Equal(t, http.StatusBadRequest, writer.Code)
    assert.Equal(t, "Could not decode request body into peer address: unexpected end of JSON input", e.Message)
}

func TestAddTrustedPeer_BadAddress(t *testing.T) {
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    s := Server{PeersFetcher: peerFetcher}

    url := "http://anything.is.fine"
    addr := &AddrRequest{
        Addr: "anything/but/not/an/address",
    }
    addrJson, err := json.Marshal(addr)
    require.NoError(t, err)
    var body bytes.Buffer
    _, err = body.Write(addrJson)
    require.NoError(t, err)
    request := httptest.NewRequest("POST", url, &body)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}
    s.AddTrustedPeer(writer, request)
    e := &network.DefaultErrorJson{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
    assert.Equal(t, http.StatusBadRequest, writer.Code)
    assert.StringContains(t, "Could not derive peer info from multiaddress", e.Message)
}

func TestRemoveTrustedPeer(t *testing.T) {
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    s := Server{PeersFetcher: peerFetcher}

    url := "http://anything.is.fine.but.last.is.important/16Uiu2HAm1n583t4huDMMqEUUBuQs6bLts21mxCfX3tiqu9JfHvRJ"
    request := httptest.NewRequest("DELETE", url, nil)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}
    s.RemoveTrustedPeer(writer, request)
    assert.Equal(t, http.StatusOK, writer.Code)
}

func TestRemoveTrustedPeer_EmptyParameter(t *testing.T) {
    peerFetcher := &mockp2p.MockPeersProvider{}
    peerFetcher.ClearPeers()
    s := Server{PeersFetcher: peerFetcher}

    url := "http://anything.is.fine"
    request := httptest.NewRequest("DELETE", url, nil)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}
    s.RemoveTrustedPeer(writer, request)
    e := &network.DefaultErrorJson{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
    assert.Equal(t, http.StatusBadRequest, writer.Code)
    assert.Equal(t, "Could not decode peer id: failed to parse peer ID: invalid cid: cid too short", e.Message)
}
beacon-chain/rpc/prysm/node/server.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package node

import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

type Server struct {
    SyncChecker               sync.Checker
    OptimisticModeFetcher     blockchain.OptimisticModeFetcher
    BeaconDB                  db.ReadOnlyDatabase
    PeersFetcher              p2p.PeersProvider
    PeerManager               p2p.PeerManager
    MetadataProvider          p2p.MetadataProvider
    GenesisTimeFetcher        blockchain.TimeFetcher
    HeadFetcher               blockchain.HeadFetcher
    ExecutionChainInfoFetcher execution.ChainInfoFetcher
}
beacon-chain/rpc/prysm/node/server_test.go (new file, 1 line)
@@ -0,0 +1 @@
package node
beacon-chain/rpc/prysm/node/structs.go (new file, 17 lines)
@@ -0,0 +1,17 @@
package node

type AddrRequest struct {
    Addr string `json:"addr"`
}

type PeersResponse struct {
    Peers []*Peer `json:"Peers"`
}

type Peer struct {
    PeerID             string `json:"peer_id"`
    Enr                string `json:"enr"`
    LastSeenP2PAddress string `json:"last_seen_p2p_address"`
    State              string `json:"state"`
    Direction          string `json:"direction"`
}
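A small sketch of the JSON these structs produce; note the wrapper field marshals under a capitalized "Peers" key (its tag above), unlike the snake_case keys on Peer. The peer ID value is a placeholder:

package main

import (
    "encoding/json"
    "fmt"
)

type Peer struct {
    PeerID             string `json:"peer_id"`
    Enr                string `json:"enr"`
    LastSeenP2PAddress string `json:"last_seen_p2p_address"`
    State              string `json:"state"`
    Direction          string `json:"direction"`
}

type PeersResponse struct {
    Peers []*Peer `json:"Peers"`
}

func main() {
    out, _ := json.MarshalIndent(&PeersResponse{
        Peers: []*Peer{{PeerID: "16Uiu2HAmExample", State: "CONNECTED", Direction: "INBOUND"}},
    }, "", "  ")
    fmt.Println(string(out))
    // {
    //   "Peers": [
    //     {
    //       "peer_id": "16Uiu2HAmExample",
    //       "enr": "",
    //       "last_seen_p2p_address": "",
    //       "state": "CONNECTED",
    //       "direction": "INBOUND"
    //     }
    //   ]
    // }
}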
@@ -38,6 +38,7 @@ go_library(
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/sync:go_default_library",
@@ -13,6 +13,7 @@ import (
    coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/cmd"
    "github.com/prysmaticlabs/prysm/v4/config/params"
@@ -659,153 +660,14 @@ func (bs *Server) GetValidatorPerformance(
    ctx context.Context, req *ethpb.ValidatorPerformanceRequest,
) (*ethpb.ValidatorPerformanceResponse, error) {
    if bs.SyncChecker.Syncing() {
        return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
    }

    headState, err := bs.HeadFetcher.HeadState(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
        return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
    }
    currSlot := bs.GenesisTimeFetcher.CurrentSlot()

    if currSlot > headState.Slot() {
        headRoot, err := bs.HeadFetcher.HeadRoot(ctx)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
        }
        headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, currSlot)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", currSlot, err)
        }
    response, err := core.ComputeValidatorPerformance(ctx, req, bs.HeadFetcher, currSlot)
    if err != nil {
        return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not compute validator performance: %v", err.Err)
    }
    var validatorSummary []*precompute.Validator
    if headState.Version() == version.Phase0 {
        vp, bp, err := precompute.New(ctx, headState)
        if err != nil {
            return nil, err
        }
        vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
        if err != nil {
            return nil, err
        }
        headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
        if err != nil {
            return nil, err
        }
        validatorSummary = vp
    } else if headState.Version() >= version.Altair {
        vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
        if err != nil {
            return nil, err
        }
        vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
        if err != nil {
            return nil, err
        }
        headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
        if err != nil {
            return nil, err
        }
        headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
        if err != nil {
            return nil, err
        }
        validatorSummary = vp
    } else {
        return nil, status.Errorf(codes.Internal, "Head state version %d not supported", headState.Version())
    }

    responseCap := len(req.Indices) + len(req.PublicKeys)
    validatorIndices := make([]primitives.ValidatorIndex, 0, responseCap)
    missingValidators := make([][]byte, 0, responseCap)

    filtered := map[primitives.ValidatorIndex]bool{} // Track filtered validators to prevent duplication in the response.
    // Convert the list of validator public keys to validator indices and add to the indices set.
    for _, pubKey := range req.PublicKeys {
        // Skip empty public key.
        if len(pubKey) == 0 {
            continue
        }
        pubkeyBytes := bytesutil.ToBytes48(pubKey)
        idx, ok := headState.ValidatorIndexByPubkey(pubkeyBytes)
        if !ok {
            // Validator index not found, track as missing.
            missingValidators = append(missingValidators, pubKey)
            continue
        }
        if !filtered[idx] {
            validatorIndices = append(validatorIndices, idx)
            filtered[idx] = true
        }
    }
    // Add provided indices to the indices set.
    for _, idx := range req.Indices {
        if !filtered[idx] {
            validatorIndices = append(validatorIndices, idx)
            filtered[idx] = true
        }
    }
    // Depending on the indices and public keys given, results might not be sorted.
    sort.Slice(validatorIndices, func(i, j int) bool {
        return validatorIndices[i] < validatorIndices[j]
    })

    currentEpoch := coreTime.CurrentEpoch(headState)
    responseCap = len(validatorIndices)
    pubKeys := make([][]byte, 0, responseCap)
    beforeTransitionBalances := make([]uint64, 0, responseCap)
    afterTransitionBalances := make([]uint64, 0, responseCap)
    effectiveBalances := make([]uint64, 0, responseCap)
    correctlyVotedSource := make([]bool, 0, responseCap)
    correctlyVotedTarget := make([]bool, 0, responseCap)
    correctlyVotedHead := make([]bool, 0, responseCap)
    inactivityScores := make([]uint64, 0, responseCap)
    // Append performance summaries.
    // Also track missing validators using public keys.
    for _, idx := range validatorIndices {
        val, err := headState.ValidatorAtIndexReadOnly(idx)
        if err != nil {
            return nil, status.Errorf(codes.Internal, "could not get validator: %v", err)
        }
        pubKey := val.PublicKey()
        if uint64(idx) >= uint64(len(validatorSummary)) {
            // Not listed in validator summary yet; treat it as missing.
            missingValidators = append(missingValidators, pubKey[:])
            continue
        }
        if !helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
            // Inactive validator; treat it as missing.
            missingValidators = append(missingValidators, pubKey[:])
            continue
        }

        summary := validatorSummary[idx]
        pubKeys = append(pubKeys, pubKey[:])
        effectiveBalances = append(effectiveBalances, summary.CurrentEpochEffectiveBalance)
        beforeTransitionBalances = append(beforeTransitionBalances, summary.BeforeEpochTransitionBalance)
        afterTransitionBalances = append(afterTransitionBalances, summary.AfterEpochTransitionBalance)
        correctlyVotedTarget = append(correctlyVotedTarget, summary.IsPrevEpochTargetAttester)
        correctlyVotedHead = append(correctlyVotedHead, summary.IsPrevEpochHeadAttester)

        if headState.Version() == version.Phase0 {
            correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
        } else {
            correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
            inactivityScores = append(inactivityScores, summary.InactivityScore)
        }
    }

    return &ethpb.ValidatorPerformanceResponse{
        PublicKeys:                    pubKeys,
        CorrectlyVotedSource:          correctlyVotedSource,
        CorrectlyVotedTarget:          correctlyVotedTarget, // In altair, when this is true then the attestation was definitely included.
        CorrectlyVotedHead:            correctlyVotedHead,
        CurrentEffectiveBalances:      effectiveBalances,
        BalancesBeforeEpochTransition: beforeTransitionBalances,
        BalancesAfterEpochTransition:  afterTransitionBalances,
        MissingValidators:             missingValidators,
        InactivityScores:              inactivityScores, // Only populated in Altair
    }, nil
    return response, nil
}

// GetIndividualVotes retrieves individual voting status of validators.

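The net effect of this hunk is that the long precompute body moves behind core.ComputeValidatorPerformance, which both this gRPC handler and the new HTTP handler below call; each front end only translates the returned reason into its own status code. A schematic sketch of that shared-core error pattern (the RpcError field names match the code above; everything else is a hypothetical stand-in, not the actual Prysm signatures):

package example

import (
    "context"
    "errors"
)

// RpcError mirrors the shape returned by the shared core call above:
// a wrapped error plus a transport-agnostic reason.
type RpcError struct {
    Err    error
    Reason int
}

const internalReason = 1

// compute stands in for core.ComputeValidatorPerformance; both front ends delegate here.
func compute(_ context.Context) (string, *RpcError) {
    return "", &RpcError{Err: errors.New("boom"), Reason: internalReason}
}

// reasonToGRPC and reasonToHTTP stand in for core.ErrorReasonToGRPC / core.ErrorReasonToHTTP:
// the only per-transport work left in each handler is this mapping.
func reasonToGRPC(_ int) int { return 13 }  // codes.Internal
func reasonToHTTP(_ int) int { return 500 } // http.StatusInternalServerError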
beacon-chain/rpc/prysm/validator/BUILD.bazel (new file, 40 lines)
@@ -0,0 +1,40 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"server.go",
|
||||
"validator_performance.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["validator_performance_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/rpc/prysm/validator/server.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package validator

import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

// Server defines a server implementation for HTTP endpoints, providing
// access to data relevant to the Ethereum Beacon Chain.
type Server struct {
    GenesisTimeFetcher blockchain.TimeFetcher
    SyncChecker        sync.Checker
    HeadFetcher        blockchain.HeadFetcher
}
beacon-chain/rpc/prysm/validator/validator_performance.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package validator

import (
    "encoding/json"
    "net/http"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/network"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

type ValidatorPerformanceRequest struct {
    PublicKeys [][]byte                    `json:"public_keys,omitempty"`
    Indices    []primitives.ValidatorIndex `json:"indices,omitempty"`
}

type ValidatorPerformanceResponse struct {
    PublicKeys                    [][]byte `json:"public_keys,omitempty"`
    CorrectlyVotedSource          []bool   `json:"correctly_voted_source,omitempty"`
    CorrectlyVotedTarget          []bool   `json:"correctly_voted_target,omitempty"`
    CorrectlyVotedHead            []bool   `json:"correctly_voted_head,omitempty"`
    CurrentEffectiveBalances      []uint64 `json:"current_effective_balances,omitempty"`
    BalancesBeforeEpochTransition []uint64 `json:"balances_before_epoch_transition,omitempty"`
    BalancesAfterEpochTransition  []uint64 `json:"balances_after_epoch_transition,omitempty"`
    MissingValidators             [][]byte `json:"missing_validators,omitempty"`
    InactivityScores              []uint64 `json:"inactivity_scores,omitempty"`
}

// GetValidatorPerformance is an HTTP handler for GetValidatorPerformance.
func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request) {
    if vs.SyncChecker.Syncing() {
        handleHTTPError(w, "Syncing", http.StatusServiceUnavailable)
        return
    }
    ctx := r.Context()
    currSlot := vs.GenesisTimeFetcher.CurrentSlot()
    var req ValidatorPerformanceRequest
    if r.Body != http.NoBody {
        if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
            handleHTTPError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
            return
        }
    }
    computed, err := core.ComputeValidatorPerformance(
        ctx,
        &ethpb.ValidatorPerformanceRequest{
            PublicKeys: req.PublicKeys,
            Indices:    req.Indices,
        },
        vs.HeadFetcher,
        currSlot,
    )
    if err != nil {
        handleHTTPError(w, "Could not compute validator performance: "+err.Err.Error(), core.ErrorReasonToHTTP(err.Reason))
        return
    }
    response := &ValidatorPerformanceResponse{
        PublicKeys:                    computed.PublicKeys,
        CorrectlyVotedSource:          computed.CorrectlyVotedSource,
        CorrectlyVotedTarget:          computed.CorrectlyVotedTarget, // In altair, when this is true then the attestation was definitely included.
        CorrectlyVotedHead:            computed.CorrectlyVotedHead,
        CurrentEffectiveBalances:      computed.CurrentEffectiveBalances,
        BalancesBeforeEpochTransition: computed.BalancesBeforeEpochTransition,
        BalancesAfterEpochTransition:  computed.BalancesAfterEpochTransition,
        MissingValidators:             computed.MissingValidators,
        InactivityScores:              computed.InactivityScores, // Only populated in Altair
    }
    network.WriteJson(w, response)
}

func handleHTTPError(w http.ResponseWriter, message string, code int) {
    errJson := &network.DefaultErrorJson{
        Message: message,
        Code:    code,
    }
    network.WriteError(w, errJson)
}
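A hedged client-side sketch of calling this handler once it is mounted at /prysm/validators/performance (the route registration appears later in this diff; the node address is an assumption):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

// perfRequest mirrors the ValidatorPerformanceRequest JSON shape above,
// using plain uint64 indices for illustration.
type perfRequest struct {
    Indices []uint64 `json:"indices,omitempty"`
}

func main() {
    payload, _ := json.Marshal(perfRequest{Indices: []uint64{0, 1, 2}})
    resp, err := http.Post(
        "http://127.0.0.1:3500/prysm/validators/performance", // assumed gateway address
        "application/json",
        bytes.NewReader(payload),
    )
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    var out map[string]interface{}
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Println(resp.Status, out["missing_validators"])
}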
beacon-chain/rpc/prysm/validator/validator_performance_test.go (new file, 453 lines)
@@ -0,0 +1,453 @@
package validator

import (
    "bytes"
    "context"
    "encoding/json"
    "io"
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "github.com/prysmaticlabs/go-bitfield"
    mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/runtime/version"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
    "github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestServer_GetValidatorPerformance(t *testing.T) {
    t.Run("Syncing", func(t *testing.T) {
        vs := &Server{
            SyncChecker: &mockSync.Sync{IsSyncing: true},
        }

        var buf bytes.Buffer
        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)

        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        require.Equal(t, http.StatusServiceUnavailable, rawResp.StatusCode)
    })
    t.Run("OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        headState, err := util.NewBeaconState()
        require.NoError(t, err)
        headState = setHeadState(t, headState, publicKeys)
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))

        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
        }

        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err = json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Indices", func(t *testing.T) {
        ctx := context.Background()
        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        headState, err := util.NewBeaconState()
        require.NoError(t, err)
        headState = setHeadState(t, headState, publicKeys)

        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                // 10 epochs into the future.
                State: headState,
            },
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
        }
        c := headState.Copy()
        vp, bp, err := precompute.New(ctx, c)
        require.NoError(t, err)
        vp, bp, err = precompute.ProcessAttestations(ctx, c, vp, bp)
        require.NoError(t, err)
        _, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
        require.NoError(t, err)
        extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth

        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{extraBal, extraBal + params.BeaconConfig().GweiPerEth},
            BalancesAfterEpochTransition:  []uint64{vp[1].AfterEpochTransitionBalance, vp[2].AfterEpochTransitionBalance},
            MissingValidators:             [][]byte{publicKeys[0][:]},
        }
        request := &ValidatorPerformanceRequest{
            Indices: []primitives.ValidatorIndex{2, 1, 0},
        }
        var buf bytes.Buffer
        err = json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Indices Pubkeys", func(t *testing.T) {
        ctx := context.Background()
        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        headState, err := util.NewBeaconState()
        require.NoError(t, err)
        headState = setHeadState(t, headState, publicKeys)

        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                // 10 epochs into the future.
                State: headState,
            },
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
        }
        c := headState.Copy()
        vp, bp, err := precompute.New(ctx, c)
        require.NoError(t, err)
        vp, bp, err = precompute.ProcessAttestations(ctx, c, vp, bp)
        require.NoError(t, err)
        _, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
        require.NoError(t, err)
        extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth

        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{extraBal, extraBal + params.BeaconConfig().GweiPerEth},
            BalancesAfterEpochTransition:  []uint64{vp[1].AfterEpochTransitionBalance, vp[2].AfterEpochTransitionBalance},
            MissingValidators:             [][]byte{publicKeys[0][:]},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:]}, Indices: []primitives.ValidatorIndex{1, 2},
        }
        var buf bytes.Buffer
        err = json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Altair OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        epoch := primitives.Epoch(1)
        headState, _ := util.DeterministicGenesisStateAltair(t, 32)
        require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
        headState = setHeadState(t, headState, publicKeys)

        require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
            InactivityScores:              []uint64{0, 0},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err := json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Bellatrix OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        epoch := primitives.Epoch(1)
        headState, _ := util.DeterministicGenesisStateBellatrix(t, 32)
        require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
        headState = setHeadState(t, headState, publicKeys)

        require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
            InactivityScores:              []uint64{0, 0},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err := json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
    t.Run("Capella OK", func(t *testing.T) {
        helpers.ClearCache()
        params.SetupTestConfigCleanup(t)
        params.OverrideBeaconConfig(params.MinimalSpecConfig())

        publicKeys := [][48]byte{
            bytesutil.ToBytes48([]byte{1}),
            bytesutil.ToBytes48([]byte{2}),
            bytesutil.ToBytes48([]byte{3}),
        }
        epoch := primitives.Epoch(1)
        headState, _ := util.DeterministicGenesisStateCapella(t, 32)
        require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
        headState = setHeadState(t, headState, publicKeys)

        require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
        require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
        offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
        vs := &Server{
            HeadFetcher: &mock.ChainService{
                State: headState,
            },
            GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
            SyncChecker:        &mockSync.Sync{IsSyncing: false},
        }
        want := &ValidatorPerformanceResponse{
            PublicKeys:                    [][]byte{publicKeys[1][:], publicKeys[2][:]},
            CurrentEffectiveBalances:      []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
            CorrectlyVotedSource:          []bool{false, false},
            CorrectlyVotedTarget:          []bool{false, false},
            CorrectlyVotedHead:            []bool{false, false},
            BalancesBeforeEpochTransition: []uint64{101, 102},
            BalancesAfterEpochTransition:  []uint64{0, 0},
            MissingValidators:             [][]byte{publicKeys[0][:]},
            InactivityScores:              []uint64{0, 0},
        }
        request := &ValidatorPerformanceRequest{
            PublicKeys: [][]byte{publicKeys[0][:], publicKeys[2][:], publicKeys[1][:]},
        }
        var buf bytes.Buffer
        err := json.NewEncoder(&buf).Encode(request)
        require.NoError(t, err)

        srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
        req := httptest.NewRequest("POST", "/foo", &buf)
        client := &http.Client{}
        rawResp, err := client.Post(srv.URL, "application/json", req.Body)
        require.NoError(t, err)
        defer func() {
            if err := rawResp.Body.Close(); err != nil {
                t.Fatal(err)
            }
        }()
        body, err := io.ReadAll(rawResp.Body)
        require.NoError(t, err)

        response := &ValidatorPerformanceResponse{}
        require.NoError(t, json.Unmarshal(body, response))
        require.DeepEqual(t, want, response)
    })
}

func setHeadState(t *testing.T, headState state.BeaconState, publicKeys [][48]byte) state.BeaconState {
    epoch := primitives.Epoch(1)
    require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
    if headState.Version() < version.Altair {
        atts := make([]*ethpb.PendingAttestation, 3)
        for i := 0; i < len(atts); i++ {
            atts[i] = &ethpb.PendingAttestation{
                Data: &ethpb.AttestationData{
                    Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
                    Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
                },
                AggregationBits: bitfield.Bitlist{},
                InclusionDelay:  1,
            }
            require.NoError(t, headState.AppendPreviousEpochAttestations(atts[i]))
        }
    }

    defaultBal := params.BeaconConfig().MaxEffectiveBalance
    extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
    balances := []uint64{defaultBal, extraBal, extraBal + params.BeaconConfig().GweiPerEth}
    require.NoError(t, headState.SetBalances(balances))

    validators := []*ethpb.Validator{
        {
            PublicKey:       publicKeys[0][:],
            ActivationEpoch: 5,
            ExitEpoch:       params.BeaconConfig().FarFutureEpoch,
        },
        {
            PublicKey:        publicKeys[1][:],
            EffectiveBalance: defaultBal,
            ActivationEpoch:  0,
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
        },
        {
            PublicKey:        publicKeys[2][:],
            EffectiveBalance: defaultBal,
            ActivationEpoch:  0,
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
        },
    }
    require.NoError(t, headState.SetValidators(validators))
    return headState
}
@@ -37,10 +37,12 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/rewards"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
    nodeprysm "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/node"
    beaconv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/beacon"
    debugv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/debug"
    nodev1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/node"
    validatorv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
    httpserver "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator"
    slasherservice "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
@@ -306,6 +308,22 @@ func (s *Service) Start() {
        ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
    }

    nodeServerPrysm := &nodeprysm.Server{
        BeaconDB:                  s.cfg.BeaconDB,
        SyncChecker:               s.cfg.SyncService,
        OptimisticModeFetcher:     s.cfg.OptimisticModeFetcher,
        GenesisTimeFetcher:        s.cfg.GenesisTimeFetcher,
        PeersFetcher:              s.cfg.PeersFetcher,
        PeerManager:               s.cfg.PeerManager,
        MetadataProvider:          s.cfg.MetadataProvider,
        HeadFetcher:               s.cfg.HeadFetcher,
        ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
    }

    s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.ListTrustedPeer).Methods("GET")
    s.cfg.Router.HandleFunc("/prysm/node/trusted_peers", nodeServerPrysm.AddTrustedPeer).Methods("POST")
    s.cfg.Router.HandleFunc("/prysm/node/trusted_peers/{peer_id}", nodeServerPrysm.RemoveTrustedPeer).Methods("Delete")

    beaconChainServer := &beaconv1alpha1.Server{
        Ctx:      s.ctx,
        BeaconDB: s.cfg.BeaconDB,
@@ -354,6 +372,12 @@ func (s *Service) Start() {
        FinalizationFetcher: s.cfg.FinalizationFetcher,
        ForkchoiceFetcher:   s.cfg.ForkchoiceFetcher,
    }
    httpServer := &httpserver.Server{
        GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
        HeadFetcher:        s.cfg.HeadFetcher,
        SyncChecker:        s.cfg.SyncService,
    }
    s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance)
    s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2)
    s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2)
    ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)

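One detail worth noting in the registrations above: the Router here appears to be a gorilla/mux router (suggested by the {peer_id} path template and the .Methods chaining), and mux uppercases the strings passed to Methods, so the mixed-case "Delete" literal still matches DELETE requests. A minimal sketch demonstrating that, assuming gorilla/mux:

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"

    "github.com/gorilla/mux"
)

func main() {
    r := mux.NewRouter()
    // mux.Route.Methods uppercases its arguments internally, so "Delete"
    // matches an incoming DELETE request despite the mixed-case literal.
    r.HandleFunc("/prysm/node/trusted_peers/{peer_id}", func(w http.ResponseWriter, req *http.Request) {
        fmt.Fprint(w, mux.Vars(req)["peer_id"])
    }).Methods("Delete")

    srv := httptest.NewServer(r)
    defer srv.Close()
    req, _ := http.NewRequest(http.MethodDelete, srv.URL+"/prysm/node/trusted_peers/abc", nil)
    resp, _ := http.DefaultClient.Do(req)
    fmt.Println(resp.StatusCode) // 200
}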
@@ -207,7 +207,7 @@ func handle32ByteArrays(val [][32]byte, indices []uint64, convertAll bool) ([][3
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
    length := len(indices)
    if convertAll {
        length = len(val)
        return stateutil.OptimizedValidatorRoots(val)
    }
    roots := make([][32]byte, 0, length)
    rootCreator := func(input *ethpb.Validator) error {

@@ -218,15 +218,6 @@ func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll b
        roots = append(roots, newRoot)
        return nil
    }
    if convertAll {
        for i := range val {
            err := rootCreator(val[i])
            if err != nil {
                return nil, err
            }
        }
        return roots, nil
    }
    if len(val) > 0 {
        for _, idx := range indices {
            if idx > uint64(len(val))-1 {

@@ -117,6 +117,7 @@ type ReadOnlyValidators interface {
	ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error)
	ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (ReadOnlyValidator, error)
	ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
	PublicKeys() [][fieldparams.BLSPubkeyLength]byte
	PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.BLSPubkeyLength]byte
	NumValidators() int
	ReadFromEveryValidator(f func(idx int, val ReadOnlyValidator) error) error
@@ -145,6 +145,22 @@ func (b *BeaconState) PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.
	return bytesutil.ToBytes48(b.validators[idx].PublicKey)
}

// PublicKeys builds a list of all validator public keys, with each key's index aligned to its validator index.
func (b *BeaconState) PublicKeys() [][fieldparams.BLSPubkeyLength]byte {
	b.lock.RLock()
	defer b.lock.RUnlock()

	res := make([][fieldparams.BLSPubkeyLength]byte, len(b.validators))
	for i := 0; i < len(b.validators); i++ {
		val := b.validators[i]
		if val == nil {
			continue
		}
		res[i] = bytesutil.ToBytes48(val.PublicKey)
	}
	return res
}

// NumValidators returns the size of the validator registry.
func (b *BeaconState) NumValidators() int {
	b.lock.RLock()
@@ -30,7 +30,7 @@ go_library(
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/sync/backfill:go_default_library",
        "//beacon-chain/sync/backfill/coverage:go_default_library",
        "//cache/lru:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -155,6 +155,20 @@ func (e *epochBoundaryState) put(blockRoot [32]byte, s state.BeaconState) error
func (e *epochBoundaryState) delete(blockRoot [32]byte) error {
	e.lock.Lock()
	defer e.lock.Unlock()
	rInfo, ok, err := e.getByBlockRootLockFree(blockRoot)
	if err != nil {
		return err
	}
	if !ok {
		return nil
	}
	slotInfo := &slotRootInfo{
		slot:      rInfo.state.Slot(),
		blockRoot: blockRoot,
	}
	if err = e.slotRootCache.Delete(slotInfo); err != nil {
		return err
	}
	return e.rootStateCache.Delete(&rootStateInfo{
		root: blockRoot,
	})
@@ -331,9 +331,8 @@ func (s *State) CombinedCache() *CombinedCache {
}

func (s *State) slotAvailable(slot primitives.Slot) bool {
	// default to assuming node was initialized from genesis - backfill only needs to be specified for checkpoint sync
	if s.backfillStatus == nil {
		return true
	if s.avb == nil {
		return false
	}
	return s.backfillStatus.SlotCovered(slot)
	return s.avb.AvailableBlock(slot)
}
@@ -36,12 +36,12 @@ func (_ *State) replayBlocks(
	var err error

	start := time.Now()
	log = log.WithFields(logrus.Fields{
	rLog := log.WithFields(logrus.Fields{
		"startSlot": state.Slot(),
		"endSlot":   targetSlot,
		"diff":      targetSlot - state.Slot(),
	})
	log.Debug("Replaying state")
	rLog.Debug("Replaying state")
	// The input block list is sorted in decreasing slots order.
	if len(signed) > 0 {
		for i := len(signed) - 1; i >= 0; i-- {
@@ -71,7 +71,7 @@ func (_ *State) replayBlocks(
	}

	duration := time.Since(start)
	log.WithFields(logrus.Fields{
	rLog.WithFields(logrus.Fields{
		"duration": duration,
	}).Debug("Replayed state")
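The rename above matters because log here appears to be a package-level *logrus.Entry: reassigning it inside replayBlocks would permanently attach the per-call fields to every later log line in the package. A minimal sketch of the distinction (names are illustrative, not taken from this diff):

package example

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "stategen")

func replay(startSlot uint64) {
	// Bug pattern removed by this change: `log = log.WithField(...)`
	// would rebind the package-level entry, so startSlot from one call
	// leaks into every subsequent log line in the package.
	rLog := log.WithField("startSlot", startSlot) // scoped to this call only
	rLog.Debug("Replaying state")
}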
@@ -12,7 +12,7 @@ import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -50,7 +50,7 @@ type State struct {
	finalizedInfo           *finalizedInfo
	epochBoundaryStateCache *epochBoundaryState
	saveHotStateDB          *saveHotStateDbConfig
	backfillStatus          *backfill.Status
	avb                     coverage.AvailableBlocker
	migrationLock           *sync.Mutex
	fc                      forkchoice.ForkChoicer
}
@@ -77,9 +77,9 @@ type finalizedInfo struct {
// StateGenOption is a functional option for controlling the initialization of a *State value
type StateGenOption func(*State)

func WithBackfillStatus(bfs *backfill.Status) StateGenOption {
func WithAvailableBlocker(avb coverage.AvailableBlocker) StateGenOption {
	return func(sg *State) {
		sg.backfillStatus = bfs
		sg.avb = avb
	}
}
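A minimal wiring sketch for the new option. It assumes stategen.New accepts variadic StateGenOption values (implied by the option type above) and that the backfill StatusUpdater introduced later in this diff is the intended coverage.AvailableBlocker implementation; the exact parameter types are assumptions.

package example

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
)

// newStateGen hands whatever satisfies coverage.AvailableBlocker to
// stategen via the functional option, so slotAvailable() can consult it.
func newStateGen(d db.NoHeadAccessDatabase, fc forkchoice.ForkChoicer, avb coverage.AvailableBlocker) *stategen.State {
	return stategen.New(d, fc, stategen.WithAvailableBlocker(avb))
}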
@@ -2,8 +2,10 @@ package stategen

import (
	"context"
	"fmt"
	"math"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -79,6 +81,41 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
		if err := s.epochBoundaryStateCache.put(blockRoot, st); err != nil {
			return err
		}
	} else {
		// Always check that the correct epoch boundary states have been saved
		// for the current epoch.
		epochStart, err := slots.EpochStart(slots.ToEpoch(st.Slot()))
		if err != nil {
			return err
		}
		bRoot, err := helpers.BlockRootAtSlot(st, epochStart)
		if err != nil {
			return err
		}
		_, ok, err := s.epochBoundaryStateCache.getByBlockRoot([32]byte(bRoot))
		if err != nil {
			return err
		}

		// A cache miss here indicates that the epoch boundary was skipped due
		// to a missed slot; we recover by saving the state for that slot now.
		if !ok {
			// Only recover the state if it is in our hot state cache, otherwise we
			// simply skip this step.
			if s.hotStateCache.has([32]byte(bRoot)) {
				log.WithFields(logrus.Fields{
					"slot": epochStart,
					"root": fmt.Sprintf("%#x", bRoot),
				}).Debug("Recovering state for epoch boundary cache")

				hState := s.hotStateCache.get([32]byte(bRoot))
				if err := s.epochBoundaryStateCache.put([32]byte(bRoot), hState); err != nil {
					return err
				}
			}
		}
	}

	// On an intermediate slot, save state summary.
@@ -7,6 +7,7 @@ import (
	testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
@@ -137,6 +138,34 @@ func TestSaveState_NoSaveNotEpochBoundary(t *testing.T) {
	require.Equal(t, false, beaconDB.HasState(ctx, r))
}

func TestSaveState_RecoverForEpochBoundary(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	service := New(beaconDB, doublylinkedtree.New())

	beaconState, _ := util.DeterministicGenesisState(t, 32)
	require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
	r := [32]byte{'A'}
	boundaryRoot := [32]byte{'B'}
	require.NoError(t, beaconState.UpdateBlockRootAtIndex(0, boundaryRoot))

	b := util.NewBeaconBlock()
	util.SaveBlock(t, ctx, beaconDB, b)
	gRoot, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
	// Save boundary state to the hot state cache.
	boundaryState, _ := util.DeterministicGenesisState(t, 32)
	service.hotStateCache.put(boundaryRoot, boundaryState)
	require.NoError(t, service.SaveState(ctx, r, beaconState))

	rInfo, ok, err := service.epochBoundaryStateCache.getByBlockRoot(boundaryRoot)
	assert.NoError(t, err)
	assert.Equal(t, true, ok, "state does not exist in cache")
	assert.Equal(t, rInfo.root, boundaryRoot, "incorrect root of root state info")
	assert.Equal(t, rInfo.state.Slot(), primitives.Slot(0), "incorrect slot of state")
}

func TestSaveState_CanSaveHotStateToDB(t *testing.T) {
	hook := logTest.NewGlobal()
	ctx := context.Background()
@@ -30,7 +30,7 @@ func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
}

func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
	roots, err := optimizedValidatorRoots(validators)
	roots, err := OptimizedValidatorRoots(validators)
	if err != nil {
		return [32]byte{}, err
	}
@@ -51,7 +51,9 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
	return res, nil
}

func optimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
// OptimizedValidatorRoots uses an optimized routine with gohashtree in order to
// derive a list of validator roots from a list of validator objects.
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
	// Exit early if no validators are provided.
	if len(validators) == 0 {
		return [][32]byte{}, nil
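Exporting the routine lets the field-trie code earlier in this diff reuse the gohashtree-backed path for whole-registry conversions. A minimal usage sketch (validator fixture construction elided):

package example

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// rootsForRegistry derives one hash tree root per validator, in
// registry order, using the routine exported by this change.
func rootsForRegistry(vals []*ethpb.Validator) ([][32]byte, error) {
	roots, err := stateutil.OptimizedValidatorRoots(vals)
	if err != nil {
		return nil, fmt.Errorf("could not compute validator roots: %w", err)
	}
	return roots, nil
}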
@@ -2,30 +2,68 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["status.go"],
    srcs = [
        "batch.go",
        "batcher.go",
        "pool.go",
        "service.go",
        "status.go",
        "verify.go",
        "worker.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/p2p/peers:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/forks:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["status_test.go"],
    srcs = [
        "batcher_test.go",
        "pool_test.go",
        "service_test.go",
        "status_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/blocks/testing:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/dbval:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
beacon-chain/sync/backfill/batch.go (new file, 124 lines)
@@ -0,0 +1,124 @@
package backfill

import (
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	log "github.com/sirupsen/logrus"
)

var ErrChainBroken = errors.New("batch is not the ancestor of backfilled batch")

type batchState int

func (s batchState) String() string {
	switch s {
	case batchNil:
		return "nil"
	case batchInit:
		return "init"
	case batchSequenced:
		return "sequenced"
	case batchErrRetryable:
		return "error_retryable"
	case batchImportable:
		return "importable"
	case batchImportComplete:
		return "import_complete"
	case batchEndSequence:
		return "end_sequence"
	default:
		return "unknown"
	}
}

const (
	batchNil batchState = iota
	batchInit
	batchSequenced
	batchErrRetryable
	batchImportable
	batchImportComplete
	batchEndSequence
)

type batchId string

type batch struct {
	scheduled time.Time
	seq       int // sequence identifier, ie how many times has the sequence() method served this batch
	retries   int
	begin     primitives.Slot
	end       primitives.Slot // half-open interval, [begin, end), ie >= start, < end.
	results   VerifiedROBlocks
	err       error
	state     batchState
	pid       peer.ID
}

func (b batch) logFields() log.Fields {
	return map[string]interface{}{
		"batch_id":  b.id(),
		"state":     b.state.String(),
		"scheduled": b.scheduled.String(),
		"seq":       b.seq,
		"retries":   b.retries,
		"begin":     b.begin,
		"end":       b.end,
		"pid":       b.pid,
	}
}

func (b *batch) inc() {
	b.seq += 1
}

func (b batch) replaces(r batch) bool {
	if b.begin != r.begin {
		return false
	}
	if b.end != r.end {
		return false
	}
	return b.seq >= r.seq
}

func (b batch) id() batchId {
	return batchId(fmt.Sprintf("%d:%d", b.begin, b.end))
}

func (b batch) size() primitives.Slot {
	return b.end - b.begin
}

func (b batch) ensureParent(expected [32]byte) error {
	tail := b.results[len(b.results)-1]
	if tail.Root() != expected {
		return errors.Wrapf(ErrChainBroken, "last parent_root=%#x, tail root=%#x", expected, tail.Root())
	}
	return nil
}

func (b batch) lowest() blocks.ROBlock {
	return b.results[0]
}

func (b batch) request() *eth.BeaconBlocksByRangeRequest {
	return &eth.BeaconBlocksByRangeRequest{
		StartSlot: b.begin,
		Count:     uint64(b.end - b.begin),
		Step:      1,
	}
}

func (b batch) withRetryableError(err error) batch {
	b.retries += 1
	b.err = err
	b.state = batchErrRetryable
	return b
}
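The half-open [begin, end) convention above is what lets adjacent batches share a boundary slot without overlapping, and request() maps the interval straight onto a BeaconBlocksByRange request. A worked sketch with illustrative slot numbers:

package example

import "fmt"

func main() {
	// Mirrors batch{begin: 10723, end: 10787}: begin inclusive, end exclusive.
	begin, end := uint64(10723), uint64(10787)
	fmt.Printf("StartSlot=%d Count=%d\n", begin, end-begin) // StartSlot=10723 Count=64
	// The next lower batch is [10659, 10723): its end equals this begin,
	// so the two cover contiguous slots with no overlap.
}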
beacon-chain/sync/backfill/batcher.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package backfill

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

var errSequencerMisconfigured = errors.New("backfill sequencer initialization error")
var errMaxBatches = errors.New("backfill batch requested in excess of max outstanding batches")
var errEndSequence = errors.New("sequence has terminated, no more backfill batches will be produced")

type batchSequencer struct {
	batcher batcher
	seq     []batch
}

var errCannotDecreaseMinimum = errors.New("the minimum backfill slot can only be increased, not decreased")

// moveMinimum enables the backfill service to change the slot where the batcher will start replying with
// batch state batchEndSequence (signaling that no new batches will be produced). This is done in response to
// epochs advancing, which shrinks the gap between <checkpoint slot> and <current slot>-MIN_EPOCHS_FOR_BLOCK_REQUESTS,
// allowing the node to download a smaller number of blocks.
func (c *batchSequencer) moveMinimum(min primitives.Slot) error {
	if min < c.batcher.min {
		return errCannotDecreaseMinimum
	}
	c.batcher.min = min
	return nil
}

func (c *batchSequencer) minimum() primitives.Slot {
	return c.batcher.min
}

func (c *batchSequencer) update(b batch) {
	done := 0
	for i := 0; i < len(c.seq); i++ {
		if b.replaces(c.seq[i]) {
			c.seq[i] = b
		}
		// Assumes invariant that batches complete and update is called in order.
		// This should be true because the code using the sequencer doesn't know the expected parent
		// for a batch until it imports the previous batch.
		if c.seq[i].state == batchImportComplete {
			done += 1
			continue
		}
		// Move the unfinished batches to overwrite the finished ones.
		c.seq[i-done] = c.seq[i]
	}
	// Overwrite the moved batches with the next ones in the sequence.
	last := c.seq[len(c.seq)-1]
	for i := len(c.seq) - done; i < len(c.seq); i++ {
		c.seq[i] = c.batcher.beforeBatch(last)
		last = c.seq[i]
	}
}

func (c *batchSequencer) sequence() ([]batch, error) {
	s := make([]batch, 0)
	// batch start slots are in descending order, c.seq[n].begin == c.seq[n+1].end
	for i := range c.seq {
		switch c.seq[i].state {
		case batchInit, batchErrRetryable:
			c.seq[i].state = batchSequenced
			c.seq[i].inc()
			s = append(s, c.seq[i])
		case batchNil:
			if i == 0 {
				return nil, errSequencerMisconfigured
			}
			c.seq[i] = c.batcher.beforeBatch(c.seq[i-1])
			c.seq[i].state = batchSequenced
			c.seq[i].inc()
			s = append(s, c.seq[i])
		case batchEndSequence:
			if len(s) == 0 {
				s = append(s, c.seq[i])
			}
			break
		default:
			continue
		}
	}
	if len(s) == 0 {
		return nil, errMaxBatches
	}

	return s, nil
}

func (c *batchSequencer) numTodo() int {
	if len(c.seq) == 0 {
		return 0
	}
	lowest := c.seq[len(c.seq)-1]
	todo := 0
	if lowest.state != batchEndSequence {
		todo = c.batcher.remaining(lowest.begin)
	}
	for _, b := range c.seq {
		switch b.state {
		case batchEndSequence, batchImportComplete, batchNil:
			continue
		default:
			todo += 1
		}
	}
	return todo
}

func (c *batchSequencer) importable() []batch {
	imp := make([]batch, 0)
	for i := range c.seq {
		if c.seq[i].state == batchImportable {
			imp = append(imp, c.seq[i])
			continue
		}
		// as soon as we hit a batch with a different state, we return everything leading to it.
		// if the first element isn't importable, we'll return slice [0:0] aka nothing.
		break
	}
	return imp
}

func newBatchSequencer(seqLen int, min, max, size primitives.Slot) *batchSequencer {
	b := batcher{min: min, size: size}
	seq := make([]batch, seqLen)
	seq[0] = b.before(max)
	return &batchSequencer{batcher: b, seq: seq}
}

type batcher struct {
	min  primitives.Slot
	size primitives.Slot
}

func (r batcher) remaining(upTo primitives.Slot) int {
	if r.min >= upTo {
		return 0
	}
	delta := upTo - r.min
	if delta%r.size != 0 {
		return int(delta/r.size) + 1
	}
	return int(delta / r.size)
}

func (r batcher) beforeBatch(upTo batch) batch {
	return r.before(upTo.begin)
}

func (r batcher) before(upTo primitives.Slot) batch {
	// upTo is an exclusive upper bound. Requesting a batch before the lower bound of backfill signals the end of the
	// backfill process.
	if upTo <= r.min {
		return batch{begin: upTo, end: upTo, state: batchEndSequence}
	}
	begin := r.min
	if upTo > r.size+r.min {
		begin = upTo - r.size
	}

	// batch.end is exclusive, .begin is inclusive, so the prev.end = next.begin
	return batch{begin: begin, end: upTo, state: batchInit}
}
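remaining() is a ceiling division over the half-open span [min, upTo); a standalone sketch of the same arithmetic with example values:

package example

import "fmt"

// ceilBatches mirrors batcher.remaining: how many size-sized batches
// cover the half-open span [min, upTo), counting one partial batch.
func ceilBatches(min, upTo, size uint64) int {
	if min >= upTo {
		return 0
	}
	delta := upTo - min
	if delta%size != 0 {
		return int(delta/size) + 1
	}
	return int(delta / size)
}

func main() {
	fmt.Println(ceilBatches(0, 11235, 64)) // 176: 175 full batches plus one partial
}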
beacon-chain/sync/backfill/batcher_test.go (new file, 213 lines)
@@ -0,0 +1,213 @@
package backfill

import (
	"fmt"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestBatcherBefore(t *testing.T) {
	cases := []struct {
		name   string
		b      batcher
		upTo   []primitives.Slot
		expect []batch
	}{
		{
			name: "size 10",
			b:    batcher{min: 0, size: 10},
			upTo: []primitives.Slot{33, 30, 10, 6},
			expect: []batch{
				{begin: 23, end: 33, state: batchInit},
				{begin: 20, end: 30, state: batchInit},
				{begin: 0, end: 10, state: batchInit},
				{begin: 0, end: 6, state: batchInit},
			},
		},
		{
			name: "size 4",
			b:    batcher{min: 0, size: 4},
			upTo: []primitives.Slot{33, 6, 4},
			expect: []batch{
				{begin: 29, end: 33, state: batchInit},
				{begin: 2, end: 6, state: batchInit},
				{begin: 0, end: 4, state: batchInit},
			},
		},
		{
			name: "trigger end",
			b:    batcher{min: 20, size: 10},
			upTo: []primitives.Slot{33, 30, 25, 21, 20, 19},
			expect: []batch{
				{begin: 23, end: 33, state: batchInit},
				{begin: 20, end: 30, state: batchInit},
				{begin: 20, end: 25, state: batchInit},
				{begin: 20, end: 21, state: batchInit},
				{begin: 20, end: 20, state: batchEndSequence},
				{begin: 19, end: 19, state: batchEndSequence},
			},
		},
	}
	for _, c := range cases {
		for i := range c.upTo {
			upTo := c.upTo[i]
			expect := c.expect[i]
			t.Run(fmt.Sprintf("%s upTo %d", c.name, upTo), func(t *testing.T) {
				got := c.b.before(upTo)
				require.Equal(t, expect.begin, got.begin)
				require.Equal(t, expect.end, got.end)
				require.Equal(t, expect.state, got.state)
			})
		}
	}
}

func TestBatchSequencer(t *testing.T) {
	var min, max, size primitives.Slot
	seqLen := 8
	min = 0
	max = 11235
	size = 64
	seq := newBatchSequencer(seqLen, min, max, size)
	expected := []batch{
		{begin: 11171, end: 11235},
		{begin: 11107, end: 11171},
		{begin: 11043, end: 11107},
		{begin: 10979, end: 11043},
		{begin: 10915, end: 10979},
		{begin: 10851, end: 10915},
		{begin: 10787, end: 10851},
		{begin: 10723, end: 10787},
	}
	got, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, seqLen, len(got))
	for i := 0; i < seqLen; i++ {
		g := got[i]
		exp := expected[i]
		require.Equal(t, exp.begin, g.begin)
		require.Equal(t, exp.end, g.end)
		require.Equal(t, batchSequenced, g.state)
	}
	// This should give us the error indicating there are too many outstanding batches.
	_, err = seq.sequence()
	require.ErrorIs(t, err, errMaxBatches)

	// mark the last batch completed so we can call sequence again.
	last := seq.seq[len(seq.seq)-1]
	// With this state, the batch should get served back to us as the next batch.
	last.state = batchErrRetryable
	seq.update(last)
	nextS, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 1, len(nextS))
	next := nextS[0]
	require.Equal(t, last.begin, next.begin)
	require.Equal(t, last.end, next.end)
	// sequence() should replace the batchErrRetryable state with batchSequenced.
	require.Equal(t, batchSequenced, next.state)

	// No batches have been marked importable.
	require.Equal(t, 0, len(seq.importable()))

	// Mark our batch importable and make sure it shows up in the list of importable batches.
	next.state = batchImportable
	seq.update(next)
	require.Equal(t, 0, len(seq.importable()))
	first := seq.seq[0]
	first.state = batchImportable
	seq.update(first)
	require.Equal(t, 1, len(seq.importable()))
	require.Equal(t, len(seq.seq), seqLen)
	// change the last element back to batchInit so that the importable test stays simple
	last = seq.seq[len(seq.seq)-1]
	last.state = batchInit
	seq.update(last)
	// ensure that the number of importable elements grows as the list is marked importable
	for i := 0; i < len(seq.seq); i++ {
		seq.seq[i].state = batchImportable
		require.Equal(t, i+1, len(seq.importable()))
	}
	// reset everything to init
	for i := 0; i < len(seq.seq); i++ {
		seq.seq[i].state = batchInit
		require.Equal(t, 0, len(seq.importable()))
	}
	// loop backwards and make sure importable is zero until the first element is importable
	for i := len(seq.seq) - 1; i > 0; i-- {
		seq.seq[i].state = batchImportable
		require.Equal(t, 0, len(seq.importable()))
	}
	seq.seq[0].state = batchImportable
	require.Equal(t, len(seq.seq), len(seq.importable()))

	// reset everything to init again
	for i := 0; i < len(seq.seq); i++ {
		seq.seq[i].state = batchInit
		require.Equal(t, 0, len(seq.importable()))
	}
	// set first 3 elements to importable. we should see them in the result for importable()
	// and be able to use update to cycle them away.
	seq.seq[0].state, seq.seq[1].state, seq.seq[2].state = batchImportable, batchImportable, batchImportable
	require.Equal(t, 3, len(seq.importable()))
	a, b, c, z := seq.seq[0], seq.seq[1], seq.seq[2], seq.seq[3]
	require.NotEqual(t, z.begin, seq.seq[2].begin)
	require.NotEqual(t, z.begin, seq.seq[1].begin)
	require.NotEqual(t, z.begin, seq.seq[0].begin)
	a.state, b.state, c.state = batchImportComplete, batchImportComplete, batchImportComplete
	seq.update(a)

	// follow z as it moves down the chain to the first spot
	require.Equal(t, z.begin, seq.seq[2].begin)
	require.NotEqual(t, z.begin, seq.seq[1].begin)
	require.NotEqual(t, z.begin, seq.seq[0].begin)
	seq.update(b)
	require.NotEqual(t, z.begin, seq.seq[2].begin)
	require.Equal(t, z.begin, seq.seq[1].begin)
	require.NotEqual(t, z.begin, seq.seq[0].begin)
	seq.update(c)
	require.NotEqual(t, z.begin, seq.seq[2].begin)
	require.NotEqual(t, z.begin, seq.seq[1].begin)
	require.Equal(t, z.begin, seq.seq[0].begin)

	// Check integrity of begin/end alignment across the sequence.
	// Also update all the states to sequenced for the convenience of the next test.
	for i := 1; i < len(seq.seq); i++ {
		require.Equal(t, seq.seq[i].end, seq.seq[i-1].begin)
		// won't touch the first element, which is fine because it is marked complete below.
		seq.seq[i].state = batchSequenced
	}

	// TODO: break this test scenario out into its own test function. It's important to ensure batcher acts correctly
	// when the minimum bound moves up. As epochs advance in real time, the lower slot bound of blocks we need to keep
	// per the spec moves closer to the original sync checkpoint. We want to update the minimum bound
	// when epochs advance in real time as an optimization to sync the smallest number of blocks possible.

	// set the min for the batcher close to the lowest slot. This will force the next batch to be partial and the batch
	// after that to be the final batch.
	newMin := seq.seq[len(seq.seq)-1].begin - 30
	seq.batcher.min = newMin
	first = seq.seq[0]
	first.state = batchImportComplete
	// update() with a complete state will cause the sequence to be extended with an additional batch
	seq.update(first)
	lastS, err := seq.sequence()
	require.NoError(t, err)
	last = lastS[0]
	require.Equal(t, newMin, last.begin)
	require.Equal(t, seq.seq[len(seq.seq)-2].begin, last.end)

	// Mark first batch done again, this time check that sequence() gives errEndSequence.
	first = seq.seq[0]
	first.state = batchImportComplete
	// update() with a complete state will cause the sequence to be extended with an additional batch
	seq.update(first)
	endExp, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 1, len(endExp))
	end := endExp[0]
	//require.ErrorIs(t, err, errEndSequence)
	require.Equal(t, batchEndSequence, end.state)
}
beacon-chain/sync/backfill/coverage/BUILD.bazel (new file, 9 lines)
@@ -0,0 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["coverage.go"],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage",
    visibility = ["//visibility:public"],
    deps = ["//consensus-types/primitives:go_default_library"],
)
beacon-chain/sync/backfill/coverage/coverage.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package coverage

import "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"

// AvailableBlocker reports whether the node's database contains a block covering a given slot,
// accounting for any history gap left by checkpoint sync that backfill has not yet filled.
type AvailableBlocker interface {
	AvailableBlock(primitives.Slot) bool
}
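Keeping the interface in its own small package lets stategen depend on it without importing the backfill service itself. Any type with the single method satisfies it; a hypothetical test double:

package example

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// staticBlocker is a hypothetical stand-in for tests: every slot at or
// above low is reported as available.
type staticBlocker struct {
	low primitives.Slot
}

func (s staticBlocker) AvailableBlock(sl primitives.Slot) bool {
	return sl >= s.low
}

var _ coverage.AvailableBlocker = staticBlocker{}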
beacon-chain/sync/backfill/pool.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package backfill

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
	log "github.com/sirupsen/logrus"
)

type BatchWorkerPool interface {
	Spawn(ctx context.Context, n int, clock *startup.Clock, a peerAssigner, v *verifier)
	Todo(b batch)
	Complete() (batch, error)
}

type peerAssigner interface {
	Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error)
}

type worker interface {
	run(context.Context)
}

type newWorker func(id workerId, in, out chan batch, c *startup.Clock, v *verifier) worker

func DefaultNewWorker(p p2p.P2P) newWorker {
	return func(id workerId, in, out chan batch, c *startup.Clock, v *verifier) worker {
		return newP2pWorker(id, p, in, out, c, v)
	}
}

type p2pBatchWorkerPool struct {
	maxBatches  int
	newWorker   newWorker
	assigner    peerAssigner
	toWorkers   chan batch
	fromWorkers chan batch
	toRouter    chan batch
	fromRouter  chan batch
	shutdownErr chan error
	endSeq      []batch
	ctx         context.Context
	cancel      func()
}

var _ BatchWorkerPool = &p2pBatchWorkerPool{}

func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int) *p2pBatchWorkerPool {
	nw := DefaultNewWorker(p)
	return &p2pBatchWorkerPool{
		newWorker:   nw,
		toRouter:    make(chan batch, maxBatches),
		fromRouter:  make(chan batch, maxBatches),
		toWorkers:   make(chan batch),
		fromWorkers: make(chan batch),
		maxBatches:  maxBatches,
		shutdownErr: make(chan error),
	}
}

func (p *p2pBatchWorkerPool) Spawn(ctx context.Context, n int, c *startup.Clock, a peerAssigner, v *verifier) {
	p.ctx, p.cancel = context.WithCancel(ctx)
	go p.batchRouter(a)
	for i := 0; i < n; i++ {
		go p.newWorker(workerId(i), p.toWorkers, p.fromWorkers, c, v).run(p.ctx)
	}
}

func (p *p2pBatchWorkerPool) Todo(b batch) {
	// Intercept batchEndSequence batches so workers can remain unaware of this state.
	// Workers don't know what to do with batchEndSequence batches. They are a signal to the pool that the batcher
	// has stopped producing things for the workers to do and the pool is close to winding down. See Complete()
	// to understand how the pool manages the state where all workers are idle
	// and all incoming batches signal end of sequence.
	if b.state == batchEndSequence {
		p.endSeq = append(p.endSeq, b)
		return
	}
	p.toRouter <- b
}

func (p *p2pBatchWorkerPool) Complete() (batch, error) {
	if len(p.endSeq) == p.maxBatches {
		return p.endSeq[0], errEndSequence
	}

	select {
	case b := <-p.fromRouter:
		return b, nil
	case err := <-p.shutdownErr:
		return batch{}, errors.Wrap(err, "fatal error from backfill worker pool")
	case <-p.ctx.Done():
		return batch{}, p.ctx.Err()
	}
}

func (p *p2pBatchWorkerPool) batchRouter(pa peerAssigner) {
	busy := make(map[peer.ID]bool)
	todo := make([]batch, 0)
	rt := time.NewTicker(time.Second)
	defer rt.Stop()
	for {
		select {
		case b := <-p.toRouter:
			todo = append(todo, b)
		case <-rt.C:
			// Worker assignments can fail if assignBatch can't find a suitable peer.
			// This ticker exists to periodically break out of the channel select
			// to retry failed assignments.
		case b := <-p.fromWorkers:
			pid := b.pid
			busy[pid] = false
			p.fromRouter <- b
		case <-p.ctx.Done():
			log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
			return
		}
		if len(todo) == 0 {
			continue
		}
		// Try to assign as many outstanding batches as possible to peers and feed the assigned batches to workers.
		assigned, err := pa.Assign(busy, len(todo))
		if err != nil {
			if errors.Is(err, peers.ErrInsufficientSuitable) {
				// Transient error resulting from insufficient number of connected peers. Leave batches in
				// queue and get to them whenever the peer situation is resolved.
				continue
			}
			p.shutdown(err)
			return
		}
		for _, pid := range assigned {
			busy[pid] = true
			todo[0].pid = pid
			p.toWorkers <- todo[0]
			todo = todo[1:]
		}
	}
}

func (p *p2pBatchWorkerPool) shutdown(err error) {
	p.cancel()
	p.shutdownErr <- err
}
beacon-chain/sync/backfill/pool_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package backfill

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

type MockAssigner struct {
	err    error
	assign []peer.ID
}

func (m MockAssigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
	if m.err != nil {
		return nil, m.err
	}
	return m.assign, nil
}

var _ peerAssigner = &MockAssigner{}

func TestPoolDetectAllEnded(t *testing.T) {
	nw := 5
	p2p := p2ptest.NewTestP2P(t)
	ctx := context.Background()
	ma := &MockAssigner{}
	pool := newP2PBatchWorkerPool(p2p, nw)
	st, err := util.NewBeaconState()
	require.NoError(t, err)
	v, err := newBackfillVerifier(st)
	require.NoError(t, err)
	pool.Spawn(ctx, nw, startup.NewClock(time.Now(), [32]byte{}), ma, v)
	br := batcher{min: 10, size: 10}
	endSeq := br.before(0)
	require.Equal(t, batchEndSequence, endSeq.state)
	for i := 0; i < nw; i++ {
		pool.Todo(endSeq)
	}
	b, err := pool.Complete()
	require.ErrorIs(t, err, errEndSequence)
	require.Equal(t, b.end, endSeq.end)
}

type mockPool struct {
	spawnCalled  []int
	finishedChan chan batch
	finishedErr  chan error
	todoChan     chan batch
}

func (m *mockPool) Spawn(_ context.Context, _ int, _ *startup.Clock, _ peerAssigner, _ *verifier) {
}

func (m *mockPool) Todo(b batch) {
	m.todoChan <- b
}

func (m *mockPool) Complete() (batch, error) {
	select {
	case b := <-m.finishedChan:
		return b, nil
	case err := <-m.finishedErr:
		return batch{}, err
	}
}

var _ BatchWorkerPool = &mockPool{}
beacon-chain/sync/backfill/service.go (new file, 256 lines)
@@ -0,0 +1,256 @@
package backfill

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
	"github.com/prysmaticlabs/prysm/v4/runtime"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
	log "github.com/sirupsen/logrus"
)

const defaultWorkerCount = 5

// TODO use the correct beacon param for blocks by range size instead
const defaultBatchSize = 64

type Service struct {
	ctx           context.Context
	su            *StatusUpdater
	ms            minimumSlotter
	cw            startup.ClockWaiter
	nWorkers      int
	errChan       chan error
	batchSeq      *batchSequencer
	batchSize     uint64
	pool          BatchWorkerPool
	verifier      *verifier
	p2p           p2p.P2P
	batchImporter batchImporter
}

var _ runtime.Service = (*Service)(nil)

type ServiceOption func(*Service) error

func WithWorkerCount(n int) ServiceOption {
	return func(s *Service) error {
		s.nWorkers = n
		return nil
	}
}

func WithBatchSize(n uint64) ServiceOption {
	return func(s *Service) error {
		s.batchSize = n
		return nil
	}
}

type minimumSlotter interface {
	minimumSlot() primitives.Slot
	setClock(*startup.Clock)
}

type defaultMinimumSlotter struct {
	clock *startup.Clock
	cw    startup.ClockWaiter
	ctx   context.Context
}

func (d *defaultMinimumSlotter) minimumSlot() primitives.Slot {
	if d.clock == nil {
		var err error
		d.clock, err = d.cw.WaitForClock(d.ctx)
		if err != nil {
			log.WithError(err).Fatal("failed to obtain system/genesis clock, unable to start backfill service")
		}
	}
	return MinimumBackfillSlot(d.clock.CurrentSlot())
}

func (d *defaultMinimumSlotter) setClock(c *startup.Clock) {
	d.clock = c
}

var _ minimumSlotter = &defaultMinimumSlotter{}

type batchImporter func(ctx context.Context, b batch, su *StatusUpdater) (*dbval.BackfillStatus, error)

func defaultBatchImporter(ctx context.Context, b batch, su *StatusUpdater) (*dbval.BackfillStatus, error) {
	status := su.status()
	if err := b.ensureParent(bytesutil.ToBytes32(status.LowParentRoot)); err != nil {
		return status, err
	}
	// Import blocks to db and update db state to reflect the newly imported blocks.
	// Other parts of the beacon node may use the same StatusUpdater instance
	// via the coverage.AvailableBlocker interface to safely determine if a given slot has been backfilled.
	status, err := su.fillBack(ctx, b.results)
	if err != nil {
		log.WithError(err).Fatal("Non-recoverable db error in backfill service, quitting.")
	}
	return status, nil
}

func NewService(ctx context.Context, su *StatusUpdater, cw startup.ClockWaiter, p p2p.P2P, opts ...ServiceOption) (*Service, error) {
	s := &Service{
		ctx:           ctx,
		su:            su,
		cw:            cw,
		ms:            &defaultMinimumSlotter{cw: cw, ctx: ctx},
		p2p:           p,
		batchImporter: defaultBatchImporter,
	}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	if s.nWorkers == 0 {
		s.nWorkers = defaultWorkerCount
	}
	if s.batchSize == 0 {
		s.batchSize = defaultBatchSize
	}
	s.pool = newP2PBatchWorkerPool(p, s.nWorkers)

	return s, nil
}

func (s *Service) initVerifier(ctx context.Context) (*verifier, error) {
	cps, err := s.su.originState(ctx)
	if err != nil {
		return nil, err
	}
	return newBackfillVerifier(cps)
}

func (s *Service) Start() {
	ctx, cancel := context.WithCancel(s.ctx)
	defer func() {
		cancel()
	}()
	clock, err := s.cw.WaitForClock(ctx)
	if err != nil {
		log.WithError(err).Fatal("backfill service failed to start while waiting for genesis data")
	}
	s.ms.setClock(clock)

	status := s.su.status()
	s.batchSeq = newBatchSequencer(s.nWorkers, s.ms.minimumSlot(), primitives.Slot(status.LowSlot), primitives.Slot(s.batchSize))
	// Exit early if there aren't going to be any batches to backfill.
	if s.batchSeq.numTodo() == 0 {
		return
	}
	originE := slots.ToEpoch(primitives.Slot(status.OriginSlot))
	assigner := peers.NewAssigner(ctx, s.p2p.Peers(), params.BeaconConfig().MaxPeersToSync, originE)
	s.verifier, err = s.initVerifier(ctx)
	if err != nil {
		log.WithError(err).Fatal("Unable to initialize backfill verifier, quitting.")
	}
	s.pool.Spawn(ctx, s.nWorkers, clock, assigner, s.verifier)

	if err = s.initBatches(); err != nil {
		log.WithError(err).Fatal("Non-recoverable error in backfill service, quitting.")
	}

	for {
		b, err := s.pool.Complete()
		if err != nil {
			if errors.Is(err, errEndSequence) {
				log.WithField("backfill_slot", b.begin).Info("Backfill is complete")
			} else {
				log.WithError(err).Fatal("Non-recoverable error in backfill service, quitting.")
			}
			return
		}
		s.batchSeq.update(b)
		importable := s.batchSeq.importable()
		imported := 0
		for i := range importable {
			ib := importable[i]
			if len(ib.results) == 0 {
				log.WithFields(ib.logFields()).Error("Batch marked importable but contains no results")
			}
			_, err := s.batchImporter(ctx, ib, s.su)
			if err != nil {
				log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import.")
				s.downscore(ib)
				ib.state = batchErrRetryable
				s.batchSeq.update(ib)
				break
			}
			imported += 1
			log.WithFields(ib.logFields()).Debug("Backfill batch imported.")
			ib.state = batchImportComplete
			// Calling update with state=batchImportComplete will advance the batch list.
			s.batchSeq.update(ib)
		}
		if err := s.batchSeq.moveMinimum(s.ms.minimumSlot()); err != nil {
			log.WithError(err).Fatal("Non-recoverable error in backfill service, quitting.")
		}
		log.WithField("imported", imported).WithField("importable", len(importable)).
			WithField("batches_remaining", s.batchSeq.numTodo()).
			Info("Backfill batches processed.")
		batches, err := s.batchSeq.sequence()
		if err != nil {
			// This typically means we have several importable batches, but they are stuck behind a batch that needs
			// to complete first so that we can chain parent roots across batches.
			// ie backfilling [[90..100), [80..90), [70..80)], if we complete [70..80) and [80..90) but not [90..100),
			// we can't move forward until [90..100) completes, because we need to confirm 99 connects to 100,
			// and then we'll have the parent_root expected by 90 to ensure it matches the root for 89,
			// at which point we know we can process [80..90).
			if errors.Is(err, errMaxBatches) {
				log.Debug("Backfill batches waiting for descendent batch to complete.")
				continue
			}
		}
		for _, b := range batches {
			s.pool.Todo(b)
		}
	}
}

func (s *Service) initBatches() error {
	batches, err := s.batchSeq.sequence()
	if err != nil {
		return err
	}
	for _, b := range batches {
		s.pool.Todo(b)
	}
	return nil
}

func (s *Service) downscore(b batch) {
}

func (s *Service) Stop() error {
	return nil
}

func (s *Service) Status() error {
	return nil
}

// MinimumBackfillSlot determines the lowest slot that backfill needs to download based on looking back
// MIN_EPOCHS_FOR_BLOCK_REQUESTS from the current slot.
func MinimumBackfillSlot(current primitives.Slot) primitives.Slot {
	oe := helpers.MinEpochsForBlockRequests()
	if oe > slots.MaxSafeEpoch() {
		oe = slots.MaxSafeEpoch()
	}
	offset := slots.UnsafeEpochStart(oe)
	if offset > current {
		return 0
	}
	return current - offset
}
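MinimumBackfillSlot clamps at genesis and otherwise subtracts MIN_EPOCHS_FOR_BLOCK_REQUESTS worth of slots from the current slot. A standalone sketch of the same arithmetic, with the epoch count and slots-per-epoch passed in explicitly; the mainnet values in the example are assumptions about configuration, not taken from this diff:

package example

import "fmt"

// minimumBackfillSlot mirrors the arithmetic above: the node must keep
// blocks for the trailing minEpochs epochs, so backfill only needs to
// reach back that far from the current slot.
func minimumBackfillSlot(current, minEpochs, slotsPerEpoch uint64) uint64 {
	offset := minEpochs * slotsPerEpoch
	if offset > current {
		return 0 // still within the mandatory retention window of genesis
	}
	return current - offset
}

func main() {
	// Assumed mainnet values: MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024, 32 slots/epoch.
	fmt.Println(minimumBackfillSlot(8_000_000, 33024, 32)) // 6943232
}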
beacon-chain/sync/backfill/service_test.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package backfill

import (
	"context"
	"testing"
	"time"

	p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

type mockMinimumSlotter struct {
	min primitives.Slot
}

var _ minimumSlotter = &mockMinimumSlotter{}

func (m mockMinimumSlotter) minimumSlot() primitives.Slot {
	return m.min
}

func (m mockMinimumSlotter) setClock(*startup.Clock) {
}

func TestServiceInit(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
	defer cancel()
	db := &mockBackfillDB{}
	su, err := NewUpdater(ctx, db)
	require.NoError(t, err)
	nWorkers := 5
	var batchSize uint64 = 100
	nBatches := nWorkers * 2
	var high uint64 = 11235
	originRoot := [32]byte{}
	origin, err := util.NewBeaconState()
	require.NoError(t, err)
	db.states = map[[32]byte]state.BeaconState{originRoot: origin}
	su.bs = &dbval.BackfillStatus{
		LowSlot:    high,
		OriginRoot: originRoot[:],
	}
	remaining := nBatches
	cw := startup.NewClockSynchronizer()
	require.NoError(t, cw.SetClock(startup.NewClock(time.Now(), [32]byte{})))
	pool := &mockPool{todoChan: make(chan batch, nWorkers), finishedChan: make(chan batch, nWorkers)}
	p2pt := p2ptest.NewTestP2P(t)
	srv, err := NewService(ctx, su, cw, p2pt, WithBatchSize(batchSize), WithWorkerCount(nWorkers))
	require.NoError(t, err)
	srv.ms = mockMinimumSlotter{min: primitives.Slot(high - batchSize*uint64(nBatches))}
	srv.pool = pool
	srv.batchImporter = func(context.Context, batch, *StatusUpdater) (*dbval.BackfillStatus, error) {
		return &dbval.BackfillStatus{}, nil
	}
	go srv.Start()
	todo := make([]batch, 0)
	todo = testReadN(t, ctx, pool.todoChan, nWorkers, todo)
	require.Equal(t, nWorkers, len(todo))
	for i := 0; i < remaining; i++ {
		b := todo[i]
		if b.state == batchSequenced {
			b.state = batchImportable
		}
		pool.finishedChan <- b
		todo = testReadN(t, ctx, pool.todoChan, 1, todo)
	}
	require.Equal(t, remaining+nWorkers, len(todo))
	for i := remaining; i < remaining+nWorkers; i++ {
		require.Equal(t, batchEndSequence, todo[i].state)
	}
}

func testReadN(t *testing.T, ctx context.Context, c chan batch, n int, into []batch) []batch {
	for i := 0; i < n; i++ {
		select {
		case b := <-c:
			into = append(into, b)
		case <-ctx.Done():
			// The context timed out before we read the expected number of batches.
			t.Fatal("timed out waiting for batches")
		}
	}
	return into
}
@@ -2,121 +2,153 @@ package backfill

import (
	"context"
	"sync"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
)

// NewStatus correctly initializes a Status value with the required database value.
func NewStatus(store BackfillDB) *Status {
	return &Status{
// NewUpdater correctly initializes a StatusUpdater value with the required database value.
func NewUpdater(ctx context.Context, store BackfillDB) (*StatusUpdater, error) {
	s := &StatusUpdater{
		store: store,
	}
	status, err := s.store.BackfillStatus(ctx)
	if err != nil {
		if errors.Is(err, db.ErrNotFound) {
			return s, s.recoverLegacy(ctx)
		}
		return nil, err
	}
	s.swapStatus(status)
	return s, nil
}

// Status provides a way to update and query the status of a backfill process that may be necessary to track when
// StatusUpdater provides a way to update and query the status of a backfill process that may be necessary to track when
// a node was initialized via checkpoint sync. With checkpoint sync, there will be a gap in node history from genesis
// until the checkpoint sync origin block. Status provides the means to update the value keeping track of the lower
// end of the missing block range via the Advance() method, to check whether a Slot is missing from the database
// via the SlotCovered() method, and to see the current StartGap() and EndGap().
type Status struct {
	start primitives.Slot
	end   primitives.Slot
// until the checkpoint sync origin block. StatusUpdater provides the means to update the value keeping track of the lower
// end of the missing block range via the fillBack() method, and to check whether a Slot is missing from the database
// via the AvailableBlock() method.
type StatusUpdater struct {
	sync.RWMutex
	store       BackfillDB
	genesisSync bool
	bs          *dbval.BackfillStatus
}

// SlotCovered uses StartGap() and EndGap() to determine if the given slot is covered by the current chain history.
// If the slot is <= StartGap(), or >= EndGap(), the result is true.
// If the slot is between StartGap() and EndGap(), the result is false.
func (s *Status) SlotCovered(sl primitives.Slot) bool {
// AvailableBlock determines if the given slot is covered by the current chain history.
// If the slot is <= backfill low slot, or >= backfill high slot, the result is true.
// If the slot is between the backfill low and high slots, the result is false.
func (s *StatusUpdater) AvailableBlock(sl primitives.Slot) bool {
	s.RLock()
	defer s.RUnlock()
	// short circuit if the node was synced from genesis
	if s.genesisSync {
	if s.genesisSync || sl == 0 || s.bs.LowSlot <= uint64(sl) {
		return true
	}
	if s.StartGap() < sl && sl < s.EndGap() {
		return false
	return false
}

// status is a threadsafe method to access a copy of the BackfillStatus value.
func (s *StatusUpdater) status() *dbval.BackfillStatus {
	s.RLock()
	defer s.RUnlock()
	return &dbval.BackfillStatus{
		LowSlot:       s.bs.LowSlot,
		LowRoot:       s.bs.LowRoot,
		LowParentRoot: s.bs.LowParentRoot,
		OriginSlot:    s.bs.OriginSlot,
		OriginRoot:    s.bs.OriginRoot,
	}
	return true
}

// StartGap returns the slot at the beginning of the range that needs to be backfilled.
func (s *Status) StartGap() primitives.Slot {
	return s.start
}

// EndGap returns the slot at the end of the range that needs to be backfilled.
func (s *Status) EndGap() primitives.Slot {
	return s.end
}

var ErrAdvancePastOrigin = errors.New("cannot advance backfill Status beyond the origin checkpoint slot")

// Advance advances the backfill position to the given slot & root.
// It updates the backfill block root entry in the database,
// and also updates the Status value's copy of the backfill position slot.
func (s *Status) Advance(ctx context.Context, upTo primitives.Slot, root [32]byte) error {
	if upTo > s.end {
		return errors.Wrapf(ErrAdvancePastOrigin, "advance slot=%d, origin slot=%d", upTo, s.end)
// fillBack saves the slice of blocks and updates the BackfillStatus LowSlot/Root/ParentRoot tracker to the values
// from the first block in the slice. This method assumes that the block slice has been fully validated and
// sorted in slot order by the calling function.
func (s *StatusUpdater) fillBack(ctx context.Context, blocks []blocks.ROBlock) (*dbval.BackfillStatus, error) {
	status := s.status()
	if len(blocks) == 0 {
		return status, nil
	}
	s.start = upTo
	return s.store.SaveBackfillBlockRoot(ctx, root)
}

// Reload queries the database for backfill status, initializing the internal data and validating the database state.
func (s *Status) Reload(ctx context.Context) error {
	cpRoot, err := s.store.OriginCheckpointBlockRoot(ctx)
	if err != nil {
		// mark genesis sync and short circuit further lookups
		if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
			s.genesisSync = true
			return nil
	for _, b := range blocks {
		if err := s.store.SaveBlock(ctx, b); err != nil {
			return nil, errors.Wrapf(err, "error saving backfill block with root=%#x, slot=%d", b.Root(), b.Block().Slot())
		}
		return err
	}
	cpBlock, err := s.store.Block(ctx, cpRoot)
	if err != nil {
		return errors.Wrapf(err, "error retrieving block for origin checkpoint root=%#x", cpRoot)
	}
	if err := blocks.BeaconBlockIsNil(cpBlock); err != nil {
		return err
	}
	s.end = cpBlock.Block().Slot()

	_, err = s.store.GenesisBlockRoot(ctx)
	// Update backfill status based on the block with the lowest slot in the batch.
	lowest := blocks[0]
	r := lowest.Root()
	pr := lowest.Block().ParentRoot()
	status.LowSlot = uint64(lowest.Block().Slot())
	status.LowRoot = r[:]
	status.LowParentRoot = pr[:]
	return status, s.saveStatus(ctx, status)
}

// recoverLegacy will check to see if the db is from a legacy checkpoint sync, and either build a new BackfillStatus
// or label the node as synced from genesis.
func (s *StatusUpdater) recoverLegacy(ctx context.Context) error {
	cpr, err := s.store.OriginCheckpointBlockRoot(ctx)
	if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
		s.genesisSync = true
		return nil
	}

	cpb, err := s.store.Block(ctx, cpr)
	if err != nil {
		if errors.Is(err, db.ErrNotFoundGenesisBlockRoot) {
			return errors.Wrap(err, "genesis block root required for checkpoint sync")
		}
		return errors.Wrapf(err, "error retrieving block for origin checkpoint root=%#x", cpr)
	}
	if err := blocks.BeaconBlockIsNil(cpb); err != nil {
		return errors.Wrapf(err, "nil block found for origin checkpoint root=%#x", cpr)
	}
	os := uint64(cpb.Block().Slot())
	lpr := cpb.Block().ParentRoot()
	bs := &dbval.BackfillStatus{
		LowSlot:       os,
		LowRoot:       cpr[:],
		LowParentRoot: lpr[:],
		OriginSlot:    os,
		OriginRoot:    cpr[:],
	}
	return s.saveStatus(ctx, bs)
}

func (s *StatusUpdater) saveStatus(ctx context.Context, bs *dbval.BackfillStatus) error {
	if err := s.store.SaveBackfillStatus(ctx, bs); err != nil {
		return err
	}

	bfRoot, err := s.store.BackfillBlockRoot(ctx)
	if err != nil {
		if errors.Is(err, db.ErrNotFoundBackfillBlockRoot) {
			return errors.Wrap(err, "found origin checkpoint block root, but no backfill block root")
		}
		return err
	}
bfBlock, err := s.store.Block(ctx, bfRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving block for backfill root=%#x", bfRoot)
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(bfBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
s.start = bfBlock.Block().Slot()
|
||||
s.swapStatus(bs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackfillDB describes the set of DB methods that the Status type needs to function.
|
||||
type BackfillDB interface {
|
||||
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
func (s *StatusUpdater) swapStatus(bs *dbval.BackfillStatus) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.bs = bs
|
||||
}
|
||||
|
||||
// originState looks up the state for the checkpoint sync origin. This is a hack, because StatusUpdater is the only
|
||||
// thing that needs db access and it has the origin root handy, so it's convenient to look it up here. The state is
|
||||
// needed by the verifier.
|
||||
func (s *StatusUpdater) originState(ctx context.Context) (state.BeaconState, error) {
|
||||
return s.store.StateOrError(ctx, bytesutil.ToBytes32(s.status().OriginRoot))
|
||||
}
|
||||
|
||||
// BackfillDB describes the set of DB methods that the StatusUpdater type needs to function.
|
||||
type BackfillDB interface {
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
|
||||
OriginCheckpointBlockRoot(context.Context) ([32]byte, error)
|
||||
Block(context.Context, [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock) error
|
||||
GenesisBlockRoot(context.Context) ([32]byte, error)
|
||||
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
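Taken together, fillBack and AvailableBlock define the tracker's contract: the LowSlot watermark only ever moves toward genesis, and a slot reads as available once it sits at or above that watermark. A minimal sketch of a consumer, assuming it lives in package backfill next to the code above; NewUpdater is the constructor exercised in the tests below, but the import-loop framing is an assumption, not part of the diff:

// Sketch only, not part of the diff: driving the StatusUpdater from an import loop.
func importBatch(ctx context.Context, su *StatusUpdater, batch []blocks.ROBlock) error {
    if len(batch) == 0 {
        return nil // nothing to save; the watermark is unchanged
    }
    // fillBack persists the batch and lowers the LowSlot watermark to the
    // lowest (first) block; the batch must already be verified and slot-sorted.
    status, err := su.fillBack(ctx, batch)
    if err != nil {
        return err
    }
    // Any slot at or above the new watermark now reads as available.
    if !su.AvailableBlock(primitives.Slot(status.LowSlot)) {
        return errors.New("watermark not visible after fillBack")
    }
    return nil
}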
@@ -4,9 +4,12 @@ import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/proto/dbval"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/config/params"
@@ -21,119 +24,128 @@ type mockBackfillDB struct {
    saveBackfillBlockRoot     func(ctx context.Context, blockRoot [32]byte) error
    genesisBlockRoot          func(ctx context.Context) ([32]byte, error)
    originCheckpointBlockRoot func(ctx context.Context) ([32]byte, error)
    backfillBlockRoot         func(ctx context.Context) ([32]byte, error)
    block                     func(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
    saveBackfillStatus        func(ctx context.Context, status *dbval.BackfillStatus) error
    backfillStatus            func(context.Context) (*dbval.BackfillStatus, error)
    status                    *dbval.BackfillStatus
    err                       error
    states                    map[[32]byte]state.BeaconState
    blocks                    map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
}

var _ BackfillDB = &mockBackfillDB{}

func (db *mockBackfillDB) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
    if db.saveBackfillBlockRoot != nil {
        return db.saveBackfillBlockRoot(ctx, blockRoot)
func (d *mockBackfillDB) StateOrError(_ context.Context, blockRoot [32]byte) (state.BeaconState, error) {
    st, ok := d.states[blockRoot]
    if !ok {
        return nil, db.ErrNotFoundState
    }
    return errEmptyMockDBMethod
    return st, nil
}

func (db *mockBackfillDB) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
    if db.genesisBlockRoot != nil {
        return db.genesisBlockRoot(ctx)
func (d *mockBackfillDB) SaveBackfillStatus(ctx context.Context, status *dbval.BackfillStatus) error {
    if d.saveBackfillStatus != nil {
        return d.saveBackfillStatus(ctx, status)
    }
    d.status = status
    return nil
}

func (d *mockBackfillDB) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
    if d.backfillStatus != nil {
        return d.backfillStatus(ctx)
    }
    return d.status, nil
}

func (d *mockBackfillDB) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
    if d.genesisBlockRoot != nil {
        return d.genesisBlockRoot(ctx)
    }
    return [32]byte{}, errEmptyMockDBMethod
}

func (db *mockBackfillDB) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
    if db.originCheckpointBlockRoot != nil {
        return db.originCheckpointBlockRoot(ctx)
func (d *mockBackfillDB) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
    if d.originCheckpointBlockRoot != nil {
        return d.originCheckpointBlockRoot(ctx)
    }
    return [32]byte{}, errEmptyMockDBMethod
}

func (db *mockBackfillDB) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
    if db.backfillBlockRoot != nil {
        return db.backfillBlockRoot(ctx)
func (d *mockBackfillDB) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
    if d.block != nil {
        return d.block(ctx, blockRoot)
    }
    return [32]byte{}, errEmptyMockDBMethod
    b, ok := d.blocks[blockRoot]
    if !ok {
        return nil, db.ErrNotFound
    }
    return b, nil
}

func (db *mockBackfillDB) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
    if db.block != nil {
        return db.block(ctx, blockRoot)
func (d *mockBackfillDB) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock) error {
    if d.blocks == nil {
        d.blocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
    }
    return nil, errEmptyMockDBMethod
    r, err := signed.Block().HashTreeRoot()
    if err != nil {
        return err
    }
    d.blocks[r] = signed
    return nil
}

func TestSlotCovered(t *testing.T) {
    cases := []struct {
        name   string
        slot   primitives.Slot
        status *Status
        status *StatusUpdater
        result bool
    }{
        {
            name:   "below start true",
            status: &Status{start: 1},
            name:   "genesis true",
            status: &StatusUpdater{bs: &dbval.BackfillStatus{LowSlot: 10}},
            slot:   0,
            result: true,
        },
        {
            name:   "above end true",
            status: &Status{end: 1},
            status: &StatusUpdater{bs: &dbval.BackfillStatus{LowSlot: 1}},
            slot:   2,
            result: true,
        },
        {
            name:   "equal end true",
            status: &Status{end: 1},
            status: &StatusUpdater{bs: &dbval.BackfillStatus{LowSlot: 1}},
            slot:   1,
            result: true,
        },
        {
            name:   "equal start true",
            status: &Status{start: 2},
            slot:   2,
            result: true,
        },
        {
            name:   "between false",
            status: &Status{start: 1, end: 3},
            slot:   2,
            result: false,
        },
        {
            name:   "genesisSync always true",
            status: &Status{genesisSync: true},
            status: &StatusUpdater{genesisSync: true},
            slot:   100,
            result: true,
        },
    }
    for _, c := range cases {
        result := c.status.SlotCovered(c.slot)
        require.Equal(t, c.result, result)
        t.Run(c.name, func(t *testing.T) {
            result := c.status.AvailableBlock(c.slot)
            require.Equal(t, c.result, result)
        })
    }
}

func TestAdvance(t *testing.T) {
func TestStatusUpdater_FillBack(t *testing.T) {
    ctx := context.Background()
    saveBackfillBuf := make([][32]byte, 0)
    mdb := &mockBackfillDB{
        saveBackfillBlockRoot: func(ctx context.Context, root [32]byte) error {
            saveBackfillBuf = append(saveBackfillBuf, root)
            return nil
        },
    }
    s := &Status{end: 100, store: mdb}
    var root [32]byte
    copy(root[:], []byte{0x23, 0x23})
    require.NoError(t, s.Advance(ctx, 90, root))
    require.Equal(t, root, saveBackfillBuf[0])
    not := s.SlotCovered(95)
    require.Equal(t, false, not)

    // this should still be len 1 after failing to advance
    require.Equal(t, 1, len(saveBackfillBuf))
    require.ErrorIs(t, s.Advance(ctx, s.end+1, root), ErrAdvancePastOrigin)
    // this has an element in it from the previous test, there shouldn't be an additional one
    require.Equal(t, 1, len(saveBackfillBuf))
    mdb := &mockBackfillDB{}
    s := &StatusUpdater{bs: &dbval.BackfillStatus{LowSlot: 100}, store: mdb}
    b, err := setupTestBlock(90)
    require.NoError(t, err)
    rob, err := blocks.NewROBlock(b)
    require.NoError(t, err)
    _, err = s.fillBack(ctx, []blocks.ROBlock{rob})
    require.NoError(t, err)
    require.Equal(t, true, s.AvailableBlock(95))
}

func goodBlockRoot(root [32]byte) func(ctx context.Context) ([32]byte, error) {
@@ -171,7 +183,7 @@ func TestReload(t *testing.T) {
        name     string
        db       BackfillDB
        err      error
        expected *Status
        expected *StatusUpdater
    }{
        /*{
            name: "origin not found, implying genesis sync ",
@@ -180,7 +192,7 @@ func TestReload(t *testing.T) {
            originCheckpointBlockRoot: func(ctx context.Context) ([32]byte, error) {
                return [32]byte{}, db.ErrNotFoundOriginBlockRoot
            }},
            expected: &Status{genesisSync: true},
            expected: &StatusUpdater{genesisSync: true},
        },
        {
            name: "genesis not found error",
@@ -318,7 +330,7 @@ func TestReload(t *testing.T) {
            err: derp,
        },*/
        {
            name: "complete happy path",
            name: "legacy recovery",
            db: &mockBackfillDB{
                genesisBlockRoot:          goodBlockRoot(params.BeaconConfig().ZeroHash),
                originCheckpointBlockRoot: goodBlockRoot(originRoot),
@@ -331,18 +343,15 @@ func TestReload(t *testing.T) {
                    }
                    return nil, errors.New("not derp")
                },
                backfillBlockRoot: goodBlockRoot(backfillRoot),
                backfillStatus: func(context.Context) (*dbval.BackfillStatus, error) { return nil, db.ErrNotFound },
            },
            err:      derp,
            expected: &Status{genesisSync: false, start: backfillSlot, end: originSlot},
            expected: &StatusUpdater{genesisSync: false, bs: &dbval.BackfillStatus{LowSlot: uint64(originSlot)}},
        },
    }

    for _, c := range cases {
        s := &Status{
            store: c.db,
        }
        err := s.Reload(ctx)
        s, err := NewUpdater(ctx, c.db)
        if err != nil {
            require.ErrorIs(t, err, c.err)
            continue
@@ -352,7 +361,6 @@ func TestReload(t *testing.T) {
            continue
        }
        require.Equal(t, c.expected.genesisSync, s.genesisSync)
        require.Equal(t, c.expected.start, s.start)
        require.Equal(t, c.expected.end, s.end)
        require.Equal(t, c.expected.bs.LowSlot, s.bs.LowSlot)
    }
}

113 beacon-chain/sync/backfill/verify.go Normal file
@@ -0,0 +1,113 @@
package backfill

import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/crypto/bls"
    "github.com/prysmaticlabs/prysm/v4/network/forks"
    "github.com/prysmaticlabs/prysm/v4/time/slots"
)

var errInvalidBatchChain = errors.New("parent_root of block does not match root of previous")
var errProposerIndexTooHigh = errors.New("proposer index not present in origin state")
var errUnknownDomain = errors.New("runtime error looking up signing domain for fork")

// VerifiedROBlocks represents a slice of blocks that have passed signature verification.
type VerifiedROBlocks []blocks.ROBlock

type verifier struct {
    // keys is the set of validator public keys from the state used to initialize the node via checkpoint sync.
    keys        [][fieldparams.BLSPubkeyLength]byte
    maxVal      primitives.ValidatorIndex
    vr          []byte
    fsched      forks.OrderedSchedule
    dt          [bls.DomainByteLength]byte
    forkDomains map[[4]byte][]byte
}

func (bs verifier) verify(blks []interfaces.ReadOnlySignedBeaconBlock) (VerifiedROBlocks, error) {
    var err error
    result := make([]blocks.ROBlock, len(blks))
    sigSet := bls.NewSet()
    for i := range blks {
        result[i], err = blocks.NewROBlock(blks[i])
        if err != nil {
            return nil, err
        }
        if i > 0 && result[i-1].Root() != result[i].Block().ParentRoot() {
            p, b := result[i-1], result[i]
            return nil, errors.Wrapf(errInvalidBatchChain,
                "slot %d parent_root=%#x, slot %d root = %#x",
                b.Block().Slot(), b.Block().ParentRoot(),
                p.Block().Slot(), p.Root())
        }
        set, err := bs.blockSignatureBatch(result[i])
        if err != nil {
            return nil, err
        }
        sigSet.Join(set)
    }
    v, err := sigSet.Verify()
    if err != nil {
        return nil, errors.Wrap(err, "block signature verification error")
    }
    if !v {
        return nil, errors.New("batch block signature verification failed")
    }
    return result, nil
}

func (bs verifier) blockSignatureBatch(b blocks.ROBlock) (*bls.SignatureBatch, error) {
    pidx := b.Block().ProposerIndex()
    if pidx > bs.maxVal {
        return nil, errProposerIndexTooHigh
    }
    dom, err := bs.domainAtEpoch(slots.ToEpoch(b.Block().Slot()))
    if err != nil {
        return nil, err
    }
    sig := b.Signature()
    pk := bs.keys[pidx][:]
    root := b.Root()
    rootF := func() ([32]byte, error) { return root, nil }
    return signing.BlockSignatureBatch(pk, sig[:], dom, rootF)
}

func (bs verifier) domainAtEpoch(e primitives.Epoch) ([]byte, error) {
    fork, err := bs.fsched.VersionForEpoch(e)
    if err != nil {
        return nil, err
    }
    d, ok := bs.forkDomains[fork]
    if !ok {
        return nil, errors.Wrapf(errUnknownDomain, "fork version=%#x, epoch=%d", fork, e)
    }
    return d, nil
}

func newBackfillVerifier(st state.BeaconState) (*verifier, error) {
    fsched := forks.NewOrderedSchedule(params.BeaconConfig())
    v := &verifier{
        keys:        st.PublicKeys(),
        vr:          st.GenesisValidatorsRoot(),
        fsched:      fsched,
        dt:          params.BeaconConfig().DomainBeaconProposer,
        forkDomains: make(map[[4]byte][]byte, len(fsched)),
    }
    v.maxVal = primitives.ValidatorIndex(len(v.keys) - 1)
    // Precompute signing domains for known forks at startup.
    for _, entry := range fsched {
        d, err := signing.ComputeDomain(v.dt, entry.Version[:], v.vr)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.Version)
        }
        v.forkDomains[entry.Version] = d
    }
    return v, nil
}
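Note that verify defers all BLS work to a single aggregate check: each block contributes one entry to the bls.SignatureBatch, and the parent-root linkage test runs before any cryptography. A sketch of how the pieces compose, assuming the origin state comes from StatusUpdater.originState shown earlier and the raw batch from a peer:

// Sketch only, not part of the diff: composing newBackfillVerifier and verify.
func verifyBatch(st state.BeaconState, blks []interfaces.ReadOnlySignedBeaconBlock) (VerifiedROBlocks, error) {
    v, err := newBackfillVerifier(st) // precomputes one signing domain per fork
    if err != nil {
        return nil, err
    }
    // verify checks parent-root linkage pairwise, then verifies every block
    // signature in one batched BLS operation.
    return v.verify(blks)
}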
59 beacon-chain/sync/backfill/worker.go Normal file
@@ -0,0 +1,59 @@
package backfill

import (
    "context"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
    log "github.com/sirupsen/logrus"
)

type workerId int

type p2pWorker struct {
    id   workerId
    todo chan batch
    done chan batch
    p2p  p2p.P2P
    v    *verifier
    c    *startup.Clock
}

func (w *p2pWorker) run(ctx context.Context) {
    for {
        select {
        case b := <-w.todo:
            log.WithFields(b.logFields()).WithField("backfill_worker", w.id).Debug("Backfill worker received batch.")
            w.done <- w.handle(ctx, b)
        case <-ctx.Done():
            log.WithField("backfill_worker", w.id).Info("Backfill worker exiting after context canceled.")
            return
        }
    }
}

func (w *p2pWorker) handle(ctx context.Context, b batch) batch {
    results, err := sync.SendBeaconBlocksByRangeRequest(ctx, w.c, w.p2p, b.pid, b.request(), nil)
    if err != nil {
        return b.withRetryableError(err)
    }
    vb, err := w.v.verify(results)
    if err != nil {
        return b.withRetryableError(err)
    }
    b.state = batchImportable
    b.results = vb
    return b
}

func newP2pWorker(id workerId, p p2p.P2P, todo, done chan batch, c *startup.Clock, v *verifier) *p2pWorker {
    return &p2pWorker{
        id:   id,
        todo: todo,
        done: done,
        p2p:  p,
        v:    v,
        c:    c,
    }
}
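Because every worker competes on the same todo channel, batches are load-balanced across workers with no extra bookkeeping. A sketch of the fan-out wiring, assuming a caller that owns the channels and chooses the pool size; only newP2pWorker and run come from the file above:

// Sketch only, not part of the diff: starting a pool of backfill workers.
func startWorkers(ctx context.Context, n int, p p2p.P2P, c *startup.Clock, v *verifier) (chan<- batch, <-chan batch) {
    todo := make(chan batch)
    done := make(chan batch)
    for i := 0; i < n; i++ {
        // Workers share todo/done, so whichever worker is idle picks up the next batch.
        go newP2pWorker(workerId(i), p, todo, done, c, v).run(ctx)
    }
    return todo, done
}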
@@ -139,7 +139,7 @@ func (s *Service) writeBlockBatchToStream(ctx context.Context, batch blockBatch,
            continue
        }
        if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
            log.WithError(chunkErr).Error("Could not send a chunked response")
            log.WithError(chunkErr).Debug("Could not send a chunked response")
            return chunkErr
        }
    }

@@ -8,7 +8,9 @@ go_library(
    deps = [
        "//beacon-chain/node:go_default_library",
        "//beacon-chain/sync/genesis:go_default_library",
        "//cmd/beacon-chain/sync/checkpoint:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
    ],
)

@@ -4,6 +4,8 @@ import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/node"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/genesis"
    "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/sync/checkpoint"
    log "github.com/sirupsen/logrus"
    "github.com/urfave/cli/v2"
)

@@ -28,6 +30,10 @@ var (
func BeaconNodeOptions(c *cli.Context) (node.Option, error) {
    statePath := c.Path(StatePath.Name)
    remoteURL := c.String(BeaconAPIURL.Name)
    if remoteURL == "" && c.String(checkpoint.RemoteURL.Name) != "" {
        log.Infof("using checkpoint sync url %s for value in --%s flag", c.String(checkpoint.RemoteURL.Name), BeaconAPIURL.Name)
        remoteURL = c.String(checkpoint.RemoteURL.Name)
    }
    if remoteURL != "" {
        return func(node *node.BeaconNode) error {
            var err error

@@ -105,7 +105,7 @@ func (r *configset) replace(cfg *BeaconChainConfig) error {

func (r *configset) replaceWithUndo(cfg *BeaconChainConfig) (func() error, error) {
    name := cfg.ConfigName
    prev := r.nameToConfig[name]
    prev := r.nameToConfig[name].Copy()
    if err := r.replace(cfg); err != nil {
        return nil, err
    }
@@ -134,7 +134,7 @@ func (r *configset) setActive(c *BeaconChainConfig) error {
}

func (r *configset) setActiveWithUndo(c *BeaconChainConfig) (func() error, error) {
    active := r.active
    active := r.active.Copy()
    r.active = c
    undo, err := r.replaceWithUndo(c)
    if err != nil {

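Both hunks make the same fix: snapshot with Copy() before mutating, so the undo path restores an independent value instead of a pointer that later writes can reach through. A contrived sketch of the bug class, with hypothetical names:

// Sketch only, not part of the diff: the aliasing hazard the Copy() calls remove.
type holder struct{ active *BeaconChainConfig }

func (h *holder) setWithUndo(c *BeaconChainConfig) func() {
    prev := h.active // hazard: prev and the rest of the program share one config object
    // fix, as in the hunks above: prev := h.active.Copy()
    h.active = c
    return func() { h.active = prev } // restores a value others may have mutated in place
}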
@@ -11,15 +11,30 @@ func SetupTestConfigCleanup(t testing.TB) {
    temp := configs.getActive().Copy()
    undo, err := SetActiveWithUndo(temp)
    if err != nil {
        t.Error(err)
        t.Fatal(err)
    }
    prevNetworkCfg := networkConfig.Copy()
    t.Cleanup(func() {
        mainnetBeaconConfig = prevDefaultBeaconConfig
        err = undo()
        if err != nil {
            t.Error(err)
            t.Fatal(err)
        }
        networkConfig = prevNetworkCfg
    })
}

// SetActiveTestCleanup sets an active config,
// and adds a test cleanup hook to revert to the default config after the test completes.
func SetActiveTestCleanup(t *testing.T, cfg *BeaconChainConfig) {
    undo, err := SetActiveWithUndo(cfg)
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        err = undo()
        if err != nil {
            t.Fatal(err)
        }
    })
}

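A sketch of the intended call pattern for the new helper, with a hypothetical test; MinimalSpecConfig is assumed to be an available config constructor:

// Sketch only, not part of the diff: typical use of SetActiveTestCleanup.
func TestWithMinimalConfig(t *testing.T) {
    // Activates cfg now and registers a cleanup that reverts it; per the
    // t.Error -> t.Fatal change above, a failed revert aborts the test run.
    params.SetActiveTestCleanup(t, params.MinimalSpecConfig())
    // ... test body runs against the minimal config ...
}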
@@ -737,6 +737,7 @@ func (s sorter) Swap(i, j int) {
    s.set.Signatures[i], s.set.Signatures[j] = s.set.Signatures[j], s.set.Signatures[i]
    s.set.PublicKeys[i], s.set.PublicKeys[j] = s.set.PublicKeys[j], s.set.PublicKeys[i]
    s.set.Messages[i], s.set.Messages[j] = s.set.Messages[j], s.set.Messages[i]
    s.set.Descriptions[i], s.set.Descriptions[j] = s.set.Descriptions[j], s.set.Descriptions[i]
}

func (s sorter) Less(i, j int) bool {

754 deps.bzl
@@ -463,8 +463,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_census_instrumentation_opencensus_proto",
        importpath = "github.com/census-instrumentation/opencensus-proto",
        sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
        version = "v0.2.1",
        sum = "h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=",
        version = "v0.4.1",
    )
    go_repository(
        name = "com_github_cespare_cp",
@@ -552,14 +552,14 @@ def prysm_deps():
    go_repository(
        name = "com_github_cncf_udpa_go",
        importpath = "github.com/cncf/udpa/go",
        sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=",
        version = "v0.0.0-20201120205902-5459f2c99403",
        sum = "h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=",
        version = "v0.0.0-20220112060539-c52dc94e7fbe",
    )
    go_repository(
        name = "com_github_cncf_xds_go",
        importpath = "github.com/cncf/xds/go",
        sum = "h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c=",
        version = "v0.0.0-20210312221358-fbca930ec8ed",
        sum = "h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA=",
        version = "v0.0.0-20230105202645-06c439db220b",
    )

    go_repository(
|
||||
go_repository(
|
||||
name = "com_github_envoyproxy_go_control_plane",
|
||||
importpath = "github.com/envoyproxy/go-control-plane",
|
||||
sum = "h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s=",
|
||||
version = "v0.9.9-0.20210512163311-63b5d3c536b0",
|
||||
sum = "h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY=",
|
||||
version = "v0.10.3",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_envoyproxy_protoc_gen_validate",
|
||||
importpath = "github.com/envoyproxy/protoc-gen-validate",
|
||||
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
|
||||
version = "v0.1.0",
|
||||
sum = "h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY=",
|
||||
version = "v0.9.1",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_etcd_io_bbolt",
|
||||
@@ -1451,8 +1451,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_golang_glog",
|
||||
importpath = "github.com/golang/glog",
|
||||
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
|
||||
version = "v0.0.0-20160126235308-23def4e6c14b",
|
||||
sum = "h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=",
|
||||
version = "v1.0.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_golang_groupcache",
|
||||
@@ -1694,8 +1694,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_grpc_ecosystem_grpc_gateway",
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway",
|
||||
sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=",
|
||||
version = "v1.16.0",
|
||||
sum = "h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=",
|
||||
version = "v1.9.5",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_grpc_ecosystem_grpc_gateway_v2",
|
||||
@@ -4218,15 +4218,105 @@ def prysm_deps():
    go_repository(
        name = "com_google_cloud_go",
        importpath = "cloud.google.com/go",
        sum = "h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8=",
        version = "v0.65.0",
        sum = "h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=",
        version = "v0.105.0",
    )
    go_repository(
        name = "com_google_cloud_go_accessapproval",
        importpath = "cloud.google.com/go/accessapproval",
        sum = "h1:/nTivgnV/n1CaAeo+ekGexTYUsKEU9jUVkoY5359+3Q=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_accesscontextmanager",
        importpath = "cloud.google.com/go/accesscontextmanager",
        sum = "h1:CFhNhU7pcD11cuDkQdrE6PQJgv0EXNKNv06jIzbLlCU=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_aiplatform",
        importpath = "cloud.google.com/go/aiplatform",
        sum = "h1:DBi3Jk9XjCJ4pkkLM4NqKgj3ozUL1wq4l+d3/jTGXAI=",
        version = "v1.27.0",
    )
    go_repository(
        name = "com_google_cloud_go_analytics",
        importpath = "cloud.google.com/go/analytics",
        sum = "h1:NKw6PpQi6V1O+KsjuTd+bhip9d0REYu4NevC45vtGp8=",
        version = "v0.12.0",
    )
    go_repository(
        name = "com_google_cloud_go_apigateway",
        importpath = "cloud.google.com/go/apigateway",
        sum = "h1:IIoXKR7FKrEAQhMTz5hK2wiDz2WNFHS7eVr/L1lE/rM=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_apigeeconnect",
        importpath = "cloud.google.com/go/apigeeconnect",
        sum = "h1:AONoTYJviyv1vS4IkvWzq69gEVdvHx35wKXc+e6wjZQ=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_appengine",
        importpath = "cloud.google.com/go/appengine",
        sum = "h1:lmG+O5oaR9xNwaRBwE2XoMhwQHsHql5IoiGr1ptdDwU=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_area120",
        importpath = "cloud.google.com/go/area120",
        sum = "h1:TCMhwWEWhCn8d44/Zs7UCICTWje9j3HuV6nVGMjdpYw=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_artifactregistry",
        importpath = "cloud.google.com/go/artifactregistry",
        sum = "h1:3d0LRAU1K6vfqCahhl9fx2oGHcq+s5gftdix4v8Ibrc=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_asset",
        importpath = "cloud.google.com/go/asset",
        sum = "h1:aCrlaLGJWTODJX4G56ZYzJefITKEWNfbjjtHSzWpxW0=",
        version = "v1.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_assuredworkloads",
        importpath = "cloud.google.com/go/assuredworkloads",
        sum = "h1:hhIdCOowsT1GG5eMCIA0OwK6USRuYTou/1ZeNxCSRtA=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_automl",
        importpath = "cloud.google.com/go/automl",
        sum = "h1:BMioyXSbg7d7xLibn47cs0elW6RT780IUWr42W8rp2Q=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_baremetalsolution",
        importpath = "cloud.google.com/go/baremetalsolution",
        sum = "h1:g9KO6SkakcYPcc/XjAzeuUrEOXlYPnMpuiaywYaGrmQ=",
        version = "v0.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_batch",
        importpath = "cloud.google.com/go/batch",
        sum = "h1:1jvEBY55OH4Sd2FxEXQfxGExFWov1A/IaRe+Z5Z71Fw=",
        version = "v0.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_beyondcorp",
        importpath = "cloud.google.com/go/beyondcorp",
        sum = "h1:w+4kThysgl0JiKshi2MKDCg2NZgOyqOI0wq2eBZyrzA=",
        version = "v0.3.0",
    )

    go_repository(
        name = "com_google_cloud_go_bigquery",
        importpath = "cloud.google.com/go/bigquery",
        sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=",
        version = "v1.8.0",
        sum = "h1:Wi4dITi+cf9VYp4VH2T9O41w0kCW0uQTELq2Z6tukN0=",
        version = "v1.44.0",
    )
    go_repository(
        name = "com_google_cloud_go_bigtable",
@@ -4234,38 +4324,636 @@ def prysm_deps():
        sum = "h1:F4cCmA4nuV84V5zYQ3MKY+M1Cw1avHDuf3S/LcZPA9c=",
        version = "v1.2.0",
    )
    go_repository(
        name = "com_google_cloud_go_billing",
        importpath = "cloud.google.com/go/billing",
        sum = "h1:Xkii76HWELHwBtkQVZvqmSo9GTr0O+tIbRNnMcGdlg4=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_binaryauthorization",
        importpath = "cloud.google.com/go/binaryauthorization",
        sum = "h1:pL70vXWn9TitQYXBWTK2abHl2JHLwkFRjYw6VflRqEA=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_certificatemanager",
        importpath = "cloud.google.com/go/certificatemanager",
        sum = "h1:tzbR4UHBbgsewMWUD93JHi8EBi/gHBoSAcY1/sThFGk=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_channel",
        importpath = "cloud.google.com/go/channel",
        sum = "h1:pNuUlZx0Jb0Ts9P312bmNMuH5IiFWIR4RUtLb70Ke5s=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_cloudbuild",
        importpath = "cloud.google.com/go/cloudbuild",
        sum = "h1:TAAmCmAlOJ4uNBu6zwAjwhyl/7fLHHxIEazVhr3QBbQ=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_clouddms",
        importpath = "cloud.google.com/go/clouddms",
        sum = "h1:UhzHIlgFfMr6luVYVNydw/pl9/U5kgtjCMJHnSvoVws=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_cloudtasks",
        importpath = "cloud.google.com/go/cloudtasks",
        sum = "h1:faUiUgXjW8yVZ7XMnKHKm1WE4OldPBUWWfIRN/3z1dc=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_compute",
        importpath = "cloud.google.com/go/compute",
        sum = "h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=",
        version = "v1.15.1",
    )

    go_repository(
        name = "com_google_cloud_go_compute_metadata",
        importpath = "cloud.google.com/go/compute/metadata",
        sum = "h1:nBbNSZyDpkNlo3DepaaLKVuO7ClyifSAmNloSCZrHnQ=",
        version = "v0.2.0",
        sum = "h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=",
        version = "v0.2.3",
    )
    go_repository(
        name = "com_google_cloud_go_contactcenterinsights",
        importpath = "cloud.google.com/go/contactcenterinsights",
        sum = "h1:tTQLI/ZvguUf9Hv+36BkG2+/PeC8Ol1q4pBW+tgCx0A=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_container",
        importpath = "cloud.google.com/go/container",
        sum = "h1:nbEK/59GyDRKKlo1SqpohY1TK8LmJ2XNcvS9Gyom2A0=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_containeranalysis",
        importpath = "cloud.google.com/go/containeranalysis",
        sum = "h1:2824iym832ljKdVpCBnpqm5K94YT/uHTVhNF+dRTXPI=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_datacatalog",
        importpath = "cloud.google.com/go/datacatalog",
        sum = "h1:6kZ4RIOW/uT7QWC5SfPfq/G8sYzr/v+UOmOAxy4Z1TE=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_dataflow",
        importpath = "cloud.google.com/go/dataflow",
        sum = "h1:CW3541Fm7KPTyZjJdnX6NtaGXYFn5XbFC5UcjgALKvU=",
        version = "v0.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_dataform",
        importpath = "cloud.google.com/go/dataform",
        sum = "h1:vLwowLF2ZB5J5gqiZCzv076lDI/Rd7zYQQFu5XO1PSg=",
        version = "v0.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_datafusion",
        importpath = "cloud.google.com/go/datafusion",
        sum = "h1:j5m2hjWovTZDTQak4MJeXAR9yN7O+zMfULnjGw/OOLg=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_datalabeling",
        importpath = "cloud.google.com/go/datalabeling",
        sum = "h1:dp8jOF21n/7jwgo/uuA0RN8hvLcKO4q6s/yvwevs2ZM=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_dataplex",
        importpath = "cloud.google.com/go/dataplex",
        sum = "h1:cNxeA2DiWliQGi21kPRqnVeQ5xFhNoEjPRt1400Pm8Y=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_dataproc",
        importpath = "cloud.google.com/go/dataproc",
        sum = "h1:gVOqNmElfa6n/ccG/QDlfurMWwrK3ezvy2b2eDoCmS0=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_dataqna",
        importpath = "cloud.google.com/go/dataqna",
        sum = "h1:gx9jr41ytcA3dXkbbd409euEaWtofCVXYBvJz3iYm18=",
        version = "v0.6.0",
    )

    go_repository(
        name = "com_google_cloud_go_datastore",
        importpath = "cloud.google.com/go/datastore",
        sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=",
        version = "v1.1.0",
        sum = "h1:4siQRf4zTiAVt/oeH4GureGkApgb2vtPQAtOmhpqQwE=",
        version = "v1.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_datastream",
        importpath = "cloud.google.com/go/datastream",
        sum = "h1:PgIgbhedBtYBU6POGXFMn2uSl9vpqubc3ewTNdcU8Mk=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_deploy",
        importpath = "cloud.google.com/go/deploy",
        sum = "h1:kI6dxt8Ml0is/x7YZjLveTvR7YPzXAUD/8wQZ2nH5zA=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_dialogflow",
        importpath = "cloud.google.com/go/dialogflow",
        sum = "h1:HYHVOkoxQ9bSfNIelSZYNAtUi4CeSrCnROyOsbOqPq8=",
        version = "v1.19.0",
    )
    go_repository(
        name = "com_google_cloud_go_dlp",
        importpath = "cloud.google.com/go/dlp",
        sum = "h1:9I4BYeJSVKoSKgjr70fLdRDumqcUeVmHV4fd5f9LR6Y=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_documentai",
        importpath = "cloud.google.com/go/documentai",
        sum = "h1:jfq09Fdjtnpnmt/MLyf6A3DM3ynb8B2na0K+vSXvpFM=",
        version = "v1.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_domains",
        importpath = "cloud.google.com/go/domains",
        sum = "h1:pu3JIgC1rswIqi5romW0JgNO6CTUydLYX8zyjiAvO1c=",
        version = "v0.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_edgecontainer",
        importpath = "cloud.google.com/go/edgecontainer",
        sum = "h1:hd6J2n5dBBRuAqnNUEsKWrp6XNPKsaxwwIyzOPZTokk=",
        version = "v0.2.0",
    )
    go_repository(
        name = "com_google_cloud_go_errorreporting",
        importpath = "cloud.google.com/go/errorreporting",
        sum = "h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=",
        version = "v0.3.0",
    )
    go_repository(
        name = "com_google_cloud_go_essentialcontacts",
        importpath = "cloud.google.com/go/essentialcontacts",
        sum = "h1:b6csrQXCHKQmfo9h3dG/pHyoEh+fQG1Yg78a53LAviY=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_eventarc",
        importpath = "cloud.google.com/go/eventarc",
        sum = "h1:AgCqrmMMIcel5WWKkzz5EkCUKC3Rl5LNMMYsS+LvsI0=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_filestore",
        importpath = "cloud.google.com/go/filestore",
        sum = "h1:yjKOpzvqtDmL5AXbKttLc8j0hL20kuC1qPdy5HPcxp0=",
        version = "v1.4.0",
    )

    go_repository(
        name = "com_google_cloud_go_firestore",
        importpath = "cloud.google.com/go/firestore",
        sum = "h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=",
        version = "v1.1.0",
        sum = "h1:IBlRyxgGySXu5VuW0RgGFlTtLukSnNkpDiEOMkQkmpA=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_functions",
        importpath = "cloud.google.com/go/functions",
        sum = "h1:35tgv1fQOtvKqH/uxJMzX3w6usneJ0zXpsFr9KAVhNE=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_gaming",
        importpath = "cloud.google.com/go/gaming",
        sum = "h1:97OAEQtDazAJD7yh/kvQdSCQuTKdR0O+qWAJBZJ4xiA=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_gkebackup",
        importpath = "cloud.google.com/go/gkebackup",
        sum = "h1:4K+jiv4ocqt1niN8q5Imd8imRoXBHTrdnJVt/uFFxF4=",
        version = "v0.3.0",
    )
    go_repository(
        name = "com_google_cloud_go_gkeconnect",
        importpath = "cloud.google.com/go/gkeconnect",
        sum = "h1:zAcvDa04tTnGdu6TEZewaLN2tdMtUOJJ7fEceULjguA=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_gkehub",
        importpath = "cloud.google.com/go/gkehub",
        sum = "h1:JTcTaYQRGsVm+qkah7WzHb6e9sf1C0laYdRPn9aN+vg=",
        version = "v0.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_gkemulticloud",
        importpath = "cloud.google.com/go/gkemulticloud",
        sum = "h1:8F1NhJj8ucNj7lK51UZMtAjSWTgP1zO18XF6vkfiPPU=",
        version = "v0.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_gsuiteaddons",
        importpath = "cloud.google.com/go/gsuiteaddons",
        sum = "h1:TGT2oGmO5q3VH6SjcrlgPUWI0njhYv4kywLm6jag0to=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_iam",
        importpath = "cloud.google.com/go/iam",
        sum = "h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=",
        version = "v0.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_iap",
        importpath = "cloud.google.com/go/iap",
        sum = "h1:BGEXovwejOCt1zDk8hXq0bOhhRu9haXKWXXXp2B4wBM=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_ids",
        importpath = "cloud.google.com/go/ids",
        sum = "h1:LncHK4HHucb5Du310X8XH9/ICtMwZ2PCfK0ScjWiJoY=",
        version = "v1.2.0",
    )
    go_repository(
        name = "com_google_cloud_go_iot",
        importpath = "cloud.google.com/go/iot",
        sum = "h1:Y9+oZT9jD4GUZzORXTU45XsnQrhxmDT+TFbPil6pRVQ=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_kms",
        importpath = "cloud.google.com/go/kms",
        sum = "h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg=",
        version = "v1.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_language",
        importpath = "cloud.google.com/go/language",
        sum = "h1:3Wa+IUMamL4JH3Zd3cDZUHpwyqplTACt6UZKRD2eCL4=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_lifesciences",
        importpath = "cloud.google.com/go/lifesciences",
        sum = "h1:tIqhivE2LMVYkX0BLgG7xL64oNpDaFFI7teunglt1tI=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_logging",
        importpath = "cloud.google.com/go/logging",
        sum = "h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI=",
        version = "v1.6.1",
    )
    go_repository(
        name = "com_google_cloud_go_longrunning",
        importpath = "cloud.google.com/go/longrunning",
        sum = "h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=",
        version = "v0.3.0",
    )
    go_repository(
        name = "com_google_cloud_go_managedidentities",
        importpath = "cloud.google.com/go/managedidentities",
        sum = "h1:3Kdajn6X25yWQFhFCErmKSYTSvkEd3chJROny//F1A0=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_maps",
        importpath = "cloud.google.com/go/maps",
        sum = "h1:kLReRbclTgJefw2fcCbdLPLhPj0U6UUWN10ldG8sdOU=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_google_cloud_go_mediatranslation",
        importpath = "cloud.google.com/go/mediatranslation",
        sum = "h1:qAJzpxmEX+SeND10Y/4868L5wfZpo4Y3BIEnIieP4dk=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_memcache",
        importpath = "cloud.google.com/go/memcache",
        sum = "h1:yLxUzJkZVSH2kPaHut7k+7sbIBFpvSh1LW9qjM2JDjA=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_metastore",
        importpath = "cloud.google.com/go/metastore",
        sum = "h1:3KcShzqWdqxrDEXIBWpYJpOOrgpDj+HlBi07Grot49Y=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_monitoring",
        importpath = "cloud.google.com/go/monitoring",
        sum = "h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_networkconnectivity",
        importpath = "cloud.google.com/go/networkconnectivity",
        sum = "h1:BVdIKaI68bihnXGdCVL89Jsg9kq2kg+II30fjVqo62E=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_networkmanagement",
        importpath = "cloud.google.com/go/networkmanagement",
        sum = "h1:mDHA3CDW00imTvC5RW6aMGsD1bH+FtKwZm/52BxaiMg=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_networksecurity",
        importpath = "cloud.google.com/go/networksecurity",
        sum = "h1:qDEX/3sipg9dS5JYsAY+YvgTjPR63cozzAWop8oZS94=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_notebooks",
        importpath = "cloud.google.com/go/notebooks",
        sum = "h1:AC8RPjNvel3ExgXjO1YOAz+teg9+j+89TNxa7pIZfww=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_optimization",
        importpath = "cloud.google.com/go/optimization",
        sum = "h1:7PxOq9VTT7TMib/6dMoWpMvWS2E4dJEvtYzjvBreaec=",
        version = "v1.2.0",
    )
    go_repository(
        name = "com_google_cloud_go_orchestration",
        importpath = "cloud.google.com/go/orchestration",
        sum = "h1:39d6tqvNjd/wsSub1Bn4cEmrYcet5Ur6xpaN+SxOxtY=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_orgpolicy",
        importpath = "cloud.google.com/go/orgpolicy",
        sum = "h1:erF5PHqDZb6FeFrUHiYj2JK2BMhsk8CyAg4V4amJ3rE=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_osconfig",
        importpath = "cloud.google.com/go/osconfig",
        sum = "h1:NO0RouqCOM7M2S85Eal6urMSSipWwHU8evzwS+siqUI=",
        version = "v1.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_oslogin",
        importpath = "cloud.google.com/go/oslogin",
        sum = "h1:pKGDPfeZHDybtw48WsnVLjoIPMi9Kw62kUE5TXCLCN4=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_phishingprotection",
        importpath = "cloud.google.com/go/phishingprotection",
        sum = "h1:OrwHLSRSZyaiOt3tnY33dsKSedxbMzsXvqB21okItNQ=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_policytroubleshooter",
        importpath = "cloud.google.com/go/policytroubleshooter",
        sum = "h1:NQklJuOUoz1BPP+Epjw81COx7IISWslkZubz/1i0UN8=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_privatecatalog",
        importpath = "cloud.google.com/go/privatecatalog",
        sum = "h1:Vz86uiHCtNGm1DeC32HeG2VXmOq5JRYA3VRPf8ZEcSg=",
        version = "v0.6.0",
    )

    go_repository(
        name = "com_google_cloud_go_pubsub",
        importpath = "cloud.google.com/go/pubsub",
        sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=",
        version = "v1.3.1",
        sum = "h1:q+J/Nfr6Qx4RQeu3rJcnN48SNC0qzlYzSeqkPq93VHs=",
        version = "v1.27.1",
    )
    go_repository(
        name = "com_google_cloud_go_pubsublite",
        importpath = "cloud.google.com/go/pubsublite",
        sum = "h1:iqrD8vp3giTb7hI1q4TQQGj77cj8zzgmMPsTZtLnprM=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_recaptchaenterprise_v2",
        importpath = "cloud.google.com/go/recaptchaenterprise/v2",
        sum = "h1:UqzFfb/WvhwXGDF1eQtdHLrmni+iByZXY4h3w9Kdyv8=",
        version = "v2.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_recommendationengine",
        importpath = "cloud.google.com/go/recommendationengine",
        sum = "h1:6w+WxPf2LmUEqX0YyvfCoYb8aBYOcbIV25Vg6R0FLGw=",
        version = "v0.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_recommender",
        importpath = "cloud.google.com/go/recommender",
        sum = "h1:9kMZQGeYfcOD/RtZfcNKGKtoex3DdoB4zRgYU/WaIwE=",
        version = "v1.8.0",
    )
    go_repository(
        name = "com_google_cloud_go_redis",
        importpath = "cloud.google.com/go/redis",
        sum = "h1:/zTwwBKIAD2DEWTrXZp8WD9yD/gntReF/HkPssVYd0U=",
        version = "v1.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_resourcemanager",
        importpath = "cloud.google.com/go/resourcemanager",
        sum = "h1:NDao6CHMwEZIaNsdWy+tuvHaavNeGP06o1tgrR0kLvU=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_resourcesettings",
        importpath = "cloud.google.com/go/resourcesettings",
        sum = "h1:eTzOwB13WrfF0kuzG2ZXCfB3TLunSHBur4s+HFU6uSM=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_retail",
        importpath = "cloud.google.com/go/retail",
        sum = "h1:N9fa//ecFUOEPsW/6mJHfcapPV0wBSwIUwpVZB7MQ3o=",
        version = "v1.11.0",
    )
    go_repository(
        name = "com_google_cloud_go_run",
        importpath = "cloud.google.com/go/run",
        sum = "h1:AWPuzU7Xtaj3Jf+QarDWIs6AJ5hM1VFQ+F6Q+VZ6OT4=",
        version = "v0.3.0",
    )
    go_repository(
        name = "com_google_cloud_go_scheduler",
        importpath = "cloud.google.com/go/scheduler",
        sum = "h1:K/mxOewgHGeKuATUJNGylT75Mhtjmx1TOkKukATqMT8=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_secretmanager",
        importpath = "cloud.google.com/go/secretmanager",
        sum = "h1:xE6uXljAC1kCR8iadt9+/blg1fvSbmenlsDN4fT9gqw=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_security",
        importpath = "cloud.google.com/go/security",
        sum = "h1:KSKzzJMyUoMRQzcz7azIgqAUqxo7rmQ5rYvimMhikqg=",
        version = "v1.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_securitycenter",
        importpath = "cloud.google.com/go/securitycenter",
        sum = "h1:QTVtk/Reqnx2bVIZtJKm1+mpfmwRwymmNvlaFez7fQY=",
        version = "v1.16.0",
    )
    go_repository(
        name = "com_google_cloud_go_servicecontrol",
        importpath = "cloud.google.com/go/servicecontrol",
        sum = "h1:ImIzbOu6y4jL6ob65I++QzvqgFaoAKgHOG+RU9/c4y8=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_servicedirectory",
        importpath = "cloud.google.com/go/servicedirectory",
        sum = "h1:f7M8IMcVzO3T425AqlZbP3yLzeipsBHtRza8vVFYMhQ=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_servicemanagement",
        importpath = "cloud.google.com/go/servicemanagement",
        sum = "h1:TpkCO5M7dhKSy1bKUD9o/sSEW/U1Gtx7opA1fsiMx0c=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_serviceusage",
        importpath = "cloud.google.com/go/serviceusage",
        sum = "h1:b0EwJxPJLpavSljMQh0RcdHsUrr5DQ+Nelt/3BAs5ro=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_shell",
        importpath = "cloud.google.com/go/shell",
        sum = "h1:b1LFhFBgKsG252inyhtmsUUZwchqSz3WTvAIf3JFo4g=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_spanner",
        importpath = "cloud.google.com/go/spanner",
        sum = "h1:NvdTpRwf7DTegbfFdPjAWyD7bOVu0VeMqcvR9aCQCAc=",
        version = "v1.41.0",
    )
    go_repository(
        name = "com_google_cloud_go_speech",
        importpath = "cloud.google.com/go/speech",
        sum = "h1:yK0ocnFH4Wsf0cMdUyndJQ/hPv02oTJOxzi6AgpBy4s=",
        version = "v1.9.0",
    )

    go_repository(
        name = "com_google_cloud_go_storage",
        importpath = "cloud.google.com/go/storage",
        sum = "h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=",
        version = "v1.10.0",
    )
    go_repository(
        name = "com_google_cloud_go_storagetransfer",
        importpath = "cloud.google.com/go/storagetransfer",
        sum = "h1:fUe3OydbbvHcAYp07xY+2UpH4AermGbmnm7qdEj3tGE=",
        version = "v1.6.0",
    )
    go_repository(
        name = "com_google_cloud_go_talent",
        importpath = "cloud.google.com/go/talent",
        sum = "h1:MrekAGxLqAeAol4Sc0allOVqUGO8j+Iim8NMvpiD7tM=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_texttospeech",
        importpath = "cloud.google.com/go/texttospeech",
        sum = "h1:ccPiHgTewxgyAeCWgQWvZvrLmbfQSFABTMAfrSPLPyY=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_tpu",
        importpath = "cloud.google.com/go/tpu",
        sum = "h1:ztIdKoma1Xob2qm6QwNh4Xi9/e7N3IfvtwG5AcNsj1g=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_trace",
        importpath = "cloud.google.com/go/trace",
        sum = "h1:qO9eLn2esajC9sxpqp1YKX37nXC3L4BfGnPS0Cx9dYo=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_translate",
        importpath = "cloud.google.com/go/translate",
        sum = "h1:AOYOH3MspzJ/bH1YXzB+xTE8fMpn3mwhLjugwGXvMPI=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_video",
        importpath = "cloud.google.com/go/video",
        sum = "h1:ttlvO4J5c1VGq6FkHqWPD/aH6PfdxujHt+muTJlW1Zk=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_videointelligence",
        importpath = "cloud.google.com/go/videointelligence",
        sum = "h1:RPFgVVXbI2b5vnrciZjtsUgpNKVtHO/WIyXUhEfuMhA=",
        version = "v1.9.0",
    )
    go_repository(
        name = "com_google_cloud_go_vision_v2",
        importpath = "cloud.google.com/go/vision/v2",
        sum = "h1:TQHxRqvLMi19azwm3qYuDbEzZWmiKJNTpGbkNsfRCik=",
        version = "v2.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_vmmigration",
        importpath = "cloud.google.com/go/vmmigration",
        sum = "h1:A2Tl2ZmwMRpvEmhV2ibISY85fmQR+Y5w9a0PlRz5P3s=",
        version = "v1.3.0",
    )
    go_repository(
        name = "com_google_cloud_go_vmwareengine",
        importpath = "cloud.google.com/go/vmwareengine",
        sum = "h1:JMPZaOT/gIUxVlTqSl/QQ32Y2k+r0stNeM1NSqhVP9o=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_google_cloud_go_vpcaccess",
        importpath = "cloud.google.com/go/vpcaccess",
        sum = "h1:woHXXtnW8b9gLFdWO9HLPalAddBQ9V4LT+1vjKwR3W8=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_google_cloud_go_webrisk",
        importpath = "cloud.google.com/go/webrisk",
        sum = "h1:ypSnpGlJnZSXbN9a13PDmAYvVekBLnGKxQ3Q9SMwnYY=",
        version = "v1.7.0",
    )
    go_repository(
        name = "com_google_cloud_go_websecurityscanner",
        importpath = "cloud.google.com/go/websecurityscanner",
        sum = "h1:y7yIFg/h/mO+5Y5aCOtVAnpGUOgqCH5rXQ2Oc8Oq2+g=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_google_cloud_go_workflows",
        importpath = "cloud.google.com/go/workflows",
        sum = "h1:7Chpin9p50NTU8Tb7qk+I11U/IwVXmDhEoSsdccvInE=",
        version = "v1.9.0",
    )

    go_repository(
        name = "com_lukechampine_blake3",
        importpath = "lukechampine.com/blake3",
@@ -4581,12 +5269,6 @@ def prysm_deps():
        sum = "h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI=",
        version = "v0.2.1",
    )
    go_repository(
        name = "io_opentelemetry_go_proto_otlp",
        importpath = "go.opentelemetry.io/proto/otlp",
        sum = "h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=",
        version = "v0.7.0",
    )

    go_repository(
        name = "io_rsc_binaryregexp",
@@ -4660,16 +5342,16 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "org_golang_google_genproto",
|
||||
importpath = "google.golang.org/genproto",
|
||||
sum = "h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4=",
|
||||
version = "v0.0.0-20210624195500-8bfb893ecb84",
|
||||
sum = "h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=",
|
||||
version = "v0.0.0-20230110181048-76db0878b65f",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_google_grpc",
|
||||
build_file_proto_mode = "disable",
|
||||
importpath = "google.golang.org/grpc",
|
||||
sum = "h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=",
|
||||
version = "v1.40.0",
|
||||
sum = "h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=",
|
||||
version = "v1.53.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc",
|
||||
|
||||
@@ -71,6 +71,10 @@ func SafeCopyRootAtIndex(input [][]byte, idx uint64) ([]byte, error) {
// SafeCopyBytes will copy and return a non-nil byte slice, otherwise it returns nil.
func SafeCopyBytes(cp []byte) []byte {
	if cp != nil {
		if len(cp) == 32 {
			copied := [32]byte(cp)
			return copied[:]
		}
		copied := make([]byte, len(cp))
		copy(copied, cp)
		return copied
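The new fast path leans on Go 1.20's slice-to-array conversion: converting a 32-byte slice to a [32]byte value copies the bytes, so the returned slice is backed by fresh memory and cannot alias the caller's buffer. A minimal, self-contained sketch of the same idea (demoCopy32 is illustrative, not the Prysm function):

package main

import "fmt"

// demoCopy32 mirrors the fast path added to SafeCopyBytes above: the
// [32]byte(cp) conversion (Go 1.20+) copies the contents, while the
// make/copy branch handles every other length.
func demoCopy32(cp []byte) []byte {
	if cp == nil {
		return nil
	}
	if len(cp) == 32 {
		copied := [32]byte(cp)
		return copied[:]
	}
	copied := make([]byte, len(cp))
	copy(copied, cp)
	return copied
}

func main() {
	in := make([]byte, 32)
	in[0] = 'A'
	out := demoCopy32(in)
	in[0] = 'X' // mutating the input must not affect the copy
	fmt.Println(out[0] == 'A') // true: out is an independent copy
}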
@@ -2,6 +2,7 @@ package bytesutil_test

import (
	"bytes"
	"fmt"
	"reflect"
	"testing"

@@ -248,3 +249,33 @@ func TestFromBytes48Array(t *testing.T) {
		assert.DeepEqual(t, tt.a, a)
	}
}

func TestSafeCopyBytes_Copy(t *testing.T) {
	slice := make([]byte, 32)
	slice[0] = 'A'

	copiedSlice := bytesutil.SafeCopyBytes(slice)

	assert.NotEqual(t, fmt.Sprintf("%p", slice), fmt.Sprintf("%p", copiedSlice))
	assert.Equal(t, slice[0], copiedSlice[0])
	slice[1] = 'B'

	assert.NotEqual(t, slice[1], copiedSlice[1])
}

func BenchmarkSafeCopyBytes(b *testing.B) {
	dSlice := make([][]byte, 900000)
	for i := 0; i < 900000; i++ {
		slice := make([]byte, 32)
		slice[0] = 'A'
		dSlice[i] = slice
	}

	b.ReportAllocs()
	b.ResetTimer()
	b.Run("Copy Bytes", func(b *testing.B) {
		cSlice := bytesutil.SafeCopy2dBytes(dSlice)
		a := cSlice
		_ = a
	})
}
go.mod
@@ -85,8 +85,8 @@ require (
	golang.org/x/mod v0.10.0
	golang.org/x/sync v0.1.0
	golang.org/x/tools v0.8.0
	google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84
	google.golang.org/grpc v1.40.0
	google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f
	google.golang.org/grpc v1.53.0
	google.golang.org/protobuf v1.30.0
	gopkg.in/d4l3k/messagediff.v1 v1.2.1
	gopkg.in/yaml.v2 v2.4.0
go.sum
@@ -183,7 +183,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
@@ -298,7 +297,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
@@ -447,8 +445,8 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw
github.com/golang/gddo v0.0.0-20200528160355-8d077c1d8f4c h1:HoqgYR60VYu5+0BuG6pjeGp7LKEPZnHt+dUClx9PeIs=
github.com/golang/gddo v0.0.0-20200528160355-8d077c1d8f4c/go.mod h1:sam69Hju0uq+5uvLJUMDlsKlQ21Vrs1Kd/1YFPNYdOU=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -571,7 +569,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@@ -1320,7 +1317,6 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@@ -1803,7 +1799,6 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
@@ -1813,8 +1808,9 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210426193834-eac7f76ac494/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.2.1-0.20170921194603-d4b75ebd4f9f/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
@@ -1837,15 +1833,13 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.35.0-dev.0.20201218190559-666aea1fb34c/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.0.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -18,8 +18,8 @@ searchstring="prysmaticlabs/prysm/v4/"
for ((i = 0; i < arraylength; i++)); do
    color "34" "$destination"
    destination=${file_list[i]#*$searchstring}
    chmod 755 "$destination"
    cp -R -L "${file_list[i]}" "$destination"
    chmod 755 "$destination"
done

# Run goimports on newly generated protos
proto/dbval/BUILD.bazel (new file)
@@ -0,0 +1,23 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")

proto_library(
    name = "dbval_proto",
    srcs = ["dbval.proto"],
    visibility = ["//visibility:public"],
)

go_proto_library(
    name = "dbval_go_proto",
    importpath = "github.com/prysmaticlabs/prysm/v4/proto/dbval",
    proto = ":dbval_proto",
    visibility = ["//visibility:public"],
)

go_library(
    name = "go_default_library",
    embed = [":dbval_go_proto"],
    importpath = "github.com/prysmaticlabs/prysm/v4/proto/dbval",
    visibility = ["//visibility:public"],
)
proto/dbval/dbval.pb.go (generated, new file)
@@ -0,0 +1,187 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.30.0
// 	protoc        v4.23.3
// source: proto/dbval/dbval.proto

package dbval

import (
	reflect "reflect"
	sync "sync"

	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type BackfillStatus struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	LowSlot       uint64 `protobuf:"varint,1,opt,name=low_slot,json=lowSlot,proto3" json:"low_slot,omitempty"`
	LowRoot       []byte `protobuf:"bytes,2,opt,name=low_root,json=lowRoot,proto3" json:"low_root,omitempty"`
	LowParentRoot []byte `protobuf:"bytes,3,opt,name=low_parent_root,json=lowParentRoot,proto3" json:"low_parent_root,omitempty"`
	OriginSlot    uint64 `protobuf:"varint,4,opt,name=origin_slot,json=originSlot,proto3" json:"origin_slot,omitempty"`
	OriginRoot    []byte `protobuf:"bytes,6,opt,name=origin_root,json=originRoot,proto3" json:"origin_root,omitempty"`
}

func (x *BackfillStatus) Reset() {
	*x = BackfillStatus{}
	if protoimpl.UnsafeEnabled {
		mi := &file_proto_dbval_dbval_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BackfillStatus) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BackfillStatus) ProtoMessage() {}

func (x *BackfillStatus) ProtoReflect() protoreflect.Message {
	mi := &file_proto_dbval_dbval_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BackfillStatus.ProtoReflect.Descriptor instead.
func (*BackfillStatus) Descriptor() ([]byte, []int) {
	return file_proto_dbval_dbval_proto_rawDescGZIP(), []int{0}
}

func (x *BackfillStatus) GetLowSlot() uint64 {
	if x != nil {
		return x.LowSlot
	}
	return 0
}

func (x *BackfillStatus) GetLowRoot() []byte {
	if x != nil {
		return x.LowRoot
	}
	return nil
}

func (x *BackfillStatus) GetLowParentRoot() []byte {
	if x != nil {
		return x.LowParentRoot
	}
	return nil
}

func (x *BackfillStatus) GetOriginSlot() uint64 {
	if x != nil {
		return x.OriginSlot
	}
	return 0
}

func (x *BackfillStatus) GetOriginRoot() []byte {
	if x != nil {
		return x.OriginRoot
	}
	return nil
}

var File_proto_dbval_dbval_proto protoreflect.FileDescriptor

var file_proto_dbval_dbval_proto_rawDesc = []byte{
	0x0a, 0x17, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x62, 0x76, 0x61, 0x6c, 0x2f, 0x64, 0x62,
	0x76, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65, 0x74, 0x68, 0x65, 0x72,
	0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x64, 0x62, 0x76, 0x61, 0x6c, 0x22, 0xb0, 0x01,
	0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
	0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x04, 0x52, 0x07, 0x6c, 0x6f, 0x77, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6c,
	0x6f, 0x77, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6c,
	0x6f, 0x77, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61,
	0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52,
	0x0d, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f,
	0x0a, 0x0b, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x04, 0x20,
	0x01, 0x28, 0x04, 0x52, 0x0a, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x6c, 0x6f, 0x74, 0x12,
	0x1f, 0x0a, 0x0b, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x06,
	0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x6f, 0x6f, 0x74,
	0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70,
	0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79,
	0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x62, 0x76, 0x61,
	0x6c, 0x3b, 0x64, 0x62, 0x76, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_proto_dbval_dbval_proto_rawDescOnce sync.Once
	file_proto_dbval_dbval_proto_rawDescData = file_proto_dbval_dbval_proto_rawDesc
)

func file_proto_dbval_dbval_proto_rawDescGZIP() []byte {
	file_proto_dbval_dbval_proto_rawDescOnce.Do(func() {
		file_proto_dbval_dbval_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_dbval_dbval_proto_rawDescData)
	})
	return file_proto_dbval_dbval_proto_rawDescData
}

var file_proto_dbval_dbval_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_proto_dbval_dbval_proto_goTypes = []interface{}{
	(*BackfillStatus)(nil), // 0: ethereum.eth.dbval.BackfillStatus
}
var file_proto_dbval_dbval_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_proto_dbval_dbval_proto_init() }
func file_proto_dbval_dbval_proto_init() {
	if File_proto_dbval_dbval_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_proto_dbval_dbval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*BackfillStatus); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_proto_dbval_dbval_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_proto_dbval_dbval_proto_goTypes,
		DependencyIndexes: file_proto_dbval_dbval_proto_depIdxs,
		MessageInfos:      file_proto_dbval_dbval_proto_msgTypes,
	}.Build()
	File_proto_dbval_dbval_proto = out.File
	file_proto_dbval_dbval_proto_rawDesc = nil
	file_proto_dbval_dbval_proto_goTypes = nil
	file_proto_dbval_dbval_proto_depIdxs = nil
}
proto/dbval/dbval.proto (new file)
@@ -0,0 +1,13 @@
syntax = "proto3";

package ethereum.eth.dbval;

option go_package = "github.com/prysmaticlabs/prysm/v4/proto/dbval;dbval";

message BackfillStatus {
    uint64 low_slot = 1;
    bytes low_root = 2;
    bytes low_parent_root = 3;
    uint64 origin_slot = 4;
    bytes origin_root = 6;
}
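The generated package exposes BackfillStatus as an ordinary protobuf message, so it round-trips through proto.Marshal and proto.Unmarshal like any other. A minimal sketch of that round trip follows; the field values are placeholders, and how the backfill service actually persists this record is not part of this diff:

package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A status covering the backfilled range [low_slot, origin_slot), keyed
	// by block roots at each end. The 32-byte roots here are placeholders.
	bs := &dbval.BackfillStatus{
		LowSlot:    100,
		LowRoot:    make([]byte, 32),
		OriginSlot: 6400,
		OriginRoot: make([]byte, 32),
	}
	enc, err := proto.Marshal(bs)
	if err != nil {
		log.Fatal(err)
	}
	got := &dbval.BackfillStatus{}
	if err := proto.Unmarshal(enc, got); err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.GetLowSlot(), got.GetOriginSlot()) // 100 6400
}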
proto/engine/v1/execution_engine.pb.go (generated)
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.30.0
// 	protoc        v3.15.8
// 	protoc        v4.23.3
// source: proto/engine/v1/execution_engine.proto

package enginev1
@@ -98,6 +98,16 @@ func EpochStart(epoch primitives.Epoch) (primitives.Slot, error) {
	return slot, nil
}

// UnsafeEpochStart is a version of EpochStart that panics if there is an overflow. It can be safely used by code
// that first guarantees epoch <= MaxSafeEpoch.
func UnsafeEpochStart(epoch primitives.Epoch) primitives.Slot {
	es, err := EpochStart(epoch)
	if err != nil {
		panic(err)
	}
	return es
}

// EpochEnd returns the last slot number of the
// current epoch.
func EpochEnd(epoch primitives.Epoch) (primitives.Slot, error) {
@@ -258,3 +268,8 @@ func SecondsSinceSlotStart(s primitives.Slot, genesisTime, timeStamp uint64) (ui
func TimeIntoSlot(genesisTime uint64) time.Duration {
	return time.Since(StartTime(genesisTime, CurrentSlot(genesisTime)))
}

// MaxSafeEpoch gives the largest epoch value that can be safely converted to a slot.
func MaxSafeEpoch() primitives.Epoch {
	return primitives.Epoch(math.MaxUint64 / uint64(params.BeaconConfig().SlotsPerEpoch))
}
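Taken together, the two additions give callers a panic-free pattern: validate or clamp against MaxSafeEpoch first, then call UnsafeEpochStart knowing it cannot overflow. A sketch of that pattern (clampedStart is illustrative, not part of the diff; the import paths follow the v4 module layout):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// clampedStart returns the first slot of epoch, saturating at the largest
// epoch that still converts to a slot without overflowing uint64.
func clampedStart(epoch primitives.Epoch) primitives.Slot {
	if epoch > slots.MaxSafeEpoch() {
		epoch = slots.MaxSafeEpoch()
	}
	// Safe: epoch <= MaxSafeEpoch guarantees UnsafeEpochStart cannot panic.
	return slots.UnsafeEpochStart(epoch)
}

func main() {
	fmt.Println(clampedStart(primitives.Epoch(10))) // 320 under a 32-slots-per-epoch config
}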
@@ -35,7 +35,7 @@ type multipleEndpointsGrpcResolver struct {
}

func (r *multipleEndpointsGrpcResolver) start() {
	endpoints := strings.Split(r.target.Endpoint, ",")
	endpoints := strings.Split(r.target.Endpoint(), ",")
	var addrs []resolver.Address
	for _, endpoint := range endpoints {
		addrs = append(addrs, resolver.Address{Addr: endpoint, ServerName: endpoint})
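This one-line change tracks the gRPC bump above: by grpc-go v1.53 the endpoint on resolver.Target is read through the Endpoint() accessor, derived from the parsed URL, rather than the old Endpoint struct field. A small sketch of the updated call shape (splitEndpoints and the target string are illustrative):

package main

import (
	"fmt"
	"net/url"
	"strings"

	"google.golang.org/grpc/resolver"
)

// splitEndpoints mirrors the updated call site: the comma-separated endpoint
// list now comes from the Endpoint() accessor on resolver.Target.
func splitEndpoints(target resolver.Target) []string {
	return strings.Split(target.Endpoint(), ",")
}

func main() {
	u, err := url.Parse("multiple-endpoints:///host1:4000,host2:4000")
	if err != nil {
		panic(err)
	}
	fmt.Println(splitEndpoints(resolver.Target{URL: *u})) // [host1:4000 host2:4000]
}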
@@ -58,7 +58,7 @@ func run(ctx context.Context, v iface.Validator) {
	if v.ProposerSettings() != nil {
		log.Infof("Validator client started with provided proposer settings that sets options such as fee recipient"+
			" and will periodically update the beacon node and custom builder (if --%s)", flags.EnableBuilderFlag.Name)
		deadline := time.Now().Add(time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
		deadline := time.Now().Add(5 * time.Minute)
		if err := v.PushProposerSettings(ctx, km, headSlot, deadline); err != nil {
			if errors.Is(err, ErrBuilderValidatorRegistration) {
				log.WithError(err).Warn("Push proposer settings error")
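The deadline passed to PushProposerSettings is now a fixed five minutes rather than a single slot, giving slow builder registrations room to finish. How the validator honors it internally is not shown here; a common Go pattern for enforcing such a wall-clock deadline is context.WithDeadline, sketched below (pushWithDeadline is illustrative, not Prysm's code):

package main

import (
	"context"
	"fmt"
	"time"
)

// pushWithDeadline derives a context that expires at the given wall-clock
// deadline, so a slow operation fails with context.DeadlineExceeded instead
// of blocking indefinitely.
func pushWithDeadline(ctx context.Context, deadline time.Time, push func(context.Context) error) error {
	ctx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()
	return push(ctx)
}

func main() {
	deadline := time.Now().Add(5 * time.Minute)
	err := pushWithDeadline(context.Background(), deadline, func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond): // stand-in for the real push
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println(err) // <nil>
}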
@@ -248,31 +248,6 @@ func TestUpdateProposerSettingsAt_EpochStart(t *testing.T) {
	assert.LogsContain(t, hook, "updated proposer settings")
}

func TestUpdateProposerSettingsAt_EpochEndExceeded(t *testing.T) {
	v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, ProposerSettingWait: time.Duration(params.BeaconConfig().SecondsPerSlot+1) * time.Second}
	err := v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{
		DefaultConfig: &validatorserviceconfig.ProposerOption{
			FeeRecipientConfig: &validatorserviceconfig.FeeRecipientConfig{
				FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
			},
		},
	})
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	hook := logTest.NewGlobal()
	slot := params.BeaconConfig().SlotsPerEpoch - 1 // have it set close to the end of epoch
	ticker := make(chan primitives.Slot)
	v.NextSlotRet = ticker
	go func() {
		ticker <- slot
		cancel()
	}()

	run(ctx, v)
	// can't test "Failed to update proposer settings" because of log.fatal
	assert.LogsContain(t, hook, "deadline exceeded")
}

func TestUpdateProposerSettingsAt_EpochEndOk(t *testing.T) {
	v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, ProposerSettingWait: time.Duration(params.BeaconConfig().SecondsPerSlot-1) * time.Second}
	err := v.SetProposerSettings(context.Background(), &validatorserviceconfig.ProposerSettings{