Compare commits

2 Commits

Author             SHA1         Message                                                                          Date
Preston Van Loon   403120adc3   Changelog fragment                                                               2025-02-23 14:44:29 -06:00
Preston Van Loon   eddd965d18   Fix uses of rand.Seed. This is a no-op in go1.24 and deprecated since go1.20.   2025-02-23 14:43:32 -06:00
85 changed files with 799 additions and 1932 deletions
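For reference, the Go-level pattern behind the rand.Seed commit looks roughly like the following sketch (illustrative only; the actual call sites are spread across the changed files and are not all shown in the excerpts below):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func main() {
        // Deprecated since Go 1.20 and a no-op as of Go 1.24:
        // rand.Seed(time.Now().UnixNano())

        // Replacement: construct a locally seeded generator instead of
        // seeding the shared global source.
        r := rand.New(rand.NewSource(time.Now().UnixNano()))
        fmt.Println(r.Intn(100))
    }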

View File

@@ -54,5 +54,4 @@ type ForkChoiceNodeExtraData struct {
 	Balance             string `json:"balance"`
 	ExecutionOptimistic bool   `json:"execution_optimistic"`
 	TimeStamp           string `json:"timestamp"`
-	Target              string `json:"target"`
 }
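With the target field gone, a forkchoice node's extra data would serialize along these lines (hypothetical values, shown only to make the JSON shape concrete):

    {"balance":"32000000000","execution_optimistic":false,"timestamp":"1695902400"}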

View File

@@ -12,7 +12,6 @@ go_library(
         "forkchoice_update_execution.go",
         "head.go",
         "head_sync_committee_info.go",
-        "holeskyhack.go",
         "init_sync_process_block.go",
         "log.go",
         "merge_ascii_art.go",
@@ -27,7 +26,6 @@ go_library(
         "receive_blob.go",
         "receive_block.go",
         "service.go",
-        "setup_forchoice.go",
         "tracked_proposer.go",
         "weak_subjectivity_checks.go",
     ],

View File

@@ -1,21 +0,0 @@
-package blockchain
-
-import (
-	"encoding/hex"
-
-	"github.com/pkg/errors"
-)
-
-var errHoleskyForbiddenRoot = errors.New("refusing to process forbidden holesky block")
-
-// hack to prevent bad holesky block importation
-var badHoleskyRoot [32]byte
-
-func init() {
-	hexStr := "2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359"
-	bytes, err := hex.DecodeString(hexStr)
-	if err != nil {
-		panic(err)
-	}
-	badHoleskyRoot = [32]byte(bytes)
-}
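A note on the deleted init: hex.DecodeString of the 64-character constant yields exactly 32 bytes, and the [32]byte(bytes) conversion (legal for slices since Go 1.20) panics at runtime if the slice is shorter than 32 bytes. A sketch of the same guard with an explicit length check might read:

    decoded, err := hex.DecodeString(hexStr)
    if err != nil || len(decoded) != 32 {
        panic("expected a 32-byte root") // hypothetical message, not from the diff
    }
    root := [32]byte(decoded) // slice-to-array conversion, Go 1.20+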

View File

@@ -213,10 +213,3 @@ func WithSyncChecker(checker Checker) Option {
 		return nil
 	}
 }
-
-func WithSlasherEnabled(enabled bool) Option {
-	return func(s *Service) error {
-		s.slasherEnabled = enabled
-		return nil
-	}
-}
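WithSlasherEnabled follows the package's functional-options convention, in which each Option mutates the Service during construction. A minimal sketch of how such options are consumed (assuming a constructor shaped like blockchain.NewService; not the service's full constructor):

    type Option func(s *Service) error

    func NewService(ctx context.Context, opts ...Option) (*Service, error) {
        s := &Service{ctx: ctx}
        for _, o := range opts {
            if err := o(s); err != nil {
                return nil, err
            }
        }
        return s, nil
    }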

View File

@@ -173,9 +173,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 	var set *bls.SignatureBatch
 	boundaries := make(map[[32]byte]state.BeaconState)
 	for i, b := range blks {
-		if b.Root() == badHoleskyRoot {
-			return errHoleskyForbiddenRoot
-		}
 		v, h, err := getStateVersionAndPayload(preState)
 		if err != nil {
 			return err

View File

@@ -2039,7 +2039,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
 	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
 	optimistic, err := service.IsOptimistic(ctx)
 	require.NoError(t, err)
-	require.Equal(t, true, optimistic)
+	require.Equal(t, false, optimistic)

 	// Check that the node's justified checkpoint does not agree with the
 	// last valid state's justified checkpoint

View File

@@ -16,6 +16,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v5/config/features"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -64,11 +65,6 @@ type SlashingReceiver interface {
 func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
 	defer span.End()
-
-	if blockRoot == badHoleskyRoot {
-		return errHoleskyForbiddenRoot
-	}
-
 	// Return early if the block has been synced
 	if s.InForkchoice(blockRoot) {
 		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
@@ -125,7 +121,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 		return err
 	}

 	// If slasher is configured, forward the attestations in the block via an event feed for processing.
-	if s.slasherEnabled {
+	if features.Get().EnableSlasher {
 		go s.sendBlockAttestationsToSlasher(blockCopy, preState)
 	}

View File

@@ -3,6 +3,7 @@
 package blockchain

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"runtime"
@@ -22,6 +23,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
 	f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings"
@@ -30,6 +32,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
+	"github.com/prysmaticlabs/prysm/v5/config/features"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -39,7 +42,6 @@ import (
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
-	"github.com/sirupsen/logrus"
 )

 // Service represents a service that handles the internal
@@ -63,7 +65,6 @@ type Service struct {
 	blobNotifiers    *blobNotifierMap
 	blockBeingSynced *currentlySyncingBlock
 	blobStorage      *filesystem.BlobStorage
-	slasherEnabled   bool
 }

 // config options for the service.
@@ -268,18 +269,69 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 		return err
 	}
 	s.originBlockRoot = originRoot
-	st, err := s.cfg.StateGen.Resume(s.ctx, s.cfg.FinalizedStateAtStartUp)
-	if err != nil {
-		return errors.Wrap(err, "could not get finalized state from db")
+	if err := s.initializeHeadFromDB(s.ctx); err != nil {
+		return errors.Wrap(err, "could not set up chain info")
 	}
 	spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
-	if err := s.setupForkchoice(st); err != nil {
-		return errors.Wrap(err, "could not set up forkchoice")
+	justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
+	if err != nil {
+		return errors.Wrap(err, "could not get justified checkpoint")
 	}
+	if justified == nil {
+		return errNilJustifiedCheckpoint
+	}
+	finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
+	if err != nil {
+		return errors.Wrap(err, "could not get finalized checkpoint")
+	}
+	if finalized == nil {
+		return errNilFinalizedCheckpoint
+	}
+	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
+		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
+		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+	}
+	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
+		Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
+		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
+	}
+	s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
+	st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
+	if err != nil {
+		return errors.Wrap(err, "could not get finalized checkpoint state")
+	}
+	finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
+	if err != nil {
+		return errors.Wrap(err, "could not get finalized checkpoint block")
+	}
+	roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
+	if err != nil {
+		return err
+	}
+	if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
+		return errors.Wrap(err, "could not insert finalized block to forkchoice")
+	}
+	if !features.Get().EnableStartOptimistic {
+		lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
+		if err != nil {
+			return errors.Wrap(err, "could not get last validated checkpoint")
+		}
+		if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
+			if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
+				return errors.Wrap(err, "could not set finalized block as validated")
+			}
+		}
+	}
 	// not attempting to save initial sync blocks here, because there shouldn't be any until
 	// after the statefeed.Initialized event is fired (below)
-	cp := s.FinalizedCheckpt()
-	if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, cp.Epoch); err != nil {
+	if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
 		// Exit run time if the node failed to verify weak subjectivity checkpoint.
 		return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
 	}
@@ -288,6 +340,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
 		return errors.Wrap(err, "failed to initialize blockchain service")
 	}
+
 	return nil
 }
@@ -317,40 +370,46 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
 	return genesisBlkRoot, nil
 }

-// initializeHeadFromDB uses the finalized checkpoint and head block root from forkchoice to set the current head.
+// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head.
 // Note that this may block until stategen replays blocks between the finalized and head blocks
 // if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long.
-func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) error {
-	cp := s.FinalizedCheckpt()
-	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
-	if st == nil || st.IsNil() {
+func (s *Service) initializeHeadFromDB(ctx context.Context) error {
+	finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
+	if err != nil {
+		return errors.Wrap(err, "could not get finalized checkpoint from db")
+	}
+	if finalized == nil {
+		// This should never happen. At chain start, the finalized checkpoint
+		// would be the genesis state and block.
+		return errors.New("no finalized epoch in the database")
+	}
+	finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
+	var finalizedState state.BeaconState
+	finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
+	if err != nil {
+		return errors.Wrap(err, "could not get finalized state from db")
+	}
+	if finalizedState == nil || finalizedState.IsNil() {
 		return errors.New("finalized state can't be nil")
 	}
-	s.cfg.ForkChoiceStore.RLock()
-	root := s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
-	s.cfg.ForkChoiceStore.RUnlock()
-	blk, err := s.cfg.BeaconDB.Block(ctx, root)
+	finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
 	if err != nil {
-		return errors.Wrap(err, "could not get head block")
+		return errors.Wrap(err, "could not get finalized block")
 	}
-	if root != fRoot {
-		st, err = s.cfg.StateGen.StateByRoot(ctx, root)
-		if err != nil {
-			return errors.Wrap(err, "could not get head state")
-		}
-	}
-	log.WithFields(logrus.Fields{
-		"root": fmt.Sprintf("%#x", root),
-		"slot": blk.Block().Slot(),
-	}).Info("Initialized head block from DB")
-	return errors.Wrap(s.setHead(&head{
-		root,
-		blk,
-		st,
-		blk.Block().Slot(),
+	if err := s.setHead(&head{
+		finalizedRoot,
+		finalizedBlock,
+		finalizedState,
+		finalizedBlock.Block().Slot(),
 		false,
-	}), "could not set head")
+	}); err != nil {
+		return errors.Wrap(err, "could not set head")
+	}
+	return nil
 }
func (s *Service) startFromExecutionChain() error {

View File

@@ -1,187 +0,0 @@
-package blockchain
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-
-	"github.com/pkg/errors"
-	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
-	"github.com/prysmaticlabs/prysm/v5/config/features"
-	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
-	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
-	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	"github.com/prysmaticlabs/prysm/v5/time/slots"
-)
-
-func (s *Service) setupForkchoice(st state.BeaconState) error {
-	if err := s.setupForkchoiceCheckpoints(); err != nil {
-		return errors.Wrap(err, "could not set up forkchoice checkpoints")
-	}
-	if err := s.setupForkchoiceTree(st); err != nil {
-		return errors.Wrap(err, "could not set up forkchoice root")
-	}
-	if err := s.initializeHead(s.ctx, st); err != nil {
-		return errors.Wrap(err, "could not initialize head from db")
-	}
-	return nil
-}
-
-func (s *Service) startupHeadRoot() [32]byte {
-	headStr := features.Get().ForceHead
-	cp := s.FinalizedCheckpt()
-	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
-	if headStr == "" {
-		return fRoot
-	}
-	if headStr == "head" {
-		root, err := s.cfg.BeaconDB.HeadBlockRoot()
-		if err != nil {
-			log.WithError(err).Error("could not get head block root, starting with finalized block as head")
-			return fRoot
-		}
-		log.Infof("Using Head root of %#x", root)
-		return root
-	}
-	root, err := bytesutil.DecodeHexWithLength(headStr, 32)
-	if err != nil {
-		log.WithError(err).Error("could not parse head root, starting with finalized block as head")
-		return fRoot
-	}
-	return [32]byte(root)
-}
-
-func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
-	headRoot := s.startupHeadRoot()
-	cp := s.FinalizedCheckpt()
-	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
-	if err := s.setupForkchoiceRoot(st); err != nil {
-		return errors.Wrap(err, "could not set up forkchoice root")
-	}
-	fBlk, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
-	if err != nil {
-		return errors.Wrap(err, "could not get finalized block")
-	}
-	if err := s.setHead(&head{
-		fRoot,
-		fBlk,
-		st,
-		fBlk.Block().Slot(),
-		false,
-	}); err != nil {
-		return errors.Wrap(err, "could not set head")
-	}
-	if headRoot == fRoot {
-		return nil
-	}
-	blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
-	if err != nil {
-		log.WithError(err).Error("could not get head block, starting with finalized block as head")
-		return nil
-	}
-	if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
-		log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
-		return nil
-	}
-	chain, err := s.buildForkchoiceChain(s.ctx, blk)
-	if err != nil {
-		log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
-		return nil
-	}
-	s.cfg.ForkChoiceStore.Lock()
-	defer s.cfg.ForkChoiceStore.Unlock()
-	return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
-}
-
-func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.ReadOnlySignedBeaconBlock) ([]*forkchoicetypes.BlockAndCheckpoints, error) {
-	chain := []*forkchoicetypes.BlockAndCheckpoints{}
-	cp := s.FinalizedCheckpt()
-	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
-	jp := s.CurrentJustifiedCheckpt()
-	root, err := head.Block().HashTreeRoot()
-	if err != nil {
-		return nil, errors.Wrap(err, "could not get head block root")
-	}
-	for {
-		roblock, err := blocks.NewROBlockWithRoot(head, root)
-		if err != nil {
-			return nil, err
-		}
-		// This chain sets the justified checkpoint for every block, including some that are older than jp.
-		// This should be however safe for forkchoice at startup.
-		chain = append(chain, &forkchoicetypes.BlockAndCheckpoints{Block: roblock, JustifiedCheckpoint: jp, FinalizedCheckpoint: cp})
-		root = head.Block().ParentRoot()
-		if root == fRoot {
-			break
-		}
-		head, err = s.cfg.BeaconDB.Block(s.ctx, root)
-		if err != nil {
-			return nil, errors.Wrap(err, "could not get block")
-		}
-		if slots.ToEpoch(head.Block().Slot()) < cp.Epoch {
-			return nil, errors.New("head block is not a descendant of the finalized checkpoint")
-		}
-	}
-	return chain, nil
-}
-
-func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
-	cp := s.FinalizedCheckpt()
-	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
-	finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
-	if err != nil {
-		return errors.Wrap(err, "could not get finalized checkpoint block")
-	}
-	roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
-	if err != nil {
-		return err
-	}
-	if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
-		return errors.Wrap(err, "could not insert finalized block to forkchoice")
-	}
-	if !features.Get().EnableStartOptimistic {
-		lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
-		if err != nil {
-			return errors.Wrap(err, "could not get last validated checkpoint")
-		}
-		if bytes.Equal(fRoot[:], lastValidatedCheckpoint.Root) {
-			if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
-				return errors.Wrap(err, "could not set finalized block as validated")
-			}
-		}
-	}
-	return nil
-}
-
-func (s *Service) setupForkchoiceCheckpoints() error {
-	justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
-	if err != nil {
-		return errors.Wrap(err, "could not get justified checkpoint")
-	}
-	if justified == nil {
-		return errNilJustifiedCheckpoint
-	}
-	finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
-	if err != nil {
-		return errors.Wrap(err, "could not get finalized checkpoint")
-	}
-	if finalized == nil {
-		return errNilFinalizedCheckpoint
-	}
-	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
-	s.cfg.ForkChoiceStore.Lock()
-	defer s.cfg.ForkChoiceStore.Unlock()
-	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
-		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
-		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
-	}
-	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
-		Root: fRoot}); err != nil {
-		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
-	}
-	s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
-	return nil
-}

View File

@@ -101,7 +101,7 @@ type NoHeadAccessDatabase interface {
 	SaveLightClientBootstrap(ctx context.Context, blockRoot []byte, bootstrap interfaces.LightClientBootstrap) error
 	CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
-	DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
+	DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot) error
 }

 // HeadAccessDatabase defines a struct with access to reading chain head data.
@@ -110,7 +110,6 @@ type HeadAccessDatabase interface {
 	// Block related methods.
 	HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
-	HeadBlockRoot() ([32]byte, error)
 	SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error

 	// Genesis operations.

View File

@@ -70,21 +70,6 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
 	return root, err
 }

-// HeadBlockRoot returns the latest canonical block root in the Ethereum Beacon Chain.
-func (s *Store) HeadBlockRoot() ([32]byte, error) {
-	var root [32]byte
-	err := s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(blocksBucket)
-		headRoot := bkt.Get(headBlockRootKey)
-		if headRoot == nil {
-			return errors.New("no head block root found")
-		}
-		copy(root[:], headRoot)
-		return nil
-	})
-	return root, err
-}
-
 // HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
 func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -229,8 +214,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
 	defer span.End()

 	if err := s.DeleteState(ctx, root); err != nil {
-		// TODO: Find out why invalid states are in the db
-		log.WithError(err).Error("Could not delete state")
+		return err
 	}

 	if err := s.deleteStateSummary(root); err != nil {
@@ -261,82 +245,77 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
 // - blockRootValidatorHashesBucket
 // - blockSlotIndicesBucket
 // - stateSlotIndicesBucket
-func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot, batchSize int) (int, error) {
+func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteHistoricalDataBeforeSlot")
 	defer span.End()
-	// Collect slot/root pairs to perform deletions in a separate read only transaction.
-	slotRoots, err := s.slotRootsInRange(ctx, primitives.Slot(0), cutoffSlot, batchSize)
+	var (
+		roots [][]byte
+		slts  []primitives.Slot
+	)
+	err := s.db.View(func(tx *bolt.Tx) error {
+		var err error
+		roots, slts, err = blockRootsBySlotRange(ctx, tx.Bucket(blockSlotIndicesBucket), primitives.Slot(0), cutoffSlot, nil, nil, nil)
+		if err != nil {
+			return errors.Wrap(err, "could not retrieve block roots")
+		}
+		return nil
+	})
 	if err != nil {
-		return 0, err
-	}
-	// Return early if there's nothing to delete.
-	if len(slotRoots) == 0 {
-		return 0, nil
+		return errors.Wrap(err, "could not retrieve block roots and slots")
 	}

 	// Perform all deletions in a single transaction for atomicity
-	var numSlotsDeleted int
-	err = s.db.Update(func(tx *bolt.Tx) error {
-		for _, sr := range slotRoots {
-			// Return if context is cancelled or deadline is exceeded.
-			if ctx.Err() != nil {
-				//nolint:nilerr
-				return nil
-			}
+	return s.db.Update(func(tx *bolt.Tx) error {
+		for _, root := range roots {
 			// Delete block
-			if err = s.deleteBlock(tx, sr.root[:]); err != nil {
+			if err = s.deleteBlock(tx, root); err != nil {
 				return err
 			}
 			// Delete finalized block roots index
-			if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(sr.root[:]); err != nil {
+			if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(root); err != nil {
 				return errors.Wrap(err, "could not delete finalized block root index")
 			}
 			// Delete state
-			if err = tx.Bucket(stateBucket).Delete(sr.root[:]); err != nil {
+			if err = tx.Bucket(stateBucket).Delete(root); err != nil {
 				return errors.Wrap(err, "could not delete state")
 			}
 			// Delete state summary
-			if err = tx.Bucket(stateSummaryBucket).Delete(sr.root[:]); err != nil {
+			if err = tx.Bucket(stateSummaryBucket).Delete(root); err != nil {
 				return errors.Wrap(err, "could not delete state summary")
 			}
 			// Delete validator entries
-			if err = s.deleteValidatorHashes(tx, sr.root[:]); err != nil {
+			if err = s.deleteValidatorHashes(tx, root); err != nil {
 				return errors.Wrap(err, "could not delete validators")
 			}
-			numSlotsDeleted++
 		}
-		for _, sr := range slotRoots {
+		for _, slot := range slts {
 			// Delete slot indices
-			if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(sr.slot)); err != nil {
+			if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
 				return errors.Wrap(err, "could not delete block slot index")
 			}
-			if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(sr.slot)); err != nil {
+			if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
 				return errors.Wrap(err, "could not delete state slot index")
 			}
 		}

 		// Delete all caches after we have deleted everything from buckets.
 		// This is done after the buckets are deleted to avoid any issues in case of transaction rollback.
-		for _, sr := range slotRoots {
+		for _, root := range roots {
 			// Delete block from cache
-			s.blockCache.Del(string(sr.root[:]))
+			s.blockCache.Del(string(root))
 			// Delete state summary from cache
-			s.stateSummaryCache.delete(sr.root)
+			s.stateSummaryCache.delete([32]byte(root))
 		}
 		return nil
 	})
-	return numSlotsDeleted, err
 }

 // SaveBlock to the db.
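The deleted comments describe a collect-then-delete pattern worth noting: gather keys under a read-only transaction, apply all deletions in one write transaction for atomicity, and only then evict caches so a rolled-back transaction cannot leave the caches out of sync. In isolation, a sketch against the bbolt API (bucketName is a stand-in):

    var keys [][]byte
    err := db.View(func(tx *bolt.Tx) error { // read-only: collect keys
        c := tx.Bucket(bucketName).Cursor()
        for k, _ := c.First(); k != nil; k, _ = c.Next() {
            keys = append(keys, append([]byte(nil), k...)) // copy; k is only valid inside the tx
        }
        return nil
    })
    if err != nil {
        return err
    }
    return db.Update(func(tx *bolt.Tx) error { // one write tx for atomicity
        bkt := tx.Bucket(bucketName)
        for _, k := range keys {
            if err := bkt.Delete(k); err != nil {
                return err
            }
        }
        return nil
    })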
@@ -357,7 +336,7 @@ func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
 // if a `saveBlindedBeaconBlocks` key exists in the database. Otherwise, we check if the last
 // blocked stored to check if it is blinded, and then write that `saveBlindedBeaconBlocks` key
 // to the DB for future checks.
-func (s *Store) shouldSaveBlinded() (bool, error) {
+func (s *Store) shouldSaveBlinded(ctx context.Context) (bool, error) {
 	var saveBlinded bool
 	if err := s.db.View(func(tx *bolt.Tx) error {
 		metadataBkt := tx.Bucket(chainMetadataBucket)
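Per the comment above, the flag is lazily initialized: read the saveBlindedBeaconBlocks marker from the metadata bucket, and only fall back to inspecting the last stored block (persisting the marker afterwards) when it is absent. The read side presumably reduces to something like this (the key constant's name is assumed):

    saveBlinded := false
    if err := s.db.View(func(tx *bolt.Tx) error {
        metadataBkt := tx.Bucket(chainMetadataBucket)
        saveBlinded = metadataBkt.Get(saveBlindedBeaconBlocksKey) != nil
        return nil
    }); err != nil {
        return false, err
    }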
@@ -419,7 +398,7 @@ func prepareBlockBatch(blks []blocks.ROBlock, shouldBlind bool) ([]blockBatchEnt
 }

 func (s *Store) SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error {
-	shouldBlind, err := s.shouldSaveBlinded()
+	shouldBlind, err := s.shouldSaveBlinded(ctx)
 	if err != nil {
 		return err
 	}
@@ -690,49 +669,6 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
 	})
 }

-type slotRoot struct {
-	slot primitives.Slot
-	root [32]byte
-}
-
-// slotRootsInRange returns slot and block root pairs of length min(batchSize, end-slot)
-func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot, batchSize int) ([]slotRoot, error) {
-	_, span := trace.StartSpan(ctx, "BeaconDB.slotRootsInRange")
-	defer span.End()
-
-	if end < start {
-		return nil, errInvalidSlotRange
-	}
-
-	var pairs []slotRoot
-	key := bytesutil.SlotToBytesBigEndian(end)
-	err := s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(blockSlotIndicesBucket)
-		c := bkt.Cursor()
-		for k, v := c.Seek(key); k != nil; k, v = c.Prev() {
-			slot := bytesutil.BytesToSlotBigEndian(k)
-			if slot > end {
-				continue // Seek will seek to the next key *after* the given one if not present
-			}
-			if slot < start {
-				return nil
-			}
-			roots, err := splitRoots(v)
-			if err != nil {
-				return errors.Wrapf(err, "corrupt value %v in block slot index for slot=%d", v, slot)
-			}
-			for _, r := range roots {
-				pairs = append(pairs, slotRoot{slot: slot, root: r})
-			}
-			if len(pairs) >= batchSize {
-				return nil // allows code to easily cap the number of items pruned
-			}
-		}
-		return nil
-	})
-	return pairs, err
-}
-
 // blockRootsByFilter retrieves the block roots given the filter criteria.
 func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter) ([][]byte, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsByFilter")
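The deleted helper leans on a bbolt cursor property that its inline comment calls out: Seek positions the cursor at the given key, or at the next key after it when the exact key is absent, so a reverse range scan must skip the overshoot before walking backward with Prev. The bare pattern, independent of this codebase:

    c := bkt.Cursor()
    for k, v := c.Seek(endKey); k != nil; k, v = c.Prev() {
        if bytes.Compare(k, endKey) > 0 {
            continue // Seek overshot past endKey; step back via Prev
        }
        if bytes.Compare(k, startKey) < 0 {
            break // walked out of the range
        }
        _ = v // process the entry
    }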
@@ -753,7 +689,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
 	// We retrieve block roots that match a filter criteria of slot ranges, if specified.
 	filtersMap := f.Filters()
-	rootsBySlotRange, err := blockRootsBySlotRange(
+	rootsBySlotRange, _, err := blockRootsBySlotRange(
 		ctx,
 		tx.Bucket(blockSlotIndicesBucket),
 		filtersMap[filters.StartSlot],
@@ -798,13 +734,13 @@ func blockRootsBySlotRange(
 	ctx context.Context,
 	bkt *bolt.Bucket,
 	startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
-) ([][]byte, error) {
+) ([][]byte, []primitives.Slot, error) {
 	_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
 	defer span.End()

 	// Return nothing when all slot parameters are missing
 	if startSlotEncoded == nil && endSlotEncoded == nil && startEpochEncoded == nil && endEpochEncoded == nil {
-		return [][]byte{}, nil
+		return [][]byte{}, nil, nil
 	}

 	var startSlot, endSlot primitives.Slot
@@ -825,11 +761,11 @@ func blockRootsBySlotRange(
 	if startEpochOk && endEpochOk {
 		startSlot, err = slots.EpochStart(startEpoch)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		endSlot, err = slots.EpochStart(endEpoch)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		endSlot = endSlot + params.BeaconConfig().SlotsPerEpoch - 1
 	}
@@ -840,10 +776,11 @@ func blockRootsBySlotRange(
 		return key != nil && bytes.Compare(key, max) <= 0
 	}
 	if endSlot < startSlot {
-		return nil, errInvalidSlotRange
+		return nil, nil, errInvalidSlotRange
 	}
 	rootsRange := endSlot.SubSlot(startSlot).Div(step)
 	roots := make([][]byte, 0, rootsRange)
+	var slts []primitives.Slot
 	c := bkt.Cursor()
 	for k, v := c.Seek(min); conditional(k, max); k, v = c.Next() {
 		slot := bytesutil.BytesToSlotBigEndian(k)
@@ -858,8 +795,9 @@
 			splitRoots = append(splitRoots, v[i:i+32])
 		}
 		roots = append(roots, splitRoots...)
+		slts = append(slts, slot)
 	}
-	return roots, nil
+	return roots, slts, nil
 }
// blockRootsBySlot retrieves the block roots by slot

View File

@@ -359,221 +359,184 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
func TestStore_HistoricalDataBeforeSlot(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()
tests := []struct {
name string
batchSize int
numOfEpochs uint64
deleteBeforeSlot uint64
}{
{
name: "batchSize less than delete range",
batchSize: 10,
numOfEpochs: 4,
deleteBeforeSlot: 25,
},
{
name: "batchSize greater than delete range",
batchSize: 30,
numOfEpochs: 4,
deleteBeforeSlot: 15,
},
}
// Save genesis block root
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
// Save genesis block root
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
// Create and save blocks for 4 epochs
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
// Create and save blocks for given epochs
blks := makeBlocks(t, 0, slotsPerEpoch*tt.numOfEpochs, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
// Mark state validator migration as complete
err := db.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
})
require.NoError(t, err)
// Mark state validator migration as complete
err := db.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
})
require.NoError(t, err)
migrated, err := db.isStateValidatorMigrationOver()
require.NoError(t, err)
require.Equal(t, true, migrated)
migrated, err := db.isStateValidatorMigrationOver()
require.NoError(t, err)
require.Equal(t, true, migrated)
// Create state summaries and states for each block
ss := make([]*ethpb.StateSummary, len(blks))
states := make([]state.BeaconState, len(blks))
// Create state summaries and states for each block
ss := make([]*ethpb.StateSummary, len(blks))
states := make([]state.BeaconState, len(blks))
for i, blk := range blks {
slot := blk.Block().Slot()
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
for i, blk := range blks {
slot := blk.Block().Slot()
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
// Create and save state summary
ss[i] = &ethpb.StateSummary{
Slot: slot,
Root: r[:],
}
// Create and save state summary
ss[i] = &ethpb.StateSummary{
Slot: slot,
Root: r[:],
}
// Create and save state with validator entries
vals := make([]*ethpb.Validator, 2)
for j := range vals {
vals[j] = &ethpb.Validator{
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
}
}
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
state.Validators = vals
state.Slot = slot
return nil
})
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, st, r))
states[i] = st
// Verify validator entries are saved to db
valsActual, err := db.validatorEntries(ctx, r)
require.NoError(t, err)
for j, val := range valsActual {
require.DeepEqual(t, vals[j], val)
}
// Create and save state with validator entries
vals := make([]*ethpb.Validator, 2)
for j := range vals {
vals[j] = &ethpb.Validator{
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
}
// Verify slot indices exist before deletion
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < uint64(tt.deleteBeforeSlot); i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
}
return nil
})
require.NoError(t, err)
// Delete data before slot
slotsDeleted, err := db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(tt.deleteBeforeSlot), tt.batchSize)
require.NoError(t, err)
var startSlotDeleted, endSlotDeleted uint64
if tt.batchSize >= int(tt.deleteBeforeSlot) {
startSlotDeleted = 1
endSlotDeleted = tt.deleteBeforeSlot
} else {
startSlotDeleted = tt.deleteBeforeSlot - uint64(tt.batchSize) + 1
endSlotDeleted = tt.deleteBeforeSlot
}
require.Equal(t, endSlotDeleted-startSlotDeleted+1, uint64(slotsDeleted))
// Verify blocks before given slot/batch are deleted
for i := startSlotDeleted; i < endSlotDeleted; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Check block is deleted
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks), fmt.Sprintf("Expected %d blocks, got %d for slot %d", 0, len(retrievedBlocks), i))
// Verify block does not exist
assert.Equal(t, false, db.HasBlock(ctx, root), fmt.Sprintf("Expected block index to not exist for slot %d", i))
// Verify block parent root does not exist
err = db.db.View(func(tx *bolt.Tx) error {
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
// Verify state is deleted
hasState := db.HasState(ctx, root)
assert.Equal(t, false, hasState)
// Verify state summary is deleted
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, false, hasSummary)
// Verify validator hashes for block roots are deleted
err = db.db.View(func(tx *bolt.Tx) error {
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
}
// Verify slot indices are deleted
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := startSlotDeleted; i < endSlotDeleted; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
}
return nil
})
require.NoError(t, err)
// Verify blocks from expectedLastDeletedSlot till numEpochs still exist
for i := endSlotDeleted; i < slotsPerEpoch*tt.numOfEpochs; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Verify block exists
assert.Equal(t, true, db.HasBlock(ctx, root))
// Verify remaining block parent root exists, except last slot since we store parent roots of each block.
if i < slotsPerEpoch*tt.numOfEpochs-1 {
err = db.db.View(func(tx *bolt.Tx) error {
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to be deleted, slot: %d", i))
return nil
})
require.NoError(t, err)
}
// Verify state exists
hasState := db.HasState(ctx, root)
assert.Equal(t, true, hasState)
// Verify state summary exists
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, true, hasSummary)
// Verify slot indices still exist
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
return nil
})
require.NoError(t, err)
// Verify validator entries still exist
valsActual, err := db.validatorEntries(ctx, root)
require.NoError(t, err)
assert.NotNil(t, valsActual)
// Verify remaining validator hashes for block roots exists
err = db.db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
return nil
})
require.NoError(t, err)
}
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
state.Validators = vals
state.Slot = slot
return nil
})
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, st, r))
states[i] = st
// Verify validator entries are saved to db
valsActual, err := db.validatorEntries(ctx, r)
require.NoError(t, err)
for j, val := range valsActual {
require.DeepEqual(t, vals[j], val)
}
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
// Verify slot indices exist before deletion
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < slotsPerEpoch; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
}
return nil
})
require.NoError(t, err)
// Delete data before slot at epoch 1
require.NoError(t, db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(slotsPerEpoch)))
// Verify blocks from epoch 0 are deleted
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Check block is deleted
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks))
// Verify block does not exist
assert.Equal(t, false, db.HasBlock(ctx, root))
// Verify block parent root does not exist
err = db.db.View(func(tx *bolt.Tx) error {
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
// Verify state is deleted
hasState := db.HasState(ctx, root)
assert.Equal(t, false, hasState)
// Verify state summary is deleted
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, false, hasSummary)
// Verify validator hashes for block roots are deleted
err = db.db.View(func(tx *bolt.Tx) error {
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
}
// Verify slot indices are deleted
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < slotsPerEpoch; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
}
return nil
})
require.NoError(t, err)
// Verify blocks from epochs 1-3 still exist
for i := slotsPerEpoch; i < slotsPerEpoch*4; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Verify block exists
assert.Equal(t, true, db.HasBlock(ctx, root))
// Verify remaining block parent root exists, except last slot since we store parent roots of each block.
if i < slotsPerEpoch*4-1 {
err = db.db.View(func(tx *bolt.Tx) error {
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to be deleted, slot: %d", i))
return nil
})
require.NoError(t, err)
}
// Verify state exists
hasState := db.HasState(ctx, root)
assert.Equal(t, true, hasState)
// Verify state summary exists
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, true, hasSummary)
// Verify slot indices still exist
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
return nil
})
require.NoError(t, err)
// Verify validator entries still exist
valsActual, err := db.validatorEntries(ctx, root)
require.NoError(t, err)
assert.NotNil(t, valsActual)
// Verify remaining validator hashes for block roots exists
err = db.db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
return nil
})
require.NoError(t, err)
}
}
func TestStore_GenesisBlock(t *testing.T) {

View File

@@ -820,25 +820,30 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
 			// no need to construct the validator entries as it is not used here.
 			s, err := s.unmarshalState(ctx, enc, nil)
 			if err != nil {
-				return 0, errors.Wrap(err, "could not unmarshal state")
+				return 0, err
 			}
 			if s == nil || s.IsNil() {
 				return 0, errors.New("state can't be nil")
 			}
 			return s.Slot(), nil
 		}
-		b, err := unmarshalBlock(ctx, enc)
+		b := &ethpb.SignedBeaconBlock{}
+		err := decode(ctx, enc, b)
 		if err != nil {
 			return 0, errors.Wrap(err, "could not unmarshal block")
 		}
-		if err := blocks.BeaconBlockIsNil(b); err != nil {
-			return 0, err
-		}
-		return b.Block().Slot(), nil
+		wsb, err := blocks.NewSignedBeaconBlock(b)
+		if err != nil {
+			return 0, err
+		}
+		if err := blocks.BeaconBlockIsNil(wsb); err != nil {
+			return 0, err
+		}
+		return b.Block.Slot, nil
 	}
 	stateSummary := &ethpb.StateSummary{}
 	if err := decode(ctx, enc, stateSummary); err != nil {
-		return 0, errors.Wrap(err, "could not unmarshal state summary")
+		return 0, err
 	}
 	return stateSummary.Slot, nil
 }

View File

@@ -5,6 +5,7 @@ import (
 	"crypto/rand"
 	"encoding/binary"
 	mathRand "math/rand"
+	"strconv"
 	"testing"
 	"time"
@@ -1069,31 +1070,6 @@ func TestBellatrixState_CanDelete(t *testing.T) {
 	require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
 }

-func TestBellatrixState_CanDeleteWithBlock(t *testing.T) {
-	db := setupDB(t)
-	b := util.NewBeaconBlockBellatrix()
-	b.Block.Slot = 100
-	r, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err := blocks.NewSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, db.SaveBlock(context.Background(), wsb))
-	require.Equal(t, false, db.HasState(context.Background(), r))
-
-	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-	require.NoError(t, st.SetSlot(100))
-	require.NoError(t, db.SaveState(context.Background(), st, r))
-	require.Equal(t, true, db.HasState(context.Background(), r))
-
-	require.NoError(t, db.DeleteState(context.Background(), r))
-
-	savedS, err := db.State(context.Background(), r)
-	require.NoError(t, err)
-	require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
-}
-
 func TestDenebState_CanSaveRetrieve(t *testing.T) {
 	db := setupDB(t)

View File

@@ -16,15 +16,6 @@ import (
 var log = logrus.WithField("prefix", "db-pruner")

-const (
-	// defaultPrunableBatchSize is the number of slots that can be pruned at once.
-	defaultPrunableBatchSize = 32
-	// defaultPruningWindow is the duration of one pruning window.
-	defaultPruningWindow = time.Second * 3
-	// defaultNumBatchesToPrune is the number of batches to prune in one pruning window.
-	defaultNumBatchesToPrune = 15
-)
-
 type ServiceOption func(*Service)

 // WithRetentionPeriod allows the user to specify a different data retention period than the spec default.
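For scale: taken together, the deleted constants bounded deletion throughput at 15 batches of 32 slots per 3-second window, i.e. at most 480 slots (15 mainnet epochs) per window, or 160 slots per second. The replacement below instead deletes everything before the cutoff in a single call.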
@@ -152,17 +143,14 @@ func (p *Service) prune(slot primitives.Slot) error {
 	}).Debug("Pruning chain data")

 	tt := time.Now()
-	numBatches, err := p.pruneBatches(pruneUpto)
-	if err != nil {
-		return errors.Wrap(err, "failed to prune batches")
+	if err := p.db.DeleteHistoricalDataBeforeSlot(p.ctx, pruneUpto); err != nil {
+		return errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)
 	}

 	log.WithFields(logrus.Fields{
 		"prunedUpto":  pruneUpto,
 		"duration":    time.Since(tt),
 		"currentSlot": slot,
-		"batchSize":   defaultPrunableBatchSize,
-		"numBatches":  numBatches,
 	}).Debug("Successfully pruned chain data")

 	// Update pruning checkpoint.
@@ -171,33 +159,6 @@ func (p *Service) prune(slot primitives.Slot) error {
 	return nil
 }

-func (p *Service) pruneBatches(pruneUpto primitives.Slot) (int, error) {
-	ctx, cancel := context.WithTimeout(p.ctx, defaultPruningWindow)
-	defer cancel()
-
-	numBatches := 0
-	for {
-		select {
-		case <-ctx.Done():
-			return numBatches, nil
-		default:
-			for i := 0; i < defaultNumBatchesToPrune; i++ {
-				slotsDeleted, err := p.db.DeleteHistoricalDataBeforeSlot(ctx, pruneUpto, defaultPrunableBatchSize)
-				if err != nil {
-					return 0, errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)
-				}
-
-				// Return if there's nothing to delete.
-				if slotsDeleted == 0 {
-					return numBatches, nil
-				}
-
-				numBatches++
-			}
-		}
-	}
-}
-
 // pruneStartSlotFunc returns the function to determine the start slot to start pruning.
 func pruneStartSlotFunc(retentionEpochs primitives.Epoch) func(primitives.Slot) primitives.Slot {
 	return func(current primitives.Slot) primitives.Slot {
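The start-slot closure presumably subtracts the retention period from the current slot, flooring at genesis; a hypothetical stand-alone equivalent:

    func pruneBefore(current primitives.Slot, retentionEpochs primitives.Epoch) primitives.Slot {
        offset := primitives.Slot(uint64(retentionEpochs) * uint64(params.BeaconConfig().SlotsPerEpoch))
        if current <= offset {
            return 0 // nothing is old enough to prune yet
        }
        return current - offset
    }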

View File

@@ -156,10 +156,6 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
 	if n.parent != nil {
 		parentRoot = n.parent.root
 	}
-	target := [32]byte{}
-	if n.target != nil {
-		target = n.target.root
-	}
 	thisNode := &forkchoice2.Node{
 		Slot:      n.slot,
 		BlockRoot: n.root[:],
@@ -173,7 +169,6 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
 		ExecutionOptimistic: n.optimistic,
 		ExecutionBlockHash:  n.payloadHash[:],
 		Timestamp:           n.timestamp,
-		Target:              target[:],
 	}
 	if n.optimistic {
 		thisNode.Validity = forkchoice2.Optimistic
View File

@@ -252,13 +252,6 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
 	return roots, slots
 }

-func (f *ForkChoice) HighestReceivedBlockRoot() [32]byte {
-	if f.store.highestReceivedNode == nil {
-		return [32]byte{}
-	}
-	return f.store.highestReceivedNode.root
-}
-
 // HighestReceivedBlockSlot returns the highest slot received by the forkchoice
 func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
 	if f.store.highestReceivedNode == nil {

View File

@@ -65,7 +65,6 @@ type FastGetter interface {
 	FinalizedPayloadBlockHash() [32]byte
 	HasNode([32]byte) bool
 	HighestReceivedBlockSlot() primitives.Slot
-	HighestReceivedBlockRoot() [32]byte
 	HighestReceivedBlockDelay() primitives.Slot
 	IsCanonical(root [32]byte) bool
 	IsOptimistic(root [32]byte) (bool, error)

View File

@@ -114,13 +114,6 @@ func (ro *ROForkChoice) HighestReceivedBlockSlot() primitives.Slot {
 	return ro.getter.HighestReceivedBlockSlot()
 }

-// HighestReceivedBlockRoot delegates to the underlying forkchoice call, under a lock.
-func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
-	ro.l.RLock()
-	defer ro.l.RUnlock()
-	return ro.getter.HighestReceivedBlockRoot()
-}
-
 // HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
 func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
 	ro.l.RLock()

View File

@@ -29,7 +29,6 @@ const (
 	unrealizedJustifiedPayloadBlockHashCalled
 	nodeCountCalled
 	highestReceivedBlockSlotCalled
-	highestReceivedBlockRootCalled
 	highestReceivedBlockDelayCalled
 	receivedBlocksLastEpochCalled
 	weightCalled
@@ -253,11 +252,6 @@ func (ro *mockROForkchoice) HighestReceivedBlockSlot() primitives.Slot {
 	return 0
 }

-func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
-	ro.calls = append(ro.calls, highestReceivedBlockRootCalled)
-	return [32]byte{}
-}
-
 func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
 	ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
 	return 0

View File

@@ -122,7 +122,6 @@ type BeaconNode struct {
 	BlobStorageOptions []filesystem.BlobStorageOption
 	verifyInitWaiter   *verification.InitializerWaiter
 	syncChecker        *initialsync.SyncChecker
-	slasherEnabled     bool
 }

 // New creates a new node instance, sets up configuration options, and registers
@@ -160,7 +159,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
 		serviceFlagOpts:     &serviceFlagOpts{},
 		initialSyncComplete: make(chan struct{}),
 		syncChecker:         &initialsync.SyncChecker{},
-		slasherEnabled:      cliCtx.Bool(flags.SlasherFlag.Name),
 	}

 	for _, opt := range opts {
@@ -344,7 +342,7 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
 		return errors.Wrap(err, "could not register slashing pool service")
 	}

-	log.WithField("enabled", beacon.slasherEnabled).Debugln("Registering Slasher Service")
+	log.Debugln("Registering Slasher Service")
 	if err := beacon.registerSlasherService(); err != nil {
 		return errors.Wrap(err, "could not register slasher service")
 	}
@@ -589,7 +587,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
 }

 func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
-	if !b.slasherEnabled {
+	if !features.Get().EnableSlasher {
 		return nil
 	}
 	baseDir := cliCtx.String(cmd.DataDirFlag.Name)
@@ -777,7 +775,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
 		blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
 		blockchain.WithPayloadIDCache(b.payloadIDCache),
 		blockchain.WithSyncChecker(b.syncChecker),
-		blockchain.WithSlasherEnabled(b.slasherEnabled),
 	)

 	blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -862,7 +859,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
 		regularsync.WithBlobStorage(b.BlobStorage),
 		regularsync.WithVerifierWaiter(b.verifyInitWaiter),
 		regularsync.WithAvailableBlocker(bFillStore),
-		regularsync.WithSlasherEnabled(b.slasherEnabled),
 	)
 	return b.services.RegisterService(rs)
 }
@@ -891,7 +887,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
 }

 func (b *BeaconNode) registerSlasherService() error {
-	if !b.slasherEnabled {
+	if !features.Get().EnableSlasher {
 		return nil
 	}
 	var chainService *blockchain.Service
@@ -938,7 +934,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
 	}

 	var slasherService *slasher.Service
-	if b.slasherEnabled {
+	if features.Get().EnableSlasher {
 		if err := b.services.FetchService(&slasherService); err != nil {
 			return err
 		}

View File

@@ -9,7 +9,6 @@ import (
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
-	log "github.com/sirupsen/logrus"
 )

 // SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
@@ -61,8 +60,7 @@ func (c *AttCaches) UnaggregatedAttestations() ([]ethpb.Att, error) {
 	for _, att := range unAggregatedAtts {
 		seen, err := c.hasSeenBit(att)
 		if err != nil {
-			log.WithError(err).Debug("Could not check if attestations bits have been seen")
-			continue
+			return nil, err
 		}
 		if !seen {
 			atts = append(atts, att.Clone())
@@ -165,13 +163,7 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
 		if att == nil || att.IsNil() || att.IsAggregated() {
 			continue
 		}
-		seen, err := c.hasSeenBit(att)
-		if err != nil {
-			log.WithError(err).Debug("Could not check if attestations bits have been seen")
-			delete(c.unAggregatedAtt, r)
-			count++
-		}
-		if seen {
+		if seen, err := c.hasSeenBit(att); err == nil && seen {
 			delete(c.unAggregatedAtt, r)
 			count++
 		}

View File

@@ -33,9 +33,6 @@ var (
 	// AggregateAttestationMap maps the fork-version to the underlying data type for that
 	// particular fork period.
 	AggregateAttestationMap map[[4]byte]func() (ethpb.SignedAggregateAttAndProof, error)
-	// AttesterSlashingMap maps the fork-version to the underlying data type for that particular
-	// fork period.
-	AttesterSlashingMap map[[4]byte]func() (ethpb.AttSlashing, error)
 )

 // InitializeDataMaps initializes all the relevant object maps. This function is called to
@@ -154,29 +151,4 @@ func InitializeDataMaps() {
 			return &ethpb.SignedAggregateAttestationAndProofElectra{}, nil
 		},
 	}
-
-	// Reset our aggregate attestation map.
-	AttesterSlashingMap = map[[4]byte]func() (ethpb.AttSlashing, error){
-		bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): func() (ethpb.AttSlashing, error) {
-			return &ethpb.AttesterSlashing{}, nil
-		},
-		bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (ethpb.AttSlashing, error) {
-			return &ethpb.AttesterSlashing{}, nil
-		},
-		bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (ethpb.AttSlashing, error) {
-			return &ethpb.AttesterSlashing{}, nil
-		},
-		bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (ethpb.AttSlashing, error) {
-			return &ethpb.AttesterSlashing{}, nil
-		},
-		bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (ethpb.AttSlashing, error) {
-			return &ethpb.AttesterSlashing{}, nil
-		},
-		bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.AttSlashing, error) {
-			return &ethpb.AttesterSlashingElectra{}, nil
-		},
-		bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (ethpb.AttSlashing, error) {
-			return &ethpb.AttesterSlashingElectra{}, nil
-		},
-	}
 }
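Lookups into these maps key on the 4-byte fork version of the relevant epoch; a hypothetical caller resolving a constructor would do roughly:

    fn, ok := AggregateAttestationMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
    if !ok {
        return errors.New("no type registered for fork version")
    }
    msg, err := fn()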

View File

@@ -76,13 +76,6 @@ func TestInitializeDataMaps(t *testing.T) {
 				require.NoError(t, err)
 				assert.Equal(t, version.Phase0, agg.Version())
 			}
-
-			attSlashFunc, ok := AttesterSlashingMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
-			assert.Equal(t, tt.exists, ok)
-			if tt.exists {
-				attSlash, err := attSlashFunc()
-				require.NoError(t, err)
-				assert.Equal(t, version.Phase0, attSlash.Version())
-			}
 		})
 	}
 }

View File

@@ -190,7 +190,6 @@ func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
 			Balance:             fmt.Sprintf("%d", n.Balance),
 			ExecutionOptimistic: n.ExecutionOptimistic,
 			TimeStamp:           fmt.Sprintf("%d", n.Timestamp),
-			Target:              fmt.Sprintf("%#x", n.Target),
 		},
 	}
 }

View File

@@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "handlers.go",
+        "helpers.go",
         "server.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/light-client",
@@ -16,9 +17,11 @@ go_library(
         "//beacon-chain/db:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/rpc/lookup:go_default_library",
+        "//beacon-chain/state:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
+        "//consensus-types/primitives:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/api"
 	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
-	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
 	"github.com/prysmaticlabs/prysm/v5/config/features"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -183,31 +182,18 @@ func (s *Server) GetLightClientFinalityUpdate(w http.ResponseWriter, req *http.R
 		return
 	}

-	update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock, finalizedBlock)
+	update, err := newLightClientFinalityUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock, finalizedBlock)
 	if err != nil {
 		httputil.HandleError(w, "Could not get light client finality update: "+err.Error(), http.StatusInternalServerError)
 		return
 	}

-	if httputil.RespondWithSsz(req) {
-		ssz, err := update.MarshalSSZ()
-		if err != nil {
-			httputil.HandleError(w, "Could not marshal finality update to SSZ: "+err.Error(), http.StatusInternalServerError)
-			return
-		}
-		httputil.WriteSsz(w, ssz, "light_client_finality_update.ssz")
-	} else {
-		updateStruct, err := structs.LightClientFinalityUpdateFromConsensus(update)
-		if err != nil {
-			httputil.HandleError(w, "Could not convert light client finality update to API struct: "+err.Error(), http.StatusInternalServerError)
-			return
-		}
-		response := &structs.LightClientFinalityUpdateResponse{
-			Version: version.String(attestedState.Version()),
-			Data:    updateStruct,
-		}
-		httputil.WriteJson(w, response)
+	response := &structs.LightClientFinalityUpdateResponse{
+		Version: version.String(attestedState.Version()),
+		Data:    update,
 	}
+	httputil.WriteJson(w, response)
 }

 // GetLightClientOptimisticUpdate - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/optimistic_update.yaml
@@ -246,31 +232,18 @@ func (s *Server) GetLightClientOptimisticUpdate(w http.ResponseWriter, req *http
 		return
 	}

-	update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock)
+	update, err := newLightClientOptimisticUpdateFromBeaconState(ctx, s.ChainInfoFetcher.CurrentSlot(), st, block, attestedState, attestedBlock)
 	if err != nil {
 		httputil.HandleError(w, "Could not get light client optimistic update: "+err.Error(), http.StatusInternalServerError)
 		return
 	}

-	if httputil.RespondWithSsz(req) {
-		ssz, err := update.MarshalSSZ()
-		if err != nil {
-			httputil.HandleError(w, "Could not marshal optimistic update to SSZ: "+err.Error(), http.StatusInternalServerError)
-			return
-		}
-		httputil.WriteSsz(w, ssz, "light_client_optimistic_update.ssz")
-	} else {
-		updateStruct, err := structs.LightClientOptimisticUpdateFromConsensus(update)
-		if err != nil {
-			httputil.HandleError(w, "Could not convert light client optimistic update to API struct: "+err.Error(), http.StatusInternalServerError)
-			return
-		}
-		response := &structs.LightClientOptimisticUpdateResponse{
-			Version: version.String(attestedState.Version()),
-			Data:    updateStruct,
-		}
-		httputil.WriteJson(w, response)
+	response := &structs.LightClientOptimisticUpdateResponse{
+		Version: version.String(attestedState.Version()),
+		Data:    update,
 	}
+	httputil.WriteJson(w, response)
 }

 // suitableBlock returns the latest block that satisfies all criteria required for creating a new update

View File

@@ -1105,226 +1105,116 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
config := params.BeaconConfig()
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
t.Run("altair", func(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
attestedState, err := util.NewBeaconStateAltair()
require.NoError(t, err)
err = attestedState.SetSlot(slot.Sub(1))
require.NoError(t, err)
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
Epoch: config.AltairForkEpoch - 10,
Root: make([]byte, 32),
}))
parent := util.NewBeaconBlockAltair()
parent.Block.Slot = slot.Sub(1)
signedParent, err := blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
parentHeader, err := signedParent.Header()
require.NoError(t, err)
attestedHeader := parentHeader.Header
err = attestedState.SetLatestBlockHeader(attestedHeader)
require.NoError(t, err)
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
parent.Block.StateRoot = attestedStateRoot[:]
signedParent, err = blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
err = st.SetSlot(slot)
require.NoError(t, err)
parentRoot, err := signedParent.Block().HashTreeRoot()
require.NoError(t, err)
block := util.NewBeaconBlockAltair()
block.Block.Slot = slot
block.Block.ParentRoot = parentRoot[:]
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
h, err := signedBlock.Header()
require.NoError(t, err)
err = st.SetLatestBlockHeader(h.Header)
require.NoError(t, err)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
block.Block.StateRoot = stateRoot[:]
signedBlock, err = blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
mockBlocker := &testutil.MockBlocker{
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
parentRoot: signedParent,
root: signedBlock,
},
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
slot.Sub(1): signedParent,
slot: signedBlock,
},
}
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
root: true,
}}
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
slot.Sub(1): attestedState,
slot: st,
}},
Blocker: mockBlocker,
HeadFetcher: mockChainService,
ChainInfoFetcher: mockChainInfoFetcher,
}
request := httptest.NewRequest("GET", "http://foo.com", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetLightClientFinalityUpdate(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
var resp *structs.LightClientUpdateResponse
err = json.Unmarshal(writer.Body.Bytes(), &resp)
require.NoError(t, err)
var respHeader structs.LightClientHeader
err = json.Unmarshal(resp.Data.AttestedHeader, &respHeader)
require.NoError(t, err)
require.Equal(t, "altair", resp.Version)
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), respHeader.Beacon.BodyRoot)
require.NotNil(t, resp.Data)
})
t.Run("altair SSZ", func(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
attestedState, err := util.NewBeaconStateAltair()
require.NoError(t, err)
err = attestedState.SetSlot(slot.Sub(1))
require.NoError(t, err)
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
Epoch: config.AltairForkEpoch - 10,
Root: make([]byte, 32),
}))
parent := util.NewBeaconBlockAltair()
parent.Block.Slot = slot.Sub(1)
signedParent, err := blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
parentHeader, err := signedParent.Header()
require.NoError(t, err)
attestedHeader := parentHeader.Header
err = attestedState.SetLatestBlockHeader(attestedHeader)
require.NoError(t, err)
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
parent.Block.StateRoot = attestedStateRoot[:]
signedParent, err = blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
err = st.SetSlot(slot)
require.NoError(t, err)
parentRoot, err := signedParent.Block().HashTreeRoot()
require.NoError(t, err)
block := util.NewBeaconBlockAltair()
block.Block.Slot = slot
block.Block.ParentRoot = parentRoot[:]
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
h, err := signedBlock.Header()
require.NoError(t, err)
err = st.SetLatestBlockHeader(h.Header)
require.NoError(t, err)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
block.Block.StateRoot = stateRoot[:]
signedBlock, err = blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
mockBlocker := &testutil.MockBlocker{
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
parentRoot: signedParent,
root: signedBlock,
},
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
slot.Sub(1): signedParent,
slot: signedBlock,
},
}
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
root: true,
}}
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
slot.Sub(1): attestedState,
slot: st,
}},
Blocker: mockBlocker,
HeadFetcher: mockChainService,
ChainInfoFetcher: mockChainInfoFetcher,
}
request := httptest.NewRequest("GET", "http://foo.com", nil)
request.Header.Add("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetLightClientFinalityUpdate(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
var resp pb.LightClientFinalityUpdateAltair
err = resp.UnmarshalSSZ(writer.Body.Bytes())
require.NoError(t, err)
require.Equal(t, attestedHeader.Slot, resp.AttestedHeader.Beacon.Slot)
require.DeepEqual(t, attestedHeader.BodyRoot, resp.AttestedHeader.Beacon.BodyRoot)
})
}
func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
@@ -1445,114 +1335,6 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
require.NotNil(t, resp.Data)
})
t.Run("altair SSZ", func(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
attestedState, err := util.NewBeaconStateAltair()
require.NoError(t, err)
err = attestedState.SetSlot(slot.Sub(1))
require.NoError(t, err)
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
Epoch: config.AltairForkEpoch - 10,
Root: make([]byte, 32),
}))
parent := util.NewBeaconBlockAltair()
parent.Block.Slot = slot.Sub(1)
signedParent, err := blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
parentHeader, err := signedParent.Header()
require.NoError(t, err)
attestedHeader := parentHeader.Header
err = attestedState.SetLatestBlockHeader(attestedHeader)
require.NoError(t, err)
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
parent.Block.StateRoot = attestedStateRoot[:]
signedParent, err = blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
err = st.SetSlot(slot)
require.NoError(t, err)
parentRoot, err := signedParent.Block().HashTreeRoot()
require.NoError(t, err)
block := util.NewBeaconBlockAltair()
block.Block.Slot = slot
block.Block.ParentRoot = parentRoot[:]
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
h, err := signedBlock.Header()
require.NoError(t, err)
err = st.SetLatestBlockHeader(h.Header)
require.NoError(t, err)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
block.Block.StateRoot = stateRoot[:]
signedBlock, err = blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
mockBlocker := &testutil.MockBlocker{
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
parentRoot: signedParent,
root: signedBlock,
},
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
slot.Sub(1): signedParent,
slot: signedBlock,
},
}
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
root: true,
}}
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
slot.Sub(1): attestedState,
slot: st,
}},
Blocker: mockBlocker,
HeadFetcher: mockChainService,
ChainInfoFetcher: mockChainInfoFetcher,
}
request := httptest.NewRequest("GET", "http://foo.com", nil)
request.Header.Add("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetLightClientOptimisticUpdate(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
var resp pb.LightClientOptimisticUpdateAltair
err = resp.UnmarshalSSZ(writer.Body.Bytes())
require.NoError(t, err)
require.Equal(t, resp.AttestedHeader.Beacon.Slot, attestedHeader.Slot)
require.DeepEqual(t, resp.AttestedHeader.Beacon.BodyRoot, attestedHeader.BodyRoot)
})
t.Run("capella", func(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(config.CapellaForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
@@ -1663,114 +1445,6 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
require.NotNil(t, resp.Data)
})
t.Run("capella SSZ", func(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(config.CapellaForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
attestedState, err := util.NewBeaconStateCapella()
require.NoError(t, err)
err = attestedState.SetSlot(slot.Sub(1))
require.NoError(t, err)
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
Epoch: config.AltairForkEpoch - 10,
Root: make([]byte, 32),
}))
parent := util.NewBeaconBlockCapella()
parent.Block.Slot = slot.Sub(1)
signedParent, err := blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
parentHeader, err := signedParent.Header()
require.NoError(t, err)
attestedHeader := parentHeader.Header
err = attestedState.SetLatestBlockHeader(attestedHeader)
require.NoError(t, err)
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
parent.Block.StateRoot = attestedStateRoot[:]
signedParent, err = blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
st, err := util.NewBeaconStateCapella()
require.NoError(t, err)
err = st.SetSlot(slot)
require.NoError(t, err)
parentRoot, err := signedParent.Block().HashTreeRoot()
require.NoError(t, err)
block := util.NewBeaconBlockCapella()
block.Block.Slot = slot
block.Block.ParentRoot = parentRoot[:]
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
h, err := signedBlock.Header()
require.NoError(t, err)
err = st.SetLatestBlockHeader(h.Header)
require.NoError(t, err)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
block.Block.StateRoot = stateRoot[:]
signedBlock, err = blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
mockBlocker := &testutil.MockBlocker{
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
parentRoot: signedParent,
root: signedBlock,
},
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
slot.Sub(1): signedParent,
slot: signedBlock,
},
}
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
root: true,
}}
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
slot.Sub(1): attestedState,
slot: st,
}},
Blocker: mockBlocker,
HeadFetcher: mockChainService,
ChainInfoFetcher: mockChainInfoFetcher,
}
request := httptest.NewRequest("GET", "http://foo.com", nil)
request.Header.Add("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetLightClientOptimisticUpdate(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
var resp pb.LightClientOptimisticUpdateCapella
err = resp.UnmarshalSSZ(writer.Body.Bytes())
require.NoError(t, err)
require.Equal(t, resp.AttestedHeader.Beacon.Slot, attestedHeader.Slot)
require.DeepEqual(t, resp.AttestedHeader.Beacon.BodyRoot, attestedHeader.BodyRoot)
})
t.Run("deneb", func(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(config.DenebForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
@@ -1880,114 +1554,6 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), respHeader.Beacon.BodyRoot)
require.NotNil(t, resp.Data)
})
t.Run("deneb SSZ", func(t *testing.T) {
ctx := context.Background()
slot := primitives.Slot(config.DenebForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
attestedState, err := util.NewBeaconStateDeneb()
require.NoError(t, err)
err = attestedState.SetSlot(slot.Sub(1))
require.NoError(t, err)
require.NoError(t, attestedState.SetFinalizedCheckpoint(&pb.Checkpoint{
Epoch: config.AltairForkEpoch - 10,
Root: make([]byte, 32),
}))
parent := util.NewBeaconBlockDeneb()
parent.Block.Slot = slot.Sub(1)
signedParent, err := blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
parentHeader, err := signedParent.Header()
require.NoError(t, err)
attestedHeader := parentHeader.Header
err = attestedState.SetLatestBlockHeader(attestedHeader)
require.NoError(t, err)
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
parent.Block.StateRoot = attestedStateRoot[:]
signedParent, err = blocks.NewSignedBeaconBlock(parent)
require.NoError(t, err)
st, err := util.NewBeaconStateDeneb()
require.NoError(t, err)
err = st.SetSlot(slot)
require.NoError(t, err)
parentRoot, err := signedParent.Block().HashTreeRoot()
require.NoError(t, err)
block := util.NewBeaconBlockDeneb()
block.Block.Slot = slot
block.Block.ParentRoot = parentRoot[:]
for i := uint64(0); i < config.SyncCommitteeSize; i++ {
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
h, err := signedBlock.Header()
require.NoError(t, err)
err = st.SetLatestBlockHeader(h.Header)
require.NoError(t, err)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
// get a new signed block so the root is updated with the new state root
block.Block.StateRoot = stateRoot[:]
signedBlock, err = blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
mockBlocker := &testutil.MockBlocker{
RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{
parentRoot: signedParent,
root: signedBlock,
},
SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
slot.Sub(1): signedParent,
slot: signedBlock,
},
}
mockChainService := &mock.ChainService{Optimistic: true, Slot: &slot, State: st, FinalizedRoots: map[[32]byte]bool{
root: true,
}}
mockChainInfoFetcher := &mock.ChainService{Slot: &slot}
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{
slot.Sub(1): attestedState,
slot: st,
}},
Blocker: mockBlocker,
HeadFetcher: mockChainService,
ChainInfoFetcher: mockChainInfoFetcher,
}
request := httptest.NewRequest("GET", "http://foo.com", nil)
request.Header.Add("Accept", "application/octet-stream")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetLightClientOptimisticUpdate(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
var resp pb.LightClientOptimisticUpdateDeneb
err = resp.UnmarshalSSZ(writer.Body.Bytes())
require.NoError(t, err)
require.Equal(t, resp.AttestedHeader.Beacon.Slot, attestedHeader.Slot)
require.DeepEqual(t, resp.AttestedHeader.Beacon.BodyRoot, attestedHeader.BodyRoot)
})
}
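The SSZ test cases above all reduce to a round-trip check: the bytes the handler writes must unmarshal back into the expected update type with the attested header intact. A hedged sketch of that assertion, where sszObject stands in for types with generated fastssz methods:

package ssztest

// sszObject is a stand-in for types with generated MarshalSSZ/UnmarshalSSZ.
type sszObject interface {
	MarshalSSZ() ([]byte, error)
	UnmarshalSSZ([]byte) error
}

// roundTrip marshals one object and unmarshals the bytes into another,
// mirroring what the handler tests assert about the response body.
func roundTrip(in, out sszObject) error {
	raw, err := in.MarshalSSZ()
	if err != nil {
		return err
	}
	return out.UnmarshalSSZ(raw)
}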
func TestLightClientHandler_GetLightClientEventBlock(t *testing.T) {

View File

@@ -0,0 +1,45 @@
package lightclient
import (
"context"

"github.com/prysmaticlabs/prysm/v5/api/server/structs"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
func newLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
) (*structs.LightClientFinalityUpdate, error) {
result, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return nil, err
}
return structs.LightClientFinalityUpdateFromConsensus(result)
}
func newLightClientOptimisticUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
) (*structs.LightClientOptimisticUpdate, error) {
result, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock)
if err != nil {
return nil, err
}
return structs.LightClientOptimisticUpdateFromConsensus(result)
}
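The two unexported wrappers above give the handlers a single seam where consensus-layer updates become API structs. A hedged, generic sketch of that compute-then-convert shape (names illustrative):

package lcwrap

// wrap runs a consensus-layer constructor and maps its result into an API
// struct, so callers never touch consensus types directly.
func wrap[C, A any](compute func() (C, error), convert func(C) (A, error)) (A, error) {
	var zero A
	c, err := compute()
	if err != nil {
		return zero, err
	}
	return convert(c)
}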

View File

@@ -39,6 +39,13 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
} else {
atts = vs.AttPool.AggregatedAttestations()
atts = vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
uAtts, err := vs.AttPool.UnaggregatedAttestations()
if err != nil {
return nil, errors.Wrap(err, "could not get unaggregated attestations")
}
uAtts = vs.validateAndDeleteAttsInPool(ctx, latestState, uAtts)
atts = append(atts, uAtts...)
}
// Checking the state's version here will give the wrong result if the last slot of Deneb is missed.
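The hunk above changes block packing to also drain validated unaggregated attestations instead of packing aggregates alone. A hedged sketch of the merge, where validate stands in for validateAndDeleteAttsInPool:

package attpack

// Att is a stand-in for the node's attestation interface.
type Att interface{ GetData() any }

// packAttestations keeps validated aggregates and appends validated
// unaggregated attestations, so otherwise-missed votes still reach the block.
func packAttestations(aggregated, unaggregated []Att, validate func([]Att) []Att) []Att {
	atts := validate(aggregated)
	return append(atts, validate(unaggregated)...)
}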

View File

@@ -121,10 +121,9 @@ func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.Bea
return nil, err
}
fRoot := bytesutil.ToBytes32(c.Root)
st := fState
// Resume as genesis state if last finalized root is zero hashes.
if fRoot == params.BeaconConfig().ZeroHash {
st, err = s.beaconDB.GenesisState(ctx)
st, err := s.beaconDB.GenesisState(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not get genesis state")
}
@@ -133,13 +132,10 @@ func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.Bea
if err != nil {
return nil, stderrors.Join(ErrNoGenesisBlock, err)
}
fRoot = gbr
if err := s.SaveState(ctx, gbr, st); err != nil {
return nil, errors.Wrap(err, "could not save genesis state")
}
return st, s.SaveState(ctx, gbr, st)
}
if st == nil || st.IsNil() {
if fState == nil || fState.IsNil() {
return nil, errors.New("finalized state is nil")
}
@@ -149,22 +145,20 @@ func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.Bea
}
}()
s.finalizedInfo = &finalizedInfo{slot: st.Slot(), root: fRoot, state: st.Copy()}
populatePubkeyCache(ctx, st)
return st, nil
}
s.finalizedInfo = &finalizedInfo{slot: fState.Slot(), root: fRoot, state: fState.Copy()}
fEpoch := slots.ToEpoch(fState.Slot())
func populatePubkeyCache(ctx context.Context, st state.BeaconState) {
epoch := slots.ToEpoch(st.Slot())
// Pre-populate the pubkey cache with the validator public keys from the finalized state.
// This process takes about 30 seconds on mainnet with 450,000 validators.
go populatePubkeyCacheOnce.Do(func() {
log.Debug("Populating pubkey cache")
start := time.Now()
if err := st.ReadFromEveryValidator(func(_ int, val state.ReadOnlyValidator) error {
if err := fState.ReadFromEveryValidator(func(_ int, val state.ReadOnlyValidator) error {
if ctx.Err() != nil {
return ctx.Err()
}
// Do not cache for non-active validators.
if !helpers.IsActiveValidatorUsingTrie(val, epoch) {
if !helpers.IsActiveValidatorUsingTrie(val, fEpoch) {
return nil
}
pub := val.PublicKey()
@@ -175,6 +169,8 @@ func populatePubkeyCache(ctx context.Context, st state.BeaconState) {
}
log.WithField("duration", time.Since(start)).Debug("Done populating pubkey cache")
})
return fState, nil
}
// SaveFinalizedState saves the finalized slot, root and state into memory to be used by state gen service.
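The refactor above pulls pubkey-cache warming out of Resume into populatePubkeyCache, still guarded by a sync.Once and run in the background so startup isn't blocked by a roughly 30-second walk over validators. A hedged sketch of that pattern, with readAll and cache standing in for ReadFromEveryValidator and the pubkey cache:

package statecache

import (
	"log"
	"sync"
	"time"
)

var populateOnce sync.Once

// populateCache warms the cache exactly once, off the caller's hot path.
func populateCache(readAll func(visit func(pub [48]byte) error) error, cache func([48]byte)) {
	go populateOnce.Do(func() {
		start := time.Now()
		if err := readAll(func(pub [48]byte) error {
			cache(pub)
			return nil
		}); err != nil {
			log.Printf("populating pubkey cache: %v", err)
			return
		}
		log.Printf("done populating pubkey cache in %s", time.Since(start))
	})
}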

View File

@@ -87,8 +87,6 @@ func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.C
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
case p2p.AggregateAndProofSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
case p2p.AttesterSlashingSubnetTopicFormat:
return extractDataTypeFromTypeMap(types.AttesterSlashingMap, digest, clock)
}
return nil, nil
}
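With the attester-slashing case gone, the switch above dispatches only blocks, metadata, attestations, and aggregates, each through a fork-digest-keyed type map. A hedged sketch of that lookup (digestKey and the map layout are illustrative, not Prysm's actual types):

package topictypes

import "errors"

// digestKey is a stand-in for the 4-byte fork digest.
type digestKey [4]byte

// extractFromTypeMap returns a fresh message value for the given digest,
// mirroring the shape of extractDataTypeFromTypeMap.
func extractFromTypeMap[T any](m map[digestKey]func() T, d digestKey) (T, error) {
	var zero T
	ctor, ok := m[d]
	if !ok {
		return zero, errors.New("no message type registered for digest")
	}
	return ctor(), nil
}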

View File

@@ -137,14 +137,13 @@ func TestExtractDataType(t *testing.T) {
chain blockchain.ChainInfoFetcher
}
tests := []struct {
name string
args args
wantBlock interfaces.ReadOnlySignedBeaconBlock
wantMd metadata.Metadata
wantAtt ethpb.Att
wantAggregate ethpb.SignedAggregateAttAndProof
wantAttSlashing ethpb.AttSlashing
wantErr bool
name string
args args
wantBlock interfaces.ReadOnlySignedBeaconBlock
wantMd metadata.Metadata
wantAtt ethpb.Att
wantAggregate ethpb.SignedAggregateAttAndProof
wantErr bool
}{
{
name: "no digest",
@@ -157,11 +156,10 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantAttSlashing: &ethpb.AttesterSlashing{},
wantErr: false,
wantMd: wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "invalid digest",
@@ -169,12 +167,11 @@ func TestExtractDataType(t *testing.T) {
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantAttSlashing: nil,
wantErr: true,
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantErr: true,
},
{
name: "non existent digest",
@@ -182,12 +179,11 @@ func TestExtractDataType(t *testing.T) {
digest: []byte{0x00, 0x01, 0x02, 0x03},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantAttSlashing: nil,
wantErr: true,
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantErr: true,
},
{
name: "genesis fork version",
@@ -200,10 +196,9 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantAttSlashing: &ethpb.AttesterSlashing{},
wantErr: false,
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "altair fork version",
@@ -216,11 +211,10 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantAttSlashing: &ethpb.AttesterSlashing{},
wantErr: false,
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "bellatrix fork version",
@@ -233,11 +227,10 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantAttSlashing: &ethpb.AttesterSlashing{},
wantErr: false,
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "capella fork version",
@@ -250,11 +243,10 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantAttSlashing: &ethpb.AttesterSlashing{},
wantErr: false,
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "deneb fork version",
@@ -267,11 +259,10 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantAttSlashing: &ethpb.AttesterSlashing{},
wantErr: false,
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantErr: false,
},
{
name: "electra fork version",
@@ -284,11 +275,10 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.SingleAttestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProofElectra{},
wantAttSlashing: &ethpb.AttesterSlashingElectra{},
wantErr: false,
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.SingleAttestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProofElectra{},
wantErr: false,
},
{
name: "fulu fork version",
@@ -301,11 +291,10 @@ func TestExtractDataType(t *testing.T) {
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.SingleAttestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProofElectra{},
wantAttSlashing: &ethpb.AttesterSlashingElectra{},
wantErr: false,
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.SingleAttestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProofElectra{},
wantErr: false,
},
}
for _, tt := range tests {
@@ -334,14 +323,6 @@ func TestExtractDataType(t *testing.T) {
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
}
gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("attester slashing: error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotAttSlashing, tt.wantAttSlashing) {
t.Errorf("attester slashin: got = %v, want %v", gotAttSlashing, tt.wantAttSlashing)
}
})
}
}

View File

@@ -38,7 +38,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/leaky-bucket:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -2,7 +2,6 @@ package initialsync
import (
"context"
"encoding/hex"
"fmt"
"sort"
"strings"
@@ -25,7 +24,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
p2ppb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -69,8 +67,6 @@ var (
// Period to calculate expected limit for a single peer.
var blockLimiterPeriod = 30 * time.Second
type isBannedBlock func(root [32]byte) bool
// blocksFetcherConfig is a config to setup the block fetcher.
type blocksFetcherConfig struct {
clock *startup.Clock
@@ -105,7 +101,6 @@ type blocksFetcher struct {
capacityWeight float64 // how remaining capacity affects peer selection
mode syncMode // allows to use fetcher in different sync scenarios
quit chan struct{} // termination notifier
isBannedBlock isBannedBlock
}
// peerLock restricts fetcher actions on per peer basis. Currently, used for rate limiting.
@@ -131,13 +126,6 @@ type fetchRequestResponse struct {
err error
}
// set in init()
var holeskyBadRoot [32]byte
func isHoleskyBannedBlock(root [32]byte) bool {
return root == holeskyBadRoot
}
// newBlocksFetcher creates ready to use fetcher.
func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetcher {
blockBatchLimit := maxBatchLimit()
@@ -172,7 +160,6 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
capacityWeight: capacityWeight,
mode: cfg.mode,
quit: make(chan struct{}),
isBannedBlock: isHoleskyBannedBlock,
}
}
@@ -370,13 +357,6 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
log.WithField("peer", p).WithError(err).Debug("invalid BeaconBlocksByRange response")
continue
}
if f.isBannedBlock != nil {
for _, b := range robs {
if f.isBannedBlock(b.Block.Root()) {
return nil, p, prysmsync.ErrInvalidFetchedData
}
}
}
return robs, p, err
}
return nil, "", errNoPeersAvailable
@@ -747,11 +727,3 @@ func dedupPeers(peers []peer.ID) []peer.ID {
}
return newPeerList
}
func init() {
bytes, err := hex.DecodeString("2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359")
if err != nil {
panic(err)
}
holeskyBadRoot = bytesutil.ToBytes32(bytes)
}
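For reference, the guard deleted above injected a root predicate into the fetcher and rejected any fetched batch containing the banned Holesky root. A hedged reconstruction of that hook shape from the removed lines:

package fetcher

import "errors"

var errInvalidFetchedData = errors.New("invalid data returned from peer")

// block is a stand-in for a fetched read-only block with a cached root.
type block struct{ root [32]byte }

// filterBatch rejects the whole batch if any block's root is banned;
// isBanned played the role of isHoleskyBannedBlock.
func filterBatch(batch []block, isBanned func([32]byte) bool) ([]block, error) {
	if isBanned == nil {
		return batch, nil
	}
	for _, b := range batch {
		if isBanned(b.root) {
			return nil, errInvalidFetchedData
		}
	}
	return batch, nil
}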

View File

@@ -125,7 +125,6 @@ func (s *Service) syncToNonFinalizedEpoch(ctx context.Context, genesis time.Time
if err != nil {
return err
}
for data := range queue.fetchedData {
s.processFetchedDataRegSync(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
}
@@ -170,7 +169,6 @@ func (s *Service) processFetchedDataRegSync(
"firstSlot": data.bwb[0].Block.Block().Slot(),
"firstUnprocessed": bwb[0].Block.Block().Slot(),
}
for _, b := range bwb {
if err := avs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")

View File

@@ -188,11 +188,3 @@ func WithAvailableBlocker(avb coverage.AvailableBlocker) Option {
return nil
}
}
// WithSlasherEnabled configures the sync package to support slashing detection.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
s.slasherEnabled = enabled
return nil
}
}

View File

@@ -140,7 +140,8 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
data := att.GetData()
// This is an important validation before retrieving attestation pre state to defend against
// attestation's target intentionally referencing a checkpoint that's long ago.
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
return
@@ -168,57 +169,35 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
return
}
// Decide if the attestation is an Electra SingleAttestation or a Phase0 unaggregated attestation
var (
attForValidation ethpb.Att
broadcastAtt ethpb.Att
eventType feed.EventType
eventData interface{}
)
var singleAtt *ethpb.SingleAttestation
if att.Version() >= version.Electra {
singleAtt, ok := att.(*ethpb.SingleAttestation)
var ok bool
singleAtt, ok = att.(*ethpb.SingleAttestation)
if !ok {
log.Debugf("Attestation has wrong type (expected %T, got %T)", &ethpb.SingleAttestation{}, att)
return
}
// Convert Electra SingleAttestation to unaggregated ElectraAttestation. This is needed because many parts of the codebase assume that attestations have a certain structure and SingleAttestation violates these assumptions.
attForValidation = singleAtt.ToAttestationElectra(committee)
broadcastAtt = singleAtt
eventType = operation.SingleAttReceived
eventData = &operation.SingleAttReceivedData{
Attestation: singleAtt,
}
} else {
// Phase0 attestation
attForValidation = att
broadcastAtt = att
eventType = operation.UnaggregatedAttReceived
eventData = &operation.UnAggregatedAttReceivedData{
Attestation: att,
}
att = singleAtt.ToAttestationElectra(committee)
}
valid, err = s.validateUnaggregatedAttWithState(ctx, attForValidation, preState)
valid, err = s.validateUnaggregatedAttWithState(ctx, att, preState)
if err != nil {
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
return
}
if valid == pubsub.ValidationAccept {
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.attestationCache.Add(attForValidation); err != nil {
if err = s.cfg.attestationCache.Add(att); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
return
}
} else {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(attForValidation); err != nil {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
return
}
}
s.setSeenCommitteeIndicesSlot(data.Slot, attForValidation.GetCommitteeIndex(), attForValidation.GetAggregationBits())
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, att.GetAggregationBits())
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
if err != nil {
@@ -226,16 +205,34 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
return
}
// Broadcast the final 'broadcastAtt' object
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, broadcastAtt), broadcastAtt); err != nil {
// Broadcasting the signed attestation again once a node is able to process it.
var attToBroadcast ethpb.Att
if singleAtt != nil {
attToBroadcast = singleAtt
} else {
attToBroadcast = att
}
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, attToBroadcast), attToBroadcast); err != nil {
log.WithError(err).Debug("Could not broadcast")
}
// Feed event notification for other services
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: eventType,
Data: eventData,
})
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
if singleAtt != nil {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.SingleAttReceived,
Data: &operation.SingleAttReceivedData{
Attestation: singleAtt,
},
})
} else {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}
}
}

View File

@@ -706,41 +706,3 @@ func Test_attsAreEqual_Committee(t *testing.T) {
assert.Equal(t, false, attsAreEqual(att1, att2))
})
}
func Test_SeenCommitteeIndicesSlot(t *testing.T) {
t.Run("phase 0 success", func(t *testing.T) {
s := &Service{
seenUnAggregatedAttestationCache: lruwrpr.New(1),
}
data := &ethpb.AttestationData{Slot: 1, CommitteeIndex: 44}
att := &ethpb.Attestation{
AggregationBits: bitfield.Bitlist{0x01},
Data: data,
}
s.setSeenCommitteeIndicesSlot(data.Slot, att.GetCommitteeIndex(), att.GetAggregationBits())
b := append(bytesutil.Bytes32(uint64(1)), bytesutil.Bytes32(uint64(44))...)
b = append(b, bytesutil.SafeCopyBytes(att.GetAggregationBits())...)
_, ok := s.seenUnAggregatedAttestationCache.Get(string(b))
require.Equal(t, true, ok)
})
t.Run("electra success", func(t *testing.T) {
s := &Service{
seenUnAggregatedAttestationCache: lruwrpr.New(1),
}
// committee index is 0 post electra for attestation electra
data := &ethpb.AttestationData{Slot: 1, CommitteeIndex: 0}
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(uint64(63), true)
att := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0x01},
Data: data,
CommitteeBits: cb,
}
ci := att.GetCommitteeIndex()
s.setSeenCommitteeIndicesSlot(data.Slot, ci, att.GetAggregationBits())
b := append(bytesutil.Bytes32(uint64(1)), bytesutil.Bytes32(uint64(63))...)
b = append(b, bytesutil.SafeCopyBytes(att.GetAggregationBits())...)
_, ok := s.seenUnAggregatedAttestationCache.Get(string(b))
require.Equal(t, true, ok)
})
}
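The deleted tests above pin down the seen-cache key layout: slot and committee index, each widened to 32 bytes, followed by the aggregation bits. A hedged sketch of that key, assuming bytesutil.Bytes32 is a little-endian uint64 padded to 32 bytes:

package attcache

import "encoding/binary"

// seenKey builds the cache key used to remember a validator's first
// unaggregated vote per (slot, committee index).
func seenKey(slot, committeeIndex uint64, aggregationBits []byte) string {
	b := make([]byte, 0, 64+len(aggregationBits))
	var tmp [32]byte
	binary.LittleEndian.PutUint64(tmp[:], slot)
	b = append(b, tmp[:]...)
	binary.LittleEndian.PutUint64(tmp[:], committeeIndex)
	b = append(b, tmp[:]...)
	return string(append(b, aggregationBits...))
}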

View File

@@ -6,7 +6,6 @@ package sync
import (
"context"
"encoding/hex"
"sync"
"time"
@@ -50,9 +49,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// hack to prevent bad holesky block importation
var badHoleskyRoot [32]byte
var _ runtime.Service = (*Service)(nil)
const (
@@ -168,7 +164,6 @@ type Service struct {
newBlobVerifier verification.NewBlobVerifier
availableBlocker coverage.AvailableBlocker
ctxMap ContextByteVersions
slasherEnabled bool
}
// NewService initializes new regular sync service.
@@ -387,13 +382,3 @@ type Checker interface {
Status() error
Resync() error
}
func init() {
hexStr := "2db899881ed8546476d0b92c6aa9110bea9a4cd0dbeb5519eb0ea69575f1f359"
bytes, err := hex.DecodeString(hexStr)
if err != nil {
log.WithError(err).Error("Could not decode hex string")
return
}
badHoleskyRoot = [32]byte(bytes)
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -33,11 +34,7 @@ import (
// - The attestation is unaggregated -- that is, it has exactly one participating validator (len(get_attesting_indices(state, attestation.data, attestation.aggregation_bits)) == 1).
// - attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot).
// - The signature of attestation is valid.
func (s *Service) validateCommitteeIndexBeaconAttestation(
ctx context.Context,
pid peer.ID,
msg *pubsub.Message,
) (pubsub.ValidationResult, error) {
func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
@@ -67,7 +64,6 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
if err := helpers.ValidateNilAttestation(att); err != nil {
return pubsub.ValidationReject, err
}
data := att.GetData()
// Do not process slot 0 attestations.
@@ -77,7 +73,8 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(), earlyAttestationProcessingTolerance); err != nil {
if err := helpers.ValidateAttestationTime(data.Slot, s.cfg.clock.GenesisTime(),
earlyAttestationProcessingTolerance); err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
@@ -87,11 +84,12 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
committeeIndex := att.GetCommitteeIndex()
if !s.slasherEnabled {
if !features.Get().EnableSlasher {
// Verify this the first attestation received for the participating validator for the slot.
if s.hasSeenCommitteeIndicesSlot(data.Slot, committeeIndex, att.GetAggregationBits()) {
return pubsub.ValidationIgnore, nil
}
// Reject an attestation if it references an invalid block.
if s.hasBadBlock(bytesutil.ToBytes32(data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(data.Target.Root)) ||
@@ -101,12 +99,15 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
}
}
var validationRes pubsub.ValidationResult
// Verify the block being voted and the processed state is in beaconDB and the block has passed validation if it's in the beaconDB.
blockRoot := bytesutil.ToBytes32(data.BeaconBlockRoot)
if !s.hasBlockAndState(ctx, blockRoot) {
return s.saveToPendingAttPool(att)
}
if !s.cfg.chain.InForkchoice(blockRoot) {
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
tracing.AnnotateError(span, blockchain.ErrNotDescendantOfFinalized)
return pubsub.ValidationIgnore, blockchain.ErrNotDescendantOfFinalized
}
@@ -122,12 +123,12 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
return pubsub.ValidationIgnore, err
}
validationRes, err := s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
validationRes, err = s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
if validationRes != pubsub.ValidationAccept {
return validationRes, err
}
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, committeeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.GetData().Slot, committeeIndex)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
@@ -138,42 +139,21 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
return validationRes, err
}
// Consolidated handling of Electra SingleAttestation vs Phase0 unaggregated attestation
var (
attForValidation eth.Att // what we'll pass to further validation
eventType feed.EventType
eventData interface{}
)
var singleAtt *eth.SingleAttestation
if att.Version() >= version.Electra {
singleAtt, ok := att.(*eth.SingleAttestation)
singleAtt, ok = att.(*eth.SingleAttestation)
if !ok {
return pubsub.ValidationIgnore, fmt.Errorf(
"attestation has wrong type (expected %T, got %T)",
&eth.SingleAttestation{}, att,
)
}
// Convert Electra SingleAttestation to unaggregated ElectraAttestation. This is needed because many parts of the codebase assume that attestations have a certain structure and SingleAttestation violates these assumptions.
attForValidation = singleAtt.ToAttestationElectra(committee)
eventType = operation.SingleAttReceived
eventData = &operation.SingleAttReceivedData{
Attestation: singleAtt,
}
} else {
// Phase0 unaggregated attestation
attForValidation = att
eventType = operation.UnaggregatedAttReceived
eventData = &operation.UnAggregatedAttReceivedData{
Attestation: att,
return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.SingleAttestation{}, att)
}
att = singleAtt.ToAttestationElectra(committee)
}
validationRes, err = s.validateUnaggregatedAttWithState(ctx, attForValidation, preState)
validationRes, err = s.validateUnaggregatedAttWithState(ctx, att, preState)
if validationRes != pubsub.ValidationAccept {
return validationRes, err
}
if s.slasherEnabled {
if features.Get().EnableSlasher {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {
@@ -192,7 +172,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
tracing.AnnotateError(span, err)
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, attForValidation, committee)
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
tracing.AnnotateError(span, err)
@@ -202,16 +182,27 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
}()
}
// Notify other services in the beacon node
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: eventType,
Data: eventData,
})
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
if singleAtt != nil {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.SingleAttReceived,
Data: &operation.SingleAttReceivedData{
Attestation: singleAtt,
},
})
} else {
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}
s.setSeenCommitteeIndicesSlot(data.Slot, committeeIndex, attForValidation.GetAggregationBits())
s.setSeenCommitteeIndicesSlot(data.Slot, committeeIndex, att.GetAggregationBits())
// Attach final validated attestation to the message for further pipeline use
msg.ValidatorData = attForValidation
msg.ValidatorData = att
return pubsub.ValidationAccept, nil
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -79,7 +80,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
},
})
if s.slasherEnabled {
if features.Get().EnableSlasher {
// Feed the block header to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
go func() {
@@ -399,9 +400,6 @@ func (s *Service) setSeenBlockIndexSlot(slot primitives.Slot, proposerIdx primit
// Returns true if the block is marked as a bad block.
func (s *Service) hasBadBlock(root [32]byte) bool {
if root == badHoleskyRoot {
return true
}
s.badBlockLock.RLock()
defer s.badBlockLock.RUnlock()
_, seen := s.badBlockCache.Get(string(root[:]))

View File

@@ -1,3 +0,0 @@
### Added
- Add SSZ support to light client finality and optimistic APIs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14836)

View File

@@ -1,3 +0,0 @@
### Fixed
- Fixed pruner to not block while pruning a large database by introducing a `batchSize`.

View File

@@ -1,4 +0,0 @@
### Ignored
- Clean up single attestation code for readability.

View File

@@ -1,3 +0,0 @@
### Added
- Add a log of the committee index when committee bits are not the expected length of 1.

View File

@@ -1,3 +0,0 @@
### Fixed
- Cosmetic fix so post-Electra validator logs display attestation committee information correctly.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fix inserting the wrong committee index into the seen cache for Electra attestations.

View File

@@ -1,3 +0,0 @@
### Changed
- `--validators-registration-batch-size`: Change default value from `0` to `200`.

View File

@@ -1,3 +0,0 @@
### Fixed
- Check for the correct attester slashing type during gossip validation.

View File

@@ -1,3 +0,0 @@
### Fixed
- Allow any block type to be unmarshaled rather than only phase0 blocks in `slotByBlockRoot`.

View File

@@ -1,3 +0,0 @@
### Ignored
- Add target root to forkchoice dump

View File

@@ -1,3 +0,0 @@
### Ignored
- Split out forkchoice startup from the main service startup.

View File

@@ -1,3 +0,0 @@
### Ignored
- Populate pubkey cache at genesis.

View File

@@ -1,3 +0,0 @@
### Added
- Added a feature flag to sync from an arbitrary beacon block root at startup.

View File

@@ -1,2 +0,0 @@
### Changed
- Reorganized beacon chain flags in `--help` text into logical sections.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fixed violations of gosec G301. This check requires that created directories and files have permissions 0750 and 0600, respectively.

View File

@@ -1,3 +0,0 @@
### Changed
- Use go-cmp for printing better diffs for `assertions.DeepEqual`.

View File

@@ -0,0 +1,3 @@
### Fixed
- Fixed use of deprecated rand.Seed.
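The fix itself is mechanical: math/rand.Seed has been deprecated since go1.20 and became a no-op in go1.24, so global seeding is replaced with a locally seeded generator. A representative before/after:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Before (deprecated since go1.20, a no-op in go1.24):
	//   rand.Seed(time.Now().UnixNano())
	// After: seed a private generator instead of the global source.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	fmt.Println(rng.Intn(10))
}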

View File

@@ -296,11 +296,6 @@ var (
Usage: "Directory for the slasher database",
Value: cmd.DefaultDataDir(),
}
// SlasherFlag defines a flag to enable the beacon chain slasher.
SlasherFlag = &cli.BoolFlag{
Name: "slasher",
Usage: "Enables a slasher in the beacon node for detecting slashable offenses.",
}
// BeaconDBPruning enables the pruning of beacon db.
BeaconDBPruning = &cli.BoolFlag{
Name: "beacon-db-pruning",

View File

@@ -142,7 +142,6 @@ var appFlags = []cli.Flag{
genesis.StatePath,
genesis.BeaconAPIURL,
flags.SlasherDirFlag,
flags.SlasherFlag,
flags.JwtId,
storage.BlobStoragePathFlag,
storage.BlobRetentionEpochFlag,

View File

@@ -45,188 +45,154 @@ type flagGroup struct {
}
var appHelpFlagGroups = []flagGroup{
{ // Flags relevant to running the process.
{
Name: "cmd",
Flags: []cli.Flag{
cmd.AcceptTosFlag,
cmd.ConfigFileFlag,
},
},
{ // Flags relevant to configuring the beacon chain and APIs.
Name: "beacon-chain",
Flags: []cli.Flag{
cmd.ApiTimeoutFlag,
cmd.ChainConfigFileFlag,
cmd.E2EConfigFlag,
cmd.GrpcMaxCallRecvMsgSizeFlag,
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,
cmd.RPCMaxPageSizeFlag,
flags.CertFlag,
flags.ChainID,
flags.DisableDebugRPCEndpoints,
flags.HTTPModules,
flags.HTTPServerCorsDomain,
flags.HTTPServerHost,
flags.HTTPServerPort,
flags.KeyFlag,
flags.NetworkID,
flags.RPCHost,
flags.RPCPort,
cmd.NoDiscovery,
cmd.BootstrapNode,
cmd.RelayNode,
cmd.P2PUDPPort,
cmd.P2PQUICPort,
cmd.P2PTCPPort,
cmd.DataDirFlag,
cmd.VerbosityFlag,
cmd.EnableTracingFlag,
cmd.TracingProcessNameFlag,
cmd.TracingEndpointFlag,
cmd.TraceSampleFractionFlag,
cmd.MonitoringHostFlag,
flags.MonitoringPortFlag,
cmd.DisableMonitoringFlag,
cmd.MaxGoroutines,
cmd.ForceClearDB,
cmd.ClearDB,
cmd.ConfigFileFlag,
cmd.ChainConfigFileFlag,
cmd.GrpcMaxCallRecvMsgSizeFlag,
cmd.AcceptTosFlag,
cmd.RestoreSourceFileFlag,
cmd.RestoreTargetDirFlag,
cmd.ValidatorMonitorIndicesFlag,
cmd.ApiTimeoutFlag,
},
},
{
// p2p flags configure the p2p side of beacon-chain.
Name: "p2p",
Name: "debug",
Flags: []cli.Flag{
cmd.BootstrapNode,
cmd.EnableUPnPFlag,
cmd.NoDiscovery,
cmd.P2PAllowList,
cmd.P2PDenyList,
cmd.P2PHost,
cmd.P2PHostDNS,
cmd.P2PIP,
cmd.P2PMaxPeers,
cmd.P2PMetadata,
cmd.P2PPrivKey,
cmd.P2PQUICPort,
cmd.P2PStaticID,
cmd.P2PTCPPort,
cmd.P2PUDPPort,
cmd.PubsubQueueSize,
cmd.RelayNode,
cmd.StaticPeers,
flags.BlobBatchLimit,
flags.BlobBatchLimitBurstFactor,
flags.BlockBatchLimit,
flags.BlockBatchLimitBurstFactor,
flags.MaxConcurrentDials,
flags.MinPeersPerSubnet,
flags.MinSyncPeers,
flags.SubscribeToAllSubnets,
debug.PProfFlag,
debug.PProfAddrFlag,
debug.PProfPortFlag,
debug.MemProfileRateFlag,
debug.CPUProfileFlag,
debug.TraceFlag,
debug.BlockProfileRateFlag,
debug.MutexProfileFractionFlag,
},
},
{ // Flags relevant to storing data on disk and configuring the beacon chain database.
Name: "db",
{
Name: "beacon-chain",
Flags: []cli.Flag{
backfill.BackfillBatchSize,
backfill.BackfillOldestSlot,
backfill.BackfillWorkerCount,
backfill.EnableExperimentalBackfill,
cmd.ClearDB,
cmd.DataDirFlag,
cmd.ForceClearDB,
cmd.RestoreSourceFileFlag,
cmd.RestoreTargetDirFlag,
flags.BeaconDBPruning,
flags.PrunerRetentionEpochs,
flags.SlotsPerArchivedPoint,
storage.BlobRetentionEpochFlag,
storage.BlobStorageLayout,
storage.BlobStoragePathFlag,
},
},
{ // Flags relevant to configuring local block production or external builders such as mev-boost.
Name: "builder",
Flags: []cli.Flag{
flags.LocalBlockValueBoost,
flags.MaxBuilderConsecutiveMissedSlots,
flags.MaxBuilderEpochMissedSlots,
flags.MevRelayEndpoint,
flags.MinBuilderBid,
flags.MinBuilderDiff,
flags.SuggestedFeeRecipient,
},
},
{ // Flags relevant to syncing the beacon chain.
Name: "sync",
Flags: []cli.Flag{
checkpoint.BlockPath,
checkpoint.RemoteURL,
checkpoint.StatePath,
flags.WeakSubjectivityCheckpoint,
genesis.BeaconAPIURL,
genesis.StatePath,
},
},
{ // Flags relevant to interacting with the execution layer.
Name: "execution layer",
Flags: []cli.Flag{
flags.ContractDeploymentBlock,
flags.InteropMockEth1DataVotesFlag,
flags.DepositContractFlag,
flags.EngineEndpointTimeoutSeconds,
flags.Eth1HeaderReqLimit,
flags.ContractDeploymentBlock,
flags.RPCHost,
flags.RPCPort,
flags.CertFlag,
flags.KeyFlag,
flags.HTTPModules,
flags.HTTPServerHost,
flags.HTTPServerPort,
flags.HTTPServerCorsDomain,
flags.ExecutionEngineEndpoint,
flags.ExecutionEngineHeaders,
flags.ExecutionJWTSecretFlag,
flags.JwtId,
flags.InteropMockEth1DataVotesFlag,
},
},
{ // Flags relevant to configuring beacon chain monitoring.
Name: "monitoring",
Flags: []cli.Flag{
cmd.DisableMonitoringFlag,
cmd.EnableTracingFlag,
cmd.MonitoringHostFlag,
cmd.TraceSampleFractionFlag,
cmd.TracingEndpointFlag,
cmd.TracingProcessNameFlag,
cmd.ValidatorMonitorIndicesFlag,
flags.MonitoringPortFlag,
},
},
{ // Flags relevant to slasher operation.
Name: "slasher",
Flags: []cli.Flag{
flags.SetGCPercent,
flags.SlotsPerArchivedPoint,
flags.BlockBatchLimit,
flags.BlockBatchLimitBurstFactor,
flags.BlobBatchLimit,
flags.BlobBatchLimitBurstFactor,
flags.DisableDebugRPCEndpoints,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,
flags.NetworkID,
flags.WeakSubjectivityCheckpoint,
flags.Eth1HeaderReqLimit,
flags.MinPeersPerSubnet,
flags.MaxConcurrentDials,
flags.MevRelayEndpoint,
flags.MaxBuilderEpochMissedSlots,
flags.MaxBuilderConsecutiveMissedSlots,
flags.EngineEndpointTimeoutSeconds,
flags.SlasherDirFlag,
flags.SlasherFlag,
flags.LocalBlockValueBoost,
flags.MinBuilderBid,
flags.MinBuilderDiff,
flags.JwtId,
flags.BeaconDBPruning,
flags.PrunerRetentionEpochs,
checkpoint.BlockPath,
checkpoint.StatePath,
checkpoint.RemoteURL,
genesis.StatePath,
genesis.BeaconAPIURL,
storage.BlobStoragePathFlag,
storage.BlobRetentionEpochFlag,
storage.BlobStorageLayout,
backfill.EnableExperimentalBackfill,
backfill.BackfillWorkerCount,
backfill.BackfillBatchSize,
backfill.BackfillOldestSlot,
},
},
{
// Flags in the "log" section control how Prysm handles logging.
Name: "log",
Flags: []cli.Flag{
cmd.LogFormat,
cmd.LogFileName,
cmd.VerbosityFlag,
},
},
{ // Feature flags.
Name: "features",
Flags: features.ActiveFlags(features.BeaconChainFlags),
},
{ // Flags required to configure the merge.
Name: "merge",
Flags: []cli.Flag{
flags.SuggestedFeeRecipient,
flags.TerminalTotalDifficultyOverride,
flags.TerminalBlockHashOverride,
flags.TerminalBlockHashActivationEpochOverride,
},
},
{ // The deprecated section represents beacon flags that still have use, but should not be used
// as they are expected to be deleted in a future release.
{
Name: "p2p",
Flags: []cli.Flag{
cmd.P2PIP,
cmd.P2PHost,
cmd.P2PHostDNS,
cmd.P2PMaxPeers,
cmd.P2PPrivKey,
cmd.P2PStaticID,
cmd.P2PMetadata,
cmd.P2PAllowList,
cmd.P2PDenyList,
cmd.PubsubQueueSize,
cmd.StaticPeers,
cmd.EnableUPnPFlag,
flags.MinSyncPeers,
},
},
{
Name: "log",
Flags: []cli.Flag{
cmd.LogFormat,
cmd.LogFileName,
},
},
{
Name: "features",
Flags: features.ActiveFlags(features.BeaconChainFlags),
},
{
Name: "deprecated",
Flags: []cli.Flag{
cmd.BackupWebhookOutputDir,
},
},
{ // Flags used in debugging Prysm. These are flags not usually used by end users.
Name: "debug",
Flags: []cli.Flag{
cmd.MaxGoroutines,
debug.BlockProfileRateFlag,
debug.CPUProfileFlag,
debug.MemProfileRateFlag,
debug.MutexProfileFractionFlag,
debug.PProfAddrFlag,
debug.PProfFlag,
debug.PProfPortFlag,
debug.TraceFlag,
flags.SetGCPercent,
},
},
}
func init() {

View File

@@ -288,7 +288,7 @@ func generateGenesis(ctx context.Context) (state.BeaconState, error) {
if err != nil {
return nil, err
}
if err := os.WriteFile(f.GethGenesisJsonOut, gbytes, 0600); err != nil {
if err := os.WriteFile(f.GethGenesisJsonOut, gbytes, os.ModePerm); err != nil {
return nil, errors.Wrapf(err, "failed to write %s", f.GethGenesisJsonOut)
}
}

View File

@@ -381,7 +381,7 @@ var (
ValidatorsRegistrationBatchSizeFlag = &cli.IntFlag{
Name: "validators-registration-batch-size",
Usage: "Sets the maximum size for one batch of validator registrations. Use a non-positive value to disable batching.",
Value: 200,
Value: 0,
}
// EnableDistributed enables the usage of prysm validator client in a Distributed Validator Cluster.
EnableDistributed = &cli.BoolFlag{

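The usage string above specifies that a non-positive value disables batching. A minimal, hypothetical illustration of that contract (not the validator client's actual batching code):

package main

import "fmt"

// batch splits items into chunks of size; a non-positive size returns
// everything as a single batch, i.e. batching is disabled.
func batch(items []int, size int) [][]int {
	if size <= 0 {
		return [][]int{items}
	}
	var out [][]int
	for len(items) > size {
		out = append(out, items[:size])
		items = items[size:]
	}
	return append(out, items)
}

func main() {
	fmt.Println(batch([]int{1, 2, 3, 4, 5}, 2)) // [[1 2] [3 4] [5]]
	fmt.Println(batch([]int{1, 2, 3, 4, 5}, 0)) // [[1 2 3 4 5]]
}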
View File

@@ -60,6 +60,7 @@ type Flags struct {
// Bug fixes related flags.
AttestTimely bool // AttestTimely fixes #8185. It is gated behind a flag to ensure beacon node's fix can safely roll out first. We'll invert this in v1.1.0.
EnableSlasher bool // Enable slasher in the beacon node runtime.
EnableSlashingProtectionPruning bool // Enable slashing protection pruning for the validator client.
EnableMinimalSlashingProtection bool // Enable minimal slashing protection database for the validator client.
@@ -86,9 +87,6 @@ type Flags struct {
// AggregateIntervals specifies the time durations at which we aggregate attestations preparing for forkchoice.
AggregateIntervals [3]time.Duration
// Feature related flags (alignment forced in the end)
ForceHead string // ForceHead forces the head block to be a specific block root, the last head block, or the last finalized block.
}
var featureConfig *Flags
@@ -213,6 +211,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logDisabled(disableBroadcastSlashingFlag)
cfg.DisableBroadcastSlashings = true
}
if ctx.Bool(enableSlasherFlag.Name) {
log.WithField(enableSlasherFlag.Name, enableSlasherFlag.Usage).Warn(enabledFeatureFlag)
cfg.EnableSlasher = true
}
if ctx.Bool(enableHistoricalSpaceRepresentation.Name) {
log.WithField(enableHistoricalSpaceRepresentation.Name, enableHistoricalSpaceRepresentation.Usage).Warn(enabledFeatureFlag)
cfg.EnableHistoricalSpaceRepresentation = true
@@ -271,10 +273,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(enableExperimentalAttestationPool)
cfg.EnableExperimentalAttestationPool = true
}
if ctx.IsSet(forceHeadFlag.Name) {
logEnabled(forceHeadFlag)
cfg.ForceHead = ctx.String(forceHeadFlag.Name)
}
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)

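The feature hunks above all follow one pattern: a cli.BoolFlag is declared, registered in BeaconChainFlags, and ConfigureBeaconChain copies its value onto the Flags config. A self-contained sketch of that shape using github.com/urfave/cli/v2 (the flag and field names here are illustrative, not Prysm's):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

type config struct{ EnableDemo bool }

var demoFlag = &cli.BoolFlag{
	Name:  "demo-feature",
	Usage: "Enables a demo feature.",
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{demoFlag},
		Action: func(ctx *cli.Context) error {
			cfg := &config{}
			if ctx.Bool(demoFlag.Name) { // mirrors the enableSlasherFlag check above
				cfg.EnableDemo = true
			}
			fmt.Println("EnableDemo:", cfg.EnableDemo)
			return nil
		},
	}
	_ = app.Run(os.Args)
}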
View File

@@ -12,39 +12,39 @@ import (
func TestInitFeatureConfig(t *testing.T) {
defer Init(&Flags{})
cfg := &Flags{
EnableDoppelGanger: true,
EnableSlasher: true,
}
Init(cfg)
c := Get()
assert.Equal(t, true, c.EnableDoppelGanger)
assert.Equal(t, true, c.EnableSlasher)
}
func TestInitWithReset(t *testing.T) {
defer Init(&Flags{})
Init(&Flags{
EnableDoppelGanger: true,
EnableSlasher: true,
})
assert.Equal(t, true, Get().EnableDoppelGanger)
assert.Equal(t, true, Get().EnableSlasher)
// Overwrite previously set value (value that didn't come by default).
resetCfg := InitWithReset(&Flags{
EnableDoppelGanger: false,
EnableSlasher: false,
})
assert.Equal(t, false, Get().EnableDoppelGanger)
assert.Equal(t, false, Get().EnableSlasher)
// Reset must get to previously set configuration (not to default config values).
resetCfg()
assert.Equal(t, true, Get().EnableDoppelGanger)
assert.Equal(t, true, Get().EnableSlasher)
}
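TestInitWithReset above exercises a save-and-restore closure. A simplified sketch of that pattern, assuming a package-level config pointer (the real package also copies and guards the config):

package main

import "fmt"

type flagsCfg struct{ EnableSlasher bool }

var current *flagsCfg

func initCfg(c *flagsCfg) { current = c }

// initWithReset swaps in a new config and returns a func restoring the old one.
func initWithReset(c *flagsCfg) func() {
	prev := current
	initCfg(c)
	return func() { initCfg(prev) }
}

func main() {
	initCfg(&flagsCfg{EnableSlasher: true})
	reset := initWithReset(&flagsCfg{EnableSlasher: false})
	fmt.Println(current.EnableSlasher) // false
	reset()
	fmt.Println(current.EnableSlasher) // true
}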
func TestConfigureBeaconConfig(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.Bool(saveInvalidBlockTempFlag.Name, true, "test")
set.Bool(enableSlasherFlag.Name, true, "test")
context := cli.NewContext(&app, set, nil)
require.NoError(t, ConfigureBeaconChain(context))
c := Get()
assert.Equal(t, true, c.SaveInvalidBlock)
assert.Equal(t, true, c.EnableSlasher)
}
func TestValidateNetworkFlags(t *testing.T) {

View File

@@ -89,6 +89,10 @@ var (
Name: "attest-timely",
Usage: "Fixes validator can attest timely after current block processes. See #8185 for more details.",
}
enableSlasherFlag = &cli.BoolFlag{
Name: "slasher",
Usage: "Enables a slasher in the beacon node for detecting slashable offenses.",
}
enableSlashingProtectionPruning = &cli.BoolFlag{
Name: "enable-slashing-protection-history-pruning",
Usage: "Enables the pruning of the validator client's slashing protection database.",
@@ -174,12 +178,6 @@ var (
Name: "enable-experimental-attestation-pool",
Usage: "Enables an experimental attestation pool design.",
}
// forceHeadFlag is a flag to force the head of the beacon chain to a specific block.
forceHeadFlag = &cli.StringFlag{
Name: "sync-from",
Usage: "Forces the head of the beacon chain to a specific block root. Values can be 'head' or a block root." +
" The block root has to be known to the beacon node and correspond to a block newer than the current finalized checkpoint.",
}
)
// devModeFlags holds list of flags that are set when development mode is on.
@@ -219,6 +217,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
Mainnet,
disablePeerScorer,
disableBroadcastSlashingFlag,
enableSlasherFlag,
disableStakinContractCheck,
SaveFullExecutionPayloads,
enableStartupOptimistic,
@@ -236,7 +235,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
DisableCommitteeAwarePacking,
EnableDiscoveryReboot,
enableExperimentalAttestationPool,
forceHeadFlag,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -7,10 +7,15 @@ func UseHoleskyNetworkConfig() {
cfg := BeaconNetworkConfig().Copy()
cfg.ContractDeploymentBlock = 0
cfg.BootstrapNodes = []string{
"enr:-Oa4QOuWj-_JX6iJWjdS_67qQkkVoomo3YztvdOLVF4lVwc3NzFR3D8y-8CmGjmQA16DJBOZWwbc3XMOw_w_ScU6-qCCAgSHYXR0bmV0c4gAAAMAAAAAAIZjbGllbnTYikxpZ2h0aG91c2WMNy4wLjAtYmV0YS4whGV0aDKQAZ4hrQYBcAD__________4JpZIJ2NIJpcISyE90mhHF1aWOCfLuJc2VjcDI1NmsxoQK-fTVglh3wyHIcyauubuyvLeFmn6QpUwog7Aio2OSucYhzeW5jbmV0cwCDdGNwgny6g3VkcIJ8ug",
"enr:-PW4QAOnzqnCuwuNNrUEXebSD3MFMOe-9NApsb8UkAQK-MquYtUhj35Ksz4EWcmdB0Cmj43bGBJJEpt9fYMAg1vOHXobh2F0dG5ldHOIAAAYAAAAAACGY2xpZW502IpMaWdodGhvdXNljDcuMC4wLWJldGEuMIRldGgykAGeIa0GAXAA__________-CaWSCdjSCaXCEff1tSYRxdWljgiMphXF1aWM2giMpiXNlY3AyNTZrMaECUiAFSBathSIPGhDHbZjQS5gTqaPcRkAe4HECCk-vt6KIc3luY25ldHMPg3RjcIIjKIR0Y3A2giMog3VkcIIjKA",
"enr:-QESuEA2tFgFDu5LX9T6j1_bayowdRzrtdQcjwmTq_zOVjwe1WQOsM7-Q4qRcgc7AjpAQOcdb2F3wyPDBkbP-vxW2dLgXYdhdHRuZXRziAADAAAAAAAAhmNsaWVudNiKTGlnaHRob3VzZYw3LjAuMC1iZXRhLjCEZXRoMpABniGtBgFwAP__________gmlkgnY0gmlwhIe1ME2DaXA2kCoBBPkwgDCeAAAAAAAAAAKEcXVpY4IjKYVxdWljNoIjg4lzZWNwMjU2azGhA4oHjOmlWOfLizFFIQSI_dzn4rzvDvMG8h7zmxhmOVzXiHN5bmNuZXRzD4N0Y3CCIyiEdGNwNoIjgoN1ZHCCIyiEdWRwNoIjgg",
"enr:-KG4QCvyykb0pA1T-EZJUPkl2P1PKYk8-4El8TqwWdKwtoI6NtIBGMJVDgGZKVy2eMszI0_ermORtQ340lj1dTHzGVVPhGV0aDKQAZ4hrQYBcAD__________4JpZIJ2NIJpcIQ5gNI3iXNlY3AyNTZrMaEDMQYffETNbuGwVzWEJSgCpA50LTxHUWU1A0TDfleEa5mDdGNwgjvFg3VkcII7xQ",
// EF
"enr:-Ku4QFo-9q73SspYI8cac_4kTX7yF800VXqJW4Lj3HkIkb5CMqFLxciNHePmMt4XdJzHvhrCC5ADI4D_GkAsxGJRLnQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAhnTT-AQFwAP__________gmlkgnY0gmlwhLKAiOmJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyk",
"enr:-Ku4QPG7F72mbKx3gEQEx07wpYYusGDh-ni6SNkLvOS-hhN-BxIggN7tKlmalb0L5JPoAfqD-akTZ-gX06hFeBEz4WoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAhnTT-AQFwAP__________gmlkgnY0gmlwhJK-DYCJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyk",
"enr:-LK4QPxe-mDiSOtEB_Y82ozvxn9aQM07Ui8A-vQHNgYGMMthfsfOabaaTHhhJHFCBQQVRjBww_A5bM1rf8MlkJU_l68Eh2F0dG5ldHOIAADAAAAAAACEZXRoMpBpt9l0BAFwAAABAAAAAAAAgmlkgnY0gmlwhLKAiOmJc2VjcDI1NmsxoQJu6T9pclPObAzEVQ53DpVQqjadmVxdTLL-J3h9NFoCeIN0Y3CCIyiDdWRwgiMo",
"enr:-Ly4QGbOw4xNel5EhmDsJJ-QhC9XycWtsetnWoZ0uRy381GHdHsNHJiCwDTOkb3S1Ade0SFQkWJX_pgb3g8Jfh93rvMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBpt9l0BAFwAAABAAAAAAAAgmlkgnY0gmlwhJK-DYCJc2VjcDI1NmsxoQOxKv9sv3zKF8GDewgFGGHKP5HCZZpPpTrwl9eXKAWGxIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA",
// Teku
"enr:-LS4QG0uV4qvcpJ-HFDJRGBmnlD3TJo7yc4jwK8iP7iKaTlfQ5kZvIDspLMJhk7j9KapuL9yyHaZmwTEZqr10k9XumyCEcmHYXR0bmV0c4gAAAAABgAAAIRldGgykGm32XQEAXAAAAEAAAAAAACCaWSCdjSCaXCErK4j-YlzZWNwMjU2azGhAgfWRBEJlb7gAhXIB5ePmjj2b8io0UpEenq1Kl9cxStJg3RjcIIjKIN1ZHCCIyg",
// Sigma Prime
"enr:-Le4QLoE1wFHSlGcm48a9ZESb_MRLqPPu6G0vHqu4MaUcQNDHS69tsy-zkN0K6pglyzX8m24mkb-LtBcbjAYdP1uxm4BhGV0aDKQabfZdAQBcAAAAQAAAAAAAIJpZIJ2NIJpcIQ5gR6Wg2lwNpAgAUHQBwEQAAAAAAAAADR-iXNlY3AyNTZrMaEDPMSNdcL92uNIyCsS177Z6KTXlbZakQqxv3aQcWawNXeDdWRwgiMohHVkcDaCI4I",
}
OverrideBeaconNetworkConfig(cfg)
}

View File

@@ -51,5 +51,4 @@ type Node struct {
BlockRoot []byte
ParentRoot []byte
ExecutionBlockHash []byte
Target []byte
}

View File

@@ -22,10 +22,14 @@ func IsHex(b []byte) bool {
// DecodeHexWithLength takes a string and a length in bytes,
// and validates whether the string is a hex and has the correct length.
func DecodeHexWithLength(s string, length int) ([]byte, error) {
if len(s) != 2*length+2 {
bytes, err := hexutil.Decode(s)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("%s is not a valid hex", s))
}
if len(bytes) != length {
return nil, fmt.Errorf("%s is not length %d bytes", s, length)
}
return hexutil.Decode(s)
return bytes, nil
}
// DecodeHexWithMaxLength takes a string and a length in bytes,

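The rewritten helper above decodes first and only then validates the decoded byte count, so malformed input fails with a hex error rather than a misleading length error. A standalone sketch of the same order, using go-ethereum's hexutil as the hunk does:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func decodeHexWithLength(s string, length int) ([]byte, error) {
	b, err := hexutil.Decode(s) // rejects strings without the 0x prefix or with bad digits
	if err != nil {
		return nil, fmt.Errorf("%s is not a valid hex: %w", s, err)
	}
	if len(b) != length { // validate the decoded byte count, not the string length
		return nil, fmt.Errorf("%s is not length %d bytes", s, length)
	}
	return b, nil
}

func main() {
	b, err := decodeHexWithLength("0xdeadbeef", 4)
	fmt.Println(b, err) // [222 173 190 239] <nil>
}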
View File

@@ -333,7 +333,6 @@ go_library(
srcs = [
"attestation.go",
"beacon_block.go",
"log.go",
"cloners.go",
"eip_7521.go",
"sync_committee_mainnet.go",
@@ -374,8 +373,6 @@ go_library(
"@org_golang_google_protobuf//runtime/protoimpl:go_default_library",
"@org_golang_google_protobuf//types/descriptorpb:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -291,8 +291,6 @@ func (a *AttestationElectra) GetCommitteeIndex() primitives.CommitteeIndex {
indices := a.CommitteeBits.BitIndices()
if len(indices) == 0 {
return 0
} else if len(indices) != 1 {
log.WithField("indices", a.CommitteeBits).Debugf("expected 1 committee bit indice got %d", len(indices))
}
return primitives.CommitteeIndex(uint64(indices[0]))
}

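The hunk above drops the debug log for multi-bit committee bitfields; the index lookup itself reads the first set bit. A sketch with github.com/prysmaticlabs/go-bitfield, which provides the BitIndices helper the code calls:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	bits := bitfield.NewBitvector64()
	bits.SetBitAt(12, true)
	indices := bits.BitIndices() // []int{12}
	if len(indices) == 0 {
		fmt.Println(0)
		return
	}
	fmt.Println(indices[0]) // 12
}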
View File

@@ -46,11 +46,11 @@ func Bitlists64WithSingleBitSet(n, length uint64) []*bitfield.Bitlist64 {
func BitlistsWithMultipleBitSet(t testing.TB, n, length, count uint64) []bitfield.Bitlist {
seed := time.Now().UnixNano()
t.Logf("bitlistsWithMultipleBitSet random seed: %v", seed)
rand.Seed(seed)
r := rand.New(rand.NewSource(seed))
lists := make([]bitfield.Bitlist, n)
for i := uint64(0); i < n; i++ {
b := bitfield.NewBitlist(length)
keys := rand.Perm(int(length)) // lint:ignore uintcast -- This is safe in test code.
keys := r.Perm(int(length)) // lint:ignore uintcast -- This is safe in test code.
for _, key := range keys[:count] {
b.SetBitAt(uint64(key), true)
}
@@ -63,11 +63,11 @@ func BitlistsWithMultipleBitSet(t testing.TB, n, length, count uint64) []bitfiel
func Bitlists64WithMultipleBitSet(t testing.TB, n, length, count uint64) []*bitfield.Bitlist64 {
seed := time.Now().UnixNano()
t.Logf("Bitlists64WithMultipleBitSet random seed: %v", seed)
rand.Seed(seed)
r := rand.New(rand.NewSource(seed))
lists := make([]*bitfield.Bitlist64, n)
for i := uint64(0); i < n; i++ {
b := bitfield.NewBitlist64(length)
keys := rand.Perm(int(length)) // lint:ignore uintcast -- This is safe in test code.
keys := r.Perm(int(length)) // lint:ignore uintcast -- This is safe in test code.
for _, key := range keys[:count] {
b.SetBitAt(uint64(key), true)
}

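Both hunks above replace seeding of the deprecated package-level generator with a dedicated *rand.Rand. The same migration in isolation:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	seed := time.Now().UnixNano()
	r := rand.New(rand.NewSource(seed)) // local generator instead of rand.Seed(seed)
	fmt.Println(r.Perm(5))              // e.g. [3 0 4 1 2]; deterministic per seed
}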
View File

@@ -1,6 +0,0 @@
package eth
import "github.com/sirupsen/logrus"
var logger = logrus.StandardLogger()
var log = logger.WithField("prefix", "protobuf")

View File

@@ -8,10 +8,8 @@ go_library(
deps = [
"//encoding/ssz/equality:go_default_library",
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//testing/protocmp:go_default_library",
],
)

View File

@@ -10,11 +10,9 @@ import (
"strings"
"github.com/d4l3k/messagediff"
"github.com/google/go-cmp/cmp"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/equality"
"github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/testing/protocmp"
)
// AssertionTestingTB exposes enough testing.TB methods for assertions.
@@ -54,12 +52,13 @@ func DeepEqual(loggerFn assertionLoggerFn, expected, actual interface{}, msg ...
if !isDeepEqual(expected, actual) {
errMsg := parseMsg("Values are not equal", msg...)
_, file, line, _ := runtime.Caller(2)
opts := cmp.Options{cmp.AllowUnexported(expected), cmp.AllowUnexported(actual)}
var diff string
if _, isProto := expected.(proto.Message); isProto {
opts = append(opts, protocmp.Transform())
diff = ProtobufPrettyDiff(expected, actual)
} else {
diff, _ = messagediff.PrettyDiff(expected, actual)
}
diff := cmp.Diff(expected, actual, opts...)
loggerFn("%s:%d %s, expected != actual, diff: %s", filepath.Base(file), line, errMsg, diff)
loggerFn("%s:%d %s, want: %#v, got: %#v, diff: %s", filepath.Base(file), line, errMsg, expected, actual, diff)
}
}

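DeepEqual above now builds its diff with go-cmp: unexported fields are opted in via AllowUnexported, and protobuf messages additionally get protocmp.Transform. A minimal sketch of the non-proto path:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type pair struct{ i int }

func main() {
	want, got := pair{42}, pair{41}
	opts := cmp.Options{cmp.AllowUnexported(want, got)}
	// For a proto.Message one would append protocmp.Transform() here.
	fmt.Println(cmp.Diff(want, got, opts...))
}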
View File

@@ -5,7 +5,6 @@ import (
"fmt"
"strings"
"testing"
"unicode"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
testpb "github.com/prysmaticlabs/prysm/v5/proto/testing"
@@ -189,7 +188,7 @@ func TestAssert_DeepEqual(t *testing.T) {
expected: struct{ i int }{42},
actual: struct{ i int }{41},
},
expectedErr: "Values are not equal, expected != actual, diff: struct{ i int }{\n- \ti: 42,\n+ \ti: 41,\n }\n",
expectedErr: "Values are not equal, want: struct { i int }{i:42}, got: struct { i int }{i:41}",
},
{
name: "custom error message",
@@ -199,7 +198,7 @@ func TestAssert_DeepEqual(t *testing.T) {
actual: struct{ i int }{41},
msgs: []interface{}{"Custom values are not equal"},
},
expectedErr: "Custom values are not equal, expected != actual, diff: struct{ i int }{\n- \ti: 42,\n+ \ti: 41,\n }",
expectedErr: "Custom values are not equal, want: struct { i int }{i:42}, got: struct { i int }{i:41}",
},
{
name: "custom error message with params",
@@ -209,39 +208,24 @@ func TestAssert_DeepEqual(t *testing.T) {
actual: struct{ i int }{41},
msgs: []interface{}{"Custom values are not equal (for slot %d)", 12},
},
expectedErr: "Custom values are not equal (for slot 12), expected != actual, diff: struct{ i int }{\n- \ti: 42,\n+ \ti: 41,\n }\n",
expectedErr: "Custom values are not equal (for slot 12), want: struct { i int }{i:42}, got: struct { i int }{i:41}",
},
}
for _, tt := range tests {
verify := func(t testing.TB) {
// Trim unicode space characters for an easier comparison.
got := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return -1
}
return r
}, tt.args.tb.ErrorfMsg)
want := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return -1
}
return r
}, tt.expectedErr)
if want == "" && got != "" {
t.Errorf("Unexpected error: %v", got)
} else if !strings.Contains(got, want) {
t.Logf("got=%q", got)
t.Logf("want=%q", want)
verify := func() {
if tt.expectedErr == "" && tt.args.tb.ErrorfMsg != "" {
t.Errorf("Unexpected error: %v", tt.args.tb.ErrorfMsg)
} else if !strings.Contains(tt.args.tb.ErrorfMsg, tt.expectedErr) {
t.Errorf("got: %q, want: %q", tt.args.tb.ErrorfMsg, tt.expectedErr)
}
}
t.Run(fmt.Sprintf("Assert/%s", tt.name), func(t *testing.T) {
assert.DeepEqual(tt.args.tb, tt.args.expected, tt.args.actual, tt.args.msgs...)
verify(t)
verify()
})
t.Run(fmt.Sprintf("Require/%s", tt.name), func(t *testing.T) {
require.DeepEqual(tt.args.tb, tt.args.expected, tt.args.actual, tt.args.msgs...)
verify(t)
verify()
})
}
}

View File

@@ -196,7 +196,7 @@ random:
- "Takoyaki"
`)
f := filepath.Join(testDir, "graffiti.yaml")
if err := os.WriteFile(f, b, 0600); err != nil {
if err := os.WriteFile(f, b, os.ModePerm); err != nil {
return "", err
}
return f, nil

View File

@@ -20,7 +20,7 @@ func tempDir() string {
func UseOsMkdirAllAndWriteFile() {
randPath, _ := rand.Int(rand.Reader, big.NewInt(1000000))
p := filepath.Join(tempDir(), fmt.Sprintf("/%d", randPath))
_ = os.MkdirAll(p, 0750) // want "os and ioutil dir and file writing functions are not permissions-safe, use shared/file"
_ = os.MkdirAll(p, os.ModePerm) // want "os and ioutil dir and file writing functions are not permissions-safe, use shared/file"
someFile := filepath.Join(p, "some.txt")
_ = os.WriteFile(someFile, []byte("hello"), 0600) // want "os and ioutil dir and file writing functions are not permissions-safe, use shared/file"
_ = os.WriteFile(someFile, []byte("hello"), os.ModePerm) // want "os and ioutil dir and file writing functions are not permissions-safe, use shared/file"
}

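For contrast with the analyzer fixture above: os.ModePerm is 0o777, the world-writable mode the linter is meant to flag, while explicit restrictive modes pass. A quick check:

package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Printf("%o\n", os.ModePerm)        // 777
	fmt.Printf("%o\n", os.FileMode(0600)) // 600
}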
View File

@@ -78,5 +78,5 @@ func getAndSaveFile(specDocUrl, outFilePath string) error {
}
func prepareDir(dirPath string) error {
return os.MkdirAll(dirPath, 0750)
return os.MkdirAll(dirPath, os.ModePerm)
}

View File

@@ -105,7 +105,6 @@ go_test(
"aggregate_test.go",
"attest_test.go",
"key_reload_test.go",
"log_test.go",
"metrics_test.go",
"propose_test.go",
"registration_test.go",

View File

@@ -158,7 +158,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
}
}
if err := v.saveSubmittedAtt(agg.AggregateVal(), pubKey[:], true); err != nil {
if err := v.saveSubmittedAtt(agg.AggregateVal().GetData(), pubKey[:], true); err != nil {
log.WithError(err).Error("Could not add aggregator indices to logs")
if v.emitAccountMetrics {
ValidatorAggFailVec.WithLabelValues(fmtKey).Inc()

View File

@@ -133,17 +133,16 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
}
var aggregationBitfield bitfield.Bitlist
var attestation ethpb.Att
var attResp *ethpb.AttestResponse
if postElectra {
sa := &ethpb.SingleAttestation{
attestation := &ethpb.SingleAttestation{
Data: data,
AttesterIndex: duty.ValidatorIndex,
CommitteeId: duty.CommitteeIndex,
Signature: sig,
}
attestation = sa
attResp, err = v.validatorClient.ProposeAttestationElectra(ctx, sa)
attResp, err = v.validatorClient.ProposeAttestationElectra(ctx, attestation)
} else {
var indexInCommittee uint64
var found bool
@@ -163,13 +162,12 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
}
aggregationBitfield = bitfield.NewBitlist(uint64(len(duty.Committee)))
aggregationBitfield.SetBitAt(indexInCommittee, true)
a := &ethpb.Attestation{
attestation := &ethpb.Attestation{
Data: data,
AggregationBits: aggregationBitfield,
Signature: sig,
}
attestation = a
attResp, err = v.validatorClient.ProposeAttestation(ctx, a)
attResp, err = v.validatorClient.ProposeAttestation(ctx, attestation)
}
if err != nil {
log.WithError(err).Error("Could not submit attestation to beacon node")
@@ -180,7 +178,7 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
return
}
if err := v.saveSubmittedAtt(attestation, pubKey[:], false); err != nil {
if err := v.saveSubmittedAtt(data, pubKey[:], false); err != nil {
log.WithError(err).Error("Could not save validator index for logging")
if v.emitAccountMetrics {
ValidatorAttestFailVec.WithLabelValues(fmtKey).Inc()

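The phase0 branch above sizes the aggregation bitlist to the committee and sets only this validator's position. The construction in isolation (committee size and index are illustrative):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	committeeSize := uint64(8)
	indexInCommittee := uint64(3)
	bits := bitfield.NewBitlist(committeeSize)
	bits.SetBitAt(indexInCommittee, true)
	fmt.Println(bits.BitIndices()) // [3]
}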
View File

@@ -49,10 +49,10 @@ func (k submittedAttKey) FromAttData(data *ethpb.AttestationData) error {
// saveSubmittedAtt saves the submitted attestation data along with the attester's pubkey.
// The purpose of this is to display combined attesting logs for all keys managed by the validator client.
func (v *validator) saveSubmittedAtt(att ethpb.Att, pubkey []byte, isAggregate bool) error {
func (v *validator) saveSubmittedAtt(data *ethpb.AttestationData, pubkey []byte, isAggregate bool) error {
v.attLogsLock.Lock()
defer v.attLogsLock.Unlock()
data := att.GetData()
key := submittedAttKey{}
if err := key.FromAttData(data); err != nil {
return errors.Wrapf(err, "could not create submitted attestation key")
@@ -80,7 +80,7 @@ func (v *validator) saveSubmittedAtt(att ethpb.Att, pubkey []byte, isAggregate b
submittedAtts[key] = &submittedAtt{
d,
append(submittedAtts[key].pubkeys, pubkey),
append(submittedAtts[key].committees, att.GetCommitteeIndex()),
append(submittedAtts[key].committees, data.CommitteeIndex),
}
return nil

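saveSubmittedAtt above groups submissions by attestation data so one combined log line can list every pubkey and committee. A simplified sketch of that accumulation (the types and keys here are illustrative):

package main

import "fmt"

type submitted struct{ pubkeys [][]byte }

func main() {
	byKey := map[string]*submitted{}
	save := func(key string, pubkey []byte) {
		if byKey[key] == nil {
			byKey[key] = &submitted{}
		}
		byKey[key].pubkeys = append(byKey[key].pubkeys, pubkey)
	}
	save("slot1-root1", []byte{0xaa})
	save("slot1-root1", []byte{0xbb})
	fmt.Println(len(byKey["slot1-root1"].pubkeys)) // 2
}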
View File

@@ -1,84 +0,0 @@
package client
import (
"testing"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestLogSubmittedAtts(t *testing.T) {
t.Run("phase0 attestations", func(t *testing.T) {
logHook := logTest.NewGlobal()
v := validator{
submittedAtts: make(map[submittedAttKey]*submittedAtt),
}
att := util.HydrateAttestation(&ethpb.Attestation{})
att.Data.CommitteeIndex = 12
require.NoError(t, v.saveSubmittedAtt(att, make([]byte, field_params.BLSPubkeyLength), false))
v.LogSubmittedAtts(0)
assert.LogsContain(t, logHook, "committeeIndices=\"[12]\"")
})
t.Run("electra attestations", func(t *testing.T) {
logHook := logTest.NewGlobal()
v := validator{
submittedAtts: make(map[submittedAttKey]*submittedAtt),
}
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{})
att.Data.CommitteeIndex = 0
att.CommitteeBits = primitives.NewAttestationCommitteeBits()
att.CommitteeBits.SetBitAt(44, true)
require.NoError(t, v.saveSubmittedAtt(att, make([]byte, field_params.BLSPubkeyLength), false))
v.LogSubmittedAtts(0)
assert.LogsContain(t, logHook, "committeeIndices=\"[44]\"")
})
t.Run("electra attestations multiple saved", func(t *testing.T) {
logHook := logTest.NewGlobal()
v := validator{
submittedAtts: make(map[submittedAttKey]*submittedAtt),
}
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{})
att.Data.CommitteeIndex = 0
att.CommitteeBits = primitives.NewAttestationCommitteeBits()
att.CommitteeBits.SetBitAt(23, true)
require.NoError(t, v.saveSubmittedAtt(att, make([]byte, field_params.BLSPubkeyLength), false))
att2 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{})
att2.Data.CommitteeIndex = 0
att2.CommitteeBits = primitives.NewAttestationCommitteeBits()
att2.CommitteeBits.SetBitAt(2, true)
require.NoError(t, v.saveSubmittedAtt(att2, make([]byte, field_params.BLSPubkeyLength), false))
v.LogSubmittedAtts(0)
assert.LogsContain(t, logHook, "committeeIndices=\"[23 2]\"")
})
t.Run("phase0 aggregates", func(t *testing.T) {
logHook := logTest.NewGlobal()
v := validator{
submittedAggregates: make(map[submittedAttKey]*submittedAtt),
}
agg := &ethpb.AggregateAttestationAndProof{}
agg.Aggregate = util.HydrateAttestation(&ethpb.Attestation{})
agg.Aggregate.Data.CommitteeIndex = 12
require.NoError(t, v.saveSubmittedAtt(agg.AggregateVal(), make([]byte, field_params.BLSPubkeyLength), true))
v.LogSubmittedAtts(0)
assert.LogsContain(t, logHook, "committeeIndices=\"[12]\"")
})
t.Run("electra aggregates", func(t *testing.T) {
logHook := logTest.NewGlobal()
v := validator{
submittedAggregates: make(map[submittedAttKey]*submittedAtt),
}
agg := &ethpb.AggregateAttestationAndProofElectra{}
agg.Aggregate = util.HydrateAttestationElectra(&ethpb.AttestationElectra{})
agg.Aggregate.Data.CommitteeIndex = 0
agg.Aggregate.CommitteeBits = primitives.NewAttestationCommitteeBits()
agg.Aggregate.CommitteeBits.SetBitAt(63, true)
require.NoError(t, v.saveSubmittedAtt(agg.AggregateVal(), make([]byte, field_params.BLSPubkeyLength), true))
v.LogSubmittedAtts(0)
assert.LogsContain(t, logHook, "committeeIndices=\"[63]\"")
})
}