Compare commits

...

23 Commits

Author SHA1 Message Date
Preston Van Loon
36ea65ae93 kasey: Option 2 2024-03-23 12:15:00 -05:00
Bharath Vedartham
3d2230223f create the log file along with its parent directory if not present (#12675)
* Remove Feature Flag From Prater (#12082)

* Use Epoch boundary cache to retrieve balances (#12083)

* Use Epoch boundary cache to retrieve balances

* save boundary states before inserting to forkchoice

* move up last block save

* remove boundary checks on balances

* fix ordering

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* create the log file along with its parent directory if not present

* only give ReadWritePermissions to the log file

* use io/file package to create the parent directories

* fix ci related issues

* add regression tests

* run gazelle

* fix tests

* remove print statements

* gazelle

* Remove failing test for MkdirAll, this failure is not expected

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-22 15:32:08 +00:00
Preston Van Loon
b008a6422d Add tarball support for docker images (#13790) 2024-03-22 15:31:29 +00:00
Fredrik Svantes
d19365507f Set default LocalBlockValueBoost to 10 (#13772)
* Set default LocalBlockValueBoost to 10

* Update base.go

* Update mainnet_config.go
2024-03-22 13:18:20 +00:00
kasey
c05e39a668 fix handling of goodbye messages for limited peers (#13785)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-22 13:06:16 +00:00
Radosław Kapka
63c2b3563a Optimize GetDuties VC action (#13789)
* wait groups

* errgroup

* tests

* bzl

* review
2024-03-22 09:50:19 +00:00
Justin Traglia
a6e86c6731 Rename payloadattribute Timestamps to Timestamp (#13523)
Co-authored-by: terence <terence@prysmaticlabs.com>
2024-03-21 21:11:01 +00:00
Radosław Kapka
32fb183392 Modify the algorithm of updateFinalizedBlockRoots (#13486)
* rename error var

* new algo

* replay_test

* add comment

* review

* fill out parent root

* handle edge cases

* review
2024-03-21 21:09:56 +00:00
carrychair
cade09ba0b chore: fix some typos (#13726)
Signed-off-by: carrychair <linghuchong404@gmail.com>
2024-03-21 21:00:21 +00:00
Potuz
f85ddfe265 Log the slot and blockroot when we deadline waiting for blobs (#13774) 2024-03-21 20:29:23 +00:00
terence
3b97094ea4 Log da block root in hex (#13787) 2024-03-21 20:26:17 +00:00
Nishant Das
acdbf7c491 expand it (#13770) 2024-03-21 19:57:22 +00:00
Potuz
1cc1effd75 Revert "pass justified=finalized in Prater (#13695)" (#13709)
This reverts commit 102518e106.
2024-03-21 17:42:40 +00:00
james-prysm
f7f1d249f2 Fix get validator endpoint for empty query parameters (#13780)
* fix handlers for get validators

* removing log
2024-03-21 14:00:07 +00:00
kasey
02abb3e3c0 add log message if in da check at slot end (#13776)
* add log message if in da check at slot end

* don't bother logging late da check start

* break up defer with a var, too dense all together

* pass slot instead of block ref

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-20 19:31:09 +00:00
james-prysm
2255c8b287 setting missing beacon API (#13778) 2024-03-20 17:59:15 +00:00
terence
27ecf448a7 Add da waited time to sync block log (#13775) 2024-03-20 14:53:02 +00:00
james-prysm
e243f04e44 validator client on rest mode has an inappropriate context deadline for events (#13771)
* addressing errors on events endpoint

* reverting timeout on get health

* fixing linting

* fixing more linting

* Update validator/client/beacon-api/beacon_api_validator_client.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update beacon-chain/rpc/eth/events/events.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* reverting change and removing line on context done which creates a superfluous response.WriteHeader error

* gofmt

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-20 13:19:05 +00:00
Manu NALEPA
fca1adbad7 Re-design TestStartDiscV5_DiscoverPeersWithSubnets test (#13766)
* `Test_AttSubnets`: Factorize.

* `filterPeerForAttSubnet`: `O(n)` ==> `O(1)`

* `FindPeersWithSubnet`: Optimize.

* `TestStartDiscV5_DiscoverPeersWithSubnets`: Complete re-design.

* `broadcastAttestation`: User `log.WithFields`.

* `filterPeer`: Refactor comments.

* Make deepsource happy.

* `TestStartDiscV5_FindPeersWithSubnet`: Add context cancellation.

Add some notes on `FindPeersWithSubnet` about
this limitation as well.
2024-03-20 03:36:00 +00:00
Radosław Kapka
b692722ddf Optimize SubmitAggregateSelectionProof VC action (#13711)
* Optimize `SubscribeCommitteeSubnets` VC action

* test fixes

* remove newline

* Optimize `SubmitAggregateSelectionProof`

* mock

* bzl gzl

* test fixes
2024-03-19 14:09:07 +00:00
Nishant Das
c4f6020677 add mplex timeout (#13745) 2024-03-19 13:37:23 +00:00
Chanh Le
d779e65d4e chore(kzg): Additional tests for KZG commitments (#13758)
* add a test explaining kzgRootIndex

* minor

* minor
2024-03-19 09:08:02 +00:00
terence
357211b7d9 Update spec test to official 1.4.0 (#13761) 2024-03-18 23:39:03 +00:00
56 changed files with 1285 additions and 1246 deletions

View File

@@ -2,7 +2,7 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Consensus_Spec_Version 1.4.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.4.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

View File

@@ -130,9 +130,9 @@ aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
strip_prefix = "rules_oci-1.7.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -243,9 +243,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-beta.7"
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
consensus_spec_version = "v1.4.0"
bls_test_version = "v0.1.1"
@@ -262,7 +260,7 @@ filegroup(
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -278,7 +276,7 @@ filegroup(
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -294,7 +292,7 @@ filegroup(
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -308,7 +306,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -82,19 +82,20 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"deposits": len(block.Body().Deposits()),
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -558,6 +559,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
@@ -571,11 +586,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
}
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
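
Editor's note: the hunk above arms a one-shot timer for the start of the next slot and cancels it once the data-availability wait returns. Below is a minimal, self-contained sketch of that timer pattern, assuming a 12-second slot and using a plain counter in place of the real blob-notification channel; it is illustrative only and not taken from the diff.

package main

import (
	"fmt"
	"time"
)

func main() {
	const slotDuration = 12 * time.Second // assumption: mainnet slot time
	slotStart := time.Now()
	nextSlot := slotStart.Add(slotDuration)
	blobsMissing := 2 // pretend two blob sidecars are still outstanding

	// Only arm the warning if the next slot has not started yet.
	if nextSlot.After(time.Now()) {
		warn := time.AfterFunc(time.Until(nextSlot), func() {
			if blobsMissing > 0 {
				fmt.Printf("still waiting for %d blobs at slot end\n", blobsMissing)
			}
		})
		// Stopping the timer when the wait returns ensures the warning only
		// fires if the DA check is still pending at the slot boundary.
		defer warn.Stop()
	}

	// The real code blocks here on the blob-notification channel; this sketch
	// just sleeps past the boundary so the warning fires once.
	time.Sleep(slotDuration + time.Second)
}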

View File

@@ -290,18 +290,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if params.BeaconConfig().ConfigName != params.PraterName {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
} else {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")

View File

@@ -224,7 +224,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
return ErrDeleteJustifiedAndFinalized
return ErrDeleteFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {

View File

@@ -289,7 +289,7 @@ func TestStore_DeleteBlock(t *testing.T) {
require.Equal(t, b, nil)
require.Equal(t, false, db.HasStateSummary(ctx, root2))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
}
func TestStore_DeleteJustifiedBlock(t *testing.T) {
@@ -309,7 +309,7 @@ func TestStore_DeleteJustifiedBlock(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
}
func TestStore_DeleteFinalizedBlock(t *testing.T) {
@@ -329,7 +329,7 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)

View File

@@ -2,8 +2,8 @@ package kv
import "github.com/pkg/errors"
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
// ErrDeleteFinalized is raised when we attempt to delete a finalized block/state
var ErrDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
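
Editor's note: a sentinel error like ErrDeleteFinalized is typically checked with errors.Is at the call site. The sketch below illustrates that usage under stated assumptions: deleteBlock and errDeleteFinalized are local placeholders for Store.DeleteBlock and the package's exported sentinel, not the real API.

package example

import (
	"context"
	"errors"
	"log"
)

// errDeleteFinalized stands in for the package's ErrDeleteFinalized sentinel.
var errDeleteFinalized = errors.New("cannot delete finalized block or state")

// deleteBlock is a placeholder for Store.DeleteBlock; it always refuses here.
func deleteBlock(ctx context.Context, root [32]byte) error { return errDeleteFinalized }

func tryDelete(ctx context.Context, root [32]byte) {
	if err := deleteBlock(ctx, root); errors.Is(err, errDeleteFinalized) {
		// Finalized data must not be deleted; treat this as a no-op.
		log.Printf("skipping deletion of finalized block %#x", root)
	}
}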

View File

@@ -5,7 +5,6 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -29,72 +28,76 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
// finalized epoch from being indexed as "final and canonical".
//
// The algorithm for building the index works as follows:
// - De-index all finalized beacon block roots from previous_finalized_epoch to
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
//
// This method ensures that all blocks from the current finalized epoch are considered "final" while
// maintaining only canonical and finalized blocks older than the current finalized epoch.
// The main part of the algorithm traverses parent->child block relationships in the
// `blockParentRootIndicesBucket` bucket to find the path between the last finalized checkpoint
// and the current finalized checkpoint. It relies on the invariant that there is a unique path
// between two finalized checkpoints.
func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, checkpoint *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateFinalizedBlockRoots")
defer span.End()
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
root := checkpoint.Root
var previousRoot []byte
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
// De-index recent finalized block roots, to be re-indexed.
finalizedBkt := tx.Bucket(finalizedBlockRootsIndexBucket)
previousFinalizedCheckpoint := &ethpb.Checkpoint{}
if b := bkt.Get(previousFinalizedCheckpointKey); b != nil {
if b := finalizedBkt.Get(previousFinalizedCheckpointKey); b != nil {
if err := decode(ctx, b, previousFinalizedCheckpoint); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
blockRoots, err := s.BlockRoots(ctx, filters.NewFilter().
SetStartEpoch(previousFinalizedCheckpoint.Epoch).
SetEndEpoch(checkpoint.Epoch+1),
)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
for _, root := range blockRoots {
if err := bkt.Delete(root[:]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
// index bucket or genesis block root.
for {
if bytes.Equal(root, genesisRoot) {
break
}
signedBlock, err := s.Block(ctx, bytesutil.ToBytes32(root))
// Handle the case of checkpoint sync.
if previousFinalizedCheckpoint.Root == nil && bytes.Equal(checkpoint.Root, tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)) {
container := &ethpb.FinalizedBlockRootContainer{}
enc, err := encode(ctx, container)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := blocks.BeaconBlockIsNil(signedBlock); err != nil {
if err = finalizedBkt.Put(checkpoint.Root, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
block := signedBlock.Block()
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
}
parentRoot := block.ParentRoot()
container := &ethpb.FinalizedBlockRootContainer{
ParentRoot: parentRoot[:],
ChildRoot: previousRoot,
var finalized [][]byte
if previousFinalizedCheckpoint.Root == nil {
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{genesisRoot}, checkpoint.Root, tx)
} else {
if err := updateChildOfPrevFinalizedCheckpoint(
ctx,
span,
finalizedBkt,
tx.Bucket(blockParentRootIndicesBucket), previousFinalizedCheckpoint.Root,
); err != nil {
return err
}
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{previousFinalizedCheckpoint.Root}, checkpoint.Root, tx)
}
for i, r := range finalized {
var container *ethpb.FinalizedBlockRootContainer
switch i {
case 0:
container = &ethpb.FinalizedBlockRootContainer{
ParentRoot: previousFinalizedCheckpoint.Root,
}
if len(finalized) > 1 {
container.ChildRoot = finalized[i+1]
}
case len(finalized) - 1:
// We don't know the finalized child of the new finalized checkpoint.
// It will be filled out in the next function call.
container = &ethpb.FinalizedBlockRootContainer{}
if len(finalized) > 1 {
container.ParentRoot = finalized[i-1]
}
default:
container = &ethpb.FinalizedBlockRootContainer{
ParentRoot: finalized[i-1],
ChildRoot: finalized[i+1],
}
}
enc, err := encode(ctx, container)
@@ -102,66 +105,13 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
tracing.AnnotateError(span, err)
return err
}
if err := bkt.Put(root, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
// breaking here allows the initial checkpoint root to be correctly inserted,
// but stops the loop from trying to search for its parent.
if bytes.Equal(root, initCheckpointRoot) {
break
}
// Found parent, loop exit condition.
pr := block.ParentRoot()
if parentBytes := bkt.Get(pr[:]); parentBytes != nil {
parent := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, parentBytes, parent); err != nil {
tracing.AnnotateError(span, err)
return err
}
parent.ChildRoot = root
enc, err := encode(ctx, parent)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := bkt.Put(pr[:], enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
break
}
previousRoot = root
root = pr[:]
}
// Upsert blocks from the current finalized epoch.
roots, err := s.BlockRoots(ctx, filters.NewFilter().SetStartEpoch(checkpoint.Epoch).SetEndEpoch(checkpoint.Epoch+1))
if err != nil {
tracing.AnnotateError(span, err)
return err
}
for _, root := range roots {
root := root[:]
if bytes.Equal(root, checkpoint.Root) || bkt.Get(root) != nil {
continue
}
if err := bkt.Put(root, containerFinalizedButNotCanonical); err != nil {
if err = finalizedBkt.Put(r, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
// Update previous checkpoint
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
return bkt.Put(previousFinalizedCheckpointKey, enc)
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
}
// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
@@ -242,8 +192,6 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
// A beacon block root contained exists in this index if it is considered finalized and canonical.
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
// considered canonical in the "head view" of the beacon node.
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
defer span.End()
@@ -296,3 +244,53 @@ func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (in
tracing.AnnotateError(span, err)
return blk, err
}
func pathToFinalizedCheckpoint(ctx context.Context, roots [][]byte, checkpointRoot []byte, tx *bolt.Tx) (bool, [][]byte) {
if len(roots) == 0 || (len(roots) == 1 && roots[0] == nil) {
return false, nil
}
for _, r := range roots {
if bytes.Equal(r, checkpointRoot) {
return true, [][]byte{r}
}
children := lookupValuesForIndices(ctx, map[string][]byte{string(blockParentRootIndicesBucket): r}, tx)
if len(children) == 0 {
children = [][][]byte{nil}
}
isPath, path := pathToFinalizedCheckpoint(ctx, children[0], checkpointRoot, tx)
if isPath {
return true, append([][]byte{r}, path...)
}
}
return false, nil
}
func updatePrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt *bolt.Bucket, checkpoint *ethpb.Checkpoint) error {
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
return finalizedBkt.Put(previousFinalizedCheckpointKey, enc)
}
func updateChildOfPrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt, parentBkt *bolt.Bucket, checkpointRoot []byte) error {
container := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, finalizedBkt.Get(checkpointRoot), container); err != nil {
tracing.AnnotateError(span, err)
return err
}
container.ChildRoot = parentBkt.Get(checkpointRoot)
enc, err := encode(ctx, container)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err = finalizedBkt.Put(checkpointRoot, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
return nil
}
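
Editor's note: per the rewritten comment above, each entry in the finalized index records the finalized parent and child of a block root, so consecutive finalized checkpoints are linked by a chain of containers. The sketch below illustrates that layout with a plain map standing in for the bolt bucket and short strings in place of 32-byte roots; it shows the data structure only, not the Prysm implementation.

package main

import "fmt"

type container struct {
	ParentRoot string
	ChildRoot  string
}

func main() {
	// a <- b <- c, where a is the previous finalized checkpoint and c the new one.
	index := map[string]container{
		"a": {ParentRoot: "", ChildRoot: "b"},
		"b": {ParentRoot: "a", ChildRoot: "c"},
		"c": {ParentRoot: "b", ChildRoot: ""}, // child is filled in on the next finalization
	}

	// Walk the canonical finalized chain forward via ChildRoot pointers.
	for root := "a"; root != ""; root = index[root].ChildRoot {
		fmt.Println("finalized and canonical:", root)
	}
}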

View File

@@ -26,38 +26,30 @@ func TestStore_IsFinalizedBlock(t *testing.T) {
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
// All blocks up to slotsPerEpoch*2 should be in the finalized index.
for i := uint64(0); i < slotsPerEpoch*2; i++ {
root, err := blks[i].Block().HashTreeRoot()
for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
}
for i := slotsPerEpoch * 3; i < uint64(len(blks)); i++ {
root, err := blks[i].Block().HashTreeRoot()
for i := slotsPerEpoch + 1; i < uint64(len(blks)); i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized in the index, but should not have", i)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized, but should not have", i)
}
}
func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
func TestStore_IsFinalizedGenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -69,136 +61,114 @@ func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Finalized genesis block doesn't exist in db")
}
// This test scenario is to test a specific edge case where the finalized block root is not part of
// the finalized and canonical chain.
//
// Example:
// 0 1 2 3 4 5 6 slot
// a <- b <-- d <- e <- f <- g roots
//
//      ^- c
//
// Imagine that epochs are 2 slots and that epoch 1, 2, and 3 are finalized. Checkpoint roots would
// be c, e, and g. In this scenario, c was a finalized checkpoint root but no block built upon it so
// it should not be considered "final and canonical" in the view at slot 6.
func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
blocks0 := makeBlocks(t, slotsPerEpoch*0, slotsPerEpoch, genesisBlockRoot)
blocks1 := append(
makeBlocks(t, slotsPerEpoch*1, 1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1]))), // No block builds off of the first block in epoch.
makeBlocks(t, slotsPerEpoch*1+1, slotsPerEpoch-1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1])))...,
)
blocks2 := makeBlocks(t, slotsPerEpoch*2, slotsPerEpoch, bytesutil.ToBytes32(sszRootOrDie(t, blocks1[len(blocks1)-1])))
db := setupDB(t)
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
require.NoError(t, db.SaveBlocks(ctx, blocks0))
require.NoError(t, db.SaveBlocks(ctx, blocks1))
require.NoError(t, db.SaveBlocks(ctx, blocks2))
// First checkpoint
checkpoint1 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks1[0]),
Epoch: 1,
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint1.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint1))
// All blocks in blocks0 and blocks1 should be finalized and canonical.
for i, block := range append(blocks0, blocks1...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}
// Second checkpoint
checkpoint2 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks2[0]),
Epoch: 2,
}
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint2.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint2))
// All blocks in blocks0 and blocks2 should be finalized and canonical.
for i, block := range append(blocks0, blocks2...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}
// All blocks in blocks1 should be finalized and canonical, except blocks1[0].
for i, block := range blocks1 {
root := sszRootOrDie(t, block)
if db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)) == (i == 0) {
t.Errorf("Expected db.IsFinalizedBlock(ctx, blocks1[%d]) to be %v", i, i != 0)
}
}
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root))
}
func TestStore_IsFinalizedChildBlock(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.ReadOnlySignedBeaconBlock) {
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
// All blocks up to slotsPerEpoch should have a finalized child block.
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
if blk == nil {
t.Error("Child block doesn't exist for valid finalized block.")
}
}
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
setup := func(t testing.TB) *Store {
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
return db
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
assert.Equal(t, false, blk == nil, "Child block at index %d was not considered finalized", i)
}
t.Run("phase0", func(t *testing.T) {
db := setup(t)
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})
t.Run("altair", func(t *testing.T) {
db := setup(t)
blks := makeBlocksAltair(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})
}
func sszRootOrDie(t *testing.T, block interfaces.ReadOnlySignedBeaconBlock) []byte {
root, err := block.Block().HashTreeRoot()
func TestStore_ChildRootOfPrevFinalizedCheckpointIsUpdated(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
return root[:]
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
root2, err := blks[slotsPerEpoch*2].Block().HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{
Epoch: 2,
Root: root2[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
container := &ethpb.FinalizedBlockRootContainer{}
f := tx.Bucket(finalizedBlockRootsIndexBucket).Get(root[:])
require.NoError(t, decode(ctx, f, container))
r, err := blks[slotsPerEpoch+1].Block().HashTreeRoot()
require.NoError(t, err)
assert.DeepEqual(t, r[:], container.ChildRoot)
return nil
}))
}
func TestStore_OrphanedBlockIsNotFinalized(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blk0 := util.NewBeaconBlock()
blk0.Block.ParentRoot = genesisBlockRoot[:]
blk0Root, err := blk0.Block.HashTreeRoot()
require.NoError(t, err)
blk1 := util.NewBeaconBlock()
blk1.Block.Slot = 1
blk1.Block.ParentRoot = blk0Root[:]
blk2 := util.NewBeaconBlock()
blk2.Block.Slot = 2
// orphan block at index 1
blk2.Block.ParentRoot = blk0Root[:]
blk2Root, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
sBlk0, err := consensusblocks.NewSignedBeaconBlock(blk0)
require.NoError(t, err)
sBlk1, err := consensusblocks.NewSignedBeaconBlock(blk1)
require.NoError(t, err)
sBlk2, err := consensusblocks.NewSignedBeaconBlock(blk2)
require.NoError(t, err)
blks := append([]interfaces.ReadOnlySignedBeaconBlock{sBlk0, sBlk1, sBlk2}, makeBlocks(t, 3, slotsPerEpoch*2-3, blk2Root)...)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
if i == 1 {
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index 1 was considered finalized, but should not have")
} else {
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
}
}
}
func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
@@ -219,24 +189,6 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.R
return ifaceBlocks
}
func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
blocks := make([]*ethpb.SignedBeaconBlockAltair, num)
ifaceBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, num)
for j := startIdx; j < num+startIdx; j++ {
parentRoot := make([]byte, fieldparams.RootLength)
copy(parentRoot, previousRoot[:])
blocks[j-startIdx] = util.NewBeaconBlockAltair()
blocks[j-startIdx].Block.Slot = primitives.Slot(j + 1)
blocks[j-startIdx].Block.ParentRoot = parentRoot
var err error
previousRoot, err = blocks[j-startIdx].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-startIdx], err = consensusblocks.NewSignedBeaconBlock(blocks[j-startIdx])
require.NoError(t, err)
}
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

View File

@@ -458,7 +458,7 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(stateBucket)
// Safeguard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
return ErrDeleteJustifiedAndFinalized
return ErrDeleteFinalized
}
// Nothing to delete if state doesn't exist.

View File

@@ -94,6 +94,7 @@ go_library(
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
@@ -68,7 +69,7 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastAttestation(ctx, subnet, att, forkDigest)
go s.internalBroadcastAttestation(ctx, subnet, att, forkDigest)
return nil
}
@@ -94,8 +95,8 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -137,7 +138,10 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
// acceptable threshold, we exit early and do not broadcast it.
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.Warnf("Attestation is too old to broadcast, discarding it. Current Slot: %d , Attestation Slot: %d", currSlot, att.Data.Slot)
log.WithFields(logrus.Fields{
"attestationSlot": att.Data.Slot,
"currentSlot": currSlot,
}).Warning("Attestation is too old to broadcast, discarding it")
return
}
@@ -218,13 +222,13 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
go s.internalBroadcastBlob(ctx, subnet, blob, forkDigest)
return nil
}
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

View File

@@ -277,58 +277,69 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
// 1. The local node is still actively looking for peers to
// connect to.
// 2. Peer has a valid IP and TCP port set in their enr.
// 3. Peer hasn't been marked as 'bad'
// 4. Peer is not currently active or connected.
// 5. Peer is ready to receive incoming connections.
// 6. Peer's fork digest in their ENR matches that of
// 1. Peer has a valid IP and TCP port set in their enr.
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
// 5. Peer's fork digest in their ENR matches that of
// our localnodes.
func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nil node entries passed in.
if node == nil {
return false
}
// ignore nodes with no ip address stored.
// Ignore nodes with no IP address stored.
if node.IP() == nil {
return false
}
// do not dial nodes with their tcp ports not set
// Ignore nodes with their TCP ports not set.
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
if !enr.IsNotFound(err) {
log.WithError(err).Debug("Could not retrieve tcp port")
}
return false
}
peerData, multiAddr, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
return false
}
// Ignore bad nodes.
if s.peers.IsBad(peerData.ID) {
return false
}
// Ignore nodes that are already active.
if s.peers.IsActive(peerData.ID) {
return false
}
// Ignore nodes that are already connected.
if s.host.Network().Connectedness(peerData.ID) == network.Connected {
return false
}
// Ignore nodes that are not ready to receive incoming connections.
if !s.peers.IsReadyToDial(peerData.ID) {
return false
}
// Ignore nodes that don't match our fork digest.
nodeENR := node.Record()
// Decide whether or not to connect to peer that does not
// match the proper fork ENR data with our local node.
if s.genesisValidatorsRoot != nil {
if err := s.compareForkENR(nodeENR); err != nil {
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
return false
}
}
// Add peer to peer handler.
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
return true
}

View File

@@ -4,6 +4,7 @@ import (
"crypto/ecdsa"
"fmt"
"net"
"time"
"github.com/libp2p/go-libp2p"
mplex "github.com/libp2p/go-libp2p-mplex"
@@ -11,6 +12,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
@@ -135,3 +137,8 @@ func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
return cfg.Apply(libp2p.Identity(ifaceKey))
}
}
// Configures stream timeouts on mplex.
func configureMplex() {
gomplex.ResetStreamTimeout = 5 * time.Second
}

View File

@@ -22,7 +22,7 @@ func TestGossipParameters(t *testing.T) {
pms := pubsubGossipParam()
assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
assert.Equal(t, gossipSubMcacheGossip, pms.HistoryGossip, "gossipSubMcacheGossip")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Milliseconds()/pms.HeartbeatInterval.Milliseconds()), "gossipSubSeenTtl")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Seconds()), "gossipSubSeenTtl")
}
func TestFanoutParameters(t *testing.T) {

View File

@@ -25,7 +25,7 @@ const (
// gossip parameters
gossipSubMcacheLen = 6 // number of windows to retain full messages in cache for `IWANT` responses
gossipSubMcacheGossip = 3 // number of windows to gossip about
gossipSubSeenTTL = 550 // number of heartbeat intervals to retain message IDs
gossipSubSeenTTL = 768 // number of seconds to retain message IDs ( 2 epochs)
// fanout ttl
gossipSubFanoutTTL = 60000000000 // TTL for fanout maps for topics we are not subscribed to but have published to, in nano seconds
@@ -165,7 +165,8 @@ func pubsubGossipParam() pubsub.GossipSubParams {
// to configure our message id time-cache rather than instantiating
// it with a router instance.
func setPubSubParameters() {
pubsub.TimeCacheDuration = 550 * gossipSubHeartbeatInterval
seenTtl := 2 * time.Second * time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
pubsub.TimeCacheDuration = seenTtl
}
// convert from libp2p's internal schema to a compatible prysm protobuf format.

View File

@@ -124,7 +124,8 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to build p2p options")
}
// Sets mplex timeouts
configureMplex()
h, err := libp2p.New(opts...)
if err != nil {
log.WithError(err).Error("Failed to create p2p host")

View File

@@ -46,9 +46,13 @@ const syncLockerVal = 100
const blobSubnetLockerVal = 110
// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method will block until the required amount of
// peers are found, the method only exits in the event of context timeouts.
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
// - the required amount of peers are found, or
// - the context is terminated.
// On some edge cases, this method may hang indefinitely while peers
// are actually found. In such a case, the user should cancel the context
// and re-run the method again.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
index uint64, threshold int) (bool, error) {
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
@@ -73,9 +77,9 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
return false, errors.New("no subnet exists for provided topic")
}
currNum := len(s.pubsub.ListPeers(topic))
wg := new(sync.WaitGroup)
for {
currNum := len(s.pubsub.ListPeers(topic))
if currNum >= threshold {
break
}
@@ -99,7 +103,6 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
}
// Wait for all dials to be completed.
wg.Wait()
currNum = len(s.pubsub.ListPeers(topic))
}
return true, nil
}
@@ -110,18 +113,13 @@ func (s *Service) filterPeerForAttSubnet(index uint64) func(node *enode.Node) bo
if !s.filterPeer(node) {
return false
}
subnets, err := attSubnets(node.Record())
if err != nil {
return false
}
indExists := false
for _, comIdx := range subnets {
if comIdx == index {
indExists = true
break
}
}
return indExists
return subnets[index]
}
}
@@ -205,8 +203,10 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
//
// return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
func computeSubscribedSubnets(nodeID enode.ID, epoch primitives.Epoch) ([]uint64, error) {
subs := []uint64{}
for i := uint64(0); i < params.BeaconConfig().SubnetsPerNode; i++ {
subnetsPerNode := params.BeaconConfig().SubnetsPerNode
subs := make([]uint64, 0, subnetsPerNode)
for i := uint64(0); i < subnetsPerNode; i++ {
sub, err := computeSubscribedSubnet(nodeID, epoch, i)
if err != nil {
return nil, err
@@ -281,19 +281,20 @@ func initializeSyncCommSubnets(node *enode.LocalNode) *enode.LocalNode {
// Reads the attestation subnets entry from a node's ENR and determines
// the committee indices of the attestation subnets the node is subscribed to.
func attSubnets(record *enr.Record) ([]uint64, error) {
func attSubnets(record *enr.Record) (map[uint64]bool, error) {
bitV, err := attBitvector(record)
if err != nil {
return nil, err
}
committeeIdxs := make(map[uint64]bool)
// lint:ignore uintcast -- subnet count can be safely cast to int.
if len(bitV) != byteCount(int(attestationSubnetCount)) {
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
return committeeIdxs, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
}
var committeeIdxs []uint64
for i := uint64(0); i < attestationSubnetCount; i++ {
if bitV.BitAt(i) {
committeeIdxs = append(committeeIdxs, i)
committeeIdxs[i] = true
}
}
return committeeIdxs, nil

View File

@@ -3,49 +3,46 @@ package p2p
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
t.Skip("This test is now failing after PR 7885 due to false positive")
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 4
flags.Init(gFlags)
// Reset config.
defer flags.Init(new(flags.GlobalFlags))
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
s := &Service{
cfg: &Config{UDPPort: uint(port)},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
// Topology of this test:
//
//
// Node 1 (subscribed to subnet 1) --\
//                                    |
// Node 2 (subscribed to subnet 2) --+--> BootNode (not subscribed to any subnet) <------- Node 0 (not subscribed to any subnet)
//                                    |
// Node 3 (subscribed to subnet 3) --/
//
// The purpose of this test is to ensure that the "Node 0" (connected only to the boot node) is able to
// find and connect to a node already subscribed to a specific subnet.
// In our case: The node i is subscribed to subnet i, with i = 1, 2, 3
// Define the genesis validators root, to ensure everybody is on the same network.
const genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
require.NoError(t, err)
// Create a context.
ctx := context.Background()
bootNode := bootListener.Self()
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
@@ -53,111 +50,150 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
pollingPeriod = currentPeriod
}()
var listeners []*discover.UDPv5
// Create flags.
params.SetupTestConfigCleanup(t)
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
params.BeaconNetworkConfig().MinimumPeersInSubnetSearch = 1
// Reset config.
defer flags.Init(new(flags.GlobalFlags))
// First, generate a bootstrap node.
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
bootNodeService := &Service{
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootNodeForkDigest, err := bootNodeService.currentForkDigest()
require.NoError(t, err)
bootListener, err := bootNodeService.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
bootNodeENR := bootListener.Self().String()
// Create 3 nodes, each subscribed to a different subnet.
// Each node is connected to the boostrap node.
services := make([]*Service, 0, 3)
for i := 1; i <= 3; i++ {
port = 3000 + i
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
subnet := uint64(i)
service, err := NewService(ctx, &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
UDPPort: uint(port),
}
ipAddr, pkey := createAddrAndPrivKey(t)
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
TCPPort: uint(2000 + i),
UDPPort: uint(3000 + i),
})
require.NoError(t, err)
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
nodeForkDigest, err := service.currentForkDigest()
require.NoError(t, err)
require.Equal(t, true, nodeForkDigest == bootNodeForkDigest, "fork digest of the node doesn't match the boot node")
// Start the service.
service.Start()
// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(uint64(i), true)
bitV.SetBitAt(subnet, true)
entry := enr.WithEntry(attSubnetEnrKey, &bitV)
listener.LocalNode().Set(entry)
listeners = append(listeners, listener)
service.dv5Listener.LocalNode().Set(entry)
// Join and subscribe to the subnet, needed by libp2p.
topic, err := service.pubsub.Join(fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet) + "/ssz_snappy")
require.NoError(t, err)
_, err = topic.Subscribe()
require.NoError(t, err)
// Memoize the service.
services = append(services, service)
}
// Stop the services.
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
for _, service := range services {
err := service.Stop()
require.NoError(t, err)
}
}()
// Make one service on port 4001.
port = 4001
gs := startup.NewClockSynchronizer()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
UDPPort: uint(port),
ClockWaiter: gs,
TCPPort: 2010,
UDPPort: 3010,
}
s, err = NewService(context.Background(), cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
exitRoutine := make(chan bool)
go func() {
s.Start()
<-exitRoutine
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.Start()
defer func() {
err := service.Stop()
require.NoError(t, err)
}()
time.Sleep(50 * time.Millisecond)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(6 * discoveryWaitTime)
// Look up 3 different subnets.
exists := make([]bool, 0, 3)
for i := 1; i <= 3; i++ {
subnet := uint64(i)
topic := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet)
exist := false
// This for loop is used to ensure we don't get stuck in `FindPeersWithSubnet`.
// Read the documentation of `FindPeersWithSubnet` for more details.
for j := 0; j < 3; j++ {
ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
exist, err = service.FindPeersWithSubnet(ctxWithTimeOut, topic, subnet, 1)
require.NoError(t, err)
if exist {
break
}
}
require.NoError(t, err)
exists = append(exists, exist)
// look up 3 different subnets
ctx := context.Background()
exists, err := s.FindPeersWithSubnet(ctx, "", 1, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
exists2, err := s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
exists3, err := s.FindPeersWithSubnet(ctx, "", 3, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
if !exists || !exists2 || !exists3 {
t.Fatal("Peer with subnet doesn't exist")
}
// Update ENR of a peer.
testService := &Service{
dv5Listener: listeners[0],
metaData: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
Attnets: bitfield.NewBitvector64(),
}),
// Check if all peers are found.
for _, exist := range exists {
require.Equal(t, true, exist, "Peer with subnet doesn't exist")
}
cache.SubnetIDs.AddAttesterSubnetID(0, 10)
testService.RefreshENR()
time.Sleep(2 * time.Second)
exists, err = s.FindPeersWithSubnet(ctx, "", 2, flags.Get().MinimumPeersPerSubnet)
require.NoError(t, err)
assert.Equal(t, true, exists, "Peer with subnet doesn't exist")
assert.NoError(t, s.Stop())
exitRoutine <- true
}
func Test_AttSubnets(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
record func(t *testing.T) *enr.Record
record func(localNode *enode.LocalNode) *enr.Record
want []uint64
wantErr bool
errContains string
}{
{
name: "valid record",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
localNode = initializeAttSubnets(localNode)
return localNode.Node().Record()
},
@@ -166,14 +202,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too small subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, []byte{})
localNode.Set(entry)
return localNode.Node().Record()
@@ -184,14 +213,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "half sized subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, 4))
localNode.Set(entry)
return localNode.Node().Record()
@@ -202,14 +224,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "too large subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+1))
localNode.Set(entry)
return localNode.Node().Record()
@@ -220,14 +235,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "very large subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+100))
localNode.Set(entry)
return localNode.Node().Record()
@@ -238,14 +246,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "single subnet",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(0, true)
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
@@ -257,17 +258,10 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "multiple subnets",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
// skip 2 subnets
// Skip every other subnet (keep the even-indexed ones).
if (i+1)%2 == 0 {
continue
}
@@ -285,14 +279,7 @@ func Test_AttSubnets(t *testing.T) {
},
{
name: "all subnets",
record: func(t *testing.T) *enr.Record {
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record: func(localNode *enode.LocalNode) *enr.Record {
bitV := bitfield.NewBitvector64()
for i := uint64(0); i < bitV.Len(); i++ {
bitV.SetBitAt(i, true)
@@ -309,16 +296,35 @@ func Test_AttSubnets(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := attSubnets(tt.record(t))
db, err := enode.OpenDB("")
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
record := tt.record(localNode)
got, err := attSubnets(record)
if (err != nil) != tt.wantErr {
t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr {
assert.ErrorContains(t, tt.errContains, err)
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("syncSubnets() got = %v, want %v", got, tt.want)
want := make(map[uint64]bool, len(tt.want))
for _, subnet := range tt.want {
want[subnet] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("syncSubnets() got = %v, want %v", got, want)
}
})
}
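The assertions above treat the result of attSubnets as a set of subnet indices (map[uint64]bool) decoded from the node's ENR. A simplified sketch of that decoding, assuming attSubnets loads the attnets bitvector entry and returns the indices of the set bits (the real function also validates the entry's length, which is what the error cases above exercise):

// attSubnetsSketch is a hypothetical, simplified stand-in for attSubnets.
func attSubnetsSketch(record *enr.Record) (map[uint64]bool, error) {
	bitV := bitfield.NewBitvector64()
	if err := record.Load(enr.WithEntry(attSubnetEnrKey, &bitV)); err != nil {
		return nil, err
	}
	subnets := make(map[uint64]bool)
	for i := uint64(0); i < bitV.Len(); i++ {
		if bitV.BitAt(i) {
			subnets[i] = true
		}
	}
	return subnets, nil
}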


@@ -25,7 +25,7 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)


@@ -8,6 +8,7 @@ import (
time2 "time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
@@ -23,7 +24,6 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -124,86 +124,93 @@ func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
// stalling while waiting for the first response chunk.
// After that we send a keepalive dummy message every SECONDS_PER_SLOT
// to prevent anyone (e.g. proxy servers) from closing connections.
sendKeepalive(w, flusher)
if err := sendKeepalive(w, flusher); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
keepaliveTicker := time2.NewTicker(time2.Duration(params.BeaconConfig().SecondsPerSlot) * time2.Second)
for {
select {
case event := <-opsChan:
handleBlockOperationEvents(w, flusher, topicsMap, event)
if err := handleBlockOperationEvents(w, flusher, topicsMap, event); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
case event := <-stateChan:
s.handleStateEvents(ctx, w, flusher, topicsMap, event)
if err := s.handleStateEvents(ctx, w, flusher, topicsMap, event); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
case <-keepaliveTicker.C:
sendKeepalive(w, flusher)
if err := sendKeepalive(w, flusher); err != nil {
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
return
}
case <-ctx.Done():
return
}
}
}
func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) {
func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) error {
switch event.Type {
case operation.AggregatedAttReceived:
if _, ok := requestedTopics[AttestationTopic]; !ok {
return
return nil
}
attData, ok := event.Data.(*operation.AggregatedAttReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
}
att := structs.AttFromConsensus(attData.Attestation.Aggregate)
send(w, flusher, AttestationTopic, att)
return send(w, flusher, AttestationTopic, att)
case operation.UnaggregatedAttReceived:
if _, ok := requestedTopics[AttestationTopic]; !ok {
return
return nil
}
attData, ok := event.Data.(*operation.UnAggregatedAttReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, AttestationTopic)
}
att := structs.AttFromConsensus(attData.Attestation)
send(w, flusher, AttestationTopic, att)
return send(w, flusher, AttestationTopic, att)
case operation.ExitReceived:
if _, ok := requestedTopics[VoluntaryExitTopic]; !ok {
return
return nil
}
exitData, ok := event.Data.(*operation.ExitReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, VoluntaryExitTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, VoluntaryExitTopic)
}
exit := structs.SignedExitFromConsensus(exitData.Exit)
send(w, flusher, VoluntaryExitTopic, exit)
return send(w, flusher, VoluntaryExitTopic, exit)
case operation.SyncCommitteeContributionReceived:
if _, ok := requestedTopics[SyncCommitteeContributionTopic]; !ok {
return
return nil
}
contributionData, ok := event.Data.(*operation.SyncCommitteeContributionReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, SyncCommitteeContributionTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, SyncCommitteeContributionTopic)
}
contribution := structs.SignedContributionAndProofFromConsensus(contributionData.Contribution)
send(w, flusher, SyncCommitteeContributionTopic, contribution)
return send(w, flusher, SyncCommitteeContributionTopic, contribution)
case operation.BLSToExecutionChangeReceived:
if _, ok := requestedTopics[BLSToExecutionChangeTopic]; !ok {
return
return nil
}
changeData, ok := event.Data.(*operation.BLSToExecutionChangeReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, BLSToExecutionChangeTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, BLSToExecutionChangeTopic)
}
send(w, flusher, BLSToExecutionChangeTopic, structs.SignedBLSChangeFromConsensus(changeData.Change))
return send(w, flusher, BLSToExecutionChangeTopic, structs.SignedBLSChangeFromConsensus(changeData.Change))
case operation.BlobSidecarReceived:
if _, ok := requestedTopics[BlobSidecarTopic]; !ok {
return
return nil
}
blobData, ok := event.Data.(*operation.BlobSidecarReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, BlobSidecarTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, BlobSidecarTopic)
}
versionedHash := blockchain.ConvertKzgCommitmentToVersionedHash(blobData.Blob.KzgCommitment)
blobEvent := &structs.BlobSidecarEvent{
@@ -213,38 +220,36 @@ func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, req
VersionedHash: versionedHash.String(),
KzgCommitment: hexutil.Encode(blobData.Blob.KzgCommitment),
}
send(w, flusher, BlobSidecarTopic, blobEvent)
return send(w, flusher, BlobSidecarTopic, blobEvent)
case operation.AttesterSlashingReceived:
if _, ok := requestedTopics[AttesterSlashingTopic]; !ok {
return
return nil
}
attesterSlashingData, ok := event.Data.(*operation.AttesterSlashingReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic)
}
send(w, flusher, AttesterSlashingTopic, structs.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing))
return send(w, flusher, AttesterSlashingTopic, structs.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing))
case operation.ProposerSlashingReceived:
if _, ok := requestedTopics[ProposerSlashingTopic]; !ok {
return
return nil
}
proposerSlashingData, ok := event.Data.(*operation.ProposerSlashingReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic)
}
send(w, flusher, ProposerSlashingTopic, structs.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing))
return send(w, flusher, ProposerSlashingTopic, structs.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing))
}
return nil
}
func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) {
func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, flusher http.Flusher, requestedTopics map[string]bool, event *feed.Event) error {
switch event.Type {
case statefeed.NewHead:
if _, ok := requestedTopics[HeadTopic]; ok {
headData, ok := event.Data.(*ethpb.EventHead)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, HeadTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, HeadTopic)
}
head := &structs.HeadEvent{
Slot: fmt.Sprintf("%d", headData.Slot),
@@ -255,23 +260,22 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
PreviousDutyDependentRoot: hexutil.Encode(headData.PreviousDutyDependentRoot),
CurrentDutyDependentRoot: hexutil.Encode(headData.CurrentDutyDependentRoot),
}
send(w, flusher, HeadTopic, head)
return send(w, flusher, HeadTopic, head)
}
if _, ok := requestedTopics[PayloadAttributesTopic]; ok {
s.sendPayloadAttributes(ctx, w, flusher)
return s.sendPayloadAttributes(ctx, w, flusher)
}
case statefeed.MissedSlot:
if _, ok := requestedTopics[PayloadAttributesTopic]; ok {
s.sendPayloadAttributes(ctx, w, flusher)
return s.sendPayloadAttributes(ctx, w, flusher)
}
case statefeed.FinalizedCheckpoint:
if _, ok := requestedTopics[FinalizedCheckpointTopic]; !ok {
return
return nil
}
checkpointData, ok := event.Data.(*ethpb.EventFinalizedCheckpoint)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, FinalizedCheckpointTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, FinalizedCheckpointTopic)
}
checkpoint := &structs.FinalizedCheckpointEvent{
Block: hexutil.Encode(checkpointData.Block),
@@ -279,15 +283,14 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
Epoch: fmt.Sprintf("%d", checkpointData.Epoch),
ExecutionOptimistic: checkpointData.ExecutionOptimistic,
}
send(w, flusher, FinalizedCheckpointTopic, checkpoint)
return send(w, flusher, FinalizedCheckpointTopic, checkpoint)
case statefeed.LightClientFinalityUpdate:
if _, ok := requestedTopics[LightClientFinalityUpdateTopic]; !ok {
return
return nil
}
updateData, ok := event.Data.(*ethpbv2.LightClientFinalityUpdateWithVersion)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic)
}
var finalityBranch []string
@@ -318,15 +321,14 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
send(w, flusher, LightClientFinalityUpdateTopic, update)
return send(w, flusher, LightClientFinalityUpdateTopic, update)
case statefeed.LightClientOptimisticUpdate:
if _, ok := requestedTopics[LightClientOptimisticUpdateTopic]; !ok {
return
return nil
}
updateData, ok := event.Data.(*ethpbv2.LightClientOptimisticUpdateWithVersion)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic)
}
update := &structs.LightClientOptimisticUpdateEvent{
Version: version.String(int(updateData.Version)),
@@ -345,15 +347,14 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot),
},
}
send(w, flusher, LightClientOptimisticUpdateTopic, update)
return send(w, flusher, LightClientOptimisticUpdateTopic, update)
case statefeed.Reorg:
if _, ok := requestedTopics[ChainReorgTopic]; !ok {
return
return nil
}
reorgData, ok := event.Data.(*ethpb.EventChainReorg)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, ChainReorgTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, ChainReorgTopic)
}
reorg := &structs.ChainReorgEvent{
Slot: fmt.Sprintf("%d", reorgData.Slot),
@@ -365,78 +366,69 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f
Epoch: fmt.Sprintf("%d", reorgData.Epoch),
ExecutionOptimistic: reorgData.ExecutionOptimistic,
}
send(w, flusher, ChainReorgTopic, reorg)
return send(w, flusher, ChainReorgTopic, reorg)
case statefeed.BlockProcessed:
if _, ok := requestedTopics[BlockTopic]; !ok {
return
return nil
}
blkData, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, BlockTopic)
return
return write(w, flusher, topicDataMismatch, event.Data, BlockTopic)
}
blockRoot, err := blkData.SignedBlock.Block().HashTreeRoot()
if err != nil {
write(w, flusher, "Could not get block root: "+err.Error())
return
return write(w, flusher, "Could not get block root: "+err.Error())
}
blk := &structs.BlockEvent{
Slot: fmt.Sprintf("%d", blkData.Slot),
Block: hexutil.Encode(blockRoot[:]),
ExecutionOptimistic: blkData.Optimistic,
}
send(w, flusher, BlockTopic, blk)
return send(w, flusher, BlockTopic, blk)
}
return nil
}
// This event stream is intended to be used by builders and relays.
// Parent fields are based on the state at N_{current_slot}, while the rest of the fields are based on the state at N_{current_slot + 1}.
func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWriter, flusher http.Flusher) {
func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWriter, flusher http.Flusher) error {
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
write(w, flusher, "Could not get head root: "+err.Error())
return
return write(w, flusher, "Could not get head root: "+err.Error())
}
st, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
write(w, flusher, "Could not get head state: "+err.Error())
return
return write(w, flusher, "Could not get head state: "+err.Error())
}
// advance the head state
headState, err := transition.ProcessSlotsIfPossible(ctx, st, s.ChainInfoFetcher.CurrentSlot()+1)
if err != nil {
write(w, flusher, "Could not advance head state: "+err.Error())
return
return write(w, flusher, "Could not advance head state: "+err.Error())
}
headBlock, err := s.HeadFetcher.HeadBlock(ctx)
if err != nil {
write(w, flusher, "Could not get head block: "+err.Error())
return
return write(w, flusher, "Could not get head block: "+err.Error())
}
headPayload, err := headBlock.Block().Body().Execution()
if err != nil {
write(w, flusher, "Could not get execution payload: "+err.Error())
return
return write(w, flusher, "Could not get execution payload: "+err.Error())
}
t, err := slots.ToTime(headState.GenesisTime(), headState.Slot())
if err != nil {
write(w, flusher, "Could not get head state slot time: "+err.Error())
return
return write(w, flusher, "Could not get head state slot time: "+err.Error())
}
prevRando, err := helpers.RandaoMix(headState, time.CurrentEpoch(headState))
if err != nil {
write(w, flusher, "Could not get head state randao mix: "+err.Error())
return
return write(w, flusher, "Could not get head state randao mix: "+err.Error())
}
proposerIndex, err := helpers.BeaconProposerIndex(ctx, headState)
if err != nil {
write(w, flusher, "Could not get head state proposer index: "+err.Error())
return
return write(w, flusher, "Could not get head state proposer index: "+err.Error())
}
var attributes interface{}
@@ -450,8 +442,7 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
case version.Capella:
withdrawals, err := headState.ExpectedWithdrawals()
if err != nil {
write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
return
return write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
}
attributes = &structs.PayloadAttributesV2{
Timestamp: fmt.Sprintf("%d", t.Unix()),
@@ -462,13 +453,11 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
case version.Deneb:
withdrawals, err := headState.ExpectedWithdrawals()
if err != nil {
write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
return
return write(w, flusher, "Could not get head state expected withdrawals: "+err.Error())
}
parentRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
write(w, flusher, "Could not get head block root: "+err.Error())
return
return write(w, flusher, "Could not get head block root: "+err.Error())
}
attributes = &structs.PayloadAttributesV3{
Timestamp: fmt.Sprintf("%d", t.Unix()),
@@ -478,14 +467,12 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
ParentBeaconBlockRoot: hexutil.Encode(parentRoot[:]),
}
default:
write(w, flusher, "Payload version %s is not supported", version.String(headState.Version()))
return
return write(w, flusher, "Payload version %s is not supported", version.String(headState.Version()))
}
attributesBytes, err := json.Marshal(attributes)
if err != nil {
write(w, flusher, err.Error())
return
return write(w, flusher, err.Error())
}
eventData := structs.PayloadAttributesEventData{
ProposerIndex: fmt.Sprintf("%d", proposerIndex),
@@ -497,32 +484,31 @@ func (s *Server) sendPayloadAttributes(ctx context.Context, w http.ResponseWrite
}
eventDataBytes, err := json.Marshal(eventData)
if err != nil {
write(w, flusher, err.Error())
return
return write(w, flusher, err.Error())
}
send(w, flusher, PayloadAttributesTopic, &structs.PayloadAttributesEvent{
return send(w, flusher, PayloadAttributesTopic, &structs.PayloadAttributesEvent{
Version: version.String(headState.Version()),
Data: eventDataBytes,
})
}
func send(w http.ResponseWriter, flusher http.Flusher, name string, data interface{}) {
func send(w http.ResponseWriter, flusher http.Flusher, name string, data interface{}) error {
j, err := json.Marshal(data)
if err != nil {
write(w, flusher, "Could not marshal event to JSON: "+err.Error())
return
return write(w, flusher, "Could not marshal event to JSON: "+err.Error())
}
write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j))
return write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j))
}
func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) {
write(w, flusher, ":\n\n")
func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) error {
return write(w, flusher, ":\n\n")
}
func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) {
func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) error {
_, err := fmt.Fprintf(w, format, a...)
if err != nil {
log.WithError(err).Error("Could not write to response writer")
return errors.Wrap(err, "could not write to response writer")
}
flusher.Flush()
return nil
}
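For reference, the helpers above emit standard text/event-stream frames: send writes an "event:"/"data:" pair terminated by a blank line, and sendKeepalive writes a lone ":" comment line that clients ignore. A hypothetical head event followed by a keepalive would appear on the wire as:

event: head
data: {"slot":"123","block":"0x..."}

:

(The topic name and payload are illustrative only; the blank lines terminate each frame.)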


@@ -189,7 +189,7 @@ func TestLoadBlocks_FirstBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 8, roots[len(roots)-1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 9, roots[len(roots)-1])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -220,7 +220,7 @@ func TestLoadBlocks_SecondBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 5, roots[5])
filteredBlocks, err := s.loadBlocks(ctx, 0, 6, roots[5])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -249,7 +249,7 @@ func TestLoadBlocks_ThirdBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 7, roots[7])
filteredBlocks, err := s.loadBlocks(ctx, 0, 8, roots[7])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -280,7 +280,7 @@ func TestLoadBlocks_SameSlots(t *testing.T) {
roots, savedBlocks, err := tree2(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[6])
filteredBlocks, err := s.loadBlocks(ctx, 0, 4, roots[6])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -309,7 +309,7 @@ func TestLoadBlocks_SameEndSlots(t *testing.T) {
roots, savedBlocks, err := tree3(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[2])
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[2])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -337,7 +337,7 @@ func TestLoadBlocks_SameEndSlotsWith2blocks(t *testing.T) {
roots, savedBlocks, err := tree4(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[1])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -363,7 +363,7 @@ func TestLoadBlocks_BadStart(t *testing.T) {
roots, _, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
_, err = s.loadBlocks(ctx, 0, 5, roots[8])
_, err = s.loadBlocks(ctx, 0, 6, roots[8])
assert.ErrorContains(t, "end block roots don't match", err)
}
@@ -374,63 +374,63 @@ func TestLoadBlocks_BadStart(t *testing.T) {
// \- B7
func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b2 := util.NewBeaconBlock()
b2.Block.Slot = 2
b2.Block.Slot = 3
b2.Block.ParentRoot = r1[:]
r2, err := b2.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.Slot = 4
b3.Block.ParentRoot = r1[:]
r3, err := b3.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b4 := util.NewBeaconBlock()
b4.Block.Slot = 4
b4.Block.Slot = 5
b4.Block.ParentRoot = r2[:]
r4, err := b4.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b5 := util.NewBeaconBlock()
b5.Block.Slot = 5
b5.Block.Slot = 6
b5.Block.ParentRoot = r3[:]
r5, err := b5.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b6 := util.NewBeaconBlock()
b6.Block.Slot = 6
b6.Block.Slot = 7
b6.Block.ParentRoot = r4[:]
r6, err := b6.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b7 := util.NewBeaconBlock()
b7.Block.Slot = 7
b7.Block.Slot = 8
b7.Block.ParentRoot = r6[:]
r7, err := b7.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b8 := util.NewBeaconBlock()
b8.Block.Slot = 8
b8.Block.Slot = 9
b8.Block.ParentRoot = r6[:]
r8, err := b8.Block.HashTreeRoot()
if err != nil {
@@ -466,21 +466,21 @@ func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2 -- B3
func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r1[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -488,7 +488,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r1[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -496,7 +496,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r1[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -504,7 +504,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r1[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -512,7 +512,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.Slot = 4
b3.Block.ParentRoot = r24[:]
r3, err := b3.Block.HashTreeRoot()
if err != nil {
@@ -549,21 +549,21 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2
func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r1[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -571,7 +571,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r1[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -579,7 +579,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r1[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -587,7 +587,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r1[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -626,14 +626,14 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2
func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r0[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -641,7 +641,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r0[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -649,7 +649,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r0[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -657,7 +657,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r0[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -697,17 +697,17 @@ func TestLoadFinalizedBlocks(t *testing.T) {
gRoot, err := gBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, gBlock)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
roots, _, err := tree1(t, beaconDB, gRoot[:])
require.NoError(t, err)
filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 8)
filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 9)
require.NoError(t, err)
require.Equal(t, 0, len(filteredBlocks))
require.Equal(t, 1, len(filteredBlocks))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: roots[8][:]}))
require.NoError(t, s.beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: roots[8][:]}))
filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 8)
filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 9)
require.NoError(t, err)
require.Equal(t, 10, len(filteredBlocks))
require.Equal(t, 7, len(filteredBlocks))
}


@@ -2,14 +2,10 @@ package sync
import (
"context"
"encoding/binary"
"math"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
@@ -22,12 +18,10 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -57,88 +51,6 @@ type requestFromSidecars func([]blocks.ROBlob) interface{}
type oldestSlotCallback func(t *testing.T) types.Slot
type expectedRequirer func(*testing.T, *Service, []*expectedBlobChunk) func(network.Stream)
func generateTestBlockWithSidecars(t *testing.T, parent [32]byte, slot types.Slot, nblobs int) (*ethpb.SignedBeaconBlockDeneb, []blocks.ROBlob) {
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength)
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
parentHash := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
tx := gethTypes.NewTransaction(
0,
common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
big.NewInt(0), 0, big.NewInt(0),
nil,
)
txs := []*gethTypes.Transaction{tx}
encodedBinaryTxs := make([][]byte, 1)
var err error
encodedBinaryTxs[0], err = txs[0].MarshalBinary()
require.NoError(t, err)
blockHash := bytesutil.ToBytes32([]byte("foo"))
payload := &enginev1.ExecutionPayloadDeneb{
ParentHash: parentHash,
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: stateRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
PrevRandao: blockHash[:],
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 0),
BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
ExcessBlobGas: 0,
BlobGasUsed: 0,
BlockHash: blockHash[:],
Transactions: encodedBinaryTxs,
}
block := util.NewBeaconBlockDeneb()
block.Block.Body.ExecutionPayload = payload
block.Block.Slot = slot
block.Block.ParentRoot = parent[:]
commitments := make([][48]byte, nblobs)
block.Block.Body.BlobKzgCommitments = make([][]byte, nblobs)
for i := range commitments {
binary.LittleEndian.PutUint64(commitments[i][:], uint64(i))
block.Block.Body.BlobKzgCommitments[i] = commitments[i][:]
}
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
sbb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
sidecars := make([]blocks.ROBlob, len(commitments))
for i, c := range block.Block.Body.BlobKzgCommitments {
sidecars[i] = generateTestSidecar(t, root, sbb, i, c)
}
return block, sidecars
}
func generateTestSidecar(t *testing.T, root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, index int, commitment []byte) blocks.ROBlob {
header, err := block.Header()
require.NoError(t, err)
blob := make([]byte, fieldparams.BlobSize)
binary.LittleEndian.PutUint64(blob, uint64(index))
pb := &ethpb.BlobSidecar{
Index: uint64(index),
Blob: blob,
KzgCommitment: commitment,
KzgProof: commitment,
SignedBlockHeader: header,
}
pb.CommitmentInclusionProof = fakeEmptyProof(t, block, pb)
sc, err := blocks.NewROBlobWithRoot(pb, root)
require.NoError(t, err)
return sc
}
func fakeEmptyProof(_ *testing.T, _ interfaces.ReadOnlySignedBeaconBlock, _ *ethpb.BlobSidecar) [][]byte {
return util.HydrateCommitmentInclusionProofs()
}
type expectedBlobChunk struct {
code uint8
sidecar *blocks.ROBlob
@@ -204,12 +116,12 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
} else {
bs = oldest + types.Slot(i)
}
block, bsc := generateTestBlockWithSidecars(t, parentRoot, bs, maxBlobs)
root, err := block.Block.HashTreeRoot()
require.NoError(t, err)
block, bsc := util.GenerateTestDenebBlockWithSidecar(t, parentRoot, bs, maxBlobs)
sidecars = append(sidecars, bsc...)
util.SaveBlock(t, context.Background(), d, block)
parentRoot = root
pb, err := block.Proto()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), d, pb)
parentRoot = block.Root()
}
client := p2ptest.NewTestP2P(t)


@@ -139,15 +139,15 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
return nil
}
sidecars, err := SendBlobSidecarByRoot(ctx, s.cfg.clock, s.cfg.p2p, peerID, s.ctxMap, &request)
if err != nil {
return err
}
RoBlock, err := blocks.NewROBlock(block)
if err != nil {
return err
}
sidecars, err := SendBlobSidecarByRoot(ctx, s.cfg.clock, s.cfg.p2p, peerID, s.ctxMap, &request, s.decoderValidation(RoBlock))
if err != nil {
return err
}
if len(sidecars) != len(request) {
return fmt.Errorf("received %d blob sidecars, expected %d for RPC", len(sidecars), len(request))
}


@@ -375,8 +375,7 @@ func TestRequestPendingBlobs(t *testing.T) {
require.NoError(t, s.sendAndSaveBlobSidecars(context.Background(), request, "test", b))
})
t.Run("empty commitment block should not fail", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 0)
request, err := s.pendingBlobsRequestForBlock([32]byte{}, b)
require.NoError(t, err)
require.NoError(t, s.sendAndSaveBlobSidecars(context.Background(), request, "test", b))
@@ -406,10 +405,7 @@ func TestRequestPendingBlobs(t *testing.T) {
blobStorage: filesystem.NewEphemeralBlobStorage(t),
},
}
b := util.NewBeaconBlockDeneb()
b.Block.Body.BlobKzgCommitments = make([][]byte, 1)
b1, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
b1, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1)
request, err := s.pendingBlobsRequestForBlock([32]byte{}, b1)
require.NoError(t, err)
require.ErrorContains(t, "protocols not supported", s.sendAndSaveBlobSidecars(context.Background(), request, p2.PeerID(), b1))


@@ -42,9 +42,10 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
return fmt.Errorf("wrong message type for goodbye, got %T, wanted *uint64", msg)
}
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
log.WithError(err).Warn("Goodbye message from rate-limited peer.")
} else {
s.rateLimiter.add(stream, 1)
}
s.rateLimiter.add(stream, 1)
log := log.WithField("Reason", goodbyeMessage(*m))
log.WithField("peer", stream.Conn().RemotePeer()).Trace("Peer has sent a goodbye message")
s.cfg.p2p.Peers().SetNextValidTime(stream.Conn().RemotePeer(), goodByeBackoff(*m))


@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -184,7 +185,7 @@ func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle,
func SendBlobSidecarByRoot(
ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.P2P, pid peer.ID,
ctxMap ContextByteVersions, req *p2ptypes.BlobSidecarsByRootReq,
ctxMap ContextByteVersions, req *p2ptypes.BlobSidecarsByRootReq, bvs ...BlobResponseValidation,
) ([]blocks.ROBlob, error) {
if uint64(len(*req)) > params.BeaconConfig().MaxRequestBlobSidecars {
return nil, errors.Wrapf(p2ptypes.ErrMaxBlobReqExceeded, "length=%d", len(*req))
@@ -205,7 +206,11 @@ func SendBlobSidecarByRoot(
if max > uint64(len(*req))*fieldparams.MaxBlobsPerBlock {
max = uint64(len(*req)) * fieldparams.MaxBlobsPerBlock
}
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRootReq(req), max)
vfuncs := []BlobResponseValidation{blobValidatorFromRootReq(req)}
if len(bvs) > 0 {
vfuncs = append(vfuncs, bvs...)
}
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, composeBlobValidations(vfuncs...), max)
}
// BlobResponseValidation represents a function that can validate aspects of a single unmarshaled blob
@@ -374,3 +379,23 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
return rob, nil
}
// Save bandwidth by stopping chunked reading as soon as we get an unwanted blob.
func (s *Service) decoderValidation(block blocks.ROBlock) BlobResponseValidation {
return func(sc blocks.ROBlob) error {
if block.Signature() != bytesutil.ToBytes96(sc.SignedBlockHeader.Signature) {
return verification.ErrInvalidProposerSignature
}
bv := s.newBlobVerifier(sc, verification.InitsyncSidecarRequirements)
if err := bv.BlobIndexInBounds(); err != nil {
return err
}
if err := bv.SidecarInclusionProven(); err != nil {
return err
}
if err := bv.SidecarKzgProofVerified(); err != nil {
return err
}
return nil
}
}
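composeBlobValidations is called above but its implementation is not part of this diff. A minimal sketch of such a combinator, assuming it simply runs each validation in order and stops at the first failure:

// composeBlobValidationsSketch is a hypothetical illustration; the real
// composeBlobValidations may differ.
func composeBlobValidationsSketch(vfuncs ...BlobResponseValidation) BlobResponseValidation {
	return func(blob blocks.ROBlob) error {
		for _, vf := range vfuncs {
			if err := vf(blob); err != nil {
				return err
			}
		}
		return nil
	}
}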


@@ -604,14 +604,13 @@ func TestBlobValidatorFromRangeReq(t *testing.T) {
}
func TestSeqBlobValid(t *testing.T) {
one, oneBlobs := generateTestBlockWithSidecars(t, [32]byte{}, 0, 3)
r1, err := one.Block.HashTreeRoot()
require.NoError(t, err)
two, twoBlobs := generateTestBlockWithSidecars(t, r1, 1, 3)
r2, err := two.Block.HashTreeRoot()
require.NoError(t, err)
_, oops := generateTestBlockWithSidecars(t, r2, 0, 4)
oops[1].SignedBlockHeader.Header.ParentRoot = bytesutil.PadTo([]byte("derp"), 32)
one, oneBlobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 3)
r1 := one.Root()
two, twoBlobs := util.GenerateTestDenebBlockWithSidecar(t, r1, 1, 3)
r2 := two.Root()
_, oops := util.GenerateTestDenebBlockWithSidecar(t, r2, 0, 4)
_, wrongParent := util.GenerateTestDenebBlockWithSidecar(t, bytesutil.ToBytes32([]byte("derp")), 0, 4)
oops[1] = wrongParent[1]
wrongRoot, err := blocks.NewROBlobWithRoot(oops[2].BlobSidecar, bytesutil.ToBytes32([]byte("parentderp")))
require.NoError(t, err)
oob := oops[3]


@@ -29,6 +29,7 @@ var (
Name: "local-block-value-boost",
Usage: "A percentage boost for local block construction as a Uint64. This is used to prioritize local block construction over relay/builder block construction" +
"Boost is an additional percentage to multiple local block value. Use builder block if: builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)",
Value: 10,
}
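// Worked example with the new default of 10: for a local block worth 1.00 ETH,
// a builder bid is chosen only if bid * 100 > 1.00 * (10 + 100), i.e. the bid
// must exceed 1.10 ETH; a 1.05 ETH bid (105 <= 110) loses to local construction.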
// ExecutionEngineEndpoint provides an HTTP access endpoint to connect to an execution client on the execution layer
ExecutionEngineEndpoint = &cli.StringFlag{


@@ -15,7 +15,8 @@ const (
bodyLength = 12 // The number of elements in the BeaconBlockBody Container
logBodyLength = 4 // The log 2 of bodyLength
kzgPosition = 11 // The index of the KZG commitment list in the Body
KZGOffset = 54 * field_params.MaxBlobCommitmentsPerBlock
kzgRootIndex = 54 // The Merkle index of the KZG commitment list's root in the Body's Merkle tree
KZGOffset = kzgRootIndex * field_params.MaxBlobCommitmentsPerBlock
)
var (
@@ -37,9 +38,7 @@ func VerifyKZGInclusionProof(blob ROBlob) error {
if len(root) != field_params.RootLength {
return errInvalidBodyRoot
}
chunks := make([][32]byte, 2)
copy(chunks[0][:], blob.KzgCommitment)
copy(chunks[1][:], blob.KzgCommitment[field_params.RootLength:])
chunks := makeChunk(blob.KzgCommitment)
gohashtree.HashChunks(chunks, chunks)
verified := trie.VerifyMerkleProof(root, chunks[0][:], blob.Index+KZGOffset, blob.CommitmentInclusionProof)
if !verified {
@@ -85,15 +84,21 @@ func MerkleProofKZGCommitment(body interfaces.ReadOnlyBeaconBlockBody, index int
func leavesFromCommitments(commitments [][]byte) [][]byte {
leaves := make([][]byte, len(commitments))
for i, kzg := range commitments {
chunk := make([][32]byte, 2)
copy(chunk[0][:], kzg)
copy(chunk[1][:], kzg[field_params.RootLength:])
chunk := makeChunk(kzg)
gohashtree.HashChunks(chunk, chunk)
leaves[i] = chunk[0][:]
}
return leaves
}
// makeChunk constructs a chunk from a KZG commitment.
func makeChunk(commitment []byte) [][32]byte {
chunk := make([][32]byte, 2)
copy(chunk[0][:], commitment)
copy(chunk[1][:], commitment[field_params.RootLength:])
return chunk
}
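// Worked layout: a 48-byte commitment C becomes chunk[0] = C[0:32] and
// chunk[1] = C[32:48] zero-padded to 32 bytes; HashChunks then hashes the
// 64-byte pair into chunk[0], which is the SSZ hash_tree_root of the
// commitment and therefore its leaf in the commitment list trie.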
// bodyProof returns the Merkle proof of the subtree up to the root of the KZG
// commitment list.
func bodyProof(commitments [][]byte, index int) ([][]byte, error) {


@@ -1,7 +1,8 @@
package blocks
import (
"math/rand"
"crypto/rand"
"errors"
"testing"
"github.com/prysmaticlabs/gohashtree"
@@ -74,14 +75,79 @@ func Test_MerkleProofKZGCommitment(t *testing.T) {
proof, err := MerkleProofKZGCommitment(body, index)
require.NoError(t, err)
chunk := make([][32]byte, 2)
copy(chunk[0][:], kzgs[index])
copy(chunk[1][:], kzgs[index][32:])
gohashtree.HashChunks(chunk, chunk)
// Test the logic of topProof in MerkleProofKZGCommitment.
commitmentsRoot, err := getBlobKzgCommitmentsRoot(kzgs)
require.NoError(t, err)
bodyMembersRoots, err := topLevelRoots(body)
require.NoError(t, err, "Failed to get top level roots")
bodySparse, err := trie.GenerateTrieFromItems(
bodyMembersRoots,
logBodyLength,
)
require.NoError(t, err, "Failed to generate trie from member roots")
require.Equal(t, bodyLength, bodySparse.NumOfItems())
topProof, err := bodySparse.MerkleProof(kzgPosition)
require.NoError(t, err, "Failed to generate Merkle proof")
require.DeepEqual(t,
topProof[:len(topProof)-1],
proof[fieldparams.LogMaxBlobCommitments+1:],
)
root, err := body.HashTreeRoot()
require.NoError(t, err)
kzgOffset := 54 * fieldparams.MaxBlobCommitmentsPerBlock
require.Equal(t, true, trie.VerifyMerkleProof(root[:], chunk[0][:], uint64(index+kzgOffset), proof))
// Partially verify if the commitments root is in the body root.
// Proof of the commitment length is not needed.
require.Equal(t, true, trie.VerifyMerkleProof(root[:], commitmentsRoot[:], kzgPosition, topProof[:len(topProof)-1]))
chunk := makeChunk(kzgs[index])
gohashtree.HashChunks(chunk, chunk)
require.Equal(t, true, trie.VerifyMerkleProof(root[:], chunk[0][:], uint64(index+KZGOffset), proof))
}
// This test explains the calculation of the KZG commitment root's Merkle index
// in the Body's Merkle tree based on the index of the KZG commitment list in the Body.
func Test_KZGRootIndex(t *testing.T) {
// Level of the KZG commitment root's parent.
kzgParentRootLevel, err := ceilLog2(kzgPosition)
require.NoError(t, err)
// Merkle index of the KZG commitment root's parent.
// The parent's left child is the KZG commitment root,
// and its right child is the KZG commitment size.
kzgParentRootIndex := kzgPosition + (1 << kzgParentRootLevel)
// The KZG commitment root is the left child of its parent.
// Its Merkle index is the double of its parent's Merkle index.
require.Equal(t, 2*kzgParentRootIndex, kzgRootIndex)
}
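// Plugging in the constants above: kzgPosition = 11 and ceilLog2(11) = 4, so
// kzgParentRootIndex = 11 + 2^4 = 27 and kzgRootIndex = 2*27 = 54. Assuming
// MaxBlobCommitmentsPerBlock is 4096 (its Deneb mainnet value), KZGOffset =
// 54 * 4096 = 221184, and the commitment leaf for blob index i is verified at
// index i + KZGOffset, matching VerifyKZGInclusionProof.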
// ceilLog2 returns the smallest integer greater than or equal to
// the base-2 logarithm of x.
func ceilLog2(x uint32) (uint32, error) {
if x == 0 {
return 0, errors.New("log2(0) is undefined")
}
var y uint32
if (x & (x - 1)) == 0 {
y = 0
} else {
y = 1
}
for x > 1 {
x >>= 1
y += 1
}
return y, nil
}
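// Spot checks of ceilLog2 as written: ceilLog2(1) = 0, ceilLog2(2) = 1,
// ceilLog2(11) = 4, ceilLog2(16) = 4, ceilLog2(17) = 5. Exact powers of two
// map to their exact log; everything else rounds up.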
func getBlobKzgCommitmentsRoot(commitments [][]byte) ([32]byte, error) {
commitmentsLeaves := leavesFromCommitments(commitments)
commitmentsSparse, err := trie.GenerateTrieFromItems(
commitmentsLeaves,
fieldparams.LogMaxBlobCommitments,
)
if err != nil {
return [32]byte{}, err
}
return commitmentsSparse.HashTreeRoot()
}
func Benchmark_MerkleProofKZGCommitment(b *testing.B) {


@@ -16,8 +16,8 @@ func (a *data) PrevRandao() []byte {
return a.prevRandao
}
// Timestamps returns the timestamp of the payload attribute.
func (a *data) Timestamps() uint64 {
// Timestamp returns the timestamp of the payload attribute.
func (a *data) Timestamp() uint64 {
return a.timeStamp
}
@@ -100,7 +100,7 @@ func (a *data) IsEmpty() bool {
if len(a.PrevRandao()) != 0 {
return false
}
if a.Timestamps() != 0 {
if a.Timestamp() != 0 {
return false
}
if len(a.SuggestedFeeRecipient()) != 0 {


@@ -44,7 +44,7 @@ func TestPayloadAttributeGetters(t *testing.T) {
r := uint64(123)
a, err := New(&enginev1.PayloadAttributes{Timestamp: r})
require.NoError(t, err)
require.Equal(t, r, a.Timestamps())
require.Equal(t, r, a.Timestamp())
},
},
{


@@ -7,7 +7,7 @@ import (
type Attributer interface {
Version() int
PrevRandao() []byte
Timestamps() uint64
Timestamp() uint64
SuggestedFeeRecipient() []byte
Withdrawals() ([]*enginev1.Withdrawal, error)
PbV1() (*enginev1.PayloadAttributes, error)


@@ -13,6 +13,7 @@ go_library(
"//cache/lru:go_default_library",
"//config/params:go_default_library",
"//crypto/rand:go_default_library",
"//io/file:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],


@@ -6,9 +6,11 @@ import (
"io"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/sirupsen/logrus"
)
@@ -20,6 +22,9 @@ func addLogWriter(w io.Writer) {
// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
func ConfigurePersistentLogging(logFileName string) error {
logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
return err
}
f, err := os.OpenFile(logFileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, params.BeaconIoConfig().ReadWritePermissions) // #nosec G304
if err != nil {
return err


@@ -1,6 +1,8 @@
package logs
import (
"fmt"
"os"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -24,3 +26,38 @@ func TestMaskCredentialsLogging(t *testing.T) {
require.Equal(t, MaskCredentialsLogging(test.url), test.maskedUrl)
}
}
func TestConfigurePersistentLogging(t *testing.T) {
testParentDir := t.TempDir()
// 1. Test creation of file in an existing parent directory
logFileName := "test.log"
existingDirectory := "test-1-existing-testing-dir"
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName))
require.NoError(t, err)
// 2. Test creation of file along with parent directory
nonExistingDirectory := "test-2-non-existing-testing-dir"
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName))
require.NoError(t, err)
// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
existingDirectory = "test-3-existing-testing-dir"
nonExistingSubDirectory := "test-3-non-existing-sub-dir"
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0700)
require.NoError(t, err)
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName))
require.NoError(t, err)
// 4. Test creation of a log file inside an existing directory created without 0700 permissions
existingDirectory = "test-4-existing-testing-dir"
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0750)
require.NoError(t, err)
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName))
require.NoError(t, err)
}
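A minimal usage sketch, written as if it lived in the same logs package and reusing the fmt and os imports added above: the nested directories in the path do not need to exist beforehand, since ConfigurePersistentLogging now calls file.MkdirAll on the parent directory first.

// ExampleConfigurePersistentLogging is a hypothetical sketch, not part of the change set.
func ExampleConfigurePersistentLogging() {
	path := fmt.Sprintf("%s/nested/dir/beacon.log", os.TempDir())
	if err := ConfigurePersistentLogging(path); err != nil {
		fmt.Println("error:", err)
	}
}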


@@ -15,6 +15,7 @@ go_library(
deps = [
"//api/client/beacon:go_default_library",
"//api/client/event:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//validator/client/iface:go_default_library",


@@ -13,8 +13,8 @@ import (
context "context"
reflect "reflect"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
"github.com/prysmaticlabs/prysm/v5/api/client/event"
event "github.com/prysmaticlabs/prysm/v5/api/client/event"
primitives "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
iface "github.com/prysmaticlabs/prysm/v5/validator/client/iface"
gomock "go.uber.org/mock/gomock"
@@ -113,7 +113,7 @@ func (m *MockValidatorClient) GetAggregatedSyncSelections(arg0 context.Context,
}
// GetAggregatedSyncSelections indicates an expected call of GetAggregatedSyncSelections.
func (mr *MockValidatorClientMockRecorder) GetAggregatedSyncSelections(arg0, arg1 interface{}) *gomock.Call {
func (mr *MockValidatorClientMockRecorder) GetAggregatedSyncSelections(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAggregatedSyncSelections", reflect.TypeOf((*MockValidatorClient)(nil).GetAggregatedSyncSelections), arg0, arg1)
}
@@ -299,72 +299,30 @@ func (mr *MockValidatorClientMockRecorder) ProposeExit(arg0, arg1 any) *gomock.C
}
// StartEventStream mocks base method.
func (m *MockValidatorClient) StartEventStream(arg0 context.Context, arg1 []string, arg2 chan<- *event.Event){
func (m *MockValidatorClient) StartEventStream(arg0 context.Context, arg1 []string, arg2 chan<- *event.Event) {
m.ctrl.T.Helper()
_ = m.ctrl.Call(m, "StartEventStream", arg0,arg1,arg2)
m.ctrl.Call(m, "StartEventStream", arg0, arg1, arg2)
}
// StartEventStream indicates an expected call of StartEventStream.
func (mr *MockValidatorClientMockRecorder) StartEventStream(arg0,arg1,arg2 interface{}) *gomock.Call {
func (mr *MockValidatorClientMockRecorder) StartEventStream(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartEventStream", reflect.TypeOf((*MockValidatorClient)(nil).StartEventStream), arg0, arg1, arg2)
}
// ProcessEvent mocks base method.
func (m *MockValidatorClient) ProcessEvent(arg0 *event.Event) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ProcessEvent", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// ProcessEvent indicates an expected call of ProcessEvent.
func (mr *MockValidatorClientMockRecorder) ProcessEvent(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessEvent", reflect.TypeOf((*MockValidatorClient)(nil).ProcessEvent), arg0)
}
// NodeIsHealthy mocks base method.
func (m *MockValidatorClient) NodeIsHealthy(arg0 context.Context) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NodeIsHealthy",arg0)
ret0, _ := ret[0].(bool)
return ret0
}
// NodeIsHealthy indicates an expected call of NodeIsHealthy.
func (mr *MockValidatorClientMockRecorder) NodeIsHealthy(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeIsHealthy", reflect.TypeOf((*MockValidatorClient)(nil).NodeIsHealthy), arg0)
}
// NodeHealthTracker mocks base method.
func (m *MockValidatorClient) NodeHealthTracker() *beacon.NodeHealthTracker {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NodeHealthTracker")
ret0, _ := ret[0].(*beacon.NodeHealthTracker)
return ret0
}
// NodeHealthTracker indicates an expected call of NodeHealthTracker.
func (mr *MockValidatorClientMockRecorder) NodeHealthTracker() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeHealthTracker", reflect.TypeOf((*MockValidatorClient)(nil).NodeHealthTracker))
}
// SubmitAggregateSelectionProof mocks base method.
func (m *MockValidatorClient) SubmitAggregateSelectionProof(arg0 context.Context, arg1 *eth.AggregateSelectionRequest) (*eth.AggregateSelectionResponse, error) {
func (m *MockValidatorClient) SubmitAggregateSelectionProof(arg0 context.Context, arg1 *eth.AggregateSelectionRequest, arg2 primitives.ValidatorIndex, arg3 uint64) (*eth.AggregateSelectionResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SubmitAggregateSelectionProof", arg0, arg1)
ret := m.ctrl.Call(m, "SubmitAggregateSelectionProof", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*eth.AggregateSelectionResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SubmitAggregateSelectionProof indicates an expected call of SubmitAggregateSelectionProof.
func (mr *MockValidatorClientMockRecorder) SubmitAggregateSelectionProof(arg0, arg1 any) *gomock.Call {
func (mr *MockValidatorClientMockRecorder) SubmitAggregateSelectionProof(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAggregateSelectionProof", reflect.TypeOf((*MockValidatorClient)(nil).SubmitAggregateSelectionProof), arg0, arg1)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAggregateSelectionProof", reflect.TypeOf((*MockValidatorClient)(nil).SubmitAggregateSelectionProof), arg0, arg1, arg2, arg3)
}
// SubmitSignedAggregateSelectionProof mocks base method.


@@ -34,19 +34,19 @@ def cc_autoconf_toolchains_impl(repository_ctx):
else:
repository_ctx.file("BUILD", "# C++ toolchain autoconfiguration was disabled by BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN env variable.")
def cc_autoconf_impl(repository_ctx, overriden_tools = dict()):
def cc_autoconf_impl(repository_ctx, overridden_tools = dict()):
"""Generate BUILD file with 'cc_toolchain' targets for the local host C++ toolchain.
Args:
repository_ctx: repository context
overriden_tools: dict of tool paths to use instead of autoconfigured tools
overridden_tools: dict of tool paths to use instead of autoconfigured tools
"""
cpu_value = get_cpu_value(repository_ctx)
if cpu_value.startswith("darwin"):
print("Configuring local C++ toolchain for Darwin. This is non-hermetic and builds may " +
"not be reproducible. Consider building on linux for a hermetic build.")
configure_unix_toolchain(repository_ctx, cpu_value, overriden_tools)
configure_unix_toolchain(repository_ctx, cpu_value, overridden_tools)
else:
paths = resolve_labels(repository_ctx, [
"@bazel_tools//tools/cpp:BUILD.empty.tpl",


@@ -1,4 +1,4 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_image_index", "oci_push")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_image_index", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//tools:multi_arch.bzl", "multi_arch")
@@ -74,3 +74,9 @@ def prysm_image_upload(
repository = repository,
tags = tags,
)
oci_tarball(
name = "oci_image_tarball",
image = ":oci_image",
repo_tags = [repository+":latest"],
)


@@ -84,7 +84,7 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
CommitteeIndex: duty.CommitteeIndex,
PublicKey: pubKey[:],
SlotSignature: slotSig,
})
}, duty.ValidatorIndex, uint64(len(duty.Committee)))
if err != nil {
// handle grpc not found
s, ok := status.FromError(err)


@@ -63,6 +63,8 @@ func TestSubmitAggregateAndProof_SignFails(t *testing.T) {
m.validatorClient.EXPECT().SubmitAggregateSelectionProof(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AggregateSelectionRequest{}),
gomock.Any(),
gomock.Any(),
).Return(&ethpb.AggregateSelectionResponse{
AggregateAndProof: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 0,
@@ -106,6 +108,8 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) {
m.validatorClient.EXPECT().SubmitAggregateSelectionProof(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AggregateSelectionRequest{}),
gomock.Any(),
gomock.Any(),
).Return(&ethpb.AggregateSelectionResponse{
AggregateAndProof: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 0,
@@ -166,6 +170,8 @@ func TestSubmitAggregateAndProof_Distributed(t *testing.T) {
m.validatorClient.EXPECT().SubmitAggregateSelectionProof(
gomock.Any(), // ctx
gomock.AssignableToTypeOf(&ethpb.AggregateSelectionRequest{}),
gomock.Any(),
gomock.Any(),
).Return(&ethpb.AggregateSelectionResponse{
AggregateAndProof: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 0,


@@ -66,6 +66,7 @@ go_library(
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
@@ -129,7 +130,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/validator-mock:go_default_library",
"//time/slots:go_default_library",
"//validator/client/beacon-api/mock:go_default_library",
"//validator/client/beacon-api/test-helpers:go_default_library",


@@ -2,12 +2,14 @@ package beacon_api
import (
"context"
"net/http"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client/event"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
@@ -135,9 +137,9 @@ func (c *beaconApiValidatorClient) StreamBlocksAltair(ctx context.Context, in *e
return c.streamBlocks(ctx, in, time.Second), nil
}
func (c *beaconApiValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error) {
func (c *beaconApiValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, index primitives.ValidatorIndex, committeeLength uint64) (*ethpb.AggregateSelectionResponse, error) {
return wrapInMetrics[*ethpb.AggregateSelectionResponse]("SubmitAggregateSelectionProof", func() (*ethpb.AggregateSelectionResponse, error) {
return c.submitAggregateSelectionProof(ctx, in)
return c.submitAggregateSelectionProof(ctx, in, index, committeeLength)
})
}
@@ -191,7 +193,8 @@ func (c *beaconApiValidatorClient) WaitForChainStart(ctx context.Context, _ *emp
}
func (c *beaconApiValidatorClient) StartEventStream(ctx context.Context, topics []string, eventsChannel chan<- *event.Event) {
eventStream, err := event.NewEventStream(ctx, c.jsonRestHandler.HttpClient(), c.jsonRestHandler.Host(), topics)
client := &http.Client{} // event stream should not be subject to the same settings as other api calls, so we won't use c.jsonRestHandler.HttpClient()
eventStream, err := event.NewEventStream(ctx, client, c.jsonRestHandler.Host(), topics)
if err != nil {
eventsChannel <- &event.Event{
EventType: event.EventError,
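
Note on the StartEventStream change above: the event stream now gets its own http.Client instead of reusing c.jsonRestHandler.HttpClient(), because a client that carries a per-request timeout would cut off a long-lived server-sent-events subscription. A minimal sketch of the distinction, assuming a hypothetical apiTimeout constant for ordinary REST calls:

package eventstream

import (
	"net/http"
	"time"
)

// apiTimeout is a hypothetical deadline applied to ordinary request/response calls.
const apiTimeout = 30 * time.Second

// newRESTClient bounds every request (connection, redirects, and body read)
// by apiTimeout, which is fine for short-lived REST calls.
func newRESTClient() *http.Client {
	return &http.Client{Timeout: apiTimeout}
}

// newStreamClient leaves Timeout at zero, so the client never cancels the
// request on its own; the lifetime of an event stream is then governed only
// by the caller's context.
func newStreamClient() *http.Client {
	return &http.Client{}
}

Because http.Client.Timeout also covers reading the response body, any finite value would eventually kill an endless event stream, which is why the stream path avoids the shared client.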


@@ -8,11 +8,14 @@ import (
"net/url"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"golang.org/x/sync/errgroup"
)
type dutiesProvider interface {
@@ -31,37 +34,42 @@ type committeeIndexSlotPair struct {
slot primitives.Slot
}
type validatorForDuty struct {
pubkey []byte
index primitives.ValidatorIndex
status ethpb.ValidatorStatus
}
func (c beaconApiValidatorClient) getDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
all, err := c.multipleValidatorStatus(ctx, &ethpb.MultipleValidatorStatusRequest{PublicKeys: in.PublicKeys})
vals, err := c.getValidatorsForDuties(ctx, in.PublicKeys)
if err != nil {
return nil, errors.Wrap(err, "failed to get validator status")
}
known := &ethpb.MultipleValidatorStatusResponse{
PublicKeys: make([][]byte, 0, len(all.PublicKeys)),
Statuses: make([]*ethpb.ValidatorStatusResponse, 0, len(all.Statuses)),
Indices: make([]primitives.ValidatorIndex, 0, len(all.Indices)),
}
for i, status := range all.Statuses {
if status.Status != ethpb.ValidatorStatus_UNKNOWN_STATUS {
known.PublicKeys = append(known.PublicKeys, all.PublicKeys[i])
known.Statuses = append(known.Statuses, all.Statuses[i])
known.Indices = append(known.Indices, all.Indices[i])
}
return nil, errors.Wrap(err, "failed to get validators for duties")
}
// Sync committees are an Altair feature
fetchSyncDuties := in.Epoch >= params.BeaconConfig().AltairForkEpoch
currentEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch, known, fetchSyncDuties)
if err != nil {
return nil, errors.Wrapf(err, "failed to get duties for current epoch `%d`", in.Epoch)
}
errCh := make(chan error, 1)
nextEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch+1, known, fetchSyncDuties)
var currentEpochDuties []*ethpb.DutiesResponse_Duty
go func() {
currentEpochDuties, err = c.getDutiesForEpoch(ctx, in.Epoch, vals, fetchSyncDuties)
if err != nil {
errCh <- errors.Wrapf(err, "failed to get duties for current epoch `%d`", in.Epoch)
return
}
errCh <- nil
}()
nextEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch+1, vals, fetchSyncDuties)
if err != nil {
return nil, errors.Wrapf(err, "failed to get duties for next epoch `%d`", in.Epoch+1)
}
if err = <-errCh; err != nil {
return nil, err
}
return &ethpb.DutiesResponse{
CurrentEpochDuties: currentEpochDuties,
NextEpochDuties: nextEpochDuties,
@@ -71,25 +79,94 @@ func (c beaconApiValidatorClient) getDuties(ctx context.Context, in *ethpb.Dutie
func (c beaconApiValidatorClient) getDutiesForEpoch(
ctx context.Context,
epoch primitives.Epoch,
multipleValidatorStatus *ethpb.MultipleValidatorStatusResponse,
vals []validatorForDuty,
fetchSyncDuties bool,
) ([]*ethpb.DutiesResponse_Duty, error) {
attesterDuties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, multipleValidatorStatus.Indices)
if err != nil {
return nil, errors.Wrapf(err, "failed to get attester duties for epoch `%d`", epoch)
indices := make([]primitives.ValidatorIndex, len(vals))
for i, v := range vals {
indices[i] = v.index
}
var syncDuties []*structs.SyncCommitteeDuty
if fetchSyncDuties {
if syncDuties, err = c.dutiesProvider.GetSyncDuties(ctx, epoch, multipleValidatorStatus.Indices); err != nil {
return nil, errors.Wrapf(err, "failed to get sync duties for epoch `%d`", epoch)
// Below variables MUST NOT be used in the main function before wg.Wait().
// This is because they are populated in goroutines and wg.Wait()
// will return only once all goroutines finish their execution.
// Mapping from a validator index to its attesting committee's index and slot
attesterDutiesMapping := make(map[primitives.ValidatorIndex]committeeIndexSlotPair)
// Set containing all validator indices that are part of a sync committee for this epoch
syncDutiesMapping := make(map[primitives.ValidatorIndex]bool)
// Mapping from a validator index to its proposal slot
proposerDutySlots := make(map[primitives.ValidatorIndex][]primitives.Slot)
// Mapping from the {committeeIndex, slot} to each of the committee's validator indices
committeeMapping := make(map[committeeIndexSlotPair][]primitives.ValidatorIndex)
var wg errgroup.Group
wg.Go(func() error {
attesterDuties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, indices)
if err != nil {
return errors.Wrapf(err, "failed to get attester duties for epoch `%d`", epoch)
}
for _, attesterDuty := range attesterDuties {
validatorIndex, err := strconv.ParseUint(attesterDuty.ValidatorIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse attester validator index `%s`", attesterDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(attesterDuty.Slot, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse attester slot `%s`", attesterDuty.Slot)
}
committeeIndex, err := strconv.ParseUint(attesterDuty.CommitteeIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse attester committee index `%s`", attesterDuty.CommitteeIndex)
}
attesterDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = committeeIndexSlotPair{
slot: primitives.Slot(slot),
committeeIndex: primitives.CommitteeIndex(committeeIndex),
}
}
return nil
})
if fetchSyncDuties {
wg.Go(func() error {
syncDuties, err := c.dutiesProvider.GetSyncDuties(ctx, epoch, indices)
if err != nil {
return errors.Wrapf(err, "failed to get sync duties for epoch `%d`", epoch)
}
for _, syncDuty := range syncDuties {
validatorIndex, err := strconv.ParseUint(syncDuty.ValidatorIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse sync validator index `%s`", syncDuty.ValidatorIndex)
}
syncDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = true
}
return nil
})
}
var proposerDuties []*structs.ProposerDuty
if proposerDuties, err = c.dutiesProvider.GetProposerDuties(ctx, epoch); err != nil {
return nil, errors.Wrapf(err, "failed to get proposer duties for epoch `%d`", epoch)
}
wg.Go(func() error {
proposerDuties, err := c.dutiesProvider.GetProposerDuties(ctx, epoch)
if err != nil {
return errors.Wrapf(err, "failed to get proposer duties for epoch `%d`", epoch)
}
for _, proposerDuty := range proposerDuties {
validatorIndex, err := strconv.ParseUint(proposerDuty.ValidatorIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse proposer validator index `%s`", proposerDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(proposerDuty.Slot, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse proposer slot `%s`", proposerDuty.Slot)
}
proposerDutySlots[primitives.ValidatorIndex(validatorIndex)] =
append(proposerDutySlots[primitives.ValidatorIndex(validatorIndex)], primitives.Slot(slot))
}
return nil
})
committees, err := c.dutiesProvider.GetCommittees(ctx, epoch)
if err != nil {
@@ -104,70 +181,15 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
slotCommittees[c.Slot] = n + 1
}
// Mapping from a validator index to its attesting committee's index and slot
attesterDutiesMapping := make(map[primitives.ValidatorIndex]committeeIndexSlotPair)
for _, attesterDuty := range attesterDuties {
validatorIndex, err := strconv.ParseUint(attesterDuty.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse attester validator index `%s`", attesterDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(attesterDuty.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse attester slot `%s`", attesterDuty.Slot)
}
committeeIndex, err := strconv.ParseUint(attesterDuty.CommitteeIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse attester committee index `%s`", attesterDuty.CommitteeIndex)
}
attesterDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = committeeIndexSlotPair{
slot: primitives.Slot(slot),
committeeIndex: primitives.CommitteeIndex(committeeIndex),
}
}
// Mapping from a validator index to its proposal slot
proposerDutySlots := make(map[primitives.ValidatorIndex][]primitives.Slot)
for _, proposerDuty := range proposerDuties {
validatorIndex, err := strconv.ParseUint(proposerDuty.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse proposer validator index `%s`", proposerDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(proposerDuty.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse proposer slot `%s`", proposerDuty.Slot)
}
proposerDutySlots[primitives.ValidatorIndex(validatorIndex)] = append(proposerDutySlots[primitives.ValidatorIndex(validatorIndex)], primitives.Slot(slot))
}
// Set containing all validator indices that are part of a sync committee for this epoch
syncDutiesMapping := make(map[primitives.ValidatorIndex]bool)
for _, syncDuty := range syncDuties {
validatorIndex, err := strconv.ParseUint(syncDuty.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse sync validator index `%s`", syncDuty.ValidatorIndex)
}
syncDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = true
}
// Mapping from the {committeeIndex, slot} to each of the committee's validator indices
committeeMapping := make(map[committeeIndexSlotPair][]primitives.ValidatorIndex)
for _, committee := range committees {
committeeIndex, err := strconv.ParseUint(committee.Index, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse committee index `%s`", committee.Index)
}
slot, err := strconv.ParseUint(committee.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse slot `%s`", committee.Slot)
}
validatorIndices := make([]primitives.ValidatorIndex, len(committee.Validators))
for index, validatorIndexString := range committee.Validators {
validatorIndex, err := strconv.ParseUint(validatorIndexString, 10, 64)
@@ -176,7 +198,6 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
}
validatorIndices[index] = primitives.ValidatorIndex(validatorIndex)
}
key := committeeIndexSlotPair{
committeeIndex: primitives.CommitteeIndex(committeeIndex),
slot: primitives.Slot(slot),
@@ -184,16 +205,19 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
committeeMapping[key] = validatorIndices
}
duties := make([]*ethpb.DutiesResponse_Duty, len(multipleValidatorStatus.Statuses))
for index, validatorStatus := range multipleValidatorStatus.Statuses {
validatorIndex := multipleValidatorStatus.Indices[index]
pubkey := multipleValidatorStatus.PublicKeys[index]
if err = wg.Wait(); err != nil {
return nil, err
}
var attesterSlot primitives.Slot
var committeeIndex primitives.CommitteeIndex
var committeeValidatorIndices []primitives.ValidatorIndex
duties := make([]*ethpb.DutiesResponse_Duty, len(vals))
for i, v := range vals {
var (
attesterSlot primitives.Slot
committeeIndex primitives.CommitteeIndex
committeeValidatorIndices []primitives.ValidatorIndex
)
if committeeMappingKey, ok := attesterDutiesMapping[validatorIndex]; ok {
if committeeMappingKey, ok := attesterDutiesMapping[v.index]; ok {
committeeIndex = committeeMappingKey.committeeIndex
attesterSlot = committeeMappingKey.slot
@@ -202,15 +226,15 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
}
}
duties[index] = &ethpb.DutiesResponse_Duty{
duties[i] = &ethpb.DutiesResponse_Duty{
Committee: committeeValidatorIndices,
CommitteeIndex: committeeIndex,
AttesterSlot: attesterSlot,
ProposerSlots: proposerDutySlots[validatorIndex],
PublicKey: pubkey,
Status: validatorStatus.Status,
ValidatorIndex: validatorIndex,
IsSyncCommittee: syncDutiesMapping[validatorIndex],
ProposerSlots: proposerDutySlots[v.index],
PublicKey: v.pubkey,
Status: v.status,
ValidatorIndex: v.index,
IsSyncCommittee: syncDutiesMapping[v.index],
CommitteesAtSlot: slotCommittees[strconv.FormatUint(uint64(attesterSlot), 10)],
}
}
@@ -218,6 +242,51 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
return duties, nil
}
func (c *beaconApiValidatorClient) getValidatorsForDuties(ctx context.Context, pubkeys [][]byte) ([]validatorForDuty, error) {
vals := make([]validatorForDuty, 0, len(pubkeys))
stringPubkeysToPubkeys := make(map[string][]byte, len(pubkeys))
stringPubkeys := make([]string, len(pubkeys))
for i, pk := range pubkeys {
stringPk := hexutil.Encode(pk)
stringPubkeysToPubkeys[stringPk] = pk
stringPubkeys[i] = stringPk
}
statusesWithDuties := []string{validator.ActiveOngoing.String(), validator.ActiveExiting.String()}
stateValidatorsResponse, err := c.stateValidatorsProvider.GetStateValidators(ctx, stringPubkeys, nil, statusesWithDuties)
if err != nil {
return nil, errors.Wrap(err, "failed to get state validators")
}
for _, validatorContainer := range stateValidatorsResponse.Data {
val := validatorForDuty{}
validatorIndex, err := strconv.ParseUint(validatorContainer.Index, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse validator index %s", validatorContainer.Index)
}
val.index = primitives.ValidatorIndex(validatorIndex)
stringPubkey := validatorContainer.Validator.Pubkey
pubkey, ok := stringPubkeysToPubkeys[stringPubkey]
if !ok {
return nil, errors.Wrapf(err, "returned public key %s not requested", stringPubkey)
}
val.pubkey = pubkey
status, ok := beaconAPITogRPCValidatorStatus[validatorContainer.Status]
if !ok {
return nil, errors.New("invalid validator status " + validatorContainer.Status)
}
val.status = status
vals = append(vals, val)
}
return vals, nil
}
// GetCommittees retrieves the committees for the given epoch
func (c beaconApiDutiesProvider) GetCommittees(ctx context.Context, epoch primitives.Epoch) ([]*structs.Committee, error) {
committeeParams := url.Values{}
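
The getDuties/getDutiesForEpoch refactor above does three things: it pre-filters validators to statuses that can actually have duties via getValidatorsForDuties, it fetches current-epoch duties in a goroutine while next-epoch duties are fetched on the main path, and inside getDutiesForEpoch it fans the attester, sync, and proposer requests out through an errgroup, with each goroutine writing to its own map and nothing reading those maps before wg.Wait() returns (hence the "MUST NOT be used" comment). A stripped-down sketch of that fan-out pattern; the fetcher signatures below are placeholders, not the real dutiesProvider API:

package duties

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// fetchAll runs three independent duty lookups concurrently. Each goroutine
// assigns to its own result variable, so no locking is required, and the
// results are only read after Wait has confirmed that every goroutine finished.
func fetchAll(
	ctx context.Context,
	getAttester func(context.Context) (map[uint64]uint64, error),
	getProposer func(context.Context) (map[uint64][]uint64, error),
	getSync func(context.Context) (map[uint64]bool, error),
) (map[uint64]uint64, map[uint64][]uint64, map[uint64]bool, error) {
	var (
		attester map[uint64]uint64
		proposer map[uint64][]uint64
		syncSet  map[uint64]bool
	)

	var wg errgroup.Group
	wg.Go(func() error {
		var err error
		attester, err = getAttester(ctx)
		return err
	})
	wg.Go(func() error {
		var err error
		proposer, err = getProposer(ctx)
		return err
	})
	wg.Go(func() error {
		var err error
		syncSet, err = getSync(ctx)
		return err
	})

	// The variables above must not be read before Wait returns.
	if err := wg.Wait(); err != nil {
		return nil, nil, nil, err
	}
	return attester, proposer, syncSet, nil
}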


@@ -9,11 +9,8 @@ import (
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
validatormock "github.com/prysmaticlabs/prysm/v5/testing/validator-mock"
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -541,7 +538,6 @@ func TestGetDutiesForEpoch_Error(t *testing.T) {
{
name: "get proposer duties failed",
expectedError: "failed to get proposer duties for epoch `1`: foo error",
fetchAttesterDutiesError: nil,
fetchProposerDutiesError: errors.New("foo error"),
},
{
@@ -720,28 +716,20 @@ func TestGetDutiesForEpoch_Error(t *testing.T) {
testCase.fetchCommitteesError,
).AnyTimes()
vals := make([]validatorForDuty, len(pubkeys))
for i := 0; i < len(pubkeys); i++ {
vals[i] = validatorForDuty{
pubkey: pubkeys[i],
index: validatorIndices[i],
status: ethpb.ValidatorStatus_ACTIVE,
}
}
validatorClient := &beaconApiValidatorClient{dutiesProvider: dutiesProvider}
_, err := validatorClient.getDutiesForEpoch(
ctx,
epoch,
&ethpb.MultipleValidatorStatusResponse{
PublicKeys: pubkeys,
Indices: validatorIndices,
Statuses: []*ethpb.ValidatorStatusResponse{
{Status: ethpb.ValidatorStatus_UNKNOWN_STATUS},
{Status: ethpb.ValidatorStatus_DEPOSITED},
{Status: ethpb.ValidatorStatus_PENDING},
{Status: ethpb.ValidatorStatus_ACTIVE},
{Status: ethpb.ValidatorStatus_EXITING},
{Status: ethpb.ValidatorStatus_SLASHING},
{Status: ethpb.ValidatorStatus_EXITED},
{Status: ethpb.ValidatorStatus_INVALID},
{Status: ethpb.ValidatorStatus_PARTIALLY_DEPOSITED},
{Status: ethpb.ValidatorStatus_UNKNOWN_STATUS},
{Status: ethpb.ValidatorStatus_DEPOSITED},
{Status: ethpb.ValidatorStatus_PENDING},
},
},
vals,
true,
)
assert.ErrorContains(t, testCase.expectedError, err)
@@ -773,40 +761,6 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
committeeSlots := []primitives.Slot{28, 29, 30}
proposerSlots := []primitives.Slot{31, 32, 33, 34, 35, 36, 37, 38}
statuses := []ethpb.ValidatorStatus{
ethpb.ValidatorStatus_UNKNOWN_STATUS,
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
ethpb.ValidatorStatus_ACTIVE,
ethpb.ValidatorStatus_EXITING,
ethpb.ValidatorStatus_SLASHING,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_INVALID,
ethpb.ValidatorStatus_PARTIALLY_DEPOSITED,
ethpb.ValidatorStatus_UNKNOWN_STATUS,
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
}
multipleValidatorStatus := &ethpb.MultipleValidatorStatusResponse{
PublicKeys: pubkeys,
Indices: validatorIndices,
Statuses: []*ethpb.ValidatorStatusResponse{
{Status: statuses[0]},
{Status: statuses[1]},
{Status: statuses[2]},
{Status: statuses[3]},
{Status: statuses[4]},
{Status: statuses[5]},
{Status: statuses[6]},
{Status: statuses[7]},
{Status: statuses[8]},
{Status: statuses[9]},
{Status: statuses[10]},
{Status: statuses[11]},
},
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -824,7 +778,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidAttesterDuties(pubkeys, validatorIndices, committeeIndices, committeeSlots),
nil,
@@ -842,7 +796,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetSyncDuties(
ctx,
epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidSyncDuties(pubkeys, validatorIndices),
nil,
@@ -883,7 +837,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[0],
Status: statuses[0],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[0],
CommitteesAtSlot: 1,
},
@@ -895,7 +849,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[1],
Status: statuses[1],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[1],
CommitteesAtSlot: 1,
},
@@ -907,7 +861,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[2],
Status: statuses[2],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[2],
CommitteesAtSlot: 1,
},
@@ -919,7 +873,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[3],
Status: statuses[3],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[3],
CommitteesAtSlot: 1,
},
@@ -931,7 +885,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[4],
Status: statuses[4],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[4],
ProposerSlots: expectedProposerSlots1,
CommitteesAtSlot: 1,
@@ -944,7 +898,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[5],
Status: statuses[5],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[5],
ProposerSlots: expectedProposerSlots2,
IsSyncCommittee: testCase.fetchSyncDuties,
@@ -952,47 +906,55 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
},
{
PublicKey: pubkeys[6],
Status: statuses[6],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[6],
ProposerSlots: expectedProposerSlots3,
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[7],
Status: statuses[7],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[7],
ProposerSlots: expectedProposerSlots4,
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[8],
Status: statuses[8],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[8],
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[9],
Status: statuses[9],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[9],
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[10],
Status: statuses[10],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[10],
},
{
PublicKey: pubkeys[11],
Status: statuses[11],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[11],
},
}
validatorClient := &beaconApiValidatorClient{dutiesProvider: dutiesProvider}
vals := make([]validatorForDuty, len(pubkeys))
for i := 0; i < len(pubkeys); i++ {
vals[i] = validatorForDuty{
pubkey: pubkeys[i],
index: validatorIndices[i],
status: ethpb.ValidatorStatus_ACTIVE,
}
}
duties, err := validatorClient.getDutiesForEpoch(
ctx,
epoch,
multipleValidatorStatus,
vals,
testCase.fetchSyncDuties,
)
require.NoError(t, err)
@@ -1018,41 +980,24 @@ func TestGetDuties_Valid(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
statuses := []ethpb.ValidatorStatus{
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
ethpb.ValidatorStatus_ACTIVE,
ethpb.ValidatorStatus_EXITING,
ethpb.ValidatorStatus_SLASHING,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
ethpb.ValidatorStatus_ACTIVE,
}
pubkeys := make([][]byte, len(statuses))
validatorIndices := make([]primitives.ValidatorIndex, len(statuses))
for i := range statuses {
valCount := 12
pubkeys := make([][]byte, valCount)
validatorIndices := make([]primitives.ValidatorIndex, valCount)
vals := make([]validatorForDuty, valCount)
for i := 0; i < valCount; i++ {
pubkeys[i] = []byte(strconv.Itoa(i))
validatorIndices[i] = primitives.ValidatorIndex(i)
vals[i] = validatorForDuty{
pubkey: pubkeys[i],
index: validatorIndices[i],
status: ethpb.ValidatorStatus_ACTIVE,
}
}
committeeIndices := []primitives.CommitteeIndex{25, 26, 27}
committeeSlots := []primitives.Slot{28, 29, 30}
proposerSlots := []primitives.Slot{31, 32, 33, 34, 35, 36, 37, 38}
statusResps := make([]*ethpb.ValidatorStatusResponse, len(statuses))
for i, s := range statuses {
statusResps[i] = &ethpb.ValidatorStatusResponse{Status: s}
}
multipleValidatorStatus := &ethpb.MultipleValidatorStatusResponse{
PublicKeys: pubkeys,
Indices: validatorIndices,
Statuses: statusResps,
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -1070,7 +1015,7 @@ func TestGetDuties_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
testCase.epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidAttesterDuties(pubkeys, validatorIndices, committeeIndices, committeeSlots),
nil,
@@ -1089,7 +1034,7 @@ func TestGetDuties_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetSyncDuties(
ctx,
testCase.epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidSyncDuties(pubkeys, validatorIndices),
nil,
@@ -1143,7 +1088,7 @@ func TestGetDuties_Valid(t *testing.T) {
Data: []*structs.ValidatorContainer{
{
Index: strconv.FormatUint(uint64(validatorIndices[0]), 10),
Status: "pending_initialized",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[0]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1151,7 +1096,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[1]), 10),
Status: "pending_queued",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[1]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1167,7 +1112,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[3]), 10),
Status: "active_exiting",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[3]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1175,7 +1120,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[4]), 10),
Status: "active_slashed",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[4]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1183,7 +1128,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[5]), 10),
Status: "exited_unslashed",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[5]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1191,7 +1136,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[6]), 10),
Status: "exited_slashed",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[6]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1199,7 +1144,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[7]), 10),
Status: "withdrawal_possible",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[7]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1207,7 +1152,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[8]), 10),
Status: "withdrawal_done",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[8]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1215,7 +1160,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[9]), 10),
Status: "pending_initialized",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[9]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1223,7 +1168,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[10]), 10),
Status: "pending_queued",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[10]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1242,27 +1187,16 @@ func TestGetDuties_Valid(t *testing.T) {
nil,
).MinTimes(1)
prysmBeaconChainClient := validatormock.NewMockPrysmBeaconChainClient(ctrl)
prysmBeaconChainClient.EXPECT().GetValidatorCount(
ctx,
gomock.Any(),
gomock.Any(),
).Return(
nil,
iface.ErrNotSupported,
).MinTimes(1)
// Make sure that our values are equal to what would be returned by calling getDutiesForEpoch individually
validatorClient := &beaconApiValidatorClient{
dutiesProvider: dutiesProvider,
stateValidatorsProvider: stateValidatorsProvider,
prysmBeaconChainCLient: prysmBeaconChainClient,
}
expectedCurrentEpochDuties, err := validatorClient.getDutiesForEpoch(
ctx,
testCase.epoch,
multipleValidatorStatus,
vals,
fetchSyncDuties,
)
require.NoError(t, err)
@@ -1270,7 +1204,7 @@ func TestGetDuties_Valid(t *testing.T) {
expectedNextEpochDuties, err := validatorClient.getDutiesForEpoch(
ctx,
testCase.epoch+1,
multipleValidatorStatus,
vals,
fetchSyncDuties,
)
require.NoError(t, err)
@@ -1291,7 +1225,7 @@ func TestGetDuties_Valid(t *testing.T) {
}
}
func TestGetDuties_GetValidatorStatusFailed(t *testing.T) {
func TestGetDuties_GetStateValidatorsFailed(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -1316,7 +1250,7 @@ func TestGetDuties_GetValidatorStatusFailed(t *testing.T) {
Epoch: 1,
PublicKeys: [][]byte{},
})
assert.ErrorContains(t, "failed to get validator status", err)
assert.ErrorContains(t, "failed to get state validators", err)
assert.ErrorContains(t, "foo error", err)
}
@@ -1325,6 +1259,7 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) {
defer ctrl.Finish()
ctx := context.Background()
pubkey := []byte{1, 2, 3}
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().GetStateValidators(
@@ -1334,7 +1269,13 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) {
gomock.Any(),
).Return(
&structs.GetValidatorsResponse{
Data: []*structs.ValidatorContainer{},
Data: []*structs.ValidatorContainer{{
Index: "0",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkey),
},
}},
},
nil,
).Times(1)
@@ -1348,26 +1289,28 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) {
nil,
errors.New("foo error"),
).Times(1)
prysmBeaconChainClient := validatormock.NewMockPrysmBeaconChainClient(ctrl)
prysmBeaconChainClient.EXPECT().GetValidatorCount(
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
primitives.Epoch(2),
gomock.Any(),
).Times(1)
dutiesProvider.EXPECT().GetProposerDuties(
ctx,
gomock.Any(),
).Times(2)
dutiesProvider.EXPECT().GetCommittees(
ctx,
gomock.Any(),
).Return(
nil,
iface.ErrNotSupported,
).MinTimes(1)
).Times(2)
validatorClient := &beaconApiValidatorClient{
stateValidatorsProvider: stateValidatorsProvider,
dutiesProvider: dutiesProvider,
prysmBeaconChainCLient: prysmBeaconChainClient,
}
_, err := validatorClient.getDuties(ctx, &ethpb.DutiesRequest{
Epoch: 1,
PublicKeys: [][]byte{},
PublicKeys: [][]byte{pubkey},
})
assert.ErrorContains(t, "failed to get duties for current epoch `1`", err)
assert.ErrorContains(t, "foo error", err)


@@ -11,10 +11,14 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (c *beaconApiValidatorClient) submitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error) {
func (c *beaconApiValidatorClient) submitAggregateSelectionProof(
ctx context.Context,
in *ethpb.AggregateSelectionRequest,
index primitives.ValidatorIndex,
committeeLength uint64,
) (*ethpb.AggregateSelectionResponse, error) {
isOptimistic, err := c.isOptimistic(ctx)
if err != nil {
return nil, err
@@ -25,29 +29,7 @@ func (c *beaconApiValidatorClient) submitAggregateSelectionProof(ctx context.Con
return nil, errors.New("the node is currently optimistic and cannot serve validators")
}
validatorIndexResponse, err := c.validatorIndex(ctx, &ethpb.ValidatorIndexRequest{PublicKey: in.PublicKey})
if err != nil {
return nil, errors.Wrap(err, "failed to get validator index")
}
attesterDuties, err := c.dutiesProvider.GetAttesterDuties(ctx, slots.ToEpoch(in.Slot), []primitives.ValidatorIndex{validatorIndexResponse.Index})
if err != nil {
return nil, errors.Wrap(err, "failed to get attester duties")
}
if len(attesterDuties) == 0 {
return nil, errors.Errorf("no attester duty for the given slot %d", in.Slot)
}
// First attester duty is required since we requested attester duties for one validator index.
attesterDuty := attesterDuties[0]
committeeLen, err := strconv.ParseUint(attesterDuty.CommitteeLength, 10, 64)
if err != nil {
return nil, errors.Wrap(err, "failed to parse committee length")
}
isAggregator, err := helpers.IsAggregator(committeeLen, in.SlotSignature)
isAggregator, err := helpers.IsAggregator(committeeLength, in.SlotSignature)
if err != nil {
return nil, errors.Wrap(err, "failed to get aggregator status")
}
@@ -77,7 +59,7 @@ func (c *beaconApiValidatorClient) submitAggregateSelectionProof(ctx context.Con
return &ethpb.AggregateSelectionResponse{
AggregateAndProof: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: validatorIndexResponse.Index,
AggregatorIndex: index,
Aggregate: aggregatedAttestation,
SelectionProof: in.SlotSignature,
},
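
With the caller now supplying the validator index and committee length, submitAggregateSelectionProof above drops its extra round-trips to the validators and attester-duties endpoints and goes straight to helpers.IsAggregator. For orientation, a self-contained sketch of the aggregator selection rule that helper implements per the consensus spec (TARGET_AGGREGATORS_PER_COMMITTEE = 16); this standalone version is only illustrative, the real implementation lives in beacon-chain/core/helpers:

package aggregator

import (
	"crypto/sha256"
	"encoding/binary"
)

const targetAggregatorsPerCommittee = 16

// isAggregator hashes the validator's slot signature, takes the first 8 bytes
// as a little-endian integer, and checks it against
// modulo = max(1, committeeLen/targetAggregatorsPerCommittee).
func isAggregator(committeeLen uint64, slotSignature []byte) bool {
	modulo := committeeLen / targetAggregatorsPerCommittee
	if modulo < 1 {
		modulo = 1
	}
	digest := sha256.Sum256(slotSignature)
	return binary.LittleEndian.Uint64(digest[:8])%modulo == 0
}

This is also why the "validator is not an aggregator" case in the test file that follows only needs to set committeesAtSlot to 64: with a committee length of 64 the modulo becomes 4, and the fixed test signature no longer passes the selection check.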


@@ -1,12 +1,9 @@
package beacon_api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -15,7 +12,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/prysmaticlabs/prysm/v5/validator/client/beacon-api/mock"
test_helpers "github.com/prysmaticlabs/prysm/v5/validator/client/beacon-api/test-helpers"
"go.uber.org/mock/gomock"
@@ -25,26 +21,15 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
const (
pubkeyStr = "0x8000091c2ae64ee414a54c1cc1fc67dec663408bc636cb86756e0200e41a75c8f86603f104f02c856983d2783116be13"
syncingEndpoint = "/eth/v1/node/syncing"
attesterDutiesEndpoint = "/eth/v1/validator/duties/attester"
validatorsEndpoint = "/eth/v1/beacon/states/head/validators"
attestationDataEndpoint = "/eth/v1/validator/attestation_data"
aggregateAttestationEndpoint = "/eth/v1/validator/aggregate_attestation"
validatorIndex = "55293"
validatorIndex = primitives.ValidatorIndex(55293)
slotSignature = "0x8776a37d6802c4797d113169c5fcfda50e68a32058eb6356a6f00d06d7da64c841a00c7c38b9b94a204751eca53707bd03523ce4797827d9bacff116a6e776a20bbccff4b683bf5201b610797ed0502557a58a65c8395f8a1649b976c3112d15"
slot = primitives.Slot(123)
committeeIndex = primitives.CommitteeIndex(1)
committeesAtSlot = uint64(1)
)
attesterDuties := []*structs.AttesterDuty{
{
Pubkey: pubkeyStr,
ValidatorIndex: validatorIndex,
Slot: "123",
CommitteeIndex: "1",
CommitteeLength: "3",
},
}
attestationDataResponse := generateValidAttestation(uint64(slot), uint64(committeeIndex))
attestationDataProto, err := attestationDataResponse.Data.ToConsensus()
require.NoError(t, err)
@@ -64,22 +49,15 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
name string
isOptimistic bool
syncingErr error
validatorsErr error
dutiesErr error
attestationDataErr error
aggregateAttestationErr error
duties []*structs.AttesterDuty
validatorsCalled int
attesterDutiesCalled int
attestationDataCalled int
aggregateAttestationCalled int
expectedErrorMsg string
committeesAtSlot uint64
}{
{
name: "success",
duties: attesterDuties,
validatorsCalled: 1,
attesterDutiesCalled: 1,
attestationDataCalled: 1,
aggregateAttestationCalled: 1,
},
@@ -93,60 +71,23 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
syncingErr: errors.New("bad request"),
expectedErrorMsg: "failed to get syncing status",
},
{
name: "validator index error",
validatorsCalled: 1,
validatorsErr: errors.New("bad request"),
expectedErrorMsg: "failed to get validator index",
},
{
name: "attester duties error",
duties: attesterDuties,
validatorsCalled: 1,
attesterDutiesCalled: 1,
dutiesErr: errors.New("bad request"),
expectedErrorMsg: "failed to get attester duties",
},
{
name: "attestation data error",
duties: attesterDuties,
validatorsCalled: 1,
attesterDutiesCalled: 1,
attestationDataCalled: 1,
attestationDataErr: errors.New("bad request"),
expectedErrorMsg: fmt.Sprintf("failed to get attestation data for slot=%d and committee_index=%d", slot, committeeIndex),
},
{
name: "aggregate attestation error",
duties: attesterDuties,
validatorsCalled: 1,
attesterDutiesCalled: 1,
attestationDataCalled: 1,
aggregateAttestationCalled: 1,
aggregateAttestationErr: errors.New("bad request"),
expectedErrorMsg: "bad request",
},
{
name: "validator is not an aggregator",
duties: []*structs.AttesterDuty{
{
Pubkey: pubkeyStr,
ValidatorIndex: validatorIndex,
Slot: "123",
CommitteeIndex: "1",
CommitteeLength: "64",
},
},
validatorsCalled: 1,
attesterDutiesCalled: 1,
expectedErrorMsg: "validator is not an aggregator",
},
{
name: "no attester duties",
duties: []*structs.AttesterDuty{},
validatorsCalled: 1,
attesterDutiesCalled: 1,
expectedErrorMsg: fmt.Sprintf("no attester duty for the given slot %d", slot),
name: "validator is not an aggregator",
committeesAtSlot: 64,
expectedErrorMsg: "validator is not an aggregator",
},
}
@@ -171,76 +112,6 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
test.syncingErr,
).Times(1)
valsReq := &structs.GetValidatorsRequest{
Ids: []string{stringPubKey},
Statuses: []string{},
}
valReqBytes, err := json.Marshal(valsReq)
require.NoError(t, err)
// Call validators endpoint to get validator index.
jsonRestHandler.EXPECT().Post(
ctx,
validatorsEndpoint,
nil,
bytes.NewBuffer(valReqBytes),
&structs.GetValidatorsResponse{},
).SetArg(
4,
structs.GetValidatorsResponse{
Data: []*structs.ValidatorContainer{
{
Index: validatorIndex,
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: pubkeyStr,
},
},
},
},
).Return(
test.validatorsErr,
).Times(test.validatorsCalled)
if test.validatorsErr != nil {
// Then try the GET call which will also return error.
queryParams := url.Values{}
for _, id := range valsReq.Ids {
queryParams.Add("id", id)
}
for _, st := range valsReq.Statuses {
queryParams.Add("status", st)
}
query := buildURL("/eth/v1/beacon/states/head/validators", queryParams)
jsonRestHandler.EXPECT().Get(
ctx,
query,
&structs.GetValidatorsResponse{},
).Return(
test.validatorsErr,
).Times(1)
}
// Call attester duties endpoint to get attester duties.
validatorIndicesBytes, err := json.Marshal([]string{validatorIndex})
require.NoError(t, err)
jsonRestHandler.EXPECT().Post(
ctx,
fmt.Sprintf("%s/%d", attesterDutiesEndpoint, slots.ToEpoch(slot)),
nil,
bytes.NewBuffer(validatorIndicesBytes),
&structs.GetAttesterDutiesResponse{},
).SetArg(
4,
structs.GetAttesterDutiesResponse{
Data: test.duties,
},
).Return(
test.dutiesErr,
).Times(test.attesterDutiesCalled)
// Call attestation data to get attestation data root to query aggregate attestation.
jsonRestHandler.EXPECT().Get(
ctx,
@@ -290,12 +161,17 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
jsonRestHandler: jsonRestHandler,
},
}
committees := committeesAtSlot
if test.committeesAtSlot != 0 {
committees = test.committeesAtSlot
}
actualResponse, err := validatorClient.submitAggregateSelectionProof(ctx, &ethpb.AggregateSelectionRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
PublicKey: pubkey,
SlotSignature: slotSignatureBytes,
})
}, validatorIndex, committees)
if test.expectedErrorMsg == "" {
require.NoError(t, err)
assert.DeepEqual(t, expectedResponse, actualResponse)


@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/client"
eventClient "github.com/prysmaticlabs/prysm/v5/api/client/event"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
log "github.com/sirupsen/logrus"
@@ -82,7 +83,7 @@ func (c *grpcValidatorClient) StreamBlocksAltair(ctx context.Context, in *ethpb.
return c.beaconNodeValidatorClient.StreamBlocksAltair(ctx, in)
}
func (c *grpcValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error) {
func (c *grpcValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, _ primitives.ValidatorIndex, _ uint64) (*ethpb.AggregateSelectionResponse, error) {
return c.beaconNodeValidatorClient.SubmitAggregateSelectionProof(ctx, in)
}


@@ -135,7 +135,7 @@ type ValidatorClient interface {
GetFeeRecipientByPubKey(ctx context.Context, in *ethpb.FeeRecipientByPubKeyRequest) (*ethpb.FeeRecipientByPubKeyResponse, error)
GetAttestationData(ctx context.Context, in *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error)
ProposeAttestation(ctx context.Context, in *ethpb.Attestation) (*ethpb.AttestResponse, error)
SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error)
SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, index primitives.ValidatorIndex, committeeLength uint64) (*ethpb.AggregateSelectionResponse, error)
SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error)
ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error)
SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*empty.Empty, error)
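
The interface change above widens ValidatorClient.SubmitAggregateSelectionProof so every implementation receives the validator index and committee length: the gRPC client in the previous file discards them with blank identifiers (the server already knows both), while the beacon-API client uses them to avoid extra lookups. A minimal sketch of that shape, with obviously hypothetical request/response types standing in for the real protobufs:

package client

import "context"

// Hypothetical stand-ins for the real protobuf messages.
type aggregateSelectionRequest struct{ Slot uint64 }
type aggregateSelectionResponse struct{ AggregatorIndex uint64 }

// submitter mirrors the widened interface method.
type submitter interface {
	SubmitAggregateSelectionProof(ctx context.Context, in *aggregateSelectionRequest, index, committeeLength uint64) (*aggregateSelectionResponse, error)
}

// grpcBackend forwards only the request; the extra arguments are ignored.
type grpcBackend struct{}

func (grpcBackend) SubmitAggregateSelectionProof(ctx context.Context, in *aggregateSelectionRequest, _, _ uint64) (*aggregateSelectionResponse, error) {
	return &aggregateSelectionResponse{}, nil // would delegate to the gRPC stub here
}

// restBackend uses the caller-supplied values instead of re-querying the node.
type restBackend struct{}

func (restBackend) SubmitAggregateSelectionProof(ctx context.Context, in *aggregateSelectionRequest, index, committeeLength uint64) (*aggregateSelectionResponse, error) {
	_ = committeeLength // fed into the aggregator check in the real code
	return &aggregateSelectionResponse{AggregatorIndex: index}, nil
}

var (
	_ submitter = grpcBackend{}
	_ submitter = restBackend{}
)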


@@ -660,6 +660,8 @@ func (c *ValidatorClient) registerRPCService(router *mux.Router) error {
ClientGrpcRetryDelay: grpcRetryDelay,
ClientGrpcHeaders: strings.Split(grpcHeaders, ","),
ClientWithCert: clientCert,
BeaconApiTimeout: time.Second * 30,
BeaconApiEndpoint: c.cliCtx.String(flags.BeaconRESTApiProviderFlag.Name),
Router: router,
})
return c.services.RegisterService(server)


@@ -158,24 +158,29 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
}
pageToken := r.URL.Query().Get("page_token")
publicKeys := r.URL.Query()["public_keys"]
pubkeys := make([][]byte, len(publicKeys))
pubkeys := make([][]byte, 0)
for i, key := range publicKeys {
var pk []byte
if key == "" {
continue
}
if strings.HasPrefix(key, "0x") {
k, ok := shared.ValidateHex(w, fmt.Sprintf("PublicKeys[%d]", i), key, fieldparams.BLSPubkeyLength)
if !ok {
return
}
pk = bytesutil.SafeCopyBytes(k)
pubkeys = append(pubkeys, bytesutil.SafeCopyBytes(k))
} else {
data, err := base64.StdEncoding.DecodeString(key)
if err != nil {
httputil.HandleError(w, errors.Wrap(err, "Failed to decode base64").Error(), http.StatusBadRequest)
return
}
pk = bytesutil.SafeCopyBytes(data)
pubkeys = append(pubkeys, bytesutil.SafeCopyBytes(data))
}
pubkeys[i] = pk
}
if len(pubkeys) == 0 {
httputil.HandleError(w, "no pubkeys provided", http.StatusBadRequest)
return
}
req := &ethpb.ListValidatorsRequest{
PublicKeys: pubkeys,
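
The GetValidators handler above now drops empty public_keys query values instead of leaving zero-length entries in the request, accepts both 0x-hex and base64 encodings, and returns http.StatusBadRequest when no usable key remains. A small standalone sketch of that parsing logic, using only the standard library (shared.ValidateHex and bytesutil.SafeCopyBytes from the real handler are replaced with plain decoding here):

package handlers

import (
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

// parsePubkeys drops empty values, hex-decodes "0x"-prefixed keys,
// base64-decodes the rest, and errors out when nothing usable was supplied.
func parsePubkeys(values []string) ([][]byte, error) {
	pubkeys := make([][]byte, 0, len(values))
	for i, v := range values {
		if v == "" {
			continue
		}
		var (
			key []byte
			err error
		)
		if strings.HasPrefix(v, "0x") {
			key, err = hex.DecodeString(strings.TrimPrefix(v, "0x"))
		} else {
			key, err = base64.StdEncoding.DecodeString(v)
		}
		if err != nil {
			return nil, fmt.Errorf("public_keys[%d]: %w", i, err)
		}
		pubkeys = append(pubkeys, key)
	}
	if len(pubkeys) == 0 {
		return nil, errors.New("no pubkeys provided")
	}
	return pubkeys, nil
}

The "extra public key that's empty" and "no public keys passed" cases in the test file that follows exercise the skip-empty branch and the final error respectively.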


@@ -9,6 +9,7 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -102,3 +103,154 @@ func TestGetBeaconStatus_OK(t *testing.T) {
}
assert.DeepEqual(t, want, resp)
}
func TestServer_GetValidators(t *testing.T) {
tests := []struct {
name string
query string
expectedReq *ethpb.ListValidatorsRequest
chainResp *ethpb.Validators
want *ValidatorsResponse
wantCode int
wantErr string
}{
{
name: "happypath on page_size, page_token, public_keys",
wantCode: http.StatusOK,
query: "page_size=4&page_token=0&public_keys=0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4",
expectedReq: func() *ethpb.ListValidatorsRequest {
b, err := hexutil.Decode("0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4")
require.NoError(t, err)
pubkeys := [][]byte{b}
return &ethpb.ListValidatorsRequest{
PublicKeys: pubkeys,
PageSize: int32(4),
PageToken: "0",
}
}(),
chainResp: func() *ethpb.Validators {
b, err := hexutil.Decode("0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4")
require.NoError(t, err)
return &ethpb.Validators{
Epoch: 0,
ValidatorList: []*ethpb.Validators_ValidatorContainer{
{
Index: 0,
Validator: &ethpb.Validator{
PublicKey: b,
},
},
},
NextPageToken: "0",
TotalSize: 0,
}
}(),
want: &ValidatorsResponse{
Epoch: 0,
ValidatorList: []*ValidatorContainer{
{
Index: 0,
Validator: &Validator{
PublicKey: "0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4",
WithdrawalCredentials: "0x",
EffectiveBalance: 0,
Slashed: false,
ActivationEligibilityEpoch: 0,
ActivationEpoch: 0,
ExitEpoch: 0,
WithdrawableEpoch: 0,
},
},
},
NextPageToken: "0",
TotalSize: 0,
},
},
{
name: "extra public key that's empty still returns correct response",
wantCode: http.StatusOK,
query: "page_size=4&page_token=0&public_keys=0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4&public_keys=",
expectedReq: func() *ethpb.ListValidatorsRequest {
b, err := hexutil.Decode("0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4")
require.NoError(t, err)
pubkeys := [][]byte{b}
return &ethpb.ListValidatorsRequest{
PublicKeys: pubkeys,
PageSize: int32(4),
PageToken: "0",
}
}(),
chainResp: func() *ethpb.Validators {
b, err := hexutil.Decode("0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4")
require.NoError(t, err)
return &ethpb.Validators{
Epoch: 0,
ValidatorList: []*ethpb.Validators_ValidatorContainer{
{
Index: 0,
Validator: &ethpb.Validator{
PublicKey: b,
},
},
},
NextPageToken: "0",
TotalSize: 0,
}
}(),
want: &ValidatorsResponse{
Epoch: 0,
ValidatorList: []*ValidatorContainer{
{
Index: 0,
Validator: &Validator{
PublicKey: "0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4",
WithdrawalCredentials: "0x",
EffectiveBalance: 0,
Slashed: false,
ActivationEligibilityEpoch: 0,
ActivationEpoch: 0,
ExitEpoch: 0,
WithdrawableEpoch: 0,
},
},
},
NextPageToken: "0",
TotalSize: 0,
},
},
{
name: "no public keys passed results in error",
wantCode: http.StatusBadRequest,
query: "page_size=4&page_token=0&public_keys=",
wantErr: "no pubkeys provided",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
beaconChainClient := validatormock.NewMockBeaconChainClient(ctrl)
if tt.wantErr == "" {
beaconChainClient.EXPECT().ListValidators(
gomock.Any(), // ctx
tt.expectedReq,
).Return(tt.chainResp, nil)
}
s := &Server{
beaconChainClient: beaconChainClient,
}
req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/v2/validator/beacon/validators?%s", tt.query), http.NoBody)
wr := httptest.NewRecorder()
wr.Body = &bytes.Buffer{}
s.GetValidators(wr, req)
require.Equal(t, tt.wantCode, wr.Code)
if tt.wantErr != "" {
require.StringContains(t, tt.wantErr, string(wr.Body.Bytes()))
} else {
resp := &ValidatorsResponse{}
require.NoError(t, json.Unmarshal(wr.Body.Bytes(), resp))
require.DeepEqual(t, resp, tt.want)
}
})
}
}


@@ -53,6 +53,8 @@ type Config struct {
GenesisFetcher client.GenesisFetcher
WalletInitializedFeed *event.Feed
NodeGatewayEndpoint string
BeaconApiEndpoint string
BeaconApiTimeout time.Duration
Router *mux.Router
Wallet *wallet.Wallet
}
@@ -130,6 +132,8 @@ func NewServer(ctx context.Context, cfg *Config) *Server {
validatorMonitoringPort: cfg.ValidatorMonitoringPort,
validatorGatewayHost: cfg.ValidatorGatewayHost,
validatorGatewayPort: cfg.ValidatorGatewayPort,
beaconApiTimeout: cfg.BeaconApiTimeout,
beaconApiEndpoint: cfg.BeaconApiEndpoint,
router: cfg.Router,
}
// immediately register routes to override any catchalls