Compare commits

...

13 Commits

Author SHA1 Message Date
kasey
b3053dc96a Refactor batch verifier for sharing across packages (#13812)
* refactor batch verifier to share with pending queue

* unit test for batch verifier

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
(cherry picked from commit cdd1d819df)
2024-03-27 07:43:15 -05:00
Bharath Vedartham
3d2230223f create the log file along with its parent directory if not present (#12675)
* Remove Feature Flag From Prater (#12082)

* Use Epoch boundary cache to retrieve balances (#12083)

* Use Epoch boundary cache to retrieve balances

* save boundary states before inserting to forkchoice

* move up last block save

* remove boundary checks on balances

* fix ordering

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* create the log file along with its parent directory if not present

* only give ReadWritePermissions to the log file

* use io/file package to create the parent directories

* fix ci related issues

* add regression tests

* run gazelle

* fix tests

* remove print statements

* gazelle

* Remove failing test for MkdirAll; this failure is not expected

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-22 15:32:08 +00:00
Preston Van Loon
b008a6422d Add tarball support for docker images (#13790) 2024-03-22 15:31:29 +00:00
Fredrik Svantes
d19365507f Set default LocalBlockValueBoost to 10 (#13772)
* Set default LocalBlockValueBoost to 10

* Update base.go

* Update mainnet_config.go
2024-03-22 13:18:20 +00:00
kasey
c05e39a668 fix handling of goodbye messages for limited peers (#13785)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-22 13:06:16 +00:00
Radosław Kapka
63c2b3563a Optimize GetDuties VC action (#13789)
* wait groups

* errgroup

* tests

* bzl

* review
2024-03-22 09:50:19 +00:00
Justin Traglia
a6e86c6731 Rename payloadattribute Timestamps to Timestamp (#13523)
Co-authored-by: terence <terence@prysmaticlabs.com>
2024-03-21 21:11:01 +00:00
Radosław Kapka
32fb183392 Modify the algorithm of updateFinalizedBlockRoots (#13486)
* rename error var

* new algo

* replay_test

* add comment

* review

* fill out parent root

* handle edge cases

* review
2024-03-21 21:09:56 +00:00
carrychair
cade09ba0b chore: fix some typos (#13726)
Signed-off-by: carrychair <linghuchong404@gmail.com>
2024-03-21 21:00:21 +00:00
Potuz
f85ddfe265 Log the slot and blockroot when we deadline waiting for blobs (#13774) 2024-03-21 20:29:23 +00:00
terence
3b97094ea4 Log da block root in hex (#13787) 2024-03-21 20:26:17 +00:00
Nishant Das
acdbf7c491 expand it (#13770) 2024-03-21 19:57:22 +00:00
Potuz
1cc1effd75 Revert "pass justified=finalized in Prater (#13695)" (#13709)
This reverts commit 102518e106.
2024-03-21 17:42:40 +00:00
34 changed files with 801 additions and 603 deletions

View File

@@ -130,9 +130,9 @@ aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
strip_prefix = "rules_oci-1.7.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")

View File

@@ -586,7 +586,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
}
}
}
@@ -594,7 +594,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": root,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
}
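
The two hunks above switch the logged block root from Go's default array formatting to a 0x-prefixed hex string via %#x. A minimal, self-contained sketch of the difference (illustration only, not part of the change):

package main

import "fmt"

func main() {
	var root [32]byte
	root[0] = 0xde
	root[1] = 0xad
	// Default formatting prints a 32-element decimal array: [222 173 0 0 ...]
	fmt.Println(root)
	// %#x prints the compact 0x-prefixed hex string used by the new log fields.
	fmt.Printf("%#x\n", root)
}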

View File

@@ -290,18 +290,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if params.BeaconConfig().ConfigName != params.PraterName {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
} else {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")

View File

@@ -224,7 +224,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
return ErrDeleteJustifiedAndFinalized
return ErrDeleteFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {

View File

@@ -289,7 +289,7 @@ func TestStore_DeleteBlock(t *testing.T) {
require.Equal(t, b, nil)
require.Equal(t, false, db.HasStateSummary(ctx, root2))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
}
func TestStore_DeleteJustifiedBlock(t *testing.T) {
@@ -309,7 +309,7 @@ func TestStore_DeleteJustifiedBlock(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
}
func TestStore_DeleteFinalizedBlock(t *testing.T) {
@@ -329,7 +329,7 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)

View File

@@ -2,8 +2,8 @@ package kv
import "github.com/pkg/errors"
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
// ErrDeleteFinalized is raised when we attempt to delete a finalized block/state
var ErrDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.

View File

@@ -5,7 +5,6 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -29,72 +28,76 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
// finalized epoch from being indexed as "final and canonical".
//
// The algorithm for building the index works as follows:
// - De-index all finalized beacon block roots from previous_finalized_epoch to
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
//
// This method ensures that all blocks from the current finalized epoch are considered "final" while
// maintaining only canonical and finalized blocks older than the current finalized epoch.
// The main part of the algorithm traverses parent->child block relationships in the
// `blockParentRootIndicesBucket` bucket to find the path between the last finalized checkpoint
// and the current finalized checkpoint. It relies on the invariant that there is a unique path
// between two finalized checkpoints.
func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, checkpoint *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateFinalizedBlockRoots")
defer span.End()
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
root := checkpoint.Root
var previousRoot []byte
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
// De-index recent finalized block roots, to be re-indexed.
finalizedBkt := tx.Bucket(finalizedBlockRootsIndexBucket)
previousFinalizedCheckpoint := &ethpb.Checkpoint{}
if b := bkt.Get(previousFinalizedCheckpointKey); b != nil {
if b := finalizedBkt.Get(previousFinalizedCheckpointKey); b != nil {
if err := decode(ctx, b, previousFinalizedCheckpoint); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
blockRoots, err := s.BlockRoots(ctx, filters.NewFilter().
SetStartEpoch(previousFinalizedCheckpoint.Epoch).
SetEndEpoch(checkpoint.Epoch+1),
)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
for _, root := range blockRoots {
if err := bkt.Delete(root[:]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
// index bucket or genesis block root.
for {
if bytes.Equal(root, genesisRoot) {
break
}
signedBlock, err := s.Block(ctx, bytesutil.ToBytes32(root))
// Handle the case of checkpoint sync.
if previousFinalizedCheckpoint.Root == nil && bytes.Equal(checkpoint.Root, tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)) {
container := &ethpb.FinalizedBlockRootContainer{}
enc, err := encode(ctx, container)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := blocks.BeaconBlockIsNil(signedBlock); err != nil {
if err = finalizedBkt.Put(checkpoint.Root, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
block := signedBlock.Block()
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
}
parentRoot := block.ParentRoot()
container := &ethpb.FinalizedBlockRootContainer{
ParentRoot: parentRoot[:],
ChildRoot: previousRoot,
var finalized [][]byte
if previousFinalizedCheckpoint.Root == nil {
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{genesisRoot}, checkpoint.Root, tx)
} else {
if err := updateChildOfPrevFinalizedCheckpoint(
ctx,
span,
finalizedBkt,
tx.Bucket(blockParentRootIndicesBucket), previousFinalizedCheckpoint.Root,
); err != nil {
return err
}
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{previousFinalizedCheckpoint.Root}, checkpoint.Root, tx)
}
for i, r := range finalized {
var container *ethpb.FinalizedBlockRootContainer
switch i {
case 0:
container = &ethpb.FinalizedBlockRootContainer{
ParentRoot: previousFinalizedCheckpoint.Root,
}
if len(finalized) > 1 {
container.ChildRoot = finalized[i+1]
}
case len(finalized) - 1:
// We don't know the finalized child of the new finalized checkpoint.
// It will be filled out in the next function call.
container = &ethpb.FinalizedBlockRootContainer{}
if len(finalized) > 1 {
container.ParentRoot = finalized[i-1]
}
default:
container = &ethpb.FinalizedBlockRootContainer{
ParentRoot: finalized[i-1],
ChildRoot: finalized[i+1],
}
}
enc, err := encode(ctx, container)
@@ -102,66 +105,13 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
tracing.AnnotateError(span, err)
return err
}
if err := bkt.Put(root, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
// breaking here allows the initial checkpoint root to be correctly inserted,
// but stops the loop from trying to search for its parent.
if bytes.Equal(root, initCheckpointRoot) {
break
}
// Found parent, loop exit condition.
pr := block.ParentRoot()
if parentBytes := bkt.Get(pr[:]); parentBytes != nil {
parent := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, parentBytes, parent); err != nil {
tracing.AnnotateError(span, err)
return err
}
parent.ChildRoot = root
enc, err := encode(ctx, parent)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := bkt.Put(pr[:], enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
break
}
previousRoot = root
root = pr[:]
}
// Upsert blocks from the current finalized epoch.
roots, err := s.BlockRoots(ctx, filters.NewFilter().SetStartEpoch(checkpoint.Epoch).SetEndEpoch(checkpoint.Epoch+1))
if err != nil {
tracing.AnnotateError(span, err)
return err
}
for _, root := range roots {
root := root[:]
if bytes.Equal(root, checkpoint.Root) || bkt.Get(root) != nil {
continue
}
if err := bkt.Put(root, containerFinalizedButNotCanonical); err != nil {
if err = finalizedBkt.Put(r, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
// Update previous checkpoint
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
return bkt.Put(previousFinalizedCheckpointKey, enc)
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
}
// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
@@ -242,8 +192,6 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
// A beacon block root contained exists in this index if it is considered finalized and canonical.
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
// considered canonical in the "head view" of the beacon node.
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
defer span.End()
@@ -296,3 +244,53 @@ func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (in
tracing.AnnotateError(span, err)
return blk, err
}
func pathToFinalizedCheckpoint(ctx context.Context, roots [][]byte, checkpointRoot []byte, tx *bolt.Tx) (bool, [][]byte) {
if len(roots) == 0 || (len(roots) == 1 && roots[0] == nil) {
return false, nil
}
for _, r := range roots {
if bytes.Equal(r, checkpointRoot) {
return true, [][]byte{r}
}
children := lookupValuesForIndices(ctx, map[string][]byte{string(blockParentRootIndicesBucket): r}, tx)
if len(children) == 0 {
children = [][][]byte{nil}
}
isPath, path := pathToFinalizedCheckpoint(ctx, children[0], checkpointRoot, tx)
if isPath {
return true, append([][]byte{r}, path...)
}
}
return false, nil
}
func updatePrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt *bolt.Bucket, checkpoint *ethpb.Checkpoint) error {
enc, err := encode(ctx, checkpoint)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
return finalizedBkt.Put(previousFinalizedCheckpointKey, enc)
}
func updateChildOfPrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt, parentBkt *bolt.Bucket, checkpointRoot []byte) error {
container := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, finalizedBkt.Get(checkpointRoot), container); err != nil {
tracing.AnnotateError(span, err)
return err
}
container.ChildRoot = parentBkt.Get(checkpointRoot)
enc, err := encode(ctx, container)
if err != nil {
tracing.AnnotateError(span, err)
return err
}
if err = finalizedBkt.Put(checkpointRoot, enc); err != nil {
tracing.AnnotateError(span, err)
return err
}
return nil
}
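
The heart of the new algorithm is pathToFinalizedCheckpoint, a depth-first walk over the parent->child index that recovers the unique chain of roots between two finalized checkpoints. Below is a standalone sketch of the same recursion, with a plain map standing in for the blockParentRootIndicesBucket reads done inside the bolt transaction (the map-based index and function name are illustrative assumptions):

package main

import (
	"bytes"
	"fmt"
)

// pathTo walks the parent->child index depth-first from `from` until the
// target checkpoint root is found, returning the chain of roots in between.
func pathTo(children map[string][][]byte, from, target []byte) (bool, [][]byte) {
	if bytes.Equal(from, target) {
		return true, [][]byte{from}
	}
	for _, c := range children[string(from)] {
		if ok, path := pathTo(children, c, target); ok {
			return true, append([][]byte{from}, path...)
		}
	}
	return false, nil
}

func main() {
	// a -> b -> d, plus a dead-end fork a -> c, as in the edge-case tests.
	idx := map[string][][]byte{
		"a": {[]byte("b"), []byte("c")},
		"b": {[]byte("d")},
	}
	ok, path := pathTo(idx, []byte("a"), []byte("d"))
	fmt.Println(ok, len(path)) // true 3: a, b, d
}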

View File

@@ -26,38 +26,30 @@ func TestStore_IsFinalizedBlock(t *testing.T) {
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
// All blocks up to slotsPerEpoch*2 should be in the finalized index.
for i := uint64(0); i < slotsPerEpoch*2; i++ {
root, err := blks[i].Block().HashTreeRoot()
for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
}
for i := slotsPerEpoch * 3; i < uint64(len(blks)); i++ {
root, err := blks[i].Block().HashTreeRoot()
for i := slotsPerEpoch + 1; i < uint64(len(blks)); i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized in the index, but should not have", i)
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized, but should not have", i)
}
}
func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
func TestStore_IsFinalizedGenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -69,136 +61,114 @@ func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Finalized genesis block doesn't exist in db")
}
// This test scenario is to test a specific edge case where the finalized block root is not part of
// the finalized and canonical chain.
//
// Example:
// 0    1    2    3    4    5    6    slot
// a <- b <-- d <- e <- f <- g        roots
//      ^- c
//
// Imagine that epochs are 2 slots and that epoch 1, 2, and 3 are finalized. Checkpoint roots would
// be c, e, and g. In this scenario, c was a finalized checkpoint root but no block built upon it so
// it should not be considered "final and canonical" in the view at slot 6.
func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
blocks0 := makeBlocks(t, slotsPerEpoch*0, slotsPerEpoch, genesisBlockRoot)
blocks1 := append(
makeBlocks(t, slotsPerEpoch*1, 1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1]))), // No block builds off of the first block in epoch.
makeBlocks(t, slotsPerEpoch*1+1, slotsPerEpoch-1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1])))...,
)
blocks2 := makeBlocks(t, slotsPerEpoch*2, slotsPerEpoch, bytesutil.ToBytes32(sszRootOrDie(t, blocks1[len(blocks1)-1])))
db := setupDB(t)
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
require.NoError(t, db.SaveBlocks(ctx, blocks0))
require.NoError(t, db.SaveBlocks(ctx, blocks1))
require.NoError(t, db.SaveBlocks(ctx, blocks2))
// First checkpoint
checkpoint1 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks1[0]),
Epoch: 1,
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint1.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint1))
// All blocks in blocks0 and blocks1 should be finalized and canonical.
for i, block := range append(blocks0, blocks1...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}
// Second checkpoint
checkpoint2 := &ethpb.Checkpoint{
Root: sszRootOrDie(t, blocks2[0]),
Epoch: 2,
}
// A state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint2.Root)))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint2))
// All blocks in blocks0 and blocks2 should be finalized and canonical.
for i, block := range append(blocks0, blocks2...) {
root := sszRootOrDie(t, block)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
}
// All blocks in blocks1 should be finalized and canonical, except blocks1[0].
for i, block := range blocks1 {
root := sszRootOrDie(t, block)
if db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)) == (i == 0) {
t.Errorf("Expected db.IsFinalizedBlock(ctx, blocks1[%d]) to be %v", i, i != 0)
}
}
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root))
}
func TestStore_IsFinalizedChildBlock(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.ReadOnlySignedBeaconBlock) {
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
// All blocks up to slotsPerEpoch should have a finalized child block.
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
if blk == nil {
t.Error("Child block doesn't exist for valid finalized block.")
}
}
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
setup := func(t testing.TB) *Store {
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
return db
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
blk, err := db.FinalizedChildBlock(ctx, root)
assert.NoError(t, err)
assert.Equal(t, false, blk == nil, "Child block at index %d was not considered finalized", i)
}
t.Run("phase0", func(t *testing.T) {
db := setup(t)
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})
t.Run("altair", func(t *testing.T) {
db := setup(t)
blks := makeBlocksAltair(t, 0, slotsPerEpoch*3, genesisBlockRoot)
eval(t, ctx, db, blks)
})
}
func sszRootOrDie(t *testing.T, block interfaces.ReadOnlySignedBeaconBlock) []byte {
root, err := block.Block().HashTreeRoot()
func TestStore_ChildRootOfPrevFinalizedCheckpointIsUpdated(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
db := setupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
return root[:]
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
root2, err := blks[slotsPerEpoch*2].Block().HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{
Epoch: 2,
Root: root2[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
container := &ethpb.FinalizedBlockRootContainer{}
f := tx.Bucket(finalizedBlockRootsIndexBucket).Get(root[:])
require.NoError(t, decode(ctx, f, container))
r, err := blks[slotsPerEpoch+1].Block().HashTreeRoot()
require.NoError(t, err)
assert.DeepEqual(t, r[:], container.ChildRoot)
return nil
}))
}
func TestStore_OrphanedBlockIsNotFinalized(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blk0 := util.NewBeaconBlock()
blk0.Block.ParentRoot = genesisBlockRoot[:]
blk0Root, err := blk0.Block.HashTreeRoot()
require.NoError(t, err)
blk1 := util.NewBeaconBlock()
blk1.Block.Slot = 1
blk1.Block.ParentRoot = blk0Root[:]
blk2 := util.NewBeaconBlock()
blk2.Block.Slot = 2
// orphan block at index 1
blk2.Block.ParentRoot = blk0Root[:]
blk2Root, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
sBlk0, err := consensusblocks.NewSignedBeaconBlock(blk0)
require.NoError(t, err)
sBlk1, err := consensusblocks.NewSignedBeaconBlock(blk1)
require.NoError(t, err)
sBlk2, err := consensusblocks.NewSignedBeaconBlock(blk2)
require.NoError(t, err)
blks := append([]interfaces.ReadOnlySignedBeaconBlock{sBlk0, sBlk1, sBlk2}, makeBlocks(t, 3, slotsPerEpoch*2-3, blk2Root)...)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
for i := uint64(0); i <= slotsPerEpoch; i++ {
root, err = blks[i].Block().HashTreeRoot()
require.NoError(t, err)
if i == 1 {
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index 1 was considered finalized, but should not have")
} else {
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
}
}
}
func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
@@ -219,24 +189,6 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.R
return ifaceBlocks
}
func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
blocks := make([]*ethpb.SignedBeaconBlockAltair, num)
ifaceBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, num)
for j := startIdx; j < num+startIdx; j++ {
parentRoot := make([]byte, fieldparams.RootLength)
copy(parentRoot, previousRoot[:])
blocks[j-startIdx] = util.NewBeaconBlockAltair()
blocks[j-startIdx].Block.Slot = primitives.Slot(j + 1)
blocks[j-startIdx].Block.ParentRoot = parentRoot
var err error
previousRoot, err = blocks[j-startIdx].Block.HashTreeRoot()
require.NoError(t, err)
ifaceBlocks[j-startIdx], err = consensusblocks.NewSignedBeaconBlock(blocks[j-startIdx])
require.NoError(t, err)
}
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

View File

@@ -458,7 +458,7 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(stateBucket)
// Safeguard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
return ErrDeleteJustifiedAndFinalized
return ErrDeleteFinalized
}
// Nothing to delete if state doesn't exist.

View File

@@ -22,7 +22,7 @@ func TestGossipParameters(t *testing.T) {
pms := pubsubGossipParam()
assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
assert.Equal(t, gossipSubMcacheGossip, pms.HistoryGossip, "gossipSubMcacheGossip")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Milliseconds()/pms.HeartbeatInterval.Milliseconds()), "gossipSubSeenTtl")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Seconds()), "gossipSubSeenTtl")
}
func TestFanoutParameters(t *testing.T) {

View File

@@ -25,7 +25,7 @@ const (
// gossip parameters
gossipSubMcacheLen = 6 // number of windows to retain full messages in cache for `IWANT` responses
gossipSubMcacheGossip = 3 // number of windows to gossip about
gossipSubSeenTTL = 550 // number of heartbeat intervals to retain message IDs
gossipSubSeenTTL = 768 // number of seconds to retain message IDs (2 epochs)
// fanout ttl
gossipSubFanoutTTL = 60000000000 // TTL for fanout maps for topics we are not subscribed to but have published to, in nanoseconds
@@ -165,7 +165,8 @@ func pubsubGossipParam() pubsub.GossipSubParams {
// to configure our message id time-cache rather than instantiating
// it with a router instance.
func setPubSubParameters() {
pubsub.TimeCacheDuration = 550 * gossipSubHeartbeatInterval
seenTtl := 2 * time.Second * time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
pubsub.TimeCacheDuration = seenTtl
}
// convert from libp2p's internal schema to a compatible prysm protobuf format.
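
With mainnet parameters (32 slots per epoch, 12 seconds per slot), the new expression evaluates to 2 * 32 * 12 = 768 seconds, which is where the updated gossipSubSeenTTL constant comes from. A runnable sketch of the arithmetic, with the config values hardcoded for illustration (the real code reads them from params.BeaconConfig()):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mainnet values, assumed here for illustration.
	const slotsPerEpoch = 32
	const secondsPerSlot = 12
	seenTTL := 2 * time.Second * time.Duration(slotsPerEpoch*secondsPerSlot)
	fmt.Println(seenTTL) // 12m48s, i.e. 768 seconds covering two epochs
}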

View File

@@ -189,7 +189,7 @@ func TestLoadBlocks_FirstBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 8, roots[len(roots)-1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 9, roots[len(roots)-1])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -220,7 +220,7 @@ func TestLoadBlocks_SecondBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 5, roots[5])
filteredBlocks, err := s.loadBlocks(ctx, 0, 6, roots[5])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -249,7 +249,7 @@ func TestLoadBlocks_ThirdBranch(t *testing.T) {
roots, savedBlocks, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 7, roots[7])
filteredBlocks, err := s.loadBlocks(ctx, 0, 8, roots[7])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -280,7 +280,7 @@ func TestLoadBlocks_SameSlots(t *testing.T) {
roots, savedBlocks, err := tree2(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[6])
filteredBlocks, err := s.loadBlocks(ctx, 0, 4, roots[6])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -309,7 +309,7 @@ func TestLoadBlocks_SameEndSlots(t *testing.T) {
roots, savedBlocks, err := tree3(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[2])
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[2])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -337,7 +337,7 @@ func TestLoadBlocks_SameEndSlotsWith2blocks(t *testing.T) {
roots, savedBlocks, err := tree4(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
filteredBlocks, err := s.loadBlocks(ctx, 0, 2, roots[1])
filteredBlocks, err := s.loadBlocks(ctx, 0, 3, roots[1])
require.NoError(t, err)
wanted := []*ethpb.SignedBeaconBlock{
@@ -363,7 +363,7 @@ func TestLoadBlocks_BadStart(t *testing.T) {
roots, _, err := tree1(t, beaconDB, bytesutil.PadTo([]byte{'A'}, 32))
require.NoError(t, err)
_, err = s.loadBlocks(ctx, 0, 5, roots[8])
_, err = s.loadBlocks(ctx, 0, 6, roots[8])
assert.ErrorContains(t, "end block roots don't match", err)
}
@@ -374,63 +374,63 @@ func TestLoadBlocks_BadStart(t *testing.T) {
// \- B7
func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b2 := util.NewBeaconBlock()
b2.Block.Slot = 2
b2.Block.Slot = 3
b2.Block.ParentRoot = r1[:]
r2, err := b2.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.Slot = 4
b3.Block.ParentRoot = r1[:]
r3, err := b3.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b4 := util.NewBeaconBlock()
b4.Block.Slot = 4
b4.Block.Slot = 5
b4.Block.ParentRoot = r2[:]
r4, err := b4.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b5 := util.NewBeaconBlock()
b5.Block.Slot = 5
b5.Block.Slot = 6
b5.Block.ParentRoot = r3[:]
r5, err := b5.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b6 := util.NewBeaconBlock()
b6.Block.Slot = 6
b6.Block.Slot = 7
b6.Block.ParentRoot = r4[:]
r6, err := b6.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b7 := util.NewBeaconBlock()
b7.Block.Slot = 7
b7.Block.Slot = 8
b7.Block.ParentRoot = r6[:]
r7, err := b7.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b8 := util.NewBeaconBlock()
b8.Block.Slot = 8
b8.Block.Slot = 9
b8.Block.ParentRoot = r6[:]
r8, err := b8.Block.HashTreeRoot()
if err != nil {
@@ -466,21 +466,21 @@ func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2 -- B3
func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r1[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -488,7 +488,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r1[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -496,7 +496,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r1[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -504,7 +504,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r1[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -512,7 +512,7 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.Slot = 4
b3.Block.ParentRoot = r24[:]
r3, err := b3.Block.HashTreeRoot()
if err != nil {
@@ -549,21 +549,21 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2
func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 2
b1.Block.ParentRoot = r0[:]
r1, err := b1.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r1[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -571,7 +571,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r1[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -579,7 +579,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r1[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -587,7 +587,7 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r1[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -626,14 +626,14 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
// \- B2
func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) {
b0 := util.NewBeaconBlock()
b0.Block.Slot = 0
b0.Block.Slot = 1
b0.Block.ParentRoot = genesisRoot
r0, err := b0.Block.HashTreeRoot()
if err != nil {
return nil, nil, err
}
b21 := util.NewBeaconBlock()
b21.Block.Slot = 2
b21.Block.Slot = 3
b21.Block.ParentRoot = r0[:]
b21.Block.StateRoot = bytesutil.PadTo([]byte{'A'}, 32)
r21, err := b21.Block.HashTreeRoot()
@@ -641,7 +641,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b22 := util.NewBeaconBlock()
b22.Block.Slot = 2
b22.Block.Slot = 3
b22.Block.ParentRoot = r0[:]
b22.Block.StateRoot = bytesutil.PadTo([]byte{'B'}, 32)
r22, err := b22.Block.HashTreeRoot()
@@ -649,7 +649,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b23 := util.NewBeaconBlock()
b23.Block.Slot = 2
b23.Block.Slot = 3
b23.Block.ParentRoot = r0[:]
b23.Block.StateRoot = bytesutil.PadTo([]byte{'C'}, 32)
r23, err := b23.Block.HashTreeRoot()
@@ -657,7 +657,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte,
return nil, nil, err
}
b24 := util.NewBeaconBlock()
b24.Block.Slot = 2
b24.Block.Slot = 3
b24.Block.ParentRoot = r0[:]
b24.Block.StateRoot = bytesutil.PadTo([]byte{'D'}, 32)
r24, err := b24.Block.HashTreeRoot()
@@ -697,17 +697,17 @@ func TestLoadFinalizedBlocks(t *testing.T) {
gRoot, err := gBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, gBlock)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
roots, _, err := tree1(t, beaconDB, gRoot[:])
require.NoError(t, err)
filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 8)
filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 9)
require.NoError(t, err)
require.Equal(t, 0, len(filteredBlocks))
require.Equal(t, 1, len(filteredBlocks))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: roots[8][:]}))
require.NoError(t, s.beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: roots[8][:]}))
filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 8)
filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 9)
require.NoError(t, err)
require.Equal(t, 10, len(filteredBlocks))
require.Equal(t, 7, len(filteredBlocks))
}

View File

@@ -12,14 +12,12 @@ go_library(
"log.go",
"round_robin.go",
"service.go",
"verification.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/initial-sync",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//async/abool:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -41,7 +39,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/leaky-bucket:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -167,7 +168,7 @@ func (s *Service) processFetchedDataRegSync(
if len(bwb) == 0 {
return
}
bv := newBlobBatchVerifier(s.newBlobVerifier)
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
@@ -326,7 +327,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
}
bv := newBlobBatchVerifier(s.newBlobVerifier)
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(genesis, first, len(bwb))
for _, bb := range bwb {

View File

@@ -340,7 +340,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
if len(sidecars) != len(req) {
continue
}
bv := newBlobBatchVerifier(s.newBlobVerifier)
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
current := s.clock.CurrentSlot()
if err := avs.Persist(current, sidecars...); err != nil {
@@ -362,3 +362,9 @@ func shufflePeers(pids []peer.ID) {
pids[i], pids[j] = pids[j], pids[i]
})
}
func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return ini.NewBlobVerifier(b, reqs)
}
}

View File

@@ -151,14 +151,14 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
if len(sidecars) != len(request) {
return fmt.Errorf("received %d blob sidecars, expected %d for RPC", len(sidecars), len(request))
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.PendingQueueSidecarRequirements)
for _, sidecar := range sidecars {
if err := verify.BlobAlignsWithBlock(sidecar, RoBlock); err != nil {
return err
}
log.WithFields(blobFields(sidecar)).Debug("Received blob sidecar RPC")
}
vscs, err := verification.BlobSidecarSliceNoop(sidecars)
vscs, err := bv.VerifiedROBlobs(ctx, RoBlock, sidecars)
if err != nil {
return err
}

View File

@@ -42,9 +42,10 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
return fmt.Errorf("wrong message type for goodbye, got %T, wanted *uint64", msg)
}
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
log.WithError(err).Warn("Goodbye message from rate-limited peer.")
} else {
s.rateLimiter.add(stream, 1)
}
s.rateLimiter.add(stream, 1)
log := log.WithField("Reason", goodbyeMessage(*m))
log.WithField("peer", stream.Conn().RemotePeer()).Trace("Peer has sent a goodbye message")
s.cfg.p2p.Peers().SetNextValidTime(stream.Conn().RemotePeer(), goodByeBackoff(*m))

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"batch.go",
"blob.go",
"cache.go",
"error.go",
@@ -45,6 +46,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"batch_test.go",
"blob_test.go",
"cache_test.go",
"initializer_test.go",
@@ -69,5 +71,6 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
],
)

View File

@@ -1,12 +1,10 @@
package initialsync
package verification
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
)
@@ -20,21 +18,17 @@ var (
ErrBatchBlockRootMismatch = errors.New("Sidecar block header root does not match signed block")
)
func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return ini.NewBlobVerifier(b, reqs)
}
}
func newBlobBatchVerifier(newVerifier verification.NewBlobVerifier) *BlobBatchVerifier {
// NewBlobBatchVerifier initializes a blob batch verifier. It requires the caller to correctly specify
// verification Requirements and to also pass in a NewBlobVerifier, which is a callback function that
// returns a new BlobVerifier for handling a single blob in the batch.
func NewBlobBatchVerifier(newVerifier NewBlobVerifier, reqs []Requirement) *BlobBatchVerifier {
return &BlobBatchVerifier{
verifyKzg: kzg.Verify,
newVerifier: newVerifier,
reqs: reqs,
}
}
type kzgVerifier func(b ...blocks.ROBlob) error
// BlobBatchVerifier solves problems that come from verifying batches of blobs from RPC.
// First: we only update forkchoice after the entire batch has completed, so the n+1 elements in the batch
// won't be in forkchoice yet.
@@ -42,18 +36,17 @@ type kzgVerifier func(b ...blocks.ROBlob) error
// method to BlobVerifier to verify the kzg commitments of all blob sidecars for a block together, then using the cached
// result of the batch verification when verifying the individual blobs.
type BlobBatchVerifier struct {
verifyKzg kzgVerifier
newVerifier verification.NewBlobVerifier
verifyKzg roblobCommitmentVerifier
newVerifier NewBlobVerifier
reqs []Requirement
}
var _ das.BlobBatchVerifier = &BlobBatchVerifier{}
// VerifiedROBlobs satisfies the das.BlobBatchVerifier interface, used by das.AvailabilityStore.
func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
if len(scs) == 0 {
return nil, nil
}
// We assume the proposer was validated wrt the block in batch block processing before performing the DA check.
// We assume the proposer is validated wrt the block in batch block processing before performing the DA check.
// So at this stage we just need to make sure the value being signed and signature bytes match the block.
for i := range scs {
if blk.Signature() != bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature) {
@@ -71,7 +64,7 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
}
vs := make([]blocks.VerifiedROBlob, len(scs))
for i := range scs {
vb, err := batch.verifyOneBlob(ctx, scs[i])
vb, err := batch.verifyOneBlob(scs[i])
if err != nil {
return nil, err
}
@@ -80,13 +73,13 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
return vs, nil
}
func (batch *BlobBatchVerifier) verifyOneBlob(ctx context.Context, sc blocks.ROBlob) (blocks.VerifiedROBlob, error) {
func (batch *BlobBatchVerifier) verifyOneBlob(sc blocks.ROBlob) (blocks.VerifiedROBlob, error) {
vb := blocks.VerifiedROBlob{}
bv := batch.newVerifier(sc, verification.InitsyncSidecarRequirements)
bv := batch.newVerifier(sc, batch.reqs)
// We can satisfy the following 2 requirements immediately because VerifiedROBlobs always verifies commitments
// and block signature for all blobs in the batch before calling verifyOneBlob.
bv.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
bv.SatisfyRequirement(verification.RequireValidProposerSignature)
bv.SatisfyRequirement(RequireSidecarKzgProofVerified)
bv.SatisfyRequirement(RequireValidProposerSignature)
if err := bv.BlobIndexInBounds(); err != nil {
return vb, err
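
The exported constructor lets each call site pair the batch verifier with its own requirement list. A hypothetical helper consolidating the call-site pattern repeated in the hunks above (the helper name is illustrative; the Service fields and package calls are the ones used there):

// persistBatch mirrors the wiring in processFetchedDataRegSync and
// fetchOriginBlobs: build a batch verifier for the chosen requirements,
// then hand it to the availability store.
func persistBatch(s *Service, current primitives.Slot, sidecars []blocks.ROBlob) error {
	bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
	avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
	return avs.Persist(current, sidecars...)
}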

View File

@@ -0,0 +1,189 @@
package verification
import (
"context"
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/stretchr/testify/require"
)
func TestBatchVerifier(t *testing.T) {
ctx := context.Background()
mockCV := func(err error) roblobCommitmentVerifier {
return func(...blocks.ROBlob) error {
return err
}
}
var invCmtErr = errors.New("mock invalid commitment")
type vbcbt func() (blocks.VerifiedROBlob, error)
vbcb := func(bl blocks.ROBlob, err error) vbcbt {
return func() (blocks.VerifiedROBlob, error) {
return blocks.VerifiedROBlob{ROBlob: bl}, err
}
}
cases := []struct {
name string
nv func() NewBlobVerifier
cv roblobCommitmentVerifier
bandb func(t *testing.T, n int) (blocks.ROBlock, []blocks.ROBlob)
err error
nblobs int
reqs []Requirement
}{
{
name: "no blobs",
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
}
},
nblobs: 0,
},
{
name: "happy path",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nblobs: 3,
},
{
name: "partial batch",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
// Add extra blobs to the block that we won't return
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb+3)
return blk, blbs[0:3]
},
nblobs: 3,
},
{
name: "invalid commitment",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
cv: mockCV(invCmtErr),
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
err: invCmtErr,
nblobs: 1,
},
{
name: "signature mismatch",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
blbs[0].SignedBlockHeader.Signature = []byte("wrong")
return blk, blbs
},
err: ErrBatchSignatureMismatch,
nblobs: 2,
},
{
name: "root mismatch",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
wr, err := blocks.NewROBlobWithRoot(blbs[0].BlobSidecar, bytesutil.ToBytes32([]byte("wrong")))
require.NoError(t, err)
blbs[0] = wr
return blk, blbs
},
err: ErrBatchBlockRootMismatch,
nblobs: 1,
},
{
name: "idx oob",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{
ErrBlobIndexInBounds: ErrBlobIndexInvalid,
cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nblobs: 1,
err: ErrBlobIndexInvalid,
},
{
name: "inclusion proof invalid",
nv: func() NewBlobVerifier {
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
return &MockBlobVerifier{
ErrSidecarInclusionProven: ErrSidecarInclusionProofInvalid,
cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
t.Fatal("Batch verifier should stop before this point")
return blocks.VerifiedROBlob{}, nil
}}
}
},
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
},
nblobs: 1,
err: ErrSidecarInclusionProofInvalid,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
blk, blbs := c.bandb(t, c.nblobs)
reqs := c.reqs
if reqs == nil {
reqs = InitsyncSidecarRequirements
}
bbv := NewBlobBatchVerifier(c.nv(), reqs)
if c.cv == nil {
bbv.verifyKzg = mockCV(nil)
} else {
bbv.verifyKzg = c.cv
}
vb, err := bbv.VerifiedROBlobs(ctx, blk, blbs)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.nblobs, len(vb))
})
}
}

View File

@@ -70,6 +70,9 @@ var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).exc
// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
// PendingQueueSidecarRequirements is the same as InitsyncSidecarRequirements, used by the pending blocks queue.
var PendingQueueSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
var (
ErrBlobInvalid = errors.New("blob failed verification")
// ErrBlobIndexInvalid means RequireBlobIndexInBounds failed.
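
Each requirement list is derived from another via requirementList(...).excluding(), so PendingQueueSidecarRequirements is a copy of InitsyncSidecarRequirements. The helper's implementation is not shown in this diff; the sketch below is a plausible stand-in for how such a derivation works:

package main

import "fmt"

type Requirement int

type requirementList []Requirement

// excluding returns a copy of the list without the given requirements.
// This is a guess at the unexported helper's behavior based on usage above.
func (rl requirementList) excluding(drop ...Requirement) []Requirement {
	skip := make(map[Requirement]struct{}, len(drop))
	for _, d := range drop {
		skip[d] = struct{}{}
	}
	out := make([]Requirement, 0, len(rl))
	for _, r := range rl {
		if _, ok := skip[r]; !ok {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	base := []Requirement{1, 2, 3}
	// With no arguments, excluding() is effectively a defensive copy, which is
	// how PendingQueueSidecarRequirements is derived here.
	pending := requirementList(base).excluding()
	fmt.Println(pending) // [1 2 3]
}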

View File

@@ -39,7 +39,7 @@ func TestResultList(t *testing.T) {
func TestExportedBlobSanityCheck(t *testing.T) {
// make sure all requirement lists contain the bare minimum checks
sanity := []Requirement{RequireValidProposerSignature, RequireSidecarKzgProofVerified, RequireBlobIndexInBounds, RequireSidecarInclusionProven}
reqs := [][]Requirement{GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements}
reqs := [][]Requirement{GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements, PendingQueueSidecarRequirements}
for i := range reqs {
r := reqs[i]
reqMap := make(map[Requirement]struct{})

View File

@@ -29,6 +29,7 @@ var (
Name: "local-block-value-boost",
Usage: "A percentage boost for local block construction as a Uint64. This is used to prioritize local block construction over relay/builder block construction" +
"Boost is an additional percentage to multiple local block value. Use builder block if: builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)",
Value: 10,
}
// ExecutionEngineEndpoint provides an HTTP access endpoint to connect to an execution client on the execution layer
ExecutionEngineEndpoint = &cli.StringFlag{
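
Worked numbers for the comparison in the flag's usage text: with the new default boost of 10, a builder bid must exceed the local block value by more than 10% to be chosen. A small sketch of that check:

package main

import "fmt"

// useBuilder applies the comparison from the flag's usage text:
// builder_bid_value * 100 > local_block_value * (boost + 100)
func useBuilder(builderBidValue, localBlockValue, boost uint64) bool {
	return builderBidValue*100 > localBlockValue*(boost+100)
}

func main() {
	// With boost = 10, a builder bid of 110 merely ties a local value of 100
	// and is rejected; 111 wins.
	fmt.Println(useBuilder(110, 100, 10)) // false
	fmt.Println(useBuilder(111, 100, 10)) // true
}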

View File

@@ -16,8 +16,8 @@ func (a *data) PrevRandao() []byte {
return a.prevRandao
}
// Timestamps returns the timestamp of the payload attribute.
func (a *data) Timestamps() uint64 {
// Timestamp returns the timestamp of the payload attribute.
func (a *data) Timestamp() uint64 {
return a.timeStamp
}
@@ -100,7 +100,7 @@ func (a *data) IsEmpty() bool {
if len(a.PrevRandao()) != 0 {
return false
}
if a.Timestamps() != 0 {
if a.Timestamp() != 0 {
return false
}
if len(a.SuggestedFeeRecipient()) != 0 {

View File

@@ -44,7 +44,7 @@ func TestPayloadAttributeGetters(t *testing.T) {
r := uint64(123)
a, err := New(&enginev1.PayloadAttributes{Timestamp: r})
require.NoError(t, err)
require.Equal(t, r, a.Timestamps())
require.Equal(t, r, a.Timestamp())
},
},
{

View File

@@ -7,7 +7,7 @@ import (
type Attributer interface {
Version() int
PrevRandao() []byte
Timestamps() uint64
Timestamp() uint64
SuggestedFeeRecipient() []byte
Withdrawals() ([]*enginev1.Withdrawal, error)
PbV1() (*enginev1.PayloadAttributes, error)
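
A rename like Timestamps → Timestamp only stays consistent if every implementer and call site is updated together, which is what the surrounding hunks do. A compile-time assertion of the following form (a common Go idiom; whether this package already declares one is not shown here) turns a missed implementer into a build error:

	var _ Attributer = (*data)(nil) // fails to compile if *data stops satisfying Attributer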

View File

@@ -13,6 +13,7 @@ go_library(
"//cache/lru:go_default_library",
"//config/params:go_default_library",
"//crypto/rand:go_default_library",
"//io/file:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

View File

@@ -6,9 +6,11 @@ import (
"io"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/sirupsen/logrus"
)
@@ -20,6 +22,9 @@ func addLogWriter(w io.Writer) {
// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
func ConfigurePersistentLogging(logFileName string) error {
logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
return err
}
f, err := os.OpenFile(logFileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, params.BeaconIoConfig().ReadWritePermissions) // #nosec G304
if err != nil {
return err
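
A minimal usage sketch of the changed function, assuming it runs once at startup and that the package lives at io/logs (both assumptions; the log path is illustrative):

	package main

	import (
		"log"

		prysmlogs "github.com/prysmaticlabs/prysm/v5/io/logs"
	)

	func main() {
		// With this change, the parent directory is created (via
		// file.MkdirAll) before the file is opened, instead of the
		// open failing on a missing directory.
		if err := prysmlogs.ConfigurePersistentLogging("data/logs/beacon.log"); err != nil {
			log.Fatalf("could not configure persistent logging: %v", err)
		}
	}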

View File

@@ -1,6 +1,8 @@
package logs
import (
"fmt"
"os"
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -24,3 +26,38 @@ func TestMaskCredentialsLogging(t *testing.T) {
require.Equal(t, MaskCredentialsLogging(test.url), test.maskedUrl)
}
}
func TestConfigurePersistentLogging(t *testing.T) {
testParentDir := t.TempDir()
// 1. Test creation of file in an existing parent directory
logFileName := "test.log"
existingDirectory := "test-1-existing-testing-dir"
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName))
require.NoError(t, err)
// 2. Test creation of file along with parent directory
nonExistingDirectory := "test-2-non-existing-testing-dir"
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName))
require.NoError(t, err)
// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
existingDirectory = "test-3-existing-testing-dir"
nonExistingSubDirectory := "test-3-non-existing-sub-dir"
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0700)
require.NoError(t, err)
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName))
require.NoError(t, err)
// 4. Test creating the log file in a parent directory that lacks 0700 permissions
existingDirectory = "test-4-existing-testing-dir"
err = os.Mkdir(fmt.Sprintf("%s/%s", testParentDir, existingDirectory), 0750)
require.NoError(t, err)
}

View File

@@ -34,19 +34,19 @@ def cc_autoconf_toolchains_impl(repository_ctx):
else:
repository_ctx.file("BUILD", "# C++ toolchain autoconfiguration was disabled by BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN env variable.")
def cc_autoconf_impl(repository_ctx, overriden_tools = dict()):
def cc_autoconf_impl(repository_ctx, overridden_tools = dict()):
"""Generate BUILD file with 'cc_toolchain' targets for the local host C++ toolchain.
Args:
repository_ctx: repository context
overriden_tools: dict of tool paths to use instead of autoconfigured tools
overridden_tools: dict of tool paths to use instead of autoconfigured tools
"""
cpu_value = get_cpu_value(repository_ctx)
if cpu_value.startswith("darwin"):
print("Configuring local C++ toolchain for Darwin. This is non-hermetic and builds may " +
"not be reproducible. Consider building on linux for a hermetic build.")
configure_unix_toolchain(repository_ctx, cpu_value, overriden_tools)
configure_unix_toolchain(repository_ctx, cpu_value, overridden_tools)
else:
paths = resolve_labels(repository_ctx, [
"@bazel_tools//tools/cpp:BUILD.empty.tpl",

View File

@@ -1,4 +1,4 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_image_index", "oci_push")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_image_index", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//tools:multi_arch.bzl", "multi_arch")
@@ -74,3 +74,9 @@ def prysm_image_upload(
repository = repository,
tags = tags,
)
oci_tarball(
name = "oci_image_tarball",
image = ":oci_image",
repo_tags = [repository + ":latest"],
)
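
Assuming standard rules_oci behavior for oci_tarball targets, the new target gives two ways to get the image locally: bazel run on oci_image_tarball should pipe the tarball straight into docker load, and bazel build produces a tar that can be fed to docker load --input; either way the image comes up tagged <repository>:latest per repo_tags above.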

View File

@@ -66,6 +66,7 @@ go_library(
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
@@ -129,7 +130,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/validator-mock:go_default_library",
"//time/slots:go_default_library",
"//validator/client/beacon-api/mock:go_default_library",
"//validator/client/beacon-api/test-helpers:go_default_library",

View File

@@ -8,11 +8,14 @@ import (
"net/url"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"golang.org/x/sync/errgroup"
)
type dutiesProvider interface {
@@ -31,37 +34,42 @@ type committeeIndexSlotPair struct {
slot primitives.Slot
}
type validatorForDuty struct {
pubkey []byte
index primitives.ValidatorIndex
status ethpb.ValidatorStatus
}
func (c beaconApiValidatorClient) getDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
all, err := c.multipleValidatorStatus(ctx, &ethpb.MultipleValidatorStatusRequest{PublicKeys: in.PublicKeys})
vals, err := c.getValidatorsForDuties(ctx, in.PublicKeys)
if err != nil {
return nil, errors.Wrap(err, "failed to get validator status")
}
known := &ethpb.MultipleValidatorStatusResponse{
PublicKeys: make([][]byte, 0, len(all.PublicKeys)),
Statuses: make([]*ethpb.ValidatorStatusResponse, 0, len(all.Statuses)),
Indices: make([]primitives.ValidatorIndex, 0, len(all.Indices)),
}
for i, status := range all.Statuses {
if status.Status != ethpb.ValidatorStatus_UNKNOWN_STATUS {
known.PublicKeys = append(known.PublicKeys, all.PublicKeys[i])
known.Statuses = append(known.Statuses, all.Statuses[i])
known.Indices = append(known.Indices, all.Indices[i])
}
return nil, errors.Wrap(err, "failed to get validators for duties")
}
// Sync committees are an Altair feature
fetchSyncDuties := in.Epoch >= params.BeaconConfig().AltairForkEpoch
currentEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch, known, fetchSyncDuties)
if err != nil {
return nil, errors.Wrapf(err, "failed to get duties for current epoch `%d`", in.Epoch)
}
errCh := make(chan error, 1)
nextEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch+1, known, fetchSyncDuties)
var currentEpochDuties []*ethpb.DutiesResponse_Duty
go func() {
var err error // goroutine-local err; avoids racing with the err assigned for next-epoch duties below
currentEpochDuties, err = c.getDutiesForEpoch(ctx, in.Epoch, vals, fetchSyncDuties)
if err != nil {
errCh <- errors.Wrapf(err, "failed to get duties for current epoch `%d`", in.Epoch)
return
}
errCh <- nil
}()
nextEpochDuties, err := c.getDutiesForEpoch(ctx, in.Epoch+1, vals, fetchSyncDuties)
if err != nil {
return nil, errors.Wrapf(err, "failed to get duties for next epoch `%d`", in.Epoch+1)
}
if err = <-errCh; err != nil {
return nil, err
}
return &ethpb.DutiesResponse{
CurrentEpochDuties: currentEpochDuties,
NextEpochDuties: nextEpochDuties,
@@ -71,25 +79,94 @@ func (c beaconApiValidatorClient) getDuties(ctx context.Context, in *ethpb.Dutie
func (c beaconApiValidatorClient) getDutiesForEpoch(
ctx context.Context,
epoch primitives.Epoch,
multipleValidatorStatus *ethpb.MultipleValidatorStatusResponse,
vals []validatorForDuty,
fetchSyncDuties bool,
) ([]*ethpb.DutiesResponse_Duty, error) {
attesterDuties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, multipleValidatorStatus.Indices)
if err != nil {
return nil, errors.Wrapf(err, "failed to get attester duties for epoch `%d`", epoch)
indices := make([]primitives.ValidatorIndex, len(vals))
for i, v := range vals {
indices[i] = v.index
}
var syncDuties []*structs.SyncCommitteeDuty
if fetchSyncDuties {
if syncDuties, err = c.dutiesProvider.GetSyncDuties(ctx, epoch, multipleValidatorStatus.Indices); err != nil {
return nil, errors.Wrapf(err, "failed to get sync duties for epoch `%d`", epoch)
// Below variables MUST NOT be used in the main function before wg.Wait().
// This is because they are populated in goroutines and wg.Wait()
// will return only once all goroutines finish their execution.
// Mapping from a validator index to its attesting committee's index and slot
attesterDutiesMapping := make(map[primitives.ValidatorIndex]committeeIndexSlotPair)
// Set containing all validator indices that are part of a sync committee for this epoch
syncDutiesMapping := make(map[primitives.ValidatorIndex]bool)
// Mapping from a validator index to its proposal slot
proposerDutySlots := make(map[primitives.ValidatorIndex][]primitives.Slot)
// Mapping from the {committeeIndex, slot} to each of the committee's validator indices
committeeMapping := make(map[committeeIndexSlotPair][]primitives.ValidatorIndex)
var wg errgroup.Group
wg.Go(func() error {
attesterDuties, err := c.dutiesProvider.GetAttesterDuties(ctx, epoch, indices)
if err != nil {
return errors.Wrapf(err, "failed to get attester duties for epoch `%d`", epoch)
}
for _, attesterDuty := range attesterDuties {
validatorIndex, err := strconv.ParseUint(attesterDuty.ValidatorIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse attester validator index `%s`", attesterDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(attesterDuty.Slot, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse attester slot `%s`", attesterDuty.Slot)
}
committeeIndex, err := strconv.ParseUint(attesterDuty.CommitteeIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse attester committee index `%s`", attesterDuty.CommitteeIndex)
}
attesterDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = committeeIndexSlotPair{
slot: primitives.Slot(slot),
committeeIndex: primitives.CommitteeIndex(committeeIndex),
}
}
return nil
})
if fetchSyncDuties {
wg.Go(func() error {
syncDuties, err := c.dutiesProvider.GetSyncDuties(ctx, epoch, indices)
if err != nil {
return errors.Wrapf(err, "failed to get sync duties for epoch `%d`", epoch)
}
for _, syncDuty := range syncDuties {
validatorIndex, err := strconv.ParseUint(syncDuty.ValidatorIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse sync validator index `%s`", syncDuty.ValidatorIndex)
}
syncDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = true
}
return nil
})
}
var proposerDuties []*structs.ProposerDuty
if proposerDuties, err = c.dutiesProvider.GetProposerDuties(ctx, epoch); err != nil {
return nil, errors.Wrapf(err, "failed to get proposer duties for epoch `%d`", epoch)
}
wg.Go(func() error {
proposerDuties, err := c.dutiesProvider.GetProposerDuties(ctx, epoch)
if err != nil {
return errors.Wrapf(err, "failed to get proposer duties for epoch `%d`", epoch)
}
for _, proposerDuty := range proposerDuties {
validatorIndex, err := strconv.ParseUint(proposerDuty.ValidatorIndex, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse proposer validator index `%s`", proposerDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(proposerDuty.Slot, 10, 64)
if err != nil {
return errors.Wrapf(err, "failed to parse proposer slot `%s`", proposerDuty.Slot)
}
proposerDutySlots[primitives.ValidatorIndex(validatorIndex)] =
append(proposerDutySlots[primitives.ValidatorIndex(validatorIndex)], primitives.Slot(slot))
}
return nil
})
committees, err := c.dutiesProvider.GetCommittees(ctx, epoch)
if err != nil {
@@ -104,70 +181,15 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
slotCommittees[c.Slot] = n + 1
}
// Mapping from a validator index to its attesting committee's index and slot
attesterDutiesMapping := make(map[primitives.ValidatorIndex]committeeIndexSlotPair)
for _, attesterDuty := range attesterDuties {
validatorIndex, err := strconv.ParseUint(attesterDuty.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse attester validator index `%s`", attesterDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(attesterDuty.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse attester slot `%s`", attesterDuty.Slot)
}
committeeIndex, err := strconv.ParseUint(attesterDuty.CommitteeIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse attester committee index `%s`", attesterDuty.CommitteeIndex)
}
attesterDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = committeeIndexSlotPair{
slot: primitives.Slot(slot),
committeeIndex: primitives.CommitteeIndex(committeeIndex),
}
}
// Mapping from a validator index to its proposal slot
proposerDutySlots := make(map[primitives.ValidatorIndex][]primitives.Slot)
for _, proposerDuty := range proposerDuties {
validatorIndex, err := strconv.ParseUint(proposerDuty.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse proposer validator index `%s`", proposerDuty.ValidatorIndex)
}
slot, err := strconv.ParseUint(proposerDuty.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse proposer slot `%s`", proposerDuty.Slot)
}
proposerDutySlots[primitives.ValidatorIndex(validatorIndex)] = append(proposerDutySlots[primitives.ValidatorIndex(validatorIndex)], primitives.Slot(slot))
}
// Set containing all validator indices that are part of a sync committee for this epoch
syncDutiesMapping := make(map[primitives.ValidatorIndex]bool)
for _, syncDuty := range syncDuties {
validatorIndex, err := strconv.ParseUint(syncDuty.ValidatorIndex, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse sync validator index `%s`", syncDuty.ValidatorIndex)
}
syncDutiesMapping[primitives.ValidatorIndex(validatorIndex)] = true
}
// Mapping from the {committeeIndex, slot} to each of the committee's validator indices
committeeMapping := make(map[committeeIndexSlotPair][]primitives.ValidatorIndex)
for _, committee := range committees {
committeeIndex, err := strconv.ParseUint(committee.Index, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse committee index `%s`", committee.Index)
}
slot, err := strconv.ParseUint(committee.Slot, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse slot `%s`", committee.Slot)
}
validatorIndices := make([]primitives.ValidatorIndex, len(committee.Validators))
for index, validatorIndexString := range committee.Validators {
validatorIndex, err := strconv.ParseUint(validatorIndexString, 10, 64)
@@ -176,7 +198,6 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
}
validatorIndices[index] = primitives.ValidatorIndex(validatorIndex)
}
key := committeeIndexSlotPair{
committeeIndex: primitives.CommitteeIndex(committeeIndex),
slot: primitives.Slot(slot),
@@ -184,16 +205,19 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
committeeMapping[key] = validatorIndices
}
duties := make([]*ethpb.DutiesResponse_Duty, len(multipleValidatorStatus.Statuses))
for index, validatorStatus := range multipleValidatorStatus.Statuses {
validatorIndex := multipleValidatorStatus.Indices[index]
pubkey := multipleValidatorStatus.PublicKeys[index]
if err = wg.Wait(); err != nil {
return nil, err
}
var attesterSlot primitives.Slot
var committeeIndex primitives.CommitteeIndex
var committeeValidatorIndices []primitives.ValidatorIndex
duties := make([]*ethpb.DutiesResponse_Duty, len(vals))
for i, v := range vals {
var (
attesterSlot primitives.Slot
committeeIndex primitives.CommitteeIndex
committeeValidatorIndices []primitives.ValidatorIndex
)
if committeeMappingKey, ok := attesterDutiesMapping[validatorIndex]; ok {
if committeeMappingKey, ok := attesterDutiesMapping[v.index]; ok {
committeeIndex = committeeMappingKey.committeeIndex
attesterSlot = committeeMappingKey.slot
@@ -202,15 +226,15 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
}
}
duties[index] = &ethpb.DutiesResponse_Duty{
duties[i] = &ethpb.DutiesResponse_Duty{
Committee: committeeValidatorIndices,
CommitteeIndex: committeeIndex,
AttesterSlot: attesterSlot,
ProposerSlots: proposerDutySlots[validatorIndex],
PublicKey: pubkey,
Status: validatorStatus.Status,
ValidatorIndex: validatorIndex,
IsSyncCommittee: syncDutiesMapping[validatorIndex],
ProposerSlots: proposerDutySlots[v.index],
PublicKey: v.pubkey,
Status: v.status,
ValidatorIndex: v.index,
IsSyncCommittee: syncDutiesMapping[v.index],
CommitteesAtSlot: slotCommittees[strconv.FormatUint(uint64(attesterSlot), 10)],
}
}
@@ -218,6 +242,51 @@ func (c beaconApiValidatorClient) getDutiesForEpoch(
return duties, nil
}
func (c *beaconApiValidatorClient) getValidatorsForDuties(ctx context.Context, pubkeys [][]byte) ([]validatorForDuty, error) {
vals := make([]validatorForDuty, 0, len(pubkeys))
stringPubkeysToPubkeys := make(map[string][]byte, len(pubkeys))
stringPubkeys := make([]string, len(pubkeys))
for i, pk := range pubkeys {
stringPk := hexutil.Encode(pk)
stringPubkeysToPubkeys[stringPk] = pk
stringPubkeys[i] = stringPk
}
statusesWithDuties := []string{validator.ActiveOngoing.String(), validator.ActiveExiting.String()}
stateValidatorsResponse, err := c.stateValidatorsProvider.GetStateValidators(ctx, stringPubkeys, nil, statusesWithDuties)
if err != nil {
return nil, errors.Wrap(err, "failed to get state validators")
}
for _, validatorContainer := range stateValidatorsResponse.Data {
val := validatorForDuty{}
validatorIndex, err := strconv.ParseUint(validatorContainer.Index, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse validator index %s", validatorContainer.Index)
}
val.index = primitives.ValidatorIndex(validatorIndex)
stringPubkey := validatorContainer.Validator.Pubkey
pubkey, ok := stringPubkeysToPubkeys[stringPubkey]
if !ok {
return nil, errors.Wrapf(err, "returned public key %s not requested", stringPubkey)
}
val.pubkey = pubkey
status, ok := beaconAPITogRPCValidatorStatus[validatorContainer.Status]
if !ok {
return nil, errors.New("invalid validator status " + validatorContainer.Status)
}
val.status = status
vals = append(vals, val)
}
return vals, nil
}
// GetCommittees retrieves the committees for the given epoch
func (c beaconApiDutiesProvider) GetCommittees(ctx context.Context, epoch primitives.Epoch) ([]*structs.Committee, error) {
committeeParams := url.Values{}
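
The refactor replaces sequential duty fetches with an errgroup fan-out: each duty type is fetched in its own goroutine, each goroutine writes only to its own map, and the maps are not read until wg.Wait() returns, as the MUST NOT comment above spells out. A minimal self-contained sketch of that pattern (assuming golang.org/x/sync/errgroup, consistent with the BUILD change above):

	package main

	import (
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	func main() {
		var wg errgroup.Group

		// One writer goroutine per map, so no mutex is needed.
		attesterSlots := make(map[int]int)
		proposerSlots := make(map[int]int)

		wg.Go(func() error {
			attesterSlots[1] = 9 // e.g. validator 1 attests in slot 9
			return nil
		})
		wg.Go(func() error {
			proposerSlots[2] = 11 // e.g. validator 2 proposes in slot 11
			return nil
		})

		// Reading the maps is only safe after Wait, which also returns
		// the first non-nil error from any goroutine.
		if err := wg.Wait(); err != nil {
			fmt.Println("duty fetch failed:", err)
			return
		}
		fmt.Println(attesterSlots[1], proposerSlots[2])
	}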

View File

@@ -9,11 +9,8 @@ import (
"strconv"
"testing"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
validatormock "github.com/prysmaticlabs/prysm/v5/testing/validator-mock"
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -541,7 +538,6 @@ func TestGetDutiesForEpoch_Error(t *testing.T) {
{
name: "get proposer duties failed",
expectedError: "failed to get proposer duties for epoch `1`: foo error",
fetchAttesterDutiesError: nil,
fetchProposerDutiesError: errors.New("foo error"),
},
{
@@ -720,28 +716,20 @@ func TestGetDutiesForEpoch_Error(t *testing.T) {
testCase.fetchCommitteesError,
).AnyTimes()
vals := make([]validatorForDuty, len(pubkeys))
for i := 0; i < len(pubkeys); i++ {
vals[i] = validatorForDuty{
pubkey: pubkeys[i],
index: validatorIndices[i],
status: ethpb.ValidatorStatus_ACTIVE,
}
}
validatorClient := &beaconApiValidatorClient{dutiesProvider: dutiesProvider}
_, err := validatorClient.getDutiesForEpoch(
ctx,
epoch,
&ethpb.MultipleValidatorStatusResponse{
PublicKeys: pubkeys,
Indices: validatorIndices,
Statuses: []*ethpb.ValidatorStatusResponse{
{Status: ethpb.ValidatorStatus_UNKNOWN_STATUS},
{Status: ethpb.ValidatorStatus_DEPOSITED},
{Status: ethpb.ValidatorStatus_PENDING},
{Status: ethpb.ValidatorStatus_ACTIVE},
{Status: ethpb.ValidatorStatus_EXITING},
{Status: ethpb.ValidatorStatus_SLASHING},
{Status: ethpb.ValidatorStatus_EXITED},
{Status: ethpb.ValidatorStatus_INVALID},
{Status: ethpb.ValidatorStatus_PARTIALLY_DEPOSITED},
{Status: ethpb.ValidatorStatus_UNKNOWN_STATUS},
{Status: ethpb.ValidatorStatus_DEPOSITED},
{Status: ethpb.ValidatorStatus_PENDING},
},
},
vals,
true,
)
assert.ErrorContains(t, testCase.expectedError, err)
@@ -773,40 +761,6 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
committeeSlots := []primitives.Slot{28, 29, 30}
proposerSlots := []primitives.Slot{31, 32, 33, 34, 35, 36, 37, 38}
statuses := []ethpb.ValidatorStatus{
ethpb.ValidatorStatus_UNKNOWN_STATUS,
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
ethpb.ValidatorStatus_ACTIVE,
ethpb.ValidatorStatus_EXITING,
ethpb.ValidatorStatus_SLASHING,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_INVALID,
ethpb.ValidatorStatus_PARTIALLY_DEPOSITED,
ethpb.ValidatorStatus_UNKNOWN_STATUS,
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
}
multipleValidatorStatus := &ethpb.MultipleValidatorStatusResponse{
PublicKeys: pubkeys,
Indices: validatorIndices,
Statuses: []*ethpb.ValidatorStatusResponse{
{Status: statuses[0]},
{Status: statuses[1]},
{Status: statuses[2]},
{Status: statuses[3]},
{Status: statuses[4]},
{Status: statuses[5]},
{Status: statuses[6]},
{Status: statuses[7]},
{Status: statuses[8]},
{Status: statuses[9]},
{Status: statuses[10]},
{Status: statuses[11]},
},
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -824,7 +778,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidAttesterDuties(pubkeys, validatorIndices, committeeIndices, committeeSlots),
nil,
@@ -842,7 +796,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetSyncDuties(
ctx,
epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidSyncDuties(pubkeys, validatorIndices),
nil,
@@ -883,7 +837,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[0],
Status: statuses[0],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[0],
CommitteesAtSlot: 1,
},
@@ -895,7 +849,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[0],
AttesterSlot: committeeSlots[0],
PublicKey: pubkeys[1],
Status: statuses[1],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[1],
CommitteesAtSlot: 1,
},
@@ -907,7 +861,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[2],
Status: statuses[2],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[2],
CommitteesAtSlot: 1,
},
@@ -919,7 +873,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[1],
AttesterSlot: committeeSlots[1],
PublicKey: pubkeys[3],
Status: statuses[3],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[3],
CommitteesAtSlot: 1,
},
@@ -931,7 +885,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[4],
Status: statuses[4],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[4],
ProposerSlots: expectedProposerSlots1,
CommitteesAtSlot: 1,
@@ -944,7 +898,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
CommitteeIndex: committeeIndices[2],
AttesterSlot: committeeSlots[2],
PublicKey: pubkeys[5],
Status: statuses[5],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[5],
ProposerSlots: expectedProposerSlots2,
IsSyncCommittee: testCase.fetchSyncDuties,
@@ -952,47 +906,55 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) {
},
{
PublicKey: pubkeys[6],
Status: statuses[6],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[6],
ProposerSlots: expectedProposerSlots3,
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[7],
Status: statuses[7],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[7],
ProposerSlots: expectedProposerSlots4,
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[8],
Status: statuses[8],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[8],
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[9],
Status: statuses[9],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[9],
IsSyncCommittee: testCase.fetchSyncDuties,
},
{
PublicKey: pubkeys[10],
Status: statuses[10],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[10],
},
{
PublicKey: pubkeys[11],
Status: statuses[11],
Status: ethpb.ValidatorStatus_ACTIVE,
ValidatorIndex: validatorIndices[11],
},
}
validatorClient := &beaconApiValidatorClient{dutiesProvider: dutiesProvider}
vals := make([]validatorForDuty, len(pubkeys))
for i := 0; i < len(pubkeys); i++ {
vals[i] = validatorForDuty{
pubkey: pubkeys[i],
index: validatorIndices[i],
status: ethpb.ValidatorStatus_ACTIVE,
}
}
duties, err := validatorClient.getDutiesForEpoch(
ctx,
epoch,
multipleValidatorStatus,
vals,
testCase.fetchSyncDuties,
)
require.NoError(t, err)
@@ -1018,41 +980,24 @@ func TestGetDuties_Valid(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
statuses := []ethpb.ValidatorStatus{
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
ethpb.ValidatorStatus_ACTIVE,
ethpb.ValidatorStatus_EXITING,
ethpb.ValidatorStatus_SLASHING,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_EXITED,
ethpb.ValidatorStatus_DEPOSITED,
ethpb.ValidatorStatus_PENDING,
ethpb.ValidatorStatus_ACTIVE,
}
pubkeys := make([][]byte, len(statuses))
validatorIndices := make([]primitives.ValidatorIndex, len(statuses))
for i := range statuses {
valCount := 12
pubkeys := make([][]byte, valCount)
validatorIndices := make([]primitives.ValidatorIndex, valCount)
vals := make([]validatorForDuty, valCount)
for i := 0; i < valCount; i++ {
pubkeys[i] = []byte(strconv.Itoa(i))
validatorIndices[i] = primitives.ValidatorIndex(i)
vals[i] = validatorForDuty{
pubkey: pubkeys[i],
index: validatorIndices[i],
status: ethpb.ValidatorStatus_ACTIVE,
}
}
committeeIndices := []primitives.CommitteeIndex{25, 26, 27}
committeeSlots := []primitives.Slot{28, 29, 30}
proposerSlots := []primitives.Slot{31, 32, 33, 34, 35, 36, 37, 38}
statusResps := make([]*ethpb.ValidatorStatusResponse, len(statuses))
for i, s := range statuses {
statusResps[i] = &ethpb.ValidatorStatusResponse{Status: s}
}
multipleValidatorStatus := &ethpb.MultipleValidatorStatusResponse{
PublicKeys: pubkeys,
Indices: validatorIndices,
Statuses: statusResps,
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -1070,7 +1015,7 @@ func TestGetDuties_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
testCase.epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidAttesterDuties(pubkeys, validatorIndices, committeeIndices, committeeSlots),
nil,
@@ -1089,7 +1034,7 @@ func TestGetDuties_Valid(t *testing.T) {
dutiesProvider.EXPECT().GetSyncDuties(
ctx,
testCase.epoch,
multipleValidatorStatus.Indices,
validatorIndices,
).Return(
generateValidSyncDuties(pubkeys, validatorIndices),
nil,
@@ -1143,7 +1088,7 @@ func TestGetDuties_Valid(t *testing.T) {
Data: []*structs.ValidatorContainer{
{
Index: strconv.FormatUint(uint64(validatorIndices[0]), 10),
Status: "pending_initialized",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[0]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1151,7 +1096,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[1]), 10),
Status: "pending_queued",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[1]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1167,7 +1112,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[3]), 10),
Status: "active_exiting",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[3]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1175,7 +1120,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[4]), 10),
Status: "active_slashed",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[4]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1183,7 +1128,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[5]), 10),
Status: "exited_unslashed",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[5]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1191,7 +1136,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[6]), 10),
Status: "exited_slashed",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[6]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1199,7 +1144,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[7]), 10),
Status: "withdrawal_possible",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[7]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1207,7 +1152,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[8]), 10),
Status: "withdrawal_done",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[8]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1215,7 +1160,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[9]), 10),
Status: "pending_initialized",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[9]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1223,7 +1168,7 @@ func TestGetDuties_Valid(t *testing.T) {
},
{
Index: strconv.FormatUint(uint64(validatorIndices[10]), 10),
Status: "pending_queued",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkeys[10]),
ActivationEpoch: strconv.FormatUint(uint64(testCase.epoch), 10),
@@ -1242,27 +1187,16 @@ func TestGetDuties_Valid(t *testing.T) {
nil,
).MinTimes(1)
prysmBeaconChainClient := validatormock.NewMockPrysmBeaconChainClient(ctrl)
prysmBeaconChainClient.EXPECT().GetValidatorCount(
ctx,
gomock.Any(),
gomock.Any(),
).Return(
nil,
iface.ErrNotSupported,
).MinTimes(1)
// Make sure that our values are equal to what would be returned by calling getDutiesForEpoch individually
validatorClient := &beaconApiValidatorClient{
dutiesProvider: dutiesProvider,
stateValidatorsProvider: stateValidatorsProvider,
prysmBeaconChainCLient: prysmBeaconChainClient,
}
expectedCurrentEpochDuties, err := validatorClient.getDutiesForEpoch(
ctx,
testCase.epoch,
multipleValidatorStatus,
vals,
fetchSyncDuties,
)
require.NoError(t, err)
@@ -1270,7 +1204,7 @@ func TestGetDuties_Valid(t *testing.T) {
expectedNextEpochDuties, err := validatorClient.getDutiesForEpoch(
ctx,
testCase.epoch+1,
multipleValidatorStatus,
vals,
fetchSyncDuties,
)
require.NoError(t, err)
@@ -1291,7 +1225,7 @@ func TestGetDuties_Valid(t *testing.T) {
}
}
func TestGetDuties_GetValidatorStatusFailed(t *testing.T) {
func TestGetDuties_GetStateValidatorsFailed(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -1316,7 +1250,7 @@ func TestGetDuties_GetValidatorStatusFailed(t *testing.T) {
Epoch: 1,
PublicKeys: [][]byte{},
})
assert.ErrorContains(t, "failed to get validator status", err)
assert.ErrorContains(t, "failed to get state validators", err)
assert.ErrorContains(t, "foo error", err)
}
@@ -1325,6 +1259,7 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) {
defer ctrl.Finish()
ctx := context.Background()
pubkey := []byte{1, 2, 3}
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().GetStateValidators(
@@ -1334,7 +1269,13 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) {
gomock.Any(),
).Return(
&structs.GetValidatorsResponse{
Data: []*structs.ValidatorContainer{},
Data: []*structs.ValidatorContainer{{
Index: "0",
Status: "active_ongoing",
Validator: &structs.Validator{
Pubkey: hexutil.Encode(pubkey),
},
}},
},
nil,
).Times(1)
@@ -1348,26 +1289,28 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) {
nil,
errors.New("foo error"),
).Times(1)
prysmBeaconChainClient := validatormock.NewMockPrysmBeaconChainClient(ctrl)
prysmBeaconChainClient.EXPECT().GetValidatorCount(
dutiesProvider.EXPECT().GetAttesterDuties(
ctx,
primitives.Epoch(2),
gomock.Any(),
).Times(1)
dutiesProvider.EXPECT().GetProposerDuties(
ctx,
gomock.Any(),
).Times(2)
dutiesProvider.EXPECT().GetCommittees(
ctx,
gomock.Any(),
).Return(
nil,
iface.ErrNotSupported,
).MinTimes(1)
).Times(2)
validatorClient := &beaconApiValidatorClient{
stateValidatorsProvider: stateValidatorsProvider,
dutiesProvider: dutiesProvider,
prysmBeaconChainCLient: prysmBeaconChainClient,
}
_, err := validatorClient.getDuties(ctx, &ethpb.DutiesRequest{
Epoch: 1,
PublicKeys: [][]byte{},
PublicKeys: [][]byte{pubkey},
})
assert.ErrorContains(t, "failed to get duties for current epoch `1`", err)
assert.ErrorContains(t, "foo error", err)