Compare commits

...

10 Commits

Author SHA1 Message Date
terence tsao
ffd63a6210 Add back finalized info in new block log 2022-05-31 06:48:05 -07:00
terence tsao
5c372d3c69 Fix build 2022-05-31 06:46:13 -07:00
terence tsao
b0b6345f06 Add builder service skeleton and flag 2022-05-30 07:57:38 -07:00
Potuz
3ff285dda5 Do not fill in blocks that are not in the finalized branch (#10776)
* Do not fill in blocks that are not in the finalized branch

* mark blocks as invalid

* Fix old off-by-ones, do not pass finalized state
2022-05-30 11:55:39 +00:00
terencechain
364ad3fbda Reinsert reorg atts (#10767)
* Add common ancestor root for protoarray

* More efficient algo

* Tests

* Fix linting

* Fix linting

* Fix linting

* Fix linting

* Fix linting

* Fix linting

* Apply suggestions from code review

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* Feedbacks

* Revert saveHead changes

* Revert "Revert saveHead changes"

This reverts commit a15fddc2e6.

* Fix rest of the tests

* Update beacon-chain/blockchain/head.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-29 19:32:42 +00:00
Potuz
4cbb69602f Clean up onBlockBatch and prune forkchoice on init sync (#10768)
Co-authored-by: terencechain <terence@prysmaticlabs.com>
2022-05-28 11:56:58 +00:00
David
64920d719d add mutex for validator.highestValidSlot (#10722)
* add mutex for validator.highestValidSlot

* fixed deadlock issue

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-05-28 00:44:06 +00:00
Potuz
3cf385fe91 Unrealized justification (#10659)
* unrealized justification API

* Add time elapse logging

* add unrealized justification checkpoint

* Use UnrealizedJustificationCheckpoint

* Refactor unrealized checkpoints

* Move logic to state package

* do not use ctx on a sum

* fix ctx

* add tests

* fix conflicts

* unhandled error

* Fix ordering in computing checkpoints

* gaz

* keep finalized checkpoint if nothing justified

* gaz

* copy checkpoint

* fix check for nil

* Add state package tests

* Add tests

* Radek's review

* add more tests

* Update beacon-chain/core/epoch/precompute/justification_finalization.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* deduplicate to stateutil

* missing file

* Add stateutil test

* Minor refactor, don't export certain things

* Fix exports in tests

* remove unused error

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-27 16:38:00 +00:00
David
adabd1fa4f service.head race condition fix (#10741)
* added various read mutex locks for service.head

* added RLocks around all calls to s.headRoot()

* added RLocks around all calls to s.headBlock()

* reduce lock surface-> Stop(),handleEpochBoundary()

* refactor Stop() to +performance, -lock_surface

* Apply suggestions from code review

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* fixed indentation

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-05-27 14:24:43 +00:00
Nishant Das
9be2111b7a Write To Only One File For Geth Logs (#10769) 2022-05-27 13:06:30 +00:00
58 changed files with 1792 additions and 206 deletions

View File

@@ -27,6 +27,8 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
)
// An invalid block is the block that fails state transition based on the core protocol rules.
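The errNotDescendantOfFinalized sentinel above is wrapped in the package's invalidBlock type so callers can tell consensus-invalid blocks apart from transient failures. A minimal, self-contained sketch of that marker-error pattern follows; the names here (invalidBlockErr, isInvalidBlock) are illustrative stand-ins, not Prysm's actual helpers.

package main

import (
	"errors"
	"fmt"
)

// invalidBlockErr marks an error as a consensus-rule violation (illustrative only).
type invalidBlockErr struct{ error }

// Unwrap lets errors.Is and errors.As see the wrapped error.
func (e invalidBlockErr) Unwrap() error { return e.error }

var errNotDescendant = invalidBlockErr{errors.New("not descendant of finalized checkpoint")}

// isInvalidBlock reports whether err carries the invalid-block marker anywhere in its chain.
func isInvalidBlock(err error) bool {
	var target invalidBlockErr
	return errors.As(err, &target)
}

func main() {
	wrapped := fmt.Errorf("processing batch: %w", errNotDescendant)
	fmt.Println(isInvalidBlock(wrapped)) // true
}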

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -107,16 +108,16 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Do nothing if head hasn't changed.
r, err := s.HeadRoot(ctx)
oldHeadroot, err := s.HeadRoot(ctx)
if err != nil {
return err
}
if headRoot == bytesutil.ToBytes32(r) {
if newHeadRoot == bytesutil.ToBytes32(oldHeadroot) {
return nil
}
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
@@ -128,17 +129,19 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
// If the head state is not available, just return nil.
// There's nothing to cache
if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
if !s.cfg.BeaconDB.HasStateSummary(ctx, newHeadRoot) {
return nil
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
s.headLock.RLock()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
s.headLock.RUnlock()
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(oldHeadroot) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
@@ -154,7 +157,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: headRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
@@ -162,25 +165,24 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
},
})
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(r)); err != nil {
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(oldHeadroot), newHeadRoot); err != nil {
return err
}
reorgCount.Inc()
}
// Cache the new head info.
s.setHead(headRoot, headBlock, headState)
s.setHead(newHeadRoot, headBlock, headState)
// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, newHeadRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -353,35 +355,48 @@ func (s *Service) notifyNewHeadEvent(
return nil
}
// This saves the attestations inside the beacon block with respect to root `orphanedRoot` back into the
// attestation pool. It also filters out the attestations that is one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out attestations that are more than one epoch old as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor as there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
return nil
case err != nil:
return err
}
if orphanedBlk == nil || orphanedBlk.IsNil() {
return errors.New("orphaned block can't be nil")
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// Is the attestation one epoch older.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
for orphanedRoot != commonAncestorRoot {
if ctx.Err() != nil {
return ctx.Err()
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}
// If the block is an epoch older, break out of the loop since we can't include atts anyway.
// This prevents getting stuck in this for loop longer than necessary.
if orphanedBlk.Block().Slot()+params.BeaconConfig().SlotsPerEpoch <= s.CurrentSlot() {
break
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// If the attestation is more than one epoch old, it wouldn't be useful to save it.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
orphanedRoot = bytesutil.ToBytes32(orphanedBlk.Block().ParentRoot())
}
return nil
}
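
The reworked saveOrphanedAtts above walks the orphaned branch block by block from the old head back to its common ancestor with the new head, re-queuing each block's attestations into the pool. A simplified, self-contained sketch of that walk is below; the block store and attestation representation are stand-ins, not Prysm's types.

package main

import "fmt"

// block is a stand-in for a beacon block: its root, parent root, and attestation payload.
type block struct {
	root, parent string
	atts         []string
}

// walkOrphanedBranch re-collects attestations from orphanedRoot back to (but not including)
// the common ancestor, mirroring the loop structure in saveOrphanedAtts.
func walkOrphanedBranch(chain map[string]block, orphanedRoot, commonAncestor string) []string {
	var saved []string
	for orphanedRoot != commonAncestor {
		blk, ok := chain[orphanedRoot]
		if !ok {
			break // unknown block; the real code returns an error here
		}
		saved = append(saved, blk.atts...)
		orphanedRoot = blk.parent
	}
	return saved
}

func main() {
	// r0 -- r1 -- r2 -- r3 (orphaned branch), with r0 as the common ancestor.
	chain := map[string]block{
		"r1": {root: "r1", parent: "r0", atts: []string{"att1"}},
		"r2": {root: "r2", parent: "r1", atts: []string{"att2"}},
		"r3": {root: "r3", parent: "r2", atts: []string{"att3"}},
	}
	fmt.Println(walkOrphanedBranch(chain, "r3", "r0")) // [att3 att2 att1]
}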

View File

@@ -3,6 +3,7 @@ package blockchain
import (
"bytes"
"context"
"sort"
"testing"
"time"
@@ -10,9 +11,11 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -49,6 +52,8 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0))
service.head = &head{
slot: 0,
root: oldRoot,
@@ -64,6 +69,8 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
@@ -93,6 +100,8 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0))
service.head = &head{
slot: 0,
root: oldRoot,
@@ -110,6 +119,8 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
@@ -229,50 +240,355 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
}
func TestSaveOrphanedAtts(t *testing.T) {
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now()
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
// Chain setup
// 0 -- 1 -- 2 -- 3
// -4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepSSZEqual(t, atts, savedAtts)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableForkChoiceDoublyLinkedTree: true,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
// Chain setup
// 0 -- 1 -- 2 -- 3
// -4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableForkChoiceDoublyLinkedTree: true,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
}
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableForkChoiceDoublyLinkedTree: true,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepNotSSZEqual(t, atts, savedAtts)
}
func TestUpdateHead_noSavedChanges(t *testing.T) {

View File

@@ -68,8 +68,6 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
@@ -77,9 +75,11 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
}).Info("Synced new block")
}
return nil

View File

@@ -335,33 +335,33 @@ func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPaylo
}
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
blockRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
if len(blks) == 0 || len(blockRoots) == 0 {
return nil, nil, errors.New("no blocks provided")
return errors.New("no blocks provided")
}
if len(blks) != len(blockRoots) {
return nil, nil, errWrongBlockCount
return errWrongBlockCount
}
if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
return nil, nil, invalidBlock{err}
return invalidBlock{err}
}
b := blks[0].Block()
// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, nil, err
return err
}
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return nil, nil, err
return err
}
if preState == nil || preState.IsNil() {
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
return fmt.Errorf("nil pre state for slot %d", b.Slot())
}
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
@@ -382,7 +382,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
for i, b := range blks {
v, h, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, err
return err
}
preVersionAndHeaders[i] = &versionAndHeader{
version: v,
@@ -391,7 +391,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, invalidBlock{err}
return invalidBlock{err}
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
@@ -402,7 +402,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
v, h, err = getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, err
return err
}
postVersionAndHeaders[i] = &versionAndHeader{
version: v,
@@ -412,10 +412,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
verify, err := sigSet.Verify()
if err != nil {
return nil, nil, invalidBlock{err}
return invalidBlock{err}
}
if !verify {
return nil, nil, errors.New("batch block signature verification failed")
return errors.New("batch block signature verification failed")
}
// blocks have been verified, add them to forkchoice and call the engine
@@ -424,40 +424,40 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return nil, nil, err
return err
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
preVersionAndHeaders[i].header, b); err != nil {
return nil, nil, err
return err
}
}
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
return nil, nil, err
return err
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoots[i]); err != nil {
return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, nil, err
return err
}
}
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, err
return err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastB := blks[len(blks)-1]
lastBR := blockRoots[len(blockRoots)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
return err
}
arg := &notifyForkchoiceUpdateArg{
headState: preState,
@@ -465,12 +465,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
headBlock: lastB.Block(),
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return nil, nil, err
return err
}
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
return nil, nil, err
}
return fCheckpoints, jCheckpoints, nil
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
}
// handles a block after the block's batch has been verified, where we can save blocks
@@ -521,6 +518,9 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
if err := s.cfg.ForkChoiceStore.Prune(ctx, bytesutil.ToBytes32(fCheckpoint.Root)); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
}
return nil
}
@@ -544,9 +544,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
s.headLock.RLock()
st := s.head.state
s.headLock.RUnlock()
if err := reportEpochMetrics(ctx, postState, st); err != nil {
return err
}
var err error
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
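
The handleEpochBoundary change above copies the head state out under a short read lock and releases the lock before calling reportEpochMetrics, instead of reading s.head.state unguarded. A minimal sketch of that copy-under-RLock pattern, with illustrative field names, is below.

package main

import (
	"fmt"
	"sync"
)

type service struct {
	headLock sync.RWMutex
	head     struct{ state string } // stand-in for the cached head state
}

// headStateSnapshot grabs the shared value under a read lock and releases the lock
// before any slow work, which is the shape of the change in handleEpochBoundary.
func (s *service) headStateSnapshot() string {
	s.headLock.RLock()
	st := s.head.state
	s.headLock.RUnlock()
	return st
}

func main() {
	s := &service{}
	s.head.state = "head-state-at-epoch-boundary"
	fmt.Println(s.headStateSnapshot())
}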

View File

@@ -349,8 +349,6 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
pendingNodes := make([]interfaces.BeaconBlock, 0)
pendingRoots := make([][32]byte, 0)
parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
slot := blk.Slot()
// Fork choice only matters from last finalized slot.
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
@@ -360,20 +358,23 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if err != nil {
return err
}
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.getBlock(ctx, parentRoot)
root := bytesutil.ToBytes32(blk.ParentRoot())
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
return err
}
if b.Block().Slot() <= fSlot {
break
}
pendingNodes = append(pendingNodes, b.Block())
copiedRoot := parentRoot
copiedRoot := root
pendingRoots = append(pendingRoots, copiedRoot)
parentRoot = bytesutil.ToBytes32(b.Block().ParentRoot())
slot = b.Block().Slot()
higherThanFinalized = slot > fSlot
root = bytesutil.ToBytes32(b.Block().ParentRoot())
}
if len(pendingRoots) > 0 && root != bytesutil.ToBytes32(finalized.Root) {
return errNotDescendantOfFinalized
}
// Insert parent nodes to fork choice store in reverse order.
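
fillInForkChoiceMissingBlocks now stops descending once it reaches a block at or below the finalized slot, and if the walk did not end on the finalized root the branch cannot descend from the finalized checkpoint, so errNotDescendantOfFinalized is returned. A simplified sketch of that check, with stand-in types instead of the real DB and forkchoice store, is below.

package main

import (
	"errors"
	"fmt"
)

var errNotDescendantOfFinalized = errors.New("not descendant of finalized checkpoint")

type node struct {
	parent string
	slot   uint64
}

// collectMissingAncestors walks parents of root that are above the finalized slot and
// not yet in forkchoice; it fails if the walk ends somewhere other than the finalized root.
func collectMissingAncestors(db map[string]node, inForkchoice map[string]bool,
	root, finalizedRoot string, finalizedSlot uint64) ([]string, error) {
	var pending []string
	for !inForkchoice[root] {
		n, ok := db[root]
		if !ok || n.slot <= finalizedSlot {
			break
		}
		pending = append(pending, root)
		root = n.parent
	}
	if len(pending) > 0 && root != finalizedRoot {
		return nil, errNotDescendantOfFinalized
	}
	return pending, nil
}

func main() {
	// Mirrors block tree 1: B0 <- B3 <- B4 <- B6 <- B8, finalized at B0.
	db := map[string]node{
		"b8": {parent: "b6", slot: 8}, "b6": {parent: "b4", slot: 6},
		"b4": {parent: "b3", slot: 4}, "b3": {parent: "b0", slot: 3},
		"b0": {parent: "", slot: 0},
	}
	pending, err := collectMissingAncestors(db, map[string]bool{}, "b8", "b0", 0)
	fmt.Println(pending, err) // [b8 b6 b4 b3] <nil> -- four nodes, matching the updated tests
}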

View File

@@ -331,9 +331,9 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
@@ -396,9 +396,9 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
@@ -452,10 +452,8 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
cp1, cp2, err := service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
require.Equal(t, blkCount-1, len(cp1))
require.Equal(t, blkCount-1, len(cp2))
}
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
@@ -732,7 +730,6 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -755,12 +752,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
@@ -777,7 +776,6 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -801,12 +799,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
@@ -823,7 +823,6 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -847,14 +846,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
@@ -872,7 +872,6 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -896,14 +895,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
@@ -921,8 +921,6 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -936,7 +934,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
// Define a tree branch, slot 63 <- 64 <- 65
// Define a tree branch, slot 63 <- 64 <- 65 <- 66
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
@@ -955,20 +953,28 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
r65, err := b65.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
b66 := util.NewBeaconBlock()
b66.Block.Slot = 66
b66.Block.ParentRoot = r65[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
// Set finalized epoch to 2.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
// We should have saved 1 node: block 65
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
}
func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing.T) {
@@ -982,8 +988,6 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1016,27 +1020,75 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
r65, err := b65.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
b66 := util.NewBeaconBlock()
b66.Block.Slot = 66
b66.Block.ParentRoot = r65[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
// Set finalized epoch to 2.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// There should be 1 node: block 65
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
}
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[1]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.ErrorIs(t, errNotDescendantOfFinalized, err)
}
// blockTree1 constructs the following tree:
//    /- B1
// B0           /- B5 - B7
//    \- B3 - B4 - B6 - B8
// (B1, and B3 are all from the same slots)
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
b0 := util.NewBeaconBlock()

View File

@@ -164,21 +164,26 @@ func (s *Service) UpdateHead(ctx context.Context) error {
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.headLock.RUnlock()
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
return nil
}
// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
s.headLock.RLock()
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
s.headLock.RUnlock()
return
}
s.headLock.RUnlock()
if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
log.Debug("New head does not exist in DB. Do nothing")

View File

@@ -233,6 +233,8 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, wb.Block().Slot(), r, bytesutil.ToBytes32(wb.Block().ParentRoot()), [32]byte{}, 0, 0))
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, err, service.UpdateHead(ctx))

View File

@@ -89,8 +89,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()
// Apply state transition on the incoming newly received block batches, one by one.
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err

View File

@@ -144,13 +144,18 @@ func (s *Service) Start() {
func (s *Service) Stop() error {
defer s.cancel()
// lock before accessing s.head, s.head.state, s.head.state.FinalizedCheckpoint().Root
s.headLock.RLock()
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
r := s.head.state.FinalizedCheckpoint().Root
s.headLock.RUnlock()
// Save the last finalized state so that starting up in the following run will be much faster.
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, r); err != nil {
return err
}
} else {
s.headLock.RUnlock()
}
// Save initial sync cached blocks to the DB before stop.
return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}
@@ -231,8 +236,9 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
s.headLock.RLock()
h := s.headBlock().Block()
s.headLock.RUnlock()
if h.Slot() > fSlot {
log.WithFields(logrus.Fields{
"startSlot": fSlot,

View File

@@ -0,0 +1,22 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"option.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/builder",
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -0,0 +1,36 @@
package builder
import (
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
"github.com/urfave/cli/v2"
)
type Option func(s *Service) error
// FlagOptions for builder service flag configurations.
func FlagOptions(c *cli.Context) ([]Option, error) {
endpoint := c.String(flags.MevRelayEndpoint.Name)
opts := []Option{
WithBuilderEndpoints(endpoint),
}
return opts, nil
}
// WithBuilderEndpoints sets the endpoint for the beacon chain builder service.
func WithBuilderEndpoints(endpoint string) Option {
return func(s *Service) error {
s.cfg.builderEndpoint = covertEndPoint(endpoint)
return nil
}
}
func covertEndPoint(ep string) network.Endpoint {
return network.Endpoint{
Url: ep,
Auth: network.AuthorizationData{ // Auth is not used for builder.
Method: authorization.None,
Value: "",
}}
}
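
The functional options defined above are consumed by NewService in service.go (next file). A short sketch of how the flag-driven options might be wired into the builder service at node startup follows; the package name and registration function are hypothetical, while builder.FlagOptions and builder.NewService are the functions introduced in this change.

package node

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/builder"
	"github.com/urfave/cli/v2"
)

// registerBuilderService is a hypothetical helper showing how the new flag-driven
// options would be threaded into the builder service.
func registerBuilderService(ctx context.Context, cliCtx *cli.Context) (*builder.Service, error) {
	opts, err := builder.FlagOptions(cliCtx)
	if err != nil {
		return nil, err
	}
	return builder.NewService(ctx, opts...)
}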

View File

@@ -0,0 +1,77 @@
package builder
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/api/client/builder"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/network"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
Status() error
RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error
}
// config defines a config struct for dependencies into the service.
type config struct {
builderEndpoint network.Endpoint
}
// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
type Service struct {
cfg *config
c *builder.Client
}
// NewService instantiates a new service.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
s := &Service{}
for _, opt := range opts {
if err := opt(s); err != nil {
return nil, err
}
}
if s.cfg.builderEndpoint.Url != "" {
c, err := builder.NewClient(s.cfg.builderEndpoint.Url)
if err != nil {
return nil, err
}
s.c = c
}
return s, nil
}
// Start initializes the service.
func (*Service) Start() {}
// Stop halts the service.
func (*Service) Stop() error {
return nil
}
// SubmitBlindedBlock is currently a stub.
func (*Service) SubmitBlindedBlock(context.Context, *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
return nil, errors.New("not implemented")
}
// GetHeader is currently a stub.
func (*Service) GetHeader(context.Context, types.Slot, [32]byte, [48]byte) (*ethpb.SignedBuilderBid, error) {
return nil, errors.New("not implemented")
}
// Status is currently a stub.
func (*Service) Status() error {
return errors.New("not implemented")
}
// RegisterValidator is currently a stub.
func (*Service) RegisterValidator(context.Context, *ethpb.SignedValidatorRegistrationV1) error {
return errors.New("not implemented")
}
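
Because every method in the skeleton still returns "not implemented", a caller would currently probe Status before relying on the relay. A hedged sketch of such a caller is below; the fallback wording and trimmed interface are illustrative, not part of this change.

package main

import (
	"errors"
	"fmt"
)

// blockBuilder mirrors the shape of the BlockBuilder interface above, trimmed to
// the one method this sketch needs.
type blockBuilder interface {
	Status() error
}

// stubBuilder behaves like the current skeleton: every call reports "not implemented".
type stubBuilder struct{}

func (stubBuilder) Status() error { return errors.New("not implemented") }

// useBuilderOrFallback shows the intended call shape: only take the relay path when
// the builder reports healthy, otherwise fall back to local block construction.
func useBuilderOrFallback(b blockBuilder) string {
	if err := b.Status(); err != nil {
		return "falling back to local block building: " + err.Error()
	}
	return "requesting header from builder"
}

func main() {
	fmt.Println(useBuilderOrFallback(stubBuilder{}))
}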

View File

@@ -47,7 +47,7 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
return err
}
}
// Set validator's active status for preivous epoch.
// Set validator's active status for previous epoch.
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
v.IsActivePrevEpoch = true
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())

View File

@@ -28,6 +28,7 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -43,11 +44,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -2,6 +2,7 @@ package precompute
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -9,6 +10,24 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
var errNilState = errors.New("nil state")
// UnrealizedCheckpoints returns the justification and finalization checkpoints of the
// given state as if it were progressed with empty slots until the next epoch.
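// Typical usage (mirroring the package tests): jc, fc, err := precompute.UnrealizedCheckpoints(st).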
func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
if st == nil || st.IsNil() {
return nil, nil, errNilState
}
activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
if err != nil {
return nil, nil, err
}
justification := processJustificationBits(st, activeBalance, prevTarget, currentTarget)
return computeCheckpoints(st, justification)
}
// ProcessJustificationAndFinalizationPreCompute processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
// Note: this is an optimized version by passing in precomputed total and attesting balances.
@@ -34,12 +53,55 @@ func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal
return state, nil
}
return weighJustificationAndFinalization(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
newBits := processJustificationBits(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
return weighJustificationAndFinalization(state, newBits)
}
// weighJustificationAndFinalization processes justification and finalization during
// processJustificationBits processes the justification bits during epoch processing.
func processJustificationBits(state state.BeaconState, totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) bitfield.Bitvector4 {
newBits := state.JustificationBits()
newBits.Shift(1)
// If 2/3 or more of total balance attested in the previous epoch.
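// Writing the supermajority check as 3*target >= 2*total keeps it in integer arithmetic; it is equivalent to target >= (2/3)*total without losing precision to truncation.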
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
newBits.SetBitAt(1, true)
}
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
newBits.SetBitAt(0, true)
}
return newBits
}
// weighJustificationAndFinalization processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield.Bitvector4) (state.BeaconState, error) {
jc, fc, err := computeCheckpoints(state, newBits)
if err != nil {
return nil, err
}
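// The checkpoints above are computed from the pre-transition state, before any of the Set* calls below mutate it.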
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
return nil, err
}
if err := state.SetCurrentJustifiedCheckpoint(jc); err != nil {
return nil, err
}
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
}
if err := state.SetFinalizedCheckpoint(fc); err != nil {
return nil, err
}
return state, nil
}
// computeCheckpoints computes the new Justification and Finalization
// checkpoints at epoch transition
// Spec pseudocode definition:
// def weigh_justification_and_finalization(state: BeaconState,
// total_active_balance: Gwei,
@@ -77,88 +139,57 @@ func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
func weighJustificationAndFinalization(state state.BeaconState,
totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) (state.BeaconState, error) {
func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
prevEpoch := time.PrevEpoch(state)
currentEpoch := time.CurrentEpoch(state)
oldPrevJustifiedCheckpoint := state.PreviousJustifiedCheckpoint()
oldCurrJustifiedCheckpoint := state.CurrentJustifiedCheckpoint()
// Process justifications
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
return nil, err
}
newBits := state.JustificationBits()
newBits.Shift(1)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
}
// Note: the spec refers to the bit index position starting at 1 instead of starting at zero.
// We will use that paradigm here for consistency with the godoc spec definition.
// If 2/3 or more of total balance attested in the previous epoch.
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
}
if err := state.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}); err != nil {
return nil, err
}
newBits = state.JustificationBits()
newBits.SetBitAt(1, true)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
}
}
justifiedCheckpoint := state.CurrentJustifiedCheckpoint()
finalizedCheckpoint := state.FinalizedCheckpoint()
// If 2/3 or more of the total balance attested in the current epoch.
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
if newBits.BitAt(0) {
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get block root for current epoch %d", prevEpoch)
return nil, nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
}
if err := state.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}); err != nil {
return nil, err
}
newBits = state.JustificationBits()
newBits.SetBitAt(0, true)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
justifiedCheckpoint.Epoch = currentEpoch
justifiedCheckpoint.Root = blockRoot
} else if newBits.BitAt(1) {
// If 2/3 or more of total balance attested in the previous epoch.
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
}
justifiedCheckpoint.Epoch = prevEpoch
justifiedCheckpoint.Root = blockRoot
}
// Process finalization according to Ethereum Beacon Chain specification.
justification := state.JustificationBits().Bytes()[0]
if len(newBits) == 0 {
return nil, nil, errors.New("empty justification bits")
}
justification := newBits.Bytes()[0]
// 2nd/3rd/4th (0b1110) most recent epochs are justified, the 2nd using the 4th as source.
if justification&0x0E == 0x0E && (oldPrevJustifiedCheckpoint.Epoch+3) == currentEpoch {
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
return nil, err
}
finalizedCheckpoint = oldPrevJustifiedCheckpoint
}
// 2nd/3rd (0b0110) most recent epochs are justified, the 2nd using the 3rd as source.
if justification&0x06 == 0x06 && (oldPrevJustifiedCheckpoint.Epoch+2) == currentEpoch {
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
return nil, err
}
finalizedCheckpoint = oldPrevJustifiedCheckpoint
}
// 1st/2nd/3rd (0b0111) most recent epochs are justified, the 1st using the 3rd as source.
if justification&0x07 == 0x07 && (oldCurrJustifiedCheckpoint.Epoch+2) == currentEpoch {
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
return nil, err
}
finalizedCheckpoint = oldCurrJustifiedCheckpoint
}
// The 1st/2nd (0b0011) most recent epochs are justified, the 1st using the 2nd as source
if justification&0x03 == 0x03 && (oldCurrJustifiedCheckpoint.Epoch+1) == currentEpoch {
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
return nil, err
}
finalizedCheckpoint = oldCurrJustifiedCheckpoint
}
return state, nil
return justifiedCheckpoint, finalizedCheckpoint, nil
}
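To make the finalization rules concrete, here is a worked reading of the masks above (a restatement of the spec comment, not new logic; bit 0 denotes the current epoch N):
// bits&0x0E == 0x0E (0b1110): epochs N-1, N-2, N-3 justified -> finalize the old previous justified checkpoint if its epoch is N-3.
// bits&0x06 == 0x06 (0b0110): epochs N-1, N-2 justified      -> finalize the old previous justified checkpoint if its epoch is N-2.
// bits&0x07 == 0x07 (0b0111): epochs N, N-1, N-2 justified   -> finalize the old current justified checkpoint if its epoch is N-2.
// bits&0x03 == 0x03 (0b0011): epochs N, N-1 justified        -> finalize the old current justified checkpoint if its epoch is N-1.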

View File

@@ -1,11 +1,14 @@
package precompute_test
import (
"context"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -123,3 +126,127 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], newState.FinalizedCheckpoint().Root)
assert.Equal(t, types.Epoch(0), newState.FinalizedCheckpointEpoch(), "Unexpected finalized epoch")
}
func TestUnrealizedCheckpoints(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, len(validators))
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
pjr := [32]byte{'p'}
cjr := [32]byte{'c'}
je := types.Epoch(3)
fe := types.Epoch(2)
pjcp := &ethpb.Checkpoint{Root: pjr[:], Epoch: fe}
cjcp := &ethpb.Checkpoint{Root: cjr[:], Epoch: je}
fcp := &ethpb.Checkpoint{Root: pjr[:], Epoch: fe}
tests := []struct {
name string
slot types.Slot
prevVals, currVals int
expectedJustified, expectedFinalized types.Epoch // The expected unrealized checkpoint epochs
}{
{
"Not enough votes, keep previous justification",
129,
len(validators) / 3,
len(validators) / 3,
je,
fe,
},
{
"Not enough votes, keep previous justification, N+2",
161,
len(validators) / 3,
len(validators) / 3,
je,
fe,
},
{
"Enough to justify previous epoch but not current",
129,
2*len(validators)/3 + 3,
len(validators) / 3,
je,
fe,
},
{
"Enough to justify previous epoch but not current, N+2",
161,
2*len(validators)/3 + 3,
len(validators) / 3,
je + 1,
fe,
},
{
"Enough to justify current epoch",
129,
len(validators) / 3,
2*len(validators)/3 + 3,
je + 1,
fe,
},
{
"Enough to justify current epoch, but not previous",
161,
len(validators) / 3,
2*len(validators)/3 + 3,
je + 2,
fe,
},
{
"Enough to justify current and previous",
161,
2*len(validators)/3 + 3,
2*len(validators)/3 + 3,
je + 2,
fe,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
base := &ethpb.BeaconStateAltair{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
Slot: test.slot,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
PreviousJustifiedCheckpoint: pjcp,
CurrentJustifiedCheckpoint: cjcp,
FinalizedCheckpoint: fcp,
InactivityScores: make([]uint64, len(validators)),
JustificationBits: make(bitfield.Bitvector4, 1),
}
for i := 0; i < test.prevVals; i++ {
base.PreviousEpochParticipation[i] = 0xFF
}
for i := 0; i < test.currVals; i++ {
base.CurrentEpochParticipation[i] = 0xFF
}
if test.slot > 130 {
base.JustificationBits.SetBitAt(2, true)
base.JustificationBits.SetBitAt(3, true)
} else {
base.JustificationBits.SetBitAt(1, true)
base.JustificationBits.SetBitAt(2, true)
}
state, err := v2.InitializeFromProto(base)
require.NoError(t, err)
_, _, err = altair.InitializePrecomputeValidators(context.Background(), state)
require.NoError(t, err)
jc, fc, err := precompute.UnrealizedCheckpoints(state)
require.NoError(t, err)
require.DeepEqual(t, test.expectedJustified, jc.Epoch)
require.DeepEqual(t, test.expectedFinalized, fc.Epoch)
})
}
}

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"error.go",
"interfaces.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice",
@@ -16,5 +17,6 @@ go_library(
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -20,6 +20,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -49,6 +50,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -379,3 +380,48 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *pbrpc.Checkpoint) error {
f.store.finalizedEpoch = fc.Epoch
return nil
}
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
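// The walk repeatedly advances whichever node has the higher slot to its parent until the two walks meet; running past the tree root without meeting means the ancestor is unknown.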
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublelinkedtree.CommonAncestorRoot")
defer span.End()
// Return early if the input roots are the same.
if r1 == r2 {
return r1, nil
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, ErrNilNode
}
n2, ok := f.store.nodeByRoot[r2]
if !ok || n2 == nil {
return [32]byte{}, ErrNilNode
}
for {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
if n1.slot > n2.slot {
n1 = n1.parent
// Reached the end of the tree without finding a common ancestor.
if n1 == nil {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 = n2.parent
// Reached the end of the tree without finding a common ancestor.
if n2 == nil {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
}
if n1 == n2 {
return n1.root, nil
}
}
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/crypto/hash"
@@ -251,3 +252,158 @@ func TestStore_UpdateCheckpoints(t *testing.T) {
require.Equal(t, f.store.justifiedEpoch, jc.Epoch)
require.Equal(t, f.store.finalizedEpoch, fc.Epoch)
}
func TestStore_CommonAncestor(t *testing.T) {
{
ctx := context.Background()
f := setup(0, 0)
// /-- b -- d -- e
// a
// \-- c -- f
// \-- g
// \ -- h -- i -- j
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1))
tests := []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between c and b is a",
r1: [32]byte{'c'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and d is a",
r1: [32]byte{'c'},
r2: [32]byte{'d'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and e is a",
r1: [32]byte{'c'},
r2: [32]byte{'e'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between g and f is c",
r1: [32]byte{'g'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between f and h is c",
r1: [32]byte{'f'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between g and h is c",
r1: [32]byte{'g'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between b and h is a",
r1: [32]byte{'b'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'e'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between i and f is c",
r1: [32]byte{'i'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'j'},
r2: [32]byte{'g'},
wantRoot: [32]byte{'c'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// a -- b -- c -- d
f = setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
tests = []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// Equal inputs should return the same root.
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, r)
// Requesting finalized root (last node) should return the same root.
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, r)
// Requesting unknown root
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
require.ErrorIs(t, err, ErrNilNode)
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, ErrNilNode)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1))
// broken link
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}
}

View File

@@ -0,0 +1,5 @@
package forkchoice
import "github.com/pkg/errors"
var ErrUnknownCommonAncestor = errors.New("unknown common ancestor")

View File

@@ -62,6 +62,7 @@ type Getter interface {
ProposerBoost() [fieldparams.RootLength]byte
HasParent(root [32]byte) bool
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
IsCanonical(root [32]byte) bool
FinalizedEpoch() types.Epoch
JustifiedEpoch() types.Epoch

View File

@@ -20,6 +20,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -50,6 +51,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -195,6 +196,52 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
return f.store.nodes[i].root[:], nil
}
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
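// The search works on node indices: parents are stored at lower indices than their descendants, so the larger index is repeatedly replaced with its parent index until the two meet; an out-of-range parent index means the link is broken and the ancestor is unknown.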
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.CommonAncestorRoot")
defer span.End()
// Return early if the two input roots are the same.
if r1 == r2 {
return r1, nil
}
i1, ok := f.store.nodesIndices[r1]
if !ok || i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, errInvalidNodeIndex
}
i2, ok := f.store.nodesIndices[r2]
if !ok || i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, errInvalidNodeIndex
}
for {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
if i1 > i2 {
n1 := f.store.nodes[i1]
i1 = n1.parent
// Reached the end of the tree without finding a common ancestor.
if i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 := f.store.nodes[i2]
i2 = n2.parent
// Reached the end of the tree without finding a common ancestor.
if i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
}
if i1 == i2 {
n1 := f.store.nodes[i1]
return n1.root, nil
}
}
}
// PruneThreshold of fork choice store.
func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold

View File

@@ -4,6 +4,7 @@ import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -560,6 +561,159 @@ func TestStore_PruneBranched(t *testing.T) {
}
}
func TestStore_CommonAncestor(t *testing.T) {
ctx := context.Background()
f := setup(0, 0)
// /-- b -- d -- e
// a
// \-- c -- f
// \-- g
// \ -- h -- i -- j
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1))
tests := []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between c and b is a",
r1: [32]byte{'c'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and d is a",
r1: [32]byte{'c'},
r2: [32]byte{'d'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and e is a",
r1: [32]byte{'c'},
r2: [32]byte{'e'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between g and f is c",
r1: [32]byte{'g'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between f and h is c",
r1: [32]byte{'f'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between g and h is c",
r1: [32]byte{'g'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between b and h is a",
r1: [32]byte{'b'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'e'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between i and f is c",
r1: [32]byte{'i'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'j'},
r2: [32]byte{'g'},
wantRoot: [32]byte{'c'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// a -- b -- c -- d
f = setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
tests = []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// Equal inputs should return the same root.
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, r)
// Requesting finalized root (last node) should return the same root.
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, r)
// Requesting unknown root
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
require.ErrorIs(t, err, errInvalidNodeIndex)
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, errInvalidNodeIndex)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1))
// broken link
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}
func TestStore_LeadsToViableHead(t *testing.T) {
tests := []struct {
n *Node

View File

@@ -18,6 +18,7 @@ go_library(
"//api/gateway:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/db:go_default_library",

View File

@@ -20,6 +20,7 @@ import (
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
@@ -76,6 +77,7 @@ const debugGrpcMaxMsgSize = 1 << 27
type serviceFlagOpts struct {
blockchainFlagOpts []blockchain.Option
powchainFlagOpts []powchain.Option
builderOpts []builder.Option
}
// BeaconNode defines a struct that handles the services running a random beacon chain

View File

@@ -2,6 +2,7 @@ package node
import (
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)
@@ -23,3 +24,11 @@ func WithPowchainFlagOptions(opts []powchain.Option) Option {
return nil
}
}
// WithBuilderFlagOptions includes functional options for the builder service related to CLI flags.
func WithBuilderFlagOptions(opts []builder.Option) Option {
return func(bn *BeaconNode) error {
bn.serviceFlagOpts.builderOpts = opts
return nil
}
}

View File

@@ -223,6 +223,7 @@ type FutureForkStub interface {
AppendInactivityScore(s uint64) error
CurrentEpochParticipation() ([]byte, error)
PreviousEpochParticipation() ([]byte, error)
UnrealizedCheckpointBalances() (uint64, uint64, uint64, error)
InactivityScores() ([]uint64, error)
SetInactivityScores(val []uint64) error
CurrentSyncCommittee() (*ethpb.SyncCommittee, error)

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"error.go",
"getters_attestation.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -54,6 +55,7 @@ go_library(
"//tools/pcli:__pkg__",
],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native/custom-types:go_default_library",
@@ -83,6 +85,7 @@ go_test(
"getters_attestation_test.go",
"getters_block_test.go",
"getters_checkpoint_test.go",
"getters_participation_test.go",
"getters_test.go",
"getters_validator_test.go",
"hasher_test.go",

View File

@@ -0,0 +1,5 @@
package state_native
import "errors"
var ErrNilParticipation = errors.New("nil epoch participation in state")

View File

@@ -1,6 +1,8 @@
package state_native
import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/runtime/version"
)
@@ -36,6 +38,28 @@ func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
return b.previousEpochParticipationVal(), nil
}
// UnrealizedCheckpointBalances returns the total balances: active, target attested in
// the previous epoch and target attested in the current epoch. This function is used to
// compute the "unrealized justification" that a synced Beacon Block will have.
func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
if b.version == version.Phase0 {
return 0, 0, 0, errNotSupported("UnrealizedCheckpointBalances", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
cp := b.currentEpochParticipation
pp := b.previousEpochParticipation
if cp == nil || pp == nil {
return 0, 0, 0, ErrNilParticipation
}
currentEpoch := time.CurrentEpoch(b)
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.validators, currentEpoch)
}
// currentEpochParticipationVal corresponding to participation bits on the beacon chain.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) currentEpochParticipationVal() []byte {

View File

@@ -0,0 +1,64 @@
package state_native
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconStateAltair{
Slot: 2,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
}
state, err := InitializeFromProtoAltair(base)
require.NoError(t, err)
// No one voted in the last two epochs
allActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
active, previous, current, err := state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, uint64(0), current)
require.Equal(t, uint64(0), previous)
// Add some votes in the last two epochs:
base.CurrentEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[1] = 0xFF
state, err = InitializeFromProtoAltair(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(t, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
// Slash some validators
validators[0].Slashed = true
state, err = InitializeFromProtoAltair(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
require.Equal(t, uint64(0), current)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
}

View File

@@ -15,6 +15,7 @@ go_library(
"state_hasher.go",
"sync_committee.root.go",
"trie_helpers.go",
"unrealized_justification.go",
"validator_map_handler.go",
"validator_root.go",
],
@@ -57,6 +58,7 @@ go_test(
"reference_bench_test.go",
"state_root_test.go",
"trie_helpers_test.go",
"unrealized_justification_test.go",
"validator_root_test.go",
],
embed = [":go_default_library"],

View File

@@ -0,0 +1,43 @@
package stateutil
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/math"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
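// UnrealizedCheckpointBalances computes, from the given current (cp) and previous (pp)
// epoch participation bytes and the validator set, the total active balance, the balance
// that attested to the previous epoch's target, and the balance that attested to the
// current epoch's target. Slashed and inactive validators are skipped.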
func UnrealizedCheckpointBalances(cp, pp []byte, validators []*ethpb.Validator, currentEpoch types.Epoch) (uint64, uint64, uint64, error) {
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
activeBalance := uint64(0)
currentTarget := uint64(0)
prevTarget := uint64(0)
if len(cp) < len(validators) || len(pp) < len(validators) {
return 0, 0, 0, errors.New("participation does not match validator set")
}
var err error
for i, v := range validators {
active := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
if active && !v.Slashed {
activeBalance, err = math.Add64(activeBalance, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
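// Participation flags are packed one bit per flag; shifting by the timely-target flag index and masking with 1 extracts the target-attestation bit.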
if ((cp[i] >> targetIdx) & 1) == 1 {
currentTarget, err = math.Add64(currentTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
}
if ((pp[i] >> targetIdx) & 1) == 1 {
prevTarget, err = math.Add64(prevTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
}
}
}
return activeBalance, prevTarget, currentTarget, nil
}

View File

@@ -0,0 +1,95 @@
package stateutil
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
targetFlag := params.BeaconConfig().TimelyTargetFlagIndex
expectedActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
cp := make([]byte, len(validators))
pp := make([]byte, len(validators))
t.Run("No one voted last two epochs", func(tt *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 0)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
})
t.Run("bad votes in last two epochs", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
})
t.Run("two votes in last epoch", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag)})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(tt, uint64(0), previous)
})
t.Run("two votes in previous epoch", func(tt *testing.T) {
copy(cp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("votes in both epochs, decreased balance in first validator", func(tt *testing.T) {
validators[0].EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().MinDepositAmount
copy(cp, []byte{0xFF, 0xFF, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 0xFF, 0xFF})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MinDepositAmount
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("slash a validator", func(tt *testing.T) {
validators[1].Slashed = true
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("Exit a validator", func(tt *testing.T) {
validators[4].ExitEpoch = 1
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 2)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance, previous)
})
}

View File

@@ -63,6 +63,8 @@ func TestNilState_NoPanic(t *testing.T) {
_ = st.PreviousJustifiedCheckpoint()
_ = st.CurrentJustifiedCheckpoint()
_ = st.FinalizedCheckpoint()
_, _, _, err = st.UnrealizedCheckpointBalances()
_ = err
}
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {

View File

@@ -15,6 +15,11 @@ func (*BeaconState) PreviousEpochParticipation() ([]byte, error) {
return nil, errors.New("PreviousEpochParticipation is not supported for phase 0 beacon state")
}
// UnrealizedCheckpointBalances is not supported for phase 0 beacon state.
func (*BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
return 0, 0, 0, errors.New("UnrealizedCheckpointBalances is not supported for phase0 beacon state")
}
// InactivityScores is not supported for phase 0 beacon state.
func (*BeaconState) InactivityScores() ([]uint64, error) {
return nil, errors.New("InactivityScores is not supported for phase 0 beacon state")

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"deprecated_getters.go",
"deprecated_setters.go",
"error.go",
"field_roots.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -32,6 +33,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v2",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -62,6 +64,7 @@ go_test(
"deprecated_setters_test.go",
"getters_block_test.go",
"getters_checkpoint_test.go",
"getters_participation_test.go",
"getters_test.go",
"getters_validator_test.go",
"proofs_test.go",

View File

@@ -0,0 +1,5 @@
package v2
import "errors"
var ErrNilParticipation = errors.New("nil epoch participation in state")

View File

@@ -1,5 +1,10 @@
package v2
import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
)
// CurrentEpochParticipation corresponding to participation bits on the beacon chain.
func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
if !b.hasInnerState() {
@@ -30,6 +35,26 @@ func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
return b.previousEpochParticipation(), nil
}
// UnrealizedCheckpointBalances returns the total balances: active, target attested in
// the previous epoch and target attested in the current epoch. This function is used to
// compute the "unrealized justification" that a synced Beacon Block will have.
func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
if !b.hasInnerState() {
return 0, 0, 0, ErrNilInnerState
}
b.lock.RLock()
defer b.lock.RUnlock()
cp := b.state.CurrentEpochParticipation
pp := b.state.PreviousEpochParticipation
if cp == nil || pp == nil {
return 0, 0, 0, ErrNilParticipation
}
currentEpoch := time.CurrentEpoch(b)
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.state.Validators, currentEpoch)
}
// currentEpochParticipation corresponding to participation bits on the beacon chain.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) currentEpochParticipation() []byte {

View File

@@ -0,0 +1,64 @@
package v2
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconStateAltair{
Slot: 2,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
}
state, err := InitializeFromProto(base)
require.NoError(t, err)
// No one voted in the last two epochs
allActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
active, previous, current, err := state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, uint64(0), current)
require.Equal(t, uint64(0), previous)
// Add some votes in the last two epochs:
base.CurrentEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[1] = 0xFF
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(t, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
// Slash some validators
validators[0].Slashed = true
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
require.Equal(t, uint64(0), current)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
}

View File

@@ -74,6 +74,8 @@ func TestNilState_NoPanic(t *testing.T) {
require.ErrorIs(t, ErrNilInnerState, err)
_, err = st.NextSyncCommittee()
require.ErrorIs(t, ErrNilInnerState, err)
_, _, _, err = st.UnrealizedCheckpointBalances()
require.ErrorIs(t, ErrNilInnerState, err)
}
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"deprecated_getters.go",
"deprecated_setters.go",
"error.go",
"field_roots.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -34,6 +35,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -64,6 +66,7 @@ go_test(
"deprecated_setters_test.go",
"getters_block_test.go",
"getters_checkpoint_test.go",
"getters_participation_test.go",
"getters_test.go",
"getters_validator_test.go",
"proofs_test.go",

View File

@@ -0,0 +1,5 @@
package v3
import "errors"
var ErrNilParticipation = errors.New("nil epoch participation in state")

View File

@@ -1,5 +1,10 @@
package v3
import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
)
// CurrentEpochParticipation corresponding to participation bits on the beacon chain.
func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
if !b.hasInnerState() {
@@ -30,6 +35,26 @@ func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
return b.previousEpochParticipation(), nil
}
// UnrealizedCheckpointBalances returns the total balances: active, target attested in
// the previous epoch and target attested in the current epoch. This function is used to
// compute the "unrealized justification" that a synced Beacon Block will have.
func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
if !b.hasInnerState() {
return 0, 0, 0, ErrNilInnerState
}
b.lock.RLock()
defer b.lock.RUnlock()
cp := b.state.CurrentEpochParticipation
pp := b.state.PreviousEpochParticipation
if cp == nil || pp == nil {
return 0, 0, 0, ErrNilParticipation
}
currentEpoch := time.CurrentEpoch(b)
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.state.Validators, currentEpoch)
}
// currentEpochParticipation corresponding to participation bits on the beacon chain.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) currentEpochParticipation() []byte {

View File

@@ -0,0 +1,64 @@
package v3
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconStateBellatrix{
Slot: 2,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
}
state, err := InitializeFromProto(base)
require.NoError(t, err)
// No one voted in the last two epochs
allActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
active, previous, current, err := state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, uint64(0), current)
require.Equal(t, uint64(0), previous)
// Add some votes in the last two epochs:
base.CurrentEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[1] = 0xFF
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(t, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
// Slash some validators
validators[0].Slashed = true
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
require.Equal(t, uint64(0), current)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
}

View File

@@ -75,6 +75,8 @@ func TestNilState_NoPanic(t *testing.T) {
require.ErrorIs(t, ErrNilInnerState, err)
_, err = st.LatestExecutionPayloadHeader()
require.ErrorIs(t, ErrNilInnerState, err)
_, _, _, err = st.UnrealizedCheckpointBalances()
require.ErrorIs(t, ErrNilInnerState, err)
}

View File

@@ -15,6 +15,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/cmd/beacon-chain",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/builder:go_default_library",
"//beacon-chain/node:go_default_library",
"//cmd:go_default_library",
"//cmd/beacon-chain/blockchain:go_default_library",

View File

@@ -11,6 +11,12 @@ import (
)
var (
// MevRelayEndpoint provides an HTTP access endpoint to a MEV builder network.
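// Example invocation (the relay URL is illustrative only): beacon-chain --http-mev-relay=http://localhost:18550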
MevRelayEndpoint = &cli.StringFlag{
Name: "http-mev-relay",
Usage: "A MEV builder relay string http endpoint, this wil be used to interact MEV builder network using API defined in: https://ethereum.github.io/builder-specs/#/Builder",
Value: "",
}
// HTTPWeb3ProviderFlag provides an HTTP access endpoint to an ETH 1.0 RPC.
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",

View File

@@ -11,6 +11,7 @@ import (
gethlog "github.com/ethereum/go-ethereum/log"
golog "github.com/ipfs/go-log/v2"
joonix "github.com/joonix/log"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/node"
"github.com/prysmaticlabs/prysm/cmd"
blockchaincmd "github.com/prysmaticlabs/prysm/cmd/beacon-chain/blockchain"
@@ -72,6 +73,7 @@ var appFlags = []cli.Flag{
flags.TerminalTotalDifficultyOverride,
flags.TerminalBlockHashOverride,
flags.TerminalBlockHashActivationEpochOverride,
flags.MevRelayEndpoint,
cmd.EnableBackupWebhookFlag,
cmd.BackupWebhookOutputDir,
cmd.MinimalConfigFlag,
@@ -251,9 +253,14 @@ func startNode(ctx *cli.Context) error {
if err != nil {
return err
}
builderFlagOpts, err := builder.FlagOptions(ctx)
if err != nil {
return err
}
opts := []node.Option{
node.WithBlockchainFlagOptions(blockchainFlagOpts),
node.WithPowchainFlagOptions(powchainFlagOpts),
node.WithBuilderFlagOptions(builderFlagOpts),
}
optFuncs := []func(*cli.Context) (node.Option, error){

View File

@@ -126,6 +126,7 @@ var appHelpFlagGroups = []flagGroup{
flags.WeakSubjectivityCheckpoint,
flags.Eth1HeaderReqLimit,
flags.MinPeersPerSubnet,
flags.MevRelayEndpoint,
checkpoint.BlockPath,
checkpoint.StatePath,
checkpoint.RemoteURL,

View File

@@ -151,11 +151,10 @@ func (m *Miner) Start(ctx context.Context) error {
}
runCmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
file, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, "eth1_miner.log")
file, err := os.Create(path.Join(e2e.TestParams.LogPath, "eth1_miner.log"))
if err != nil {
return err
}
runCmd.Stdout = file
runCmd.Stderr = file
log.Infof("Starting eth1 miner with flags: %s", strings.Join(args[2:], " "))

View File

@@ -98,11 +98,10 @@ func (node *Node) Start(ctx context.Context) error {
args = append(args, []string{"--syncmode=full"}...)
}
runCmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Safe
file, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, "eth1_"+strconv.Itoa(node.index)+".log")
file, err := os.Create(path.Join(e2e.TestParams.LogPath, "eth1_"+strconv.Itoa(node.index)+".log"))
if err != nil {
return err
}
runCmd.Stdout = file
runCmd.Stderr = file
log.Infof("Starting eth1 node %d with flags: %s", node.index, strings.Join(args[2:], " "))

View File

@@ -254,9 +254,12 @@ func (v *validator) waitOneThirdOrValidBlock(ctx context.Context, slot types.Slo
defer span.End()
// No need to wait if the requested slot is at or below the highest valid slot.
v.highestValidSlotLock.Lock()
if slot <= v.highestValidSlot {
v.highestValidSlotLock.Unlock()
return
}
v.highestValidSlotLock.Unlock()
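// Release the lock before waiting below so that ReceiveBlocks, which takes the same lock to update highestValidSlot, is not blocked for the rest of the slot.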
delay := slots.DivideSlotBy(3 /* a third of the slot duration */)
startTime := slots.StartTime(v.genesisTime, slot)

View File

@@ -67,6 +67,7 @@ type validator struct {
domainDataLock sync.Mutex
attLogsLock sync.Mutex
aggregatedSlotCommitteeIDCacheLock sync.Mutex
highestValidSlotLock sync.Mutex
prevBalanceLock sync.RWMutex
slashableKeysLock sync.RWMutex
eipImportBlacklistedPublicKeys map[[fieldparams.BLSPubkeyLength]byte]bool
@@ -357,9 +358,11 @@ func (v *validator) ReceiveBlocks(ctx context.Context, connectionErrorChannel ch
log.Error("Received nil block")
continue
}
v.highestValidSlotLock.Lock()
if blk.Block().Slot() > v.highestValidSlot {
v.highestValidSlot = blk.Block().Slot()
}
v.highestValidSlotLock.Unlock()
v.blockFeed.Send(blk)
}
}