Compare commits

9 Commits

Author SHA1 Message Date
nisdas        38bc62b3ff  save progress  2022-04-04 19:29:32 +08:00
nisdas        7f637d72b2  fix error log  2022-04-04 19:24:44 +08:00
nisdas        6ea8c3b2fc  Merge remote-tracking branch 'origin/addLockAnalyzer' into addLockAnalyzer  2022-04-04 17:15:21 +08:00
nisdas        99f33630f6  fix failures  2022-04-04 17:15:10 +08:00
Nishant Das   fa2f17662a  Merge branch 'develop' into addLockAnalyzer  2022-04-04 15:43:00 +08:00
nisdas        88b4b68204  progress  2022-04-04 15:42:35 +08:00
nisdas        624e124f38  fix locks  2022-04-04 08:46:19 +08:00
nisdas        e48ae09762  Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into addLockAnalyzer  2022-04-03 23:28:45 +08:00
nisdas        e4bdeed3a6  add lock analyzer  2022-03-25 10:36:19 +08:00
167 changed files with 2706 additions and 6923 deletions

View File

@@ -1,22 +0,0 @@
name: Horusec Security Scan
on:
schedule:
# Runs the scheduled scan weekly on Sundays
- cron: '0 0 * * SUN'
jobs:
Horusec_Scan:
name: horusec-Scan
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/develop'
steps:
- name: Check out code
uses: actions/checkout@v2
with: # Required when commit authors is enabled
fetch-depth: 0
- name: Running Security Scan
run: |
curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

View File

@@ -40,15 +40,15 @@ func (od *OriginData) CheckpointString() string {
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", od.cf, od.b.Block().Slot(), od.wsd.BlockRoot))
return blockPath, file.WriteFile(blockPath, od.BlockBytes())
blockPath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.BlockRoot))
return blockPath, file.WriteFile(blockPath, od.sb)
}
// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
return statePath, file.WriteFile(statePath, od.StateBytes())
return statePath, file.WriteFile(statePath, od.sb)
}
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
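A minimal, self-contained sketch of the save pattern the doc comments above describe (file names built from type, config name, slot and root to avoid collisions). fname and saveUnique here are hypothetical stand-ins, not the actual Prysm helpers:

package main

import (
    "fmt"
    "os"
    "path"
)

// fname mirrors the naming convention described above: type, config name,
// slot and root, so concurrently downloaded artifacts never collide.
func fname(kind, configName string, slot uint64, root [32]byte) string {
    return fmt.Sprintf("%s_%s_%d-%#x.ssz", kind, configName, slot, root)
}

// saveUnique writes data under dir using that naming scheme and returns the path.
func saveUnique(dir, kind, configName string, slot uint64, root [32]byte, data []byte) (string, error) {
    p := path.Join(dir, fname(kind, configName, slot, root))
    return p, os.WriteFile(p, data, 0o600)
}

func main() {
    root := [32]byte{0xaa}
    p, err := saveUnique(os.TempDir(), "block", "mainnet", 42, root, []byte{0x01})
    fmt.Println(p, err)
}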

View File

@@ -48,7 +48,6 @@ go_library(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",

View File

@@ -331,7 +331,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
if err == nil {
return optimistic, nil
}
@@ -358,14 +358,10 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, nil
}
// The checkpoint root could be zeros before the first finalized epoch. Use the genesis root in that case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
if err != nil {
return false, err
}
if lastValidated == nil {
return false, errInvalidNilSummary
}
if ss.Slot > lastValidated.Slot {
return true, nil
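A minimal sketch of the IsOptimisticForRoot flow after this hunk, assuming simplified stand-in interfaces (forkchoicer, summaryDB) rather than the real Prysm types: forkchoice is consulted first with the added ctx argument, and on a miss the root's state-summary slot is compared against the summary of the last validated checkpoint.

package sketch

import (
    "context"
    "errors"
)

// Simplified stand-ins; the real Prysm types and method sets differ.
type stateSummary struct{ Slot uint64 }

type forkchoicer interface {
    IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
}

type summaryDB interface {
    StateSummary(ctx context.Context, root [32]byte) (*stateSummary, error)
    LastValidatedCheckpointRoot(ctx context.Context) ([32]byte, error)
}

func isOptimisticForRoot(ctx context.Context, fc forkchoicer, db summaryDB, root [32]byte) (bool, error) {
    // Fast path: ask forkchoice directly (note the added ctx parameter).
    if optimistic, err := fc.IsOptimistic(ctx, root); err == nil {
        return optimistic, nil
    }
    // Fallback: compare slots from DB state summaries.
    ss, err := db.StateSummary(ctx, root)
    if err != nil {
        return false, err
    }
    if ss == nil {
        return false, errors.New("no state summary for root")
    }
    validatedRoot, err := db.LastValidatedCheckpointRoot(ctx)
    if err != nil {
        return false, err
    }
    lastValidated, err := db.StateSummary(ctx, validatedRoot)
    if err != nil || lastValidated == nil {
        return false, errors.New("no summary for last validated checkpoint")
    }
    // Anything with a slot beyond the last validated summary is treated as optimistic.
    return ss.Slot > lastValidated.Slot, nil
}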

View File

@@ -9,7 +9,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestHeadSlot_DataRace(t *testing.T) {
@@ -17,13 +16,10 @@ func TestHeadSlot_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
s.HeadSlot()
<-wait
@@ -35,16 +31,12 @@ func TestHeadRoot_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadRoot(context.Background())
_, err := s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
@@ -57,14 +49,10 @@ func TestHeadBlock_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wsb},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadBlock(context.Background())
require.NoError(t, err)
@@ -76,16 +64,12 @@ func TestHeadState_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadState(context.Background())
_, err := s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}

View File

@@ -477,9 +477,6 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
@@ -496,17 +493,6 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
@@ -542,9 +528,6 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.ErrorContains(t, "nil summary returned from the DB", err)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
@@ -560,17 +543,6 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {

View File

@@ -40,15 +40,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
return s.saveHead(ctx, headRoot, headBlock, headState)
return s.saveHead(ctx, headRoot)
}
// This defines the current chain service's view of head.
@@ -105,7 +97,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock block.SignedBeaconBlock, headState state.BeaconState) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
@@ -117,12 +109,6 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
if headRoot == bytesutil.ToBytes32(r) {
return nil
}
if err := helpers.BeaconBlockIsNil(headBlock); err != nil {
return err
}
if headState == nil || headState.IsNil() {
return errors.New("cannot save nil head state")
}
// If the head state is not available, just return nil.
// There's nothing to cache
@@ -130,13 +116,31 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
return nil
}
// Get the new head block from DB.
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
return err
}
// Get the new head state from cached state or DB.
newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil || newHeadState.IsNil() {
return errors.New("cannot save nil head state")
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
newHeadSlot := newHeadBlock.Block().Slot()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
newStateRoot := headBlock.Block().StateRoot()
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
newStateRoot := newHeadBlock.Block().StateRoot()
if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
@@ -168,7 +172,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
}
// Cache the new head info.
s.setHead(headRoot, headBlock, headState)
s.setHead(headRoot, newHeadBlock, newHeadState)
// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
@@ -178,7 +182,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock blo
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
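A minimal sketch of the slimmed-down saveHead this file converges on, with signedBlock, blockDB and stateGen as simplified stand-ins rather than the real Prysm interfaces: callers pass only the head root, and the head block and state are resolved inside the function.

package sketch

import (
    "context"
    "errors"
    "log"
)

// Simplified stand-ins for the dependencies saveHead uses after this change.
type signedBlock interface {
    Slot() uint64
    ParentRoot() [32]byte
}

type beaconState interface {
    Slot() uint64
}

type blockDB interface {
    Block(ctx context.Context, root [32]byte) (signedBlock, error)
}

type stateGen interface {
    StateByRoot(ctx context.Context, root [32]byte) (beaconState, error)
}

type headService struct {
    db       blockDB
    stateGen stateGen
    headRoot [32]byte
}

// saveHead now receives only the head root; the head block and state are
// resolved internally instead of being passed in by every caller.
func (s *headService) saveHead(ctx context.Context, headRoot [32]byte) error {
    if headRoot == s.headRoot {
        return nil // Head is unchanged; nothing to do.
    }
    newHeadBlock, err := s.db.Block(ctx, headRoot)
    if err != nil {
        return err
    }
    if newHeadBlock == nil {
        return errors.New("nil head block")
    }
    newHeadState, err := s.stateGen.StateByRoot(ctx, headRoot)
    if err != nil {
        return err
    }
    if newHeadState == nil {
        return errors.New("nil head state")
    }
    if newHeadBlock.ParentRoot() != s.headRoot {
        // Reorg: the new head does not build on the previous head. The real
        // service logs the old/new slots and fires a reorg event here.
        log.Printf("reorg to %#x (slot %d)", headRoot, newHeadBlock.Slot())
    }
    s.headRoot = headRoot
    // The real service also caches the new block/state, persists the head root
    // to the DB, and notifies the event feed from a goroutine.
    return nil
}

Centralizing the block/state lookups removes the need for every caller to pass a consistent (root, block, state) triple, which is why the test call sites in the neighboring files shrink to saveHead(ctx, root).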

View File

@@ -29,10 +29,8 @@ func TestSaveHead_Same(t *testing.T) {
r := [32]byte{'A'}
service.head = &head{slot: 0, root: r}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
require.NoError(t, service.saveHead(context.Background(), r))
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
@@ -70,7 +68,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -116,7 +114,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -160,8 +158,7 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
require.NoError(t, service.saveHead(context.Background(), headRoot))
}
func Test_notifyNewHeadEvent(t *testing.T) {

View File

@@ -5,21 +5,14 @@ import (
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -27,7 +20,7 @@ import (
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.BeaconState, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
@@ -46,31 +39,36 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedHash, err := s.getFinalizedPayloadHash(ctx, finalizedRoot)
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return nil, errors.Wrap(err, "could not get finalized payload hash")
return nil, errors.Wrap(err, "could not get finalized block")
}
var finalizedHash []byte
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block execution payload")
}
finalizedHash = payload.BlockHash
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: headPayload.BlockHash,
FinalizedBlockHash: finalizedHash[:],
FinalizedBlockHash: finalizedHash,
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, headState, nextSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get payload attribute")
}
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
// A payload attribute is only required when requesting a payload; here we are just updating fork choice, so it is nil.
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
@@ -80,63 +78,9 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
}
return payloadID, nil
}
// getFinalizedPayloadHash returns the finalized payload hash for the given finalized block root.
// It checks the following in order:
// 1. The finalized block exists in db
// 2. The finalized block exists in initial sync block cache
// 3. The finalized block is the weak subjectivity block and exists in db
// Error is returned if the finalized block is not found from above.
func (s *Service) getFinalizedPayloadHash(ctx context.Context, finalizedRoot [32]byte) ([32]byte, error) {
b, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
b = s.getInitSyncBlock(finalizedRoot)
if b != nil {
return getPayloadHash(b.Block())
}
r, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
b, err = s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block")
}
if b != nil {
return getPayloadHash(b.Block())
}
return [32]byte{}, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
}
// getPayloadHash returns the payload hash for the input given block.
// zeros are returned if the block is older than bellatrix.
func getPayloadHash(b block.BeaconBlock) ([32]byte, error) {
if blocks.IsPreBellatrixVersion(b.Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := b.Body().ExecutionPayload()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized block execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// notifyNewPayload signals the execution engine of a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
@@ -164,7 +108,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
if err != nil {
return false, errors.Wrap(err, "could not get execution payload")
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
@@ -173,19 +117,6 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return false, nil
case powchain.ErrInvalidPayloadStatus:
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
return false, errors.New("could not validate an INVALID payload from execution engine")
default:
return false, errors.Wrap(err, "could not validate execution payload from execution engine")
}
@@ -237,69 +168,3 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.Beacon
}
return parentIsExecutionBlock, nil
}
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, nil, 0, nil
}
// Get previous randao.
st = st.Copy()
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
return false, nil, 0, err
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return false, nil, 0, nil
}
// Get fee recipient.
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Error("Fee recipient not set. Using burn address")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
default:
feeRecipient = recipient
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
return false, nil, 0, err
}
attr := &enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
}
return true, attr, proposerID, nil
}
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
for _, root := range blkRoots {
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
return err
}
// Delete block also deletes the state as well.
if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
// TODO(10487): If a caller requests to delete a root that is justified and finalized, we should gracefully shut down.
// This is an irreparable condition: it would mean a justified or finalized block has become invalid.
return err
}
}
return nil
}
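A minimal sketch of the two behaviours this hunk inlines, using hypothetical stand-in types (beaconBlock, engineCaller) rather than the real Prysm/engine-API interfaces: the finalized payload hash falls back to the zero hash for pre-Bellatrix blocks, and ForkchoiceUpdated is called with nil payload attributes because this path only updates fork choice.

package sketch

import (
    "context"
    "errors"
)

// Stand-in types; the real block and engine-API interfaces in Prysm differ.
type beaconBlock interface {
    Version() int
    PayloadBlockHash() ([]byte, error) // stand-in for Body().ExecutionPayload().BlockHash
}

const bellatrixVersion = 2 // hypothetical stand-in constant

var zeroHash [32]byte

// finalizedPayloadHash mirrors the inlined logic from the hunk: pre-Bellatrix
// finalized blocks map to the zero hash, otherwise the execution payload's
// block hash is used.
func finalizedPayloadHash(finalizedBlock beaconBlock) ([]byte, error) {
    if finalizedBlock == nil {
        return nil, errors.New("nil finalized block")
    }
    if finalizedBlock.Version() < bellatrixVersion {
        return zeroHash[:], nil
    }
    return finalizedBlock.PayloadBlockHash()
}

type forkchoiceState struct {
    HeadBlockHash      []byte
    SafeBlockHash      []byte
    FinalizedBlockHash []byte
}

type engineCaller interface {
    ForkchoiceUpdated(ctx context.Context, fcs *forkchoiceState, attrs interface{}) ([8]byte, error)
}

func notifyForkchoiceUpdate(ctx context.Context, engine engineCaller, headPayloadHash []byte, finalizedBlock beaconBlock) ([8]byte, error) {
    finalizedHash, err := finalizedPayloadHash(finalizedBlock)
    if err != nil {
        return [8]byte{}, err
    }
    fcs := &forkchoiceState{
        HeadBlockHash:      headPayloadHash,
        SafeBlockHash:      headPayloadHash,
        FinalizedBlockHash: finalizedHash,
    }
    // No payload attributes: this call only updates fork choice and does not
    // ask the execution client to start building a payload.
    return engine.ForkchoiceUpdated(ctx, fcs, nil /* payload attributes */)
}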

View File

@@ -5,9 +5,6 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
@@ -27,7 +24,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_NotifyForkchoiceUpdate(t *testing.T) {
@@ -48,13 +44,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
state: st,
}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
@@ -166,8 +157,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
st, _ := util.DeterministicGenesisState(t, 1)
_, err := service.notifyForkchoiceUpdate(ctx, st, tt.blk, service.headRoot(), tt.finalizedRoot)
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -193,6 +183,13 @@ func Test_NotifyNewPayload(t *testing.T) {
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
},
}
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
@@ -200,32 +197,10 @@ func Test_NotifyNewPayload(t *testing.T) {
}
altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
require.NoError(t, err)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
BlockNumber: 1,
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
},
}
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
r, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
tests := []struct {
name string
@@ -269,7 +244,7 @@ func Test_NotifyNewPayload(t *testing.T) {
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: powchain.ErrInvalidPayloadStatus,
errString: "could not validate an INVALID payload from execution engine",
errString: "could not validate execution payload from execution engine: payload status is INVALID",
isValidPayload: false,
},
{
@@ -598,47 +573,6 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
require.Equal(t, true, candidate)
}
func Test_GetPayloadAttribute(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
require.NoError(t, err)
require.Equal(t, false, hasPayload)
require.Equal(t, types.ValidatorIndex(0), vId)
// Cache hit, advance state, no fee recipient
suggestedVid := types.ValidatorIndex(1)
slot := types.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
st, _ := util.DeterministicGenesisState(t, 1)
hook := logTest.NewGlobal()
hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
@@ -654,6 +588,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
@@ -736,130 +671,3 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
}
func TestService_removeInvalidBlockAndState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Deleting unknown block should not error.
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
// Happy case
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
blk1, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
r1, err := blk1.Block().HashTreeRoot()
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: 1,
Root: r1[:],
}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r1))
b2 := util.NewBeaconBlock()
b2.Block.Slot = 2
blk2, err := wrapper.WrappedSignedBeaconBlock(b2)
require.NoError(t, err)
r2, err := blk2.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk2))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: 2,
Root: r2[:],
}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r2))
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))
require.Equal(t, false, service.hasBlock(ctx, r1))
require.Equal(t, false, service.hasBlock(ctx, r2))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
has, err := service.cfg.StateGen.HasState(ctx, r1)
require.NoError(t, err)
require.Equal(t, false, has)
has, err = service.cfg.StateGen.HasState(ctx, r2)
require.NoError(t, err)
require.Equal(t, false, has)
}
func TestService_getFinalizedPayloadHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Use the block in DB
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
h, err := service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the block in init sync cache
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hello"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
service.initSyncBlocks[r] = blk
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// Use the weak subjectivity sync block
b = util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("howdy"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, r))
h, err = service.getFinalizedPayloadHash(ctx, r)
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
// None of the above should error
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, [32]byte{'a'}))
_, err = service.getFinalizedPayloadHash(ctx, [32]byte{'a'})
require.ErrorContains(t, "does not exist in the db or our cache", err)
}
func TestService_getPayloadHash(t *testing.T) {
// Pre-bellatrix
blk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
h, err := getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, [32]byte{}, h)
// Post bellatrix
b := util.NewBeaconBlockBellatrix()
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
blk, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
h, err = getPayloadHash(blk.Block())
require.NoError(t, err)
require.Equal(t, bytesutil.ToBytes32(bytesutil.PadTo([]byte("hi"), 32)), h)
}

View File

@@ -2,7 +2,6 @@ package blockchain
import (
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
@@ -67,14 +66,6 @@ func WithDepositCache(c *depositcache.DepositCache) Option {
}
}
// WithProposerIdsCache for proposer id cache.
func WithProposerIdsCache(c *cache.ProposerPayloadIDsCache) Option {
return func(s *Service) error {
s.cfg.ProposerSlotIndexCache = c
return nil
}
}
// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
return func(s *Service) error {

View File

@@ -208,18 +208,10 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err != nil {
log.WithError(err).Warn("Could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), s.headRoot(), bytesutil.ToBytes32(finalized.Root)); err != nil {
return err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return err
}
if _, err := s.notifyForkchoiceUpdate(ctx, headState, headBlock.Block(), headRoot, bytesutil.ToBytes32(finalized.Root)); err != nil {
return err
}
if err := s.saveHead(ctx, headRoot, headBlock, headState); err != nil {
if err := s.saveHead(ctx, headRoot); err != nil {
return errors.Wrap(err, "could not save head")
}
@@ -266,7 +258,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
@@ -432,7 +424,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
}
}
if _, err := s.notifyForkchoiceUpdate(ctx, preState, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
if _, err := s.notifyForkchoiceUpdate(ctx, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
return nil, nil, err
}
}
@@ -506,7 +498,6 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if postState.Slot()+1 == s.nextEpochBoundarySlot {
// Update caches for the next epoch at epoch boundary slot - 1.
log.Infof("UpdateCommitteeCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot), slot=%d, epoch=%d", postState.Slot(), coreTime.CurrentEpoch(postState))
if err := helpers.UpdateCommitteeCache(postState, coreTime.NextEpoch(postState)); err != nil {
return err
}
@@ -515,7 +506,6 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
if err != nil {
return err
}
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot)")
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
@@ -531,12 +521,9 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// Update caches at epoch boundary slot.
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
log.Info("UpdateCommitteeCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
return err
}

View File

@@ -247,7 +247,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
}
fRoot := bytesutil.ToBytes32(cp.Root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
if err != nil {
return err
}

View File

@@ -181,26 +181,15 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get block from db")
return
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get state from db")
return
}
_, err = s.notifyForkchoiceUpdate(s.ctx,
headState,
newHeadBlock.Block(),
newHeadRoot,
_, err := s.notifyForkchoiceUpdate(s.ctx,
s.headBlock().Block(),
s.headRoot(),
bytesutil.ToBytes32(finalized.Root),
)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
if err := s.saveHead(ctx, newHeadRoot); err != nil {
log.WithError(err).Error("could not save head")
}
}
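A minimal sketch of notifyEngineIfChangedHead after this hunk, with chainService as a hypothetical stand-in interface: the forkchoice update uses the service's cached head rather than a block and state re-read from the DB, and the new head root is then saved with the reduced saveHead signature.

package sketch

import (
    "context"
    "log"
)

// chainService is a simplified stand-in, not the real Prysm service.
type chainService interface {
    HeadRoot() [32]byte
    FinalizedRoot() (root [32]byte, ok bool)
    NotifyForkchoiceUpdate(ctx context.Context, headRoot, finalizedRoot [32]byte) error
    SaveHead(ctx context.Context, root [32]byte) error
}

func notifyEngineIfChangedHead(ctx context.Context, s chainService, newHeadRoot [32]byte) {
    finalized, ok := s.FinalizedRoot()
    if !ok {
        log.Print("could not get finalized checkpoint")
        return
    }
    // Use the cached head rather than re-reading the new head from the DB.
    if err := s.NotifyForkchoiceUpdate(ctx, s.HeadRoot(), finalized); err != nil {
        log.Printf("could not notify forkchoice update: %v", err)
    }
    if err := s.SaveHead(ctx, newHeadRoot); err != nil {
        log.Printf("could not save head: %v", err)
    }
}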

View File

@@ -6,7 +6,6 @@ import (
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -134,7 +133,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
@@ -144,33 +143,21 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
require.LogsContain(t, hook, finalizedErr)
hook.Reset()
service.head = &head{
root: [32]byte{'a'},
block: nil, /* should not panic if notify head uses correct head */
}
b := util.NewBeaconBlock()
b.Block.Slot = 2
b.Block.Slot = 1
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r1, err := b.Block.HashTreeRoot()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
finalized := &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
st, _ := util.DeterministicGenesisState(t, 1)
finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
service.head = &head{
slot: 1,
root: r1,
root: r,
block: wsb,
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.notifyEngineIfChangedHead(ctx, r1)
service.notifyEngineIfChangedHead(ctx, [32]byte{'b'})
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
}

View File

@@ -74,7 +74,6 @@ type config struct {
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
@@ -440,11 +439,9 @@ func (s *Service) initializeBeaconChain(
s.cfg.ChainStartFetcher.ClearPreGenesisData()
// Update committee shuffled indices for genesis epoch.
log.Infof("UpdateCommitteeCache from initializeBeaconChain, slot=%d", genesisState.Slot())
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return nil, err
}
log.Info("UpdateProposerIndicesInCache from initializeBeaconChain")
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}

View File

@@ -6,9 +6,7 @@ import (
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/sirupsen/logrus"
)
@@ -22,11 +20,8 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, err)
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}

View File

@@ -13,7 +13,6 @@ go_library(
"common.go",
"doc.go",
"error.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
@@ -27,7 +26,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//tools:__subpackages__",
],
deps = [
@@ -63,7 +61,6 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"payload_id_test.go",
"proposer_indices_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",

View File

@@ -1,73 +0,0 @@
package cache
import (
"sync"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[types.Slot][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[types.Slot][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot types.Slot) (types.ValidatorIndex, [8]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[slot]
if !ok {
return 0, [8]byte{}, false
}
vId := ids[:vIdLength]
b := ids[vIdLength:]
var pId [pIdLength]byte
copy(pId[:], b)
return types.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId types.ValidatorIndex, pId [8]byte) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))
var bytes [vpIdsLength]byte
copy(bytes[:], append(vIdBytes[:], pId[:]...))
_, ok := f.slotToProposerAndPayloadIDs[slot]
// OK to overwrite an existing entry for this slot only when the new payload ID is non-empty.
// This handles the re-org case where the payload assignment for a slot can change.
if !ok || (ok && pId != [pIdLength]byte{}) {
f.slotToProposerAndPayloadIDs[slot] = bytes
}
}
// PrunePayloadIDs removes payload ID entries for slots older than the input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot types.Slot) {
f.Lock()
defer f.Unlock()
for s := range f.slotToProposerAndPayloadIDs {
if slot > s {
delete(f.slotToProposerAndPayloadIDs, s)
}
}
}

View File

@@ -1,60 +0,0 @@
package cache
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
cache := NewProposerPayloadIDsCache()
i, p, ok := cache.GetProposerPayloadIDs(0)
require.Equal(t, false, ok)
require.Equal(t, types.ValidatorIndex(0), i)
require.Equal(t, [pIdLength]byte{}, p)
slot := types.Slot(1234)
vid := types.ValidatorIndex(34234324)
pid := [8]byte{1, 2, 3, 3, 7, 8, 7, 8}
cache.SetProposerAndPayloadIDs(slot, vid, pid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, pid, p)
slot = types.Slot(9456456)
vid = types.ValidatorIndex(6786745)
cache.SetProposerAndPayloadIDs(slot, vid, [pIdLength]byte{})
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, [pIdLength]byte{}, p)
// reset cache without pid
slot = types.Slot(9456456)
vid = types.ValidatorIndex(11111)
pid = [8]byte{3, 2, 3, 33, 72, 8, 7, 8}
cache.SetProposerAndPayloadIDs(slot, vid, pid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, pid, p)
// reset cache with existing pid
slot = types.Slot(9456456)
vid = types.ValidatorIndex(11111)
newPid := [8]byte{1, 2, 3, 33, 72, 8, 7, 1}
cache.SetProposerAndPayloadIDs(slot, vid, newPid)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, true, ok)
require.Equal(t, vid, i)
require.Equal(t, newPid, p)
// remove cache entry
cache.PrunePayloadIDs(slot + 1)
i, p, ok = cache.GetProposerPayloadIDs(slot)
require.Equal(t, false, ok)
require.Equal(t, types.ValidatorIndex(0), i)
require.Equal(t, [pIdLength]byte{}, p)
}

View File

@@ -6,7 +6,6 @@ import (
"bytes"
"context"
"fmt"
log "github.com/sirupsen/logrus"
"sort"
"github.com/pkg/errors"
@@ -176,7 +175,9 @@ func CommitteeAssignments(
return nil, nil, err
}
proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Proposal epochs do not have a look ahead, so we skip them over here.
validProposalEpoch := epoch < nextEpoch
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
@@ -191,15 +192,6 @@ func CommitteeAssignments(
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
}
// If the previous proposer indices computation is outside of the current proposal epoch range,
// we need to reset state slot back to start slot so that we can compute the correct committees.
currentProposalEpoch := epoch < nextEpoch
if !currentProposalEpoch {
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
return nil, nil, err
}
}
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return nil, nil, err
@@ -287,44 +279,41 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.Va
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
//for _, e := range []types.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
log.Infof("computed seed=%#x for slot=%d", seed, state.Slot())
if committeeCache.HasEntry(string(seed[:])) {
log.Infof("UpdateCommitteeCache: seed=%#x already in cache at slot=%d", seed, state.Slot())
return nil
}
for _, e := range []types.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, epoch)
if err != nil {
return err
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
log.Infof("UpdateCommitteeCache: epoch=%d, state.slot=%d, indices=%v, seed=%#x", epoch, state.Slot(), sortedIndices, seed)
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
//}
return nil
}
@@ -367,7 +356,6 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
if err != nil {
return err
}
log.Infof("UpdateProposerIndicesInCache: state.slot=%d, slot=%d, root=%#x, indices=%v", state.Slot(), s, r, indices)
return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
BlockRoot: bytesutil.ToBytes32(r),
ProposerIndices: proposerIndices,
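A minimal sketch of the committee-cache refresh restored by this hunk, with shuffler and shuffleCache as hypothetical stand-ins for the real state and cache types: both the current and next epoch are cached, keyed by the attester seed, and a sorted copy of the shuffled indices is stored alongside the shuffled list.

package sketch

import "sort"

// Stand-ins for the committee cache refresh shown in the diff.
type shuffler interface {
    Seed(epoch uint64) ([32]byte, error)
    ShuffledIndices(epoch uint64) ([]uint64, error)
}

type shuffleCache interface {
    HasEntry(seed [32]byte) bool
    Add(seed [32]byte, shuffled, sorted []uint64) error
}

// updateCommitteeCache caches shuffled committee indices for the current and
// next epoch, keyed by the attester seed. As in the hunk, an existing entry
// short-circuits the update.
func updateCommitteeCache(st shuffler, c shuffleCache, epoch uint64) error {
    for _, e := range []uint64{epoch, epoch + 1} {
        seed, err := st.Seed(e)
        if err != nil {
            return err
        }
        if c.HasEntry(seed) {
            return nil
        }
        shuffled, err := st.ShuffledIndices(e)
        if err != nil {
            return err
        }
        // Keep a sorted copy alongside the shuffled list; the sorted form is
        // used for proposer-index retrieval and the signature-verification fallback.
        sorted := make([]uint64, len(shuffled))
        copy(sorted, shuffled)
        sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
        if err := c.Add(seed, shuffled, sorted); err != nil {
            return err
        }
    }
    return nil
}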

View File

@@ -232,7 +232,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
log "github.com/sirupsen/logrus"
)
// Seed returns the randao seed used for shuffling of a given epoch.
@@ -34,8 +33,6 @@ func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.Domain
seed32 := hash.Hash(seed)
log.Infof("seed computation params for slot=%d: domain=%#x, epoch=%d, randaoMix=%#x", state.Slot(), domain, epoch, randaoMix)
return seed32, nil
}

View File

@@ -3,8 +3,6 @@ package helpers
import (
"bytes"
"context"
"fmt"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -17,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
log "github.com/sirupsen/logrus"
)
@@ -89,27 +88,11 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
if err != nil {
return nil, errors.Wrap(err, "could not get seed")
}
var ci []types.ValidatorIndex
if s.Slot() == 78 {
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
ci = append(ci, types.ValidatorIndex(idx))
}
return nil
}); err != nil {
log.Errorf("got error doing double-check validator index computation=%v", err)
return nil, err
}
}
activeIndices, err := committeeCache.ActiveIndices(ctx, seed)
if err != nil {
return nil, errors.Wrap(err, "could not interface with committee cache")
}
if activeIndices != nil {
if s.Slot() == 78 {
log.Infof("double check indices for 78, len=%d, low=%d, high=%d", len(ci), ci[0], ci[len(ci)-1])
}
log.Infof("found indices in cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
return activeIndices, nil
}
@@ -123,7 +106,6 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
return nil, errors.New("nil active indices")
}
CommitteeCacheInProgressHit.Inc()
log.Infof("found indices in in-progress cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
return activeIndices, nil
}
return nil, errors.Wrap(err, "could not mark committee cache as in progress")
@@ -144,16 +126,9 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
return nil, err
}
log.Infof("computed indices slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(indices), indices[0], indices[len(indices)-1])
log.Infof("UpdateCommitteeCache from ActiveValidatorIndices, slot=%d", s.Slot())
if err := UpdateCommitteeCache(s, epoch); err != nil {
return nil, errors.Wrap(err, "could not update committee cache")
}
/*
if err := UpdateProposerIndicesInCache(ctx, s); err != nil {
return nil, errors.Wrap(err, "failed to update proposer indices cache in ActiveValidatorIndices")
}
*/
return indices, nil
}
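
After the deleted double-check block, what remains is a plain cache-first lookup: ask the committee cache by seed, fall back to scanning the state, then repopulate the cache. A generic sketch of that pattern with toy types (the map-based cache and names here are illustrative, not the package's committee cache):

package main

import (
	"fmt"
	"sync"
)

// indexCache is a toy stand-in for the committee cache keyed by seed.
type indexCache struct {
	mu      sync.RWMutex
	entries map[[32]byte][]uint64
}

// activeIndices returns cached indices for seed, computing and storing them
// on a miss: the same cache-first shape as ActiveValidatorIndices above.
func (c *indexCache) activeIndices(seed [32]byte, compute func() []uint64) []uint64 {
	c.mu.RLock()
	if v, ok := c.entries[seed]; ok {
		c.mu.RUnlock()
		return v
	}
	c.mu.RUnlock()

	v := compute() // expensive path: scan every validator in the state
	c.mu.Lock()
	c.entries[seed] = v
	c.mu.Unlock()
	return v
}

func main() {
	c := &indexCache{entries: make(map[[32]byte][]uint64)}
	seed := [32]byte{1}
	fmt.Println(c.activeIndices(seed, func() []uint64 { return []uint64{0, 1, 2} }))
	fmt.Println(c.activeIndices(seed, func() []uint64 { panic("miss: should have hit the cache") }))
}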
@@ -200,7 +175,6 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
return 0, err
}
log.Infof("UpdateCommitteeCache from ActiveValidatorCount, slot=%d", s.Slot())
if err := UpdateCommitteeCache(s, epoch); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
@@ -275,7 +249,6 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
}
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
}
log.Info("UpdateProposerIndicesInCache from BeaconProposerIndex")
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}
@@ -286,19 +259,15 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
if err != nil {
return 0, errors.Wrap(err, "could not generate seed")
}
fmt.Printf("BeaconProposerIndex:seed=%#x", seed)
seedWithSlot := append(seed[:], bytesutil.Bytes8(uint64(state.Slot()))...)
fmt.Printf("BeaconProposerIndex:seedWithSlot=%#x", seed)
seedWithSlotHash := hash.Hash(seedWithSlot)
fmt.Printf("BeaconProposerIndex:seedWithSlotHash=%#x", seed)
indices, err := ActiveValidatorIndices(ctx, state, e)
if err != nil {
return 0, errors.Wrap(err, "could not get active indices")
}
log.Infof("validator index length=%d, low=%d, high=%d", len(indices), indices[0], indices[len(indices)-1])
return ComputeProposerIndex(state, indices, seedWithSlotHash)
}
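
With the debug prints removed, the proposer selection reduces to: append the slot as 8 little-endian bytes to the epoch seed, hash, and feed the digest into ComputeProposerIndex. A standalone sketch of just the seed-with-slot step (assuming SHA-256 and the spec's little-endian slot encoding; not this package's exact helper):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// seedWithSlotHash mirrors: hash(append(seed, bytes8(slot))).
// The resulting digest seeds the shuffled pick of the block proposer.
func seedWithSlotHash(seed [32]byte, slot uint64) [32]byte {
	buf := make([]byte, 0, 40)
	buf = append(buf, seed[:]...)
	slotBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(slotBytes, slot)
	buf = append(buf, slotBytes...)
	return sha256.Sum256(buf)
}

func main() {
	fmt.Printf("%x\n", seedWithSlotHash([32]byte{0xaa}, 78))
}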

View File

@@ -138,7 +138,6 @@ func ProcessSlotsUsingNextSlotCache(
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlotsUsingNextSlotCache")
defer span.End()
/*
// Check whether the parent state has been advanced by 1 slot in next slot cache.
nextSlotState, err := NextSlotState(ctx, parentRoot)
if err != nil {
@@ -149,11 +148,6 @@ func ProcessSlotsUsingNextSlotCache(
// We replace next slot state with parent state.
if cachedStateExists {
parentState = nextSlotState
root, err := parentState.HashTreeRoot(ctx)
if err != nil {
log.Errorf("weird, got an error calling HTR for the state=%v where root should be=%#x", err, parentRoot)
}
log.Infof("found state in NextSlotCache at slot=%d with root=%#x (parentRoot=%#x)", parentState.Slot(), root, parentRoot)
}
// In the event our cached state has advanced our
@@ -161,12 +155,9 @@ func ProcessSlotsUsingNextSlotCache(
if cachedStateExists && parentState.Slot() == slot {
return parentState, nil
}
*/
log.Infof("process_slots being called up to slot=%d where state.slot=%d", slot, parentState.Slot())
// Since next slot cache only advances state by 1 slot,
// we check if there's more slots that need to process.
parentState, err := ProcessSlots(ctx, parentState, slot)
parentState, err = ProcessSlots(ctx, parentState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
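
With the cached-state branch commented out above, every call now replays slots from the parent state. The idea the next-slot cache normally implements is simple: if a state already advanced by one slot is stored under the parent root, start from it instead. A hedged sketch of that decision with hypothetical helpers (not this package's NextSlotState API):

package main

import "fmt"

type state struct{ slot uint64 }

// advance replays empty slots one by one (stand-in for ProcessSlots).
func advance(s state, target uint64) state {
	for s.slot < target {
		s.slot++
	}
	return s
}

// processSlotsWithCache prefers a pre-advanced cached state when one exists
// for parentRoot and does not overshoot the target slot.
func processSlotsWithCache(cache map[[32]byte]state, parentRoot [32]byte, parent state, target uint64) state {
	if cached, ok := cache[parentRoot]; ok && cached.slot <= target {
		parent = cached // skip the slots the cache already processed
	}
	return advance(parent, target)
}

func main() {
	root := [32]byte{'p'}
	cache := map[[32]byte]state{root: {slot: 9}}
	fmt.Println(processSlotsWithCache(cache, root, state{slot: 8}, 10).slot) // 10
}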

View File

@@ -282,11 +282,7 @@ func ProcessBlockForStateRoot(
state, err = b.ProcessBlockHeaderNoVerify(ctx, state, blk.Slot(), blk.ProposerIndex(), blk.ParentRoot(), bodyRoot[:])
if err != nil {
tracing.AnnotateError(span, err)
r, err := signed.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not process block header, also failed to compute its htr")
}
return nil, errors.Wrapf(err, "could not process block header, state slot=%d, root=%#x", state.Slot(), r)
return nil, errors.Wrap(err, "could not process block header")
}
enabled, err := b.IsExecutionEnabled(state, blk.Body())

View File

@@ -105,7 +105,6 @@ type HeadAccessDatabase interface {
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
}

View File

@@ -94,7 +94,6 @@ go_test(
"state_test.go",
"utils_test.go",
"validated_checkpoint_test.go",
"wss_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
@@ -102,7 +101,6 @@ go_test(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/features:go_default_library",

View File

@@ -233,17 +233,13 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
defer span.End()
if err := s.DeleteState(ctx, root); err != nil {
return err
}
if err := s.deleteStateSummary(root); err != nil {
return err
return errDeleteFinalized
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
return ErrDeleteJustifiedAndFinalized
return errDeleteFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {

View File

@@ -191,16 +191,6 @@ func TestStore_DeleteBlock(t *testing.T) {
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
ss := make([]*ethpb.StateSummary, len(blks))
for i, blk := range blks {
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
ss[i] = &ethpb.StateSummary{
Slot: blk.Block().Slot(),
Root: r[:],
}
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
@@ -226,50 +216,11 @@ func TestStore_DeleteBlock(t *testing.T) {
b, err = db.Block(ctx, root2)
require.NoError(t, err)
require.Equal(t, b, nil)
require.Equal(t, false, db.HasStateSummary(ctx, root2))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)
}
func TestStore_DeleteJustifiedBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := util.NewBeaconBlock()
b.Block.Slot = 1
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_DeleteFinalizedBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := util.NewBeaconBlock()
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
blk, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

View File

@@ -2,8 +2,8 @@ package kv
import "github.com/pkg/errors"
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a justified or finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
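
Callers are expected to match these sentinels with errors.Is rather than string comparison, which is what the tests above do through require.ErrorIs. A small illustrative check (hypothetical caller, standard library errors package):

package main

import (
	"errors"
	"fmt"
)

var errDeleteFinalized = errors.New("cannot delete finalized block or state")

// deleteBlock is a stand-in that refuses to delete a finalized root.
func deleteBlock(finalized bool) error {
	if finalized {
		return errDeleteFinalized
	}
	return nil
}

func main() {
	err := deleteBlock(true)
	// errors.Is survives wrapping, unlike comparing error strings.
	fmt.Println(errors.Is(err, errDeleteFinalized)) // true
}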

View File

@@ -23,7 +23,7 @@ var previousFinalizedCheckpointKey = []byte("previous-finalized-checkpoint")
var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to determine canonical")
// The finalized block roots index tracks beacon blocks which are finalized in the canonical chain.
// The finalized checkpoint contains the epoch which was finalized and the highest beacon block
// The finalized checkpoint contains the the epoch which was finalized and the highest beacon block
// root where block.slot <= start_slot(epoch). As a result, we cannot index the finalized canonical
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
// finalized epoch from being indexed as "final and canonical".
@@ -75,7 +75,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
// index bucket or genesis block root.
for {
if bytes.Equal(root, genesisRoot) {
if bytes.Equal(root, genesisRoot) || bytes.Equal(root, initCheckpointRoot) {
break
}
@@ -105,12 +105,6 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
return err
}
// breaking here allows the initial checkpoint root to be correctly inserted,
// but stops the loop from trying to search for its parent.
if bytes.Equal(root, initCheckpointRoot) {
break
}
// Found parent, loop exit condition.
if parentBytes := bkt.Get(block.ParentRoot()); parentBytes != nil {
parent := &ethpb.FinalizedBlockRootContainer{}

View File
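The comments in the file above describe the indexing walk: starting from the newly finalized root, follow parent links until reaching a root that is already indexed, the genesis root, or, with this change, the checkpoint-sync origin root. A compact sketch of that walk over an in-memory map (illustrative types only; the real code reads bolt buckets):

package main

import (
	"bytes"
	"fmt"
)

// collectNewlyFinalized walks parents from root until it meets an already
// indexed root or one of the stop roots (genesis / origin checkpoint).
func collectNewlyFinalized(root []byte, parents map[string][]byte, indexed map[string]bool, stops ...[]byte) [][]byte {
	var out [][]byte
	for {
		for _, stop := range stops {
			if bytes.Equal(root, stop) {
				return out
			}
		}
		if indexed[string(root)] {
			return out
		}
		out = append(out, root)
		indexed[string(root)] = true
		root = parents[string(root)]
	}
}

func main() {
	parents := map[string][]byte{"c": []byte("b"), "b": []byte("a"), "a": []byte("genesis")}
	got := collectNewlyFinalized([]byte("c"), parents, map[string]bool{}, []byte("genesis"))
	fmt.Println(len(got)) // 3: roots c, b, a get indexed
}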

@@ -346,31 +346,19 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(checkpointBucket)
enc := bkt.Get(finalizedCheckpointKey)
finalized := &ethpb.Checkpoint{}
checkpoint := &ethpb.Checkpoint{}
if enc == nil {
finalized = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, finalized); err != nil {
return err
}
enc = bkt.Get(justifiedCheckpointKey)
justified := &ethpb.Checkpoint{}
if enc == nil {
justified = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, justified); err != nil {
checkpoint = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, checkpoint); err != nil {
return err
}
blockBkt := tx.Bucket(blocksBucket)
headBlkRoot := blockBkt.Get(headBlockRootKey)
bkt = tx.Bucket(stateBucket)
// Safeguard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
return ErrDeleteJustifiedAndFinalized
}
// Nothing to delete if state doesn't exist.
enc = bkt.Get(blockRoot[:])
if enc == nil {
return nil
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
}
slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])

View File

@@ -110,12 +110,3 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
s.stateSummaryCache.clear()
return nil
}
// deleteStateSummary deletes a state summary object from the db using input block root.
func (s *Store) deleteStateSummary(blockRoot [32]byte) error {
s.stateSummaryCache.delete(blockRoot)
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSummaryBucket)
return bucket.Delete(blockRoot[:])
})
}

View File

@@ -37,13 +37,6 @@ func (c *stateSummaryCache) has(r [32]byte) bool {
return ok
}
// delete state summary in cache.
func (c *stateSummaryCache) delete(r [32]byte) {
c.initSyncStateSummariesLock.Lock()
defer c.initSyncStateSummariesLock.Unlock()
delete(c.initSyncStateSummaries, r)
}
// get retrieves a state summary from the initial sync state summaries cache using the root of
// the block.
func (c *stateSummaryCache) get(r [32]byte) *ethpb.StateSummary {

View File

@@ -62,17 +62,3 @@ func TestStateSummary_CacheToDB(t *testing.T) {
require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
}
}
func TestStateSummary_CanDelete(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
r1 := bytesutil.ToBytes32([]byte{'A'})
s1 := &ethpb.StateSummary{Slot: 1, Root: r1[:]}
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
require.NoError(t, db.SaveStateSummary(ctx, s1))
require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")
require.NoError(t, db.deleteStateSummary(r1))
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
}

View File

@@ -412,7 +412,7 @@ func TestStore_DeleteGenesisState(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(ctx, st, genesisBlockRoot))
wantedErr := "cannot delete finalized block or state"
wantedErr := "cannot delete genesis, finalized, or head state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, genesisBlockRoot))
}
@@ -440,7 +440,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
require.NoError(t, db.SaveState(ctx, finalizedState, finalizedBlockRoot))
finalizedCheckpoint := &ethpb.Checkpoint{Root: finalizedBlockRoot[:]}
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, finalizedCheckpoint))
wantedErr := "cannot delete finalized block or state"
wantedErr := "cannot delete genesis, finalized, or head state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, finalizedBlockRoot))
}
@@ -465,7 +465,8 @@ func TestStore_DeleteHeadState(t *testing.T) {
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(ctx, st, headBlockRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
require.NoError(t, db.DeleteState(ctx, headBlockRoot)) // Ok to delete head state if it's optimistic.
wantedErr := "cannot delete genesis, finalized, or head state"
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, headBlockRoot))
}
func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {

View File

@@ -77,12 +77,6 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "could not save head block root")
}
// save origin block root in a special key, to be used when the canonical
// origin (start of chain, ie alternative to genesis) block or state is needed
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save origin block root")
}
// rebuild the checkpoint from the block
// use it to mark the block as justified and finalized
slotEpoch, err := wblk.Block().Slot().SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
@@ -100,5 +94,11 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
}
// save origin block root in a special key, to be used when the canonical
// origin (start of chain, ie alternative to genesis) block or state is needed
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save origin block root")
}
return nil
}
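
The reordering above records the origin root only after the block is marked justified and finalized. The checkpoint epoch itself comes from integer-dividing the block slot by slots-per-epoch, as in the SafeDivSlot call. A one-line illustration (hypothetical helper, mainnet-style 32 slots per epoch):

package main

import "fmt"

// checkpointEpoch mirrors slot.SafeDivSlot(SlotsPerEpoch) for the origin block.
func checkpointEpoch(slot, slotsPerEpoch uint64) uint64 {
	return slot / slotsPerEpoch
}

func main() {
	fmt.Println(checkpointEpoch(4_194_304, 32)) // epoch 131072
}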

View File

@@ -1,49 +0,0 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestSaveOrigin(t *testing.T) {
// Embedded Genesis works with Mainnet config
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.ConfigName = params.ConfigNames[params.Mainnet]
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
db := setupDB(t)
st, err := genesis.State(params.Mainnet.String())
require.NoError(t, err)
sb, err := st.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, db.LoadGenesis(ctx, sb))
// this is necessary for mainnet, because LoadGenesis is short-circuited by the embedded state,
// so the genesis root key is never written to the db.
require.NoError(t, db.EnsureEmbeddedGenesis(ctx))
cst, err := util.NewBeaconState()
require.NoError(t, err)
csb, err := cst.MarshalSSZ()
require.NoError(t, err)
cb := util.NewBeaconBlock()
scb, err := wrapper.WrappedSignedBeaconBlock(cb)
require.NoError(t, err)
cbb, err := scb.MarshalSSZ()
require.NoError(t, err)
require.NoError(t, db.SaveOrigin(ctx, csb, cbb))
broot, err := scb.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, true, db.IsFinalizedBlock(ctx, broot))
}

View File

@@ -22,13 +22,11 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -3,8 +3,8 @@ package doublylinkedtree
import "errors"
var ErrNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errUnknownPayloadHash = errors.New("unknown payload hash")

View File

@@ -2,15 +2,12 @@ package doublylinkedtree
import (
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -21,7 +18,6 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
finalizedEpoch: finalizedEpoch,
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
pruneThreshold: defaultPruneThreshold,
}
@@ -172,7 +168,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
}
// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
@@ -253,21 +249,9 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
return ErrNilNode
}
if currentNode.balance < oldBalance {
f.store.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
"nodeBalance": currentNode.balance,
"nodeWeight": currentNode.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
f.store.proposerBoostLock.RUnlock()
currentNode.balance = 0
} else {
currentNode.balance -= oldBalance
return errInvalidBalance
}
currentNode.balance -= oldBalance
}
}
@@ -318,6 +302,6 @@ func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, payloadHash)
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.removeNode(ctx, root)
}

View File

@@ -58,30 +58,6 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
{indexToHash(2), indexToHash(2), 0},
{indexToHash(3), indexToHash(3), 0},
}
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()

View File

@@ -3,12 +3,9 @@ package doublylinkedtree
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "forkchoice-doublylinkedtree")
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_head_slot",

View File

@@ -109,7 +109,7 @@ func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) boo
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
// setNodeAndParentValidated sets the current node and the parent as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()

View File

@@ -180,27 +180,27 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
opt, err := f.IsOptimistic(indexToHash(5))
opt, err := f.IsOptimistic(ctx, indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
opt, err = f.IsOptimistic(indexToHash(4))
opt, err = f.IsOptimistic(ctx, indexToHash(4))
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(indexToHash(5))
opt, err = f.IsOptimistic(ctx, indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
// block 4 and 3 should now be valid
opt, err = f.IsOptimistic(indexToHash(4))
opt, err = f.IsOptimistic(ctx, indexToHash(4))
require.NoError(t, err)
require.Equal(t, false, opt)
opt, err = f.IsOptimistic(indexToHash(3))
opt, err = f.IsOptimistic(ctx, indexToHash(3))
require.NoError(t, err)
require.Equal(t, false, opt)
}

View File

@@ -2,54 +2,22 @@ package doublylinkedtree
import (
"context"
"github.com/prysmaticlabs/prysm/config/params"
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
s.nodesLock.Lock()
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
s.nodesLock.Unlock()
return invalidRoots, ErrNilNode
}
// Check if last valid hash is an ancestor of the passed node.
lastValid, ok := s.nodeByPayload[payloadHash]
if !ok || lastValid == nil {
s.nodesLock.Unlock()
return invalidRoots, errUnknownPayloadHash
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
s.nodesLock.Unlock()
return invalidRoots, ctx.Err()
}
}
// If the last valid payload is in a different fork, we remove only the
// passed node.
if firstInvalid.parent == nil {
firstInvalid = node
}
s.nodesLock.Unlock()
return s.removeNode(ctx, firstInvalid)
}
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
func (s *Store) removeNode(ctx context.Context, root [32]byte) ([][32]byte, error) {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
invalidRoots := make([][32]byte, 0)
if node == nil {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return invalidRoots, ErrNilNode
}
if !node.optimistic || node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
children := node.parent.children
if len(children) == 1 {
node.parent.children = []*Node{}
@@ -79,16 +47,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
}
}
invalidRoots = append(invalidRoots, node.root)
s.proposerBoostLock.Lock()
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
s.proposerBoostLock.Unlock()
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}
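
The removed setOptimisticToInvalid found the first invalid ancestor by walking parent pointers until the parent's payload hash equals the last valid hash reported by the execution client, then pruned from there; if no ancestor matches, only the reported block is removed. A reduced sketch of that walk on a toy node type (not this package's Node):

package main

import "fmt"

type node struct {
	root        string
	payloadHash string
	parent      *node
}

// firstInvalid walks up from n until the parent carries lastValidHash.
// If no ancestor matches (different fork), only n itself is returned.
func firstInvalid(n *node, lastValidHash string) *node {
	cur := n
	for cur.parent != nil && cur.parent.payloadHash != lastValidHash {
		cur = cur.parent
	}
	if cur.parent == nil {
		return n // last valid block is not an ancestor: invalidate only n
	}
	return cur
}

func main() {
	a := &node{root: "a", payloadHash: "A"}
	b := &node{root: "b", payloadHash: "B", parent: a}
	c := &node{root: "c", payloadHash: "C", parent: b}
	fmt.Println(firstInvalid(c, "A").root) // "b": everything from b down is invalid
}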

View File

@@ -24,71 +24,32 @@ import (
func TestPruneInvalid(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new INVALID block
payload [32]byte // the last valid hash
wantedNodeNumber int
wantedRoots [][32]byte
}{
{
[32]byte{'j'},
[32]byte{'B'},
12,
[][32]byte{[32]byte{'j'}},
},
{
[32]byte{'c'},
[32]byte{'B'},
4,
[][32]byte{[32]byte{'f'}, [32]byte{'e'}, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'},
[32]byte{'k'}, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'c'}},
},
{
[32]byte{'i'},
[32]byte{'H'},
12,
[][32]byte{[32]byte{'i'}},
},
{
[32]byte{'h'},
[32]byte{'G'},
11,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}},
},
{
[32]byte{'g'},
[32]byte{'D'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
{
[32]byte{'i'},
[32]byte{'D'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
{
[32]byte{'f'},
[32]byte{'D'},
11,
[][32]byte{[32]byte{'f'}, [32]byte{'e'}},
},
{
[32]byte{'h'},
[32]byte{'C'},
5,
[][32]byte{
[32]byte{'f'},
[32]byte{'e'},
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'l'},
[32]byte{'k'},
[32]byte{'g'},
[32]byte{'d'},
},
},
{
[32]byte{'g'},
[32]byte{'E'},
8,
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
},
@@ -97,45 +58,22 @@ func TestPruneInvalid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
roots, err := f.store.setOptimisticToInvalid(context.Background(), tc.root, tc.payload)
roots, err := f.store.removeNode(context.Background(), tc.root)
require.NoError(t, err)
require.DeepEqual(t, tc.wantedRoots, roots)
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
}
}
// This is a regression test (10445)
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{'c'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
f.store.proposerBoostLock.Unlock()
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
require.NoError(t, err)
f.store.proposerBoostLock.RLock()
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}

View File

@@ -121,7 +121,6 @@ func (s *Store) insert(ctx context.Context,
payloadHash: payloadHash,
}
s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent != nil {
parent.children = append(parent.children, n)

View File

@@ -107,8 +107,7 @@ func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
payloadHash := [32]byte{'a'}
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1))
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")

View File

@@ -26,7 +26,6 @@ type Store struct {
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}

View File

@@ -24,7 +24,7 @@ type ForkChoicer interface {
type HeadRetriever interface {
Head(context.Context, types.Epoch, [32]byte, []uint64, types.Epoch) ([32]byte, error)
Tips() ([][32]byte, []types.Slot)
IsOptimistic(root [32]byte) (bool, error)
IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
}
// BlockProcessor processes the block that's used for accounting fork choice.
@@ -71,5 +71,5 @@ type Getter interface {
// Setter allows to set forkchoice information
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte) ([][32]byte, error)
}
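
Both forkchoice implementations now satisfy these context-aware signatures, so callers can stay implementation-agnostic. A minimal illustration of coding against the narrowed surface (the local interface and fake below are example-only, not imports of this package):

package main

import (
	"context"
	"fmt"
)

// optimisticChecker is the slice of the forkchoice API a caller needs here.
type optimisticChecker interface {
	IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
}

type fakeForkchoice struct{ optimisticRoots map[[32]byte]bool }

func (f fakeForkchoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
	return f.optimisticRoots[root], nil
}

// headIsOptimistic works with any implementation, protoarray or doubly linked tree.
func headIsOptimistic(ctx context.Context, fc optimisticChecker, head [32]byte) bool {
	opt, err := fc.IsOptimistic(ctx, head)
	return err == nil && opt
}

func main() {
	fc := fakeForkchoice{optimisticRoots: map[[32]byte]bool{{'h'}: true}}
	fmt.Println(headIsOptimistic(context.Background(), fc, [32]byte{'h'})) // true
}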

View File

@@ -22,14 +22,12 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -5,11 +5,11 @@ import "errors"
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidNodeIndex = errors.New("node index is invalid")
var errInvalidFinalizedNode = errors.New("invalid finalized block on chain")
var ErrUnknownNodeRoot = errors.New("unknown block root")
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
var errInvalidBestChildIndex = errors.New("best child index is invalid")
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")
var errInvalidParentDelta = errors.New("parent delta is invalid")
var errInvalidNodeDelta = errors.New("node delta is invalid")
var errInvalidDeltaLength = errors.New("delta length is invalid")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errInvalidSyncedTips = errors.New("invalid synced tips")

View File

@@ -85,9 +85,12 @@ func copyNode(node *Node) *Node {
return &Node{}
}
copiedRoot := [32]byte{}
copy(copiedRoot[:], node.root[:])
return &Node{
slot: node.slot,
root: node.root,
root: copiedRoot,
parent: node.parent,
payloadHash: node.payloadHash,
justifiedEpoch: node.justifiedEpoch,
@@ -95,6 +98,5 @@ func copyNode(node *Node) *Node {
weight: node.weight,
bestChild: node.bestChild,
bestDescendant: node.bestDescendant,
status: node.status,
}
}

View File

@@ -3,12 +3,9 @@ package protoarray
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "forkchoice-protoarray")
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_head_slot",
@@ -51,10 +48,16 @@ var (
Help: "The number of times pruning happened.",
},
)
validatedNodesCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proto_array_validated_nodes_count",
Help: "The number of nodes that have been fully validated.",
lastSyncedTipSlot = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_last_synced_tip_slot",
Help: "The slot of the last fully validated block added to the proto array.",
},
)
syncedTipsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_synced_tips_count",
Help: "The number of elements in the syncedTips structure.",
},
)
)

View File

@@ -43,3 +43,8 @@ func (n *Node) BestChild() uint64 {
func (n *Node) BestDescendant() uint64 {
return n.bestDescendant
}
// Graffiti of the fork choice node.
func (n *Node) Graffiti() [32]byte {
return n.graffiti
}

View File

@@ -16,6 +16,7 @@ func TestNode_Getters(t *testing.T) {
weight := uint64(10000)
bestChild := uint64(5)
bestDescendant := uint64(4)
graffiti := [32]byte{'b'}
n := &Node{
slot: slot,
root: root,
@@ -25,6 +26,7 @@ func TestNode_Getters(t *testing.T) {
weight: weight,
bestChild: bestChild,
bestDescendant: bestDescendant,
graffiti: graffiti,
}
require.Equal(t, slot, n.Slot())
@@ -35,4 +37,5 @@ func TestNode_Getters(t *testing.T) {
require.Equal(t, weight, n.Weight())
require.Equal(t, bestChild, n.BestChild())
require.Equal(t, bestDescendant, n.BestDescendant())
require.Equal(t, graffiti, n.Graffiti())
}

View File

@@ -3,160 +3,314 @@ package protoarray
import (
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
)
// This returns the minimum and maximum slot of the synced_tips tree
func (f *ForkChoice) boundarySyncedTips() (types.Slot, types.Slot) {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
min := params.BeaconConfig().FarFutureSlot
max := types.Slot(0)
for _, slot := range f.syncedTips.validatedTips {
if slot > max {
max = slot
}
if slot < min {
min = slot
}
}
return min, max
}
// IsOptimistic returns true if this node is optimistically synced
// An optimistically synced block is synced as usual, but its
// execution payload is not validated, while the EL is still syncing.
// This function returns an error if the block is not found in the fork choice
// store
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, error) {
if ctx.Err() != nil {
return false, ctx.Err()
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
index, ok := f.store.nodesIndices[root]
if !ok {
f.store.nodesLock.RUnlock()
return false, ErrUnknownNodeRoot
}
node := f.store.nodes[index]
return node.status == syncing, nil
slot := node.slot
// If the node is a synced tip, then it's fully validated
f.syncedTips.RLock()
_, ok = f.syncedTips.validatedTips[root]
if ok {
f.syncedTips.RUnlock()
f.store.nodesLock.RUnlock()
return false, nil
}
f.syncedTips.RUnlock()
// If the slot is higher than the max synced tip, it's optimistic
min, max := f.boundarySyncedTips()
if slot > max {
f.store.nodesLock.RUnlock()
return true, nil
}
// If the slot is lower than the min synced tip, it's fully validated
if slot <= min {
f.store.nodesLock.RUnlock()
return false, nil
}
// if the node is a leaf of the Fork Choice tree, then it's
// optimistic
childIndex := node.BestChild()
if childIndex == NonExistentNode {
f.store.nodesLock.RUnlock()
return true, nil
}
// recurse to the child
child := f.store.nodes[childIndex]
root = child.root
f.store.nodesLock.RUnlock()
return f.IsOptimistic(ctx, root)
}
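
The replacement logic above derives optimistic status from the synced-tips map instead of a per-node flag: a root that is itself a tip is valid, anything above the highest tip slot is optimistic, anything at or below the lowest tip slot is valid, and in between the answer comes from following best children toward a leaf. A condensed sketch of that ordering (toy types; the recursive middle case is simplified to a constant here):

package main

import "fmt"

type tipSet struct{ slots map[string]uint64 }

func (t tipSet) bounds() (uint64, uint64) {
	lo, hi := ^uint64(0), uint64(0)
	for _, s := range t.slots {
		if s < lo {
			lo = s
		}
		if s > hi {
			hi = s
		}
	}
	return lo, hi
}

// isOptimistic reproduces the ordering of checks in the code above:
// tip membership first, then the slot-boundary shortcuts.
func isOptimistic(root string, slot uint64, tips tipSet) bool {
	if _, ok := tips.slots[root]; ok {
		return false // a synced tip is fully validated
	}
	lo, hi := tips.bounds()
	if slot > hi {
		return true // newer than every validated tip
	}
	if slot <= lo {
		return false // at or below the oldest validated tip
	}
	return true // simplified: the real code recurses via best children
}

func main() {
	tips := tipSet{slots: map[string]uint64{"b": 101, "c": 102, "d": 103}}
	fmt.Println(isOptimistic("e", 103, tips), isOptimistic("a", 100, tips)) // true false
}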
// This function returns the index of the synced tip node that is an ancestor of the input node.
// In the event of none, `NonExistentNode` is returned.
// This internal method assumes the caller holds a lock on syncedTips and s.nodesLock
func (s *Store) findSyncedTip(ctx context.Context, node *Node, syncedTips *optimisticStore) (uint64, error) {
for {
if ctx.Err() != nil {
return 0, ctx.Err()
}
if _, ok := syncedTips.validatedTips[node.root]; ok {
return s.nodesIndices[node.root], nil
}
if node.parent == NonExistentNode {
return NonExistentNode, nil
}
node = s.nodes[node.parent]
}
}
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL.
// WARNING: This method returns an error if the root is not found in forkchoice
// VALID by the EL. This routine recomputes and updates the synced_tips map to
// account for this new tip.
// WARNING: This method returns an error if the root is not found in forkchoice or
// if the root is not a leaf of the fork choice tree.
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.nodesLock.RLock()
// We can only update if given root is in Fork Choice
index, ok := f.store.nodesIndices[root]
if !ok {
return ErrUnknownNodeRoot
return errInvalidNodeIndex
}
node := f.store.nodes[index]
f.store.nodesLock.RUnlock()
for node := f.store.nodes[index]; node.status == syncing; node = f.store.nodes[index] {
// Stop early if the node is Valid
optimistic, err := f.IsOptimistic(ctx, root)
if err != nil {
return err
}
if !optimistic {
return nil
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
// Cache root and slot to validated tips
newTips := make(map[[32]byte]types.Slot)
newValidSlot := node.slot
newTips[root] = newValidSlot
// Compute the full valid path from the given node to its previous synced tip
// This path will now consist of fully validated blocks. Notice that
// the previous tip may have been outside the Fork Choice store.
// In this case, only one block can be in syncedTips as the whole
// Fork Choice would be a descendant of this block.
validPath := make(map[uint64]bool)
validPath[index] = true
for {
if ctx.Err() != nil {
return ctx.Err()
}
node.status = valid
index = node.parent
if index == NonExistentNode {
parentIndex := node.parent
if parentIndex == NonExistentNode {
break
}
validatedNodesCount.Inc()
if parentIndex >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[parentIndex]
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
validPath[parentIndex] = true
}
// Retrieve the list of leaves in the Fork Choice
// These are all the nodes that have NonExistentNode as best child.
leaves, err := f.store.leaves()
if err != nil {
return err
}
// For each leaf, recompute the new tip.
for _, i := range leaves {
node = f.store.nodes[i]
j := i
for {
if ctx.Err() != nil {
return ctx.Err()
}
// Stop if we reached the previous tip
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
newTips[node.root] = node.slot
break
}
// Stop if we reach valid path
_, ok = validPath[j]
if ok {
newTips[node.root] = node.slot
break
}
j = node.parent
if j == NonExistentNode {
break
}
if j >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[j]
}
}
f.syncedTips.validatedTips = newTips
lastSyncedTipSlot.Set(float64(newValidSlot))
syncedTipsCount.Set(float64(len(newTips)))
return nil
}
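
The algorithm added above runs in two phases: mark the path from the newly VALID block back to the previous tip as fully valid, then walk up from every leaf until hitting that path or an old tip, and record where each walk stops as the new tip set. A stripped-down sketch of the two phases on a parent-pointer tree (toy types, not the flat node array used by this store):

package main

import "fmt"

type blk struct {
	root   string
	slot   uint64
	parent *blk
}

// newTips recomputes the validated-tips set after "valid" turns VALID:
// phase 1 marks the path from it up to a previous tip as valid, phase 2
// walks up from every leaf and records where each walk first meets that
// path (or an old tip).
func newTips(valid *blk, leaves []*blk, oldTips map[string]uint64) map[string]uint64 {
	validPath := map[string]bool{}
	for b := valid; b != nil; b = b.parent {
		if _, isTip := oldTips[b.root]; isTip {
			break
		}
		validPath[b.root] = true
	}
	tips := map[string]uint64{}
	for _, leaf := range leaves {
		for b := leaf; b != nil; b = b.parent {
			_, isOldTip := oldTips[b.root]
			if validPath[b.root] || isOldTip {
				tips[b.root] = b.slot
				break
			}
		}
	}
	return tips
}

func main() {
	a := &blk{root: "a", slot: 100}
	b := &blk{root: "b", slot: 101, parent: a}
	c := &blk{root: "c", slot: 102, parent: b}
	d := &blk{root: "d", slot: 103, parent: c}
	e := &blk{root: "e", slot: 103, parent: c} // sibling fork off c, still optimistic
	// map[c:102 d:103]: d becomes the new validated tip, c stays a tip for e's branch.
	fmt.Println(newTips(d, []*blk{d, e}, map[string]uint64{"b": 101}))
}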
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
// It takes two parameters: the root of the INVALID block and the payload Hash
// of the last valid block.
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [32]byte) ([][32]byte, error) {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
invalidRoots := make([][32]byte, 0)
// We only support setting invalid a node existing in Forkchoice
invalidIndex, ok := f.store.nodesIndices[root]
idx, ok := f.store.nodesIndices[root]
if !ok {
return invalidRoots, ErrUnknownNodeRoot
return invalidRoots, errInvalidNodeIndex
}
node := f.store.nodes[invalidIndex]
lastValidIndex, ok := f.store.payloadIndices[payloadHash]
if !ok || lastValidIndex == NonExistentNode {
return invalidRoots, errInvalidFinalizedNode
node := f.store.nodes[idx]
// We only support changing status for the tips in Fork Choice store.
if node.bestChild != NonExistentNode {
return invalidRoots, errInvalidNodeIndex
}
// Check if last valid hash is an ancestor of the passed node
firstInvalidIndex := node.parent
for ; firstInvalidIndex != NonExistentNode && firstInvalidIndex != lastValidIndex; firstInvalidIndex = node.parent {
node = f.store.nodes[firstInvalidIndex]
parentIndex := node.parent
// This should not happen
if parentIndex == NonExistentNode {
return invalidRoots, errInvalidNodeIndex
}
// if the last valid hash is not an ancestor of the invalid block, we
// just remove the invalid block.
if node.parent != lastValidIndex {
node = f.store.nodes[invalidIndex]
firstInvalidIndex = invalidIndex
lastValidIndex = node.parent
if lastValidIndex == NonExistentNode {
return invalidRoots, errInvalidFinalizedNode
}
} else {
firstInvalidIndex = f.store.nodesIndices[node.root]
}
// Update the weights of the nodes subtracting the first INVALID node's weight
// Update the weights of the nodes subtracting the INVALID node's weight
weight := node.weight
var validNode *Node
for index := lastValidIndex; index != NonExistentNode; index = validNode.parent {
validNode = f.store.nodes[index]
validNode.weight -= weight
node = f.store.nodes[parentIndex]
for {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
node.weight -= weight
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
}
parent := copyNode(f.store.nodes[parentIndex])
// Find the current proposer boost (it should be set to zero if an
// INVALID block was boosted)
f.store.proposerBoostLock.RLock()
boostRoot := f.store.proposerBoostRoot
previousBoostRoot := f.store.previousProposerBoostRoot
f.store.proposerBoostLock.RUnlock()
// Remove the invalid roots from our store maps and adjust their weight
// to zero
boosted := node.root == boostRoot
previouslyBoosted := node.root == previousBoostRoot
invalidIndices := map[uint64]bool{firstInvalidIndex: true}
node.status = invalid
node.weight = 0
delete(f.store.nodesIndices, node.root)
delete(f.store.canonicalNodes, node.root)
delete(f.store.payloadIndices, node.payloadHash)
for index := firstInvalidIndex + 1; index < uint64(len(f.store.nodes)); index++ {
invalidNode := f.store.nodes[index]
if _, ok := invalidIndices[invalidNode.parent]; !ok {
continue
// delete the invalid node, order is important
f.store.nodes = append(f.store.nodes[:idx], f.store.nodes[idx+1:]...)
delete(f.store.nodesIndices, root)
invalidRoots = append(invalidRoots, root)
// Fix parent and best child for each node
for _, node := range f.store.nodes {
if node.parent == NonExistentNode {
node.parent = NonExistentNode
} else if node.parent > idx {
node.parent -= 1
}
if invalidNode.status == valid {
return invalidRoots, errInvalidOptimisticStatus
if node.bestChild == NonExistentNode || node.bestChild == idx {
node.bestChild = NonExistentNode
} else if node.bestChild > idx {
node.bestChild -= 1
}
if !boosted && invalidNode.root == boostRoot {
boosted = true
}
if !previouslyBoosted && invalidNode.root == previousBoostRoot {
previouslyBoosted = true
}
invalidNode.status = invalid
invalidIndices[index] = true
invalidNode.weight = 0
delete(f.store.nodesIndices, invalidNode.root)
delete(f.store.canonicalNodes, invalidNode.root)
delete(f.store.payloadIndices, invalidNode.payloadHash)
}
if boosted {
if err := f.ResetBoostedProposerRoot(ctx); err != nil {
return invalidRoots, err
if node.bestDescendant == NonExistentNode || node.bestDescendant == idx {
node.bestDescendant = NonExistentNode
} else if node.bestDescendant > idx {
node.bestDescendant -= 1
}
}
if previouslyBoosted {
f.store.proposerBoostLock.Lock()
f.store.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
f.store.previousProposerBoostScore = 0
f.store.proposerBoostLock.Unlock()
}
for index := range invalidIndices {
invalidRoots = append(invalidRoots, f.store.nodes[index].root)
}
// Update the best child and descendant
for i := len(f.store.nodes) - 1; i >= 0; i-- {
n := f.store.nodes[i]
if n.parent != NonExistentNode {
if err := f.store.updateBestChildAndDescendant(n.parent, uint64(i)); err != nil {
return invalidRoots, err
// Update the parent's best child and best descendant if necessary.
if parent.bestChild == idx || parent.bestDescendant == idx {
for childIndex, child := range f.store.nodes {
if child.parent == parentIndex {
err := f.store.updateBestChildAndDescendant(
parentIndex, uint64(childIndex))
if err != nil {
return invalidRoots, err
}
break
}
}
}
// Return early if the parent is not a synced_tip.
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
parentRoot := parent.root
_, ok = f.syncedTips.validatedTips[parentRoot]
if !ok {
return invalidRoots, nil
}
leaves, err := f.store.leaves()
if err != nil {
return invalidRoots, err
}
for _, i := range leaves {
node = f.store.nodes[i]
for {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
// Return early if the parent is still a synced tip
if node.root == parentRoot {
return invalidRoots, nil
}
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
}
}
delete(f.syncedTips.validatedTips, parentRoot)
syncedTipsCount.Set(float64(len(f.syncedTips.validatedTips)))
return invalidRoots, nil
}
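
Because protoarray keeps nodes in a flat slice addressed by index, deleting the INVALID node forces a renumbering pass: stored parent / bestChild / bestDescendant indices greater than the removed position shift down by one, and references to the removed index become NonExistentNode. A minimal sketch of that fix-up (toy struct, same arithmetic):

package main

import "fmt"

const nonExistent = ^uint64(0)

type node struct{ parent, bestChild, bestDescendant uint64 }

// fixIndex rewrites a stored index after the node at position removed was deleted.
func fixIndex(i, removed uint64) uint64 {
	switch {
	case i == nonExistent || i == removed:
		return nonExistent // the referenced node is gone (or was never set)
	case i > removed:
		return i - 1 // everything after the hole shifts left by one
	default:
		return i
	}
}

func main() {
	nodes := []node{
		{parent: nonExistent, bestChild: 1, bestDescendant: 3},
		{parent: 0, bestChild: 2, bestDescendant: 2},
		{parent: 1, bestChild: nonExistent, bestDescendant: nonExistent},
		{parent: 1, bestChild: nonExistent, bestDescendant: nonExistent},
	}
	removed := uint64(2)
	nodes = append(nodes[:removed], nodes[removed+1:]...) // delete, order preserved
	for i := range nodes {
		nodes[i].parent = fixIndex(nodes[i].parent, removed)
		nodes[i].bestChild = fixIndex(nodes[i].bestChild, removed)
		nodes[i].bestDescendant = fixIndex(nodes[i].bestDescendant, removed)
	}
	fmt.Println(nodes)
}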

View File

@@ -10,38 +10,116 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
)
func slicesEqual(a, b [][32]byte) bool {
if len(a) != len(b) {
return false
}
// We test the algorithm to check the optimistic status of a node. The
// setup for this test is the following branching diagram
//
// -- E -- F
// /
// -- C -- D
// /
// 0 -- 1 -- A -- B -- J -- K
// \ /
// -- G -- H -- I
//
// Here nodes 0, 1, A, B, C, D are fully validated and nodes
// E, F, G, H, J, K are optimistic.
// Synced Tips are nodes B, C, D
// nodes 0 and 1 are outside the Fork Choice Store.
mapA := make(map[[32]byte]bool, len(a))
for _, root := range a {
mapA[root] = true
}
for _, root := range b {
_, ok := mapA[root]
if !ok {
return false
}
}
return true
}
func TestOptimistic_Outside_ForkChoice(t *testing.T) {
func TestOptimistic(t *testing.T) {
root0 := bytesutil.ToBytes32([]byte("hello0"))
root1 := bytesutil.ToBytes32([]byte("hello1"))
nodeA := &Node{
slot: types.Slot(100),
root: bytesutil.ToBytes32([]byte("helloA")),
bestChild: 1,
status: valid,
}
nodeB := &Node{
slot: types.Slot(101),
root: bytesutil.ToBytes32([]byte("helloB")),
bestChild: 2,
parent: 0,
}
nodeC := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloC")),
bestChild: 3,
parent: 1,
}
nodeD := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloD")),
bestChild: NonExistentNode,
parent: 2,
}
nodeE := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloE")),
bestChild: 5,
parent: 2,
}
nodeF := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloF")),
bestChild: NonExistentNode,
parent: 4,
}
nodeG := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloG")),
bestChild: 7,
parent: 1,
}
nodeH := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloH")),
bestChild: 8,
parent: 6,
}
nodeI := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloI")),
bestChild: NonExistentNode,
parent: 7,
}
nodeJ := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloJ")),
bestChild: 10,
parent: 6,
}
nodeK := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloK")),
bestChild: NonExistentNode,
parent: 9,
}
nodes := []*Node{
nodeA,
nodeB,
nodeC,
nodeD,
nodeE,
nodeF,
nodeG,
nodeH,
nodeI,
nodeJ,
nodeK,
}
ni := map[[32]byte]uint64{
nodeA.root: 0,
nodeB.root: 1,
nodeC.root: 2,
nodeD.root: 3,
nodeE.root: 4,
nodeF.root: 5,
nodeG.root: 6,
nodeH.root: 7,
nodeI.root: 8,
nodeJ.root: 9,
nodeK.root: 10,
}
s := &Store{
@@ -49,14 +127,82 @@ func TestOptimistic_Outside_ForkChoice(t *testing.T) {
nodesIndices: ni,
}
f := &ForkChoice{
store: s,
tips := map[[32]byte]types.Slot{
nodeB.root: nodeB.slot,
nodeC.root: nodeC.slot,
nodeD.root: nodeD.slot,
}
_, err := f.IsOptimistic(root0)
st := &optimisticStore{
validatedTips: tips,
}
f := &ForkChoice{
store: s,
syncedTips: st,
}
ctx := context.Background()
// We test the implementation of boundarySyncedTips
min, max := f.boundarySyncedTips()
require.Equal(t, min, types.Slot(101), "minimum tip slot is different")
require.Equal(t, max, types.Slot(103), "maximum tip slot is different")
// We test first nodes outside the Fork Choice store
_, err := f.IsOptimistic(ctx, root0)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
_, err = f.IsOptimistic(ctx, root1)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
// We check all nodes in the Fork Choice store.
op, err := f.IsOptimistic(ctx, nodeA.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeB.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeC.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeD.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeE.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeF.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeG.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeH.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeI.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeJ.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeK.root)
require.NoError(t, err)
require.Equal(t, op, true)
// request a write lock on syncedTips (regression #10289)
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
}
// This tests the algorithm to update optimistic Status
// This tests the algorithm to update syncedTips
// We start with the following diagram
//
// E -- F
@@ -67,105 +213,165 @@ func TestOptimistic_Outside_ForkChoice(t *testing.T) {
// \ \
// J -- K -- L
//
// The Chain A -- B -- C -- D -- E is VALID.
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice
//
func TestSetOptimisticToValid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
tests := []struct {
root [32]byte // the root of the new VALID block
testRoot [32]byte // root of the node we will test optimistic status
wantedOptimistic bool // wanted optimistic status for tested node
wantedErr error // wanted error message
root [32]byte // the root of the new VALID block
tips map[[32]byte]types.Slot // the old synced tips
newTips map[[32]byte]types.Slot // the updated synced tips
wantedErr error
}{
{
[32]byte{'i'},
[32]byte{'i'},
false,
map[[32]byte]types.Slot{[32]byte{'z'}: 90},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'i'},
[32]byte{'f'},
true,
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'i'},
[32]byte{'b'},
false,
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'e'}: 104,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
[32]byte{'j'}: 102,
},
nil,
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'i'},
[32]byte{'h'},
false,
map[[32]byte]types.Slot{
[32]byte{'z'}: 90,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'h'}: 105,
},
nil,
},
{
[32]byte{'b'},
[32]byte{'b'},
false,
nil,
},
{
[32]byte{'b'},
[32]byte{'h'},
true,
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'b'},
[32]byte{'a'},
false,
nil,
},
{
[32]byte{'k'},
[32]byte{'k'},
false,
nil,
},
{
[32]byte{'k'},
[32]byte{'l'},
true,
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 104,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'e'}: 104,
[32]byte{'g'}: 104,
},
nil,
},
{
[32]byte{'p'},
[32]byte{},
false,
ErrUnknownNodeRoot,
map[[32]byte]types.Slot{},
map[[32]byte]types.Slot{},
errInvalidNodeIndex,
},
}
for _, tc := range tests {
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{'e'}))
optimistic, err := f.IsOptimistic([32]byte{'b'})
require.NoError(t, err)
require.Equal(t, false, optimistic)
err = f.SetOptimisticToValid(context.Background(), tc.root)
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
err := f.SetOptimisticToValid(context.Background(), tc.root)
if tc.wantedErr != nil {
require.ErrorIs(t, err, tc.wantedErr)
} else {
require.NoError(t, err)
optimistic, err := f.IsOptimistic(tc.testRoot)
require.NoError(t, err)
require.Equal(t, tc.wantedOptimistic, optimistic)
f.syncedTips.RLock()
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newTips)
f.syncedTips.RUnlock()
}
}
}
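For orientation only: the wantedOptimistic column above follows a single rule, namely that validating a block also validates every optimistic ancestor on its chain, while siblings and their descendants stay optimistic. A minimal standalone sketch of that ancestor walk, using a simplified node type rather than the store's indices:

package main

import "fmt"

type node struct {
    parent     int // index of the parent, -1 for none
    optimistic bool
}

// setValid marks nodes[i] and all of its still-optimistic ancestors as validated.
func setValid(nodes []node, i int) {
    for i != -1 && nodes[i].optimistic {
        nodes[i].optimistic = false
        i = nodes[i].parent
    }
}

func main() {
    // a(0) <- b(1) <- c(2), all optimistic.
    nodes := []node{{parent: -1, optimistic: true}, {parent: 0, optimistic: true}, {parent: 1, optimistic: true}}
    setValid(nodes, 2)
    fmt.Println(nodes[0].optimistic, nodes[1].optimistic, nodes[2].optimistic) // false false false
}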
@@ -181,81 +387,70 @@ func TestSetOptimisticToValid(t *testing.T) {
// \ \
// J(1) -- K(1) -- L(0)
//
// And the chain A -- B -- C -- D -- E has been fully validated. The numbers in parentheses are
// the weights of the nodes.
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice. The numbers in parentheses are
// the weights of the nodes before removal.
//
func TestSetOptimisticToInvalid(t *testing.T) {
tests := []struct {
name string // test description
root [32]byte // the root of the new INVALID block
payload [32]byte // the payload of the last valid hash
root [32]byte // the root of the new INVALID block
tips map[[32]byte]types.Slot // the old synced tips
wantedParentTip bool
newBestChild uint64
newBestDescendant uint64
newParentWeight uint64
returnedRoots [][32]byte
}{
{
"Remove tip, parent was valid",
[32]byte{'j'},
[32]byte{'B'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
3,
12,
4,
8,
[][32]byte{[32]byte{'j'}},
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
},
true,
3,
4,
8,
[][32]byte{[32]byte{'j'}},
},
{
"Remove tip, parent was optimistic",
[32]byte{'i'},
[32]byte{'H'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'h'}: 105,
},
true,
NonExistentNode,
NonExistentNode,
1,
[][32]byte{[32]byte{'i'}},
},
{
"Remove tip, lvh is inner and valid",
[32]byte{'i'},
[32]byte{'D'},
6,
8,
3,
[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
},
{
"Remove inner, lvh is inner and optimistic",
[32]byte{'h'},
[32]byte{'G'},
10,
12,
2,
[][32]byte{[32]byte{'h'}, [32]byte{'i'}},
},
{
"Remove tip, lvh is inner and optimistic",
[32]byte{'l'},
[32]byte{'G'},
9,
11,
2,
[][32]byte{[32]byte{'k'}, [32]byte{'l'}},
},
{
"Remove tip, lvh is not an ancestor",
[32]byte{'j'},
[32]byte{'C'},
5,
12,
7,
[][32]byte{[32]byte{'j'}},
},
{
"Remove inner, lvh is not an ancestor",
[32]byte{'g'},
[32]byte{'J'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
NonExistentNode,
NonExistentNode,
1,
[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
[][32]byte{[32]byte{'i'}},
},
}
for _, tc := range tests {
@@ -263,71 +458,184 @@ func TestSetOptimisticToInvalid(t *testing.T) {
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
f.store.nodesLock.Lock()
for i, node := range f.store.nodes {
node.weight = weights[i]
}
// Make j be the best child and descendant of b
nodeB := f.store.nodes[2]
nodeB.bestChild = 4
nodeB.bestDescendant = 4
idx := f.store.nodesIndices[tc.root]
node := f.store.nodes[idx]
parentIndex := node.parent
require.NotEqual(t, NonExistentNode, parentIndex)
parent := f.store.nodes[parentIndex]
f.store.nodesLock.Unlock()
require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{'e'}))
roots, err := f.SetOptimisticToInvalid(ctx, tc.root, tc.payload)
roots, err := f.SetOptimisticToInvalid(context.Background(), tc.root)
require.NoError(t, err)
f.store.nodesLock.RLock()
_, ok := f.store.nodesIndices[tc.root]
require.Equal(t, false, ok)
lvh := f.store.nodes[f.store.payloadIndices[tc.payload]]
require.Equal(t, true, slicesEqual(tc.returnedRoots, roots))
require.Equal(t, tc.newBestChild, lvh.bestChild)
require.Equal(t, tc.newBestDescendant, lvh.bestDescendant)
require.Equal(t, tc.newParentWeight, lvh.weight)
require.Equal(t, syncing, f.store.nodes[8].status /* F */)
require.Equal(t, valid, f.store.nodes[5].status /* E */)
f.store.nodesLock.RUnlock()
require.DeepEqual(t, tc.returnedRoots, roots)
f.syncedTips.RLock()
_, parentSyncedTip := f.syncedTips.validatedTips[parent.root]
f.syncedTips.RUnlock()
require.Equal(t, tc.wantedParentTip, parentSyncedTip)
require.Equal(t, tc.newBestChild, parent.bestChild)
require.Equal(t, tc.newBestDescendant, parent.bestDescendant)
require.Equal(t, tc.newParentWeight, parent.weight)
}
}
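The returnedRoots column above is, in essence, the invalidated subtree. A toy version of that removal, kept deliberately small (string keys instead of 32-byte roots, and without the best-child and weight bookkeeping the assertions above also check):

package main

import "fmt"

type blk struct {
    children []string
}

// removeSubtree deletes root and all of its descendants from tree and
// returns the removed roots, i.e. what an invalidation call would hand back.
func removeSubtree(tree map[string]*blk, root string) []string {
    var removed []string
    stack := []string{root}
    for len(stack) > 0 {
        r := stack[len(stack)-1]
        stack = stack[:len(stack)-1]
        n, ok := tree[r]
        if !ok {
            continue
        }
        removed = append(removed, r)
        stack = append(stack, n.children...)
        delete(tree, r)
    }
    return removed
}

func main() {
    tree := map[string]*blk{
        "g": {children: []string{"h", "k"}},
        "h": {children: []string{"i"}},
        "k": {children: []string{"l"}},
        "i": {}, "l": {},
    }
    fmt.Println(removeSubtree(tree, "g")) // g plus every descendant
}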
func TestSetOptimisticToInvalid_InvalidRoots(t *testing.T) {
// This tests the algorithm to find the synced tip of a given node
// We start with the following diagram
//
// E -- F
// /
// C -- D
// / \
// A -- B G -- H -- I
// \ \
// J -- K -- L
//
//
func TestFindSyncedTip(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'p'}, [32]byte{'B'})
require.ErrorIs(t, ErrUnknownNodeRoot, err)
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'a'}, [32]byte{'p'})
require.ErrorIs(t, errInvalidFinalizedNode, err)
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
tests := []struct {
root [32]byte // the root of the block
tips map[[32]byte]types.Slot // the synced tips
wanted [32]byte // the root of expected tip
}{
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
[32]byte{'g'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'h'}: 104,
[32]byte{'k'}: 106,
},
[32]byte{'d'},
},
{
[32]byte{'e'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 103,
},
[32]byte{'d'},
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'b'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'g'},
},
}
for _, tc := range tests {
f.store.nodesLock.RLock()
node := f.store.nodes[f.store.nodesIndices[tc.root]]
syncedTips := &optimisticStore{
validatedTips: tc.tips,
}
syncedTips.RLock()
idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
require.NoError(t, err)
require.Equal(t, tc.wanted, f.store.nodes[idx].root)
f.store.nodesLock.RUnlock()
syncedTips.RUnlock()
}
}
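Each expected value above is simply the nearest ancestor (the node itself included) whose root appears in the synced tips. A standalone sketch of that walk with simplified types, not the store's index-based lookup:

package main

import (
    "errors"
    "fmt"
)

type n struct {
    root   string
    parent *n
}

// findSyncedTip walks parent pointers from node until it reaches a root
// recorded in tips, returning that root.
func findSyncedTip(node *n, tips map[string]bool) (string, error) {
    for cur := node; cur != nil; cur = cur.parent {
        if tips[cur.root] {
            return cur.root, nil
        }
    }
    return "", errors.New("no synced tip found on this branch")
}

func main() {
    a := &n{root: "a"}
    b := &n{root: "b", parent: a}
    d := &n{root: "d", parent: b}
    i := &n{root: "i", parent: d}
    tip, _ := findSyncedTip(i, map[string]bool{"b": true, "d": true})
    fmt.Println(tip) // d, the nearest synced-tip ancestor
}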
// This is a regression test (10445)
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
// This is a regression test (10341)
func TestIsOptimistic_DeadLock(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{'c'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
f.store.proposerBoostLock.Unlock()
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
tips := map[[32]byte]types.Slot{
[32]byte{'a'}: 100,
[32]byte{'d'}: 102,
}
f.syncedTips.validatedTips = tips
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
require.NoError(t, err)
f.store.proposerBoostLock.RLock()
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
require.NoError(t, err)
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
require.NoError(t, err)
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'c'})
require.NoError(t, err)
// Acquire a write lock; this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
}

View File

@@ -9,10 +9,8 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -32,14 +30,43 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch, finalizedRoot [32]byte) *Fo
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
pruneThreshold: defaultPruneThreshold,
}
b := make([]uint64, 0)
v := make([]Vote, 0)
return &ForkChoice{store: s, balances: b, votes: v}
st := &optimisticStore{
validatedTips: make(map[[32]byte]types.Slot),
}
return &ForkChoice{store: s, balances: b, votes: v, syncedTips: st}
}
// SetSyncedTips sets the synced and validated tips from the passed map
func (f *ForkChoice) SetSyncedTips(tips map[[32]byte]types.Slot) error {
if len(tips) == 0 {
return errInvalidSyncedTips
}
newTips := make(map[[32]byte]types.Slot, len(tips))
for k, v := range tips {
newTips[k] = v
}
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
f.syncedTips.validatedTips = newTips
return nil
}
// SyncedTips returns the synced and validated tips from the fork choice store.
func (f *ForkChoice) SyncedTips() map[[32]byte]types.Slot {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
m := make(map[[32]byte]types.Slot)
for k, v := range f.syncedTips.validatedTips {
m[k] = v
}
return m
}
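Both accessors copy the map on the way in and out, so callers never alias the internal state. A rough usage sketch; the package path and the SetSyncedTips/SyncedTips accessors are assumed from this hunk, and the snippet is illustrative rather than part of the change:

package main

import (
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
)

func main() {
    f := protoarray.New(1, 1, [32]byte{'a'})
    tips := map[[32]byte]types.Slot{{'a'}: 100}
    if err := f.SetSyncedTips(tips); err != nil { // an empty map is rejected with errInvalidSyncedTips
        panic(err)
    }
    got := f.SyncedTips() // a copy; mutating it does not touch fork choice state
    got[[32]byte{'b'}] = 101
}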
// Head returns the head root from fork choice store.
@@ -132,7 +159,7 @@ func (f *ForkChoice) InsertOptimisticBlock(
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different from the current store finalized root, and the number of nodes in the store has met the prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot)
return f.store.prune(ctx, finalizedRoot, f.syncedTips)
}
// HasNode returns true if the node exists in fork choice store,
@@ -348,7 +375,6 @@ func (s *Store) insert(ctx context.Context,
}
s.nodesIndices[root] = index
s.payloadIndices[payloadHash] = index
s.nodes = append(s.nodes, n)
// Update parent with the best child and descendant only if it's available.
@@ -428,16 +454,6 @@ func (s *Store) applyWeightChanges(
if nodeDelta < 0 {
d := uint64(-nodeDelta)
if n.weight < d {
s.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeDelta": d,
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(n.root[:])),
"nodeWeight": n.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.previousProposerBoostRoot[:])),
"previousProposerBoostScore": s.previousProposerBoostScore,
}).Warning("node with invalid weight, setting it to zero")
s.proposerBoostLock.RUnlock()
n.weight = 0
} else {
n.weight -= d
@@ -583,7 +599,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
// prune prunes the store with the new finalized root. The tree is only
// pruned if the input finalized root is different from the one stored and
// the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *optimisticStore) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
defer span.End()
@@ -603,9 +619,18 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
return nil
}
// Traverse through the node list starting from the finalized node at index 0.
// Nodes that are not branching off from the finalized node will be removed.
syncedTips.Lock()
defer syncedTips.Unlock()
canonicalNodesMap := make(map[uint64]uint64, uint64(len(s.nodes))-finalizedIndex)
canonicalNodes := make([]*Node, 1, uint64(len(s.nodes))-finalizedIndex)
finalizedNode := s.nodes[finalizedIndex]
finalizedTipIndex, err := s.findSyncedTip(ctx, finalizedNode, syncedTips)
if err != nil {
return err
}
finalizedNode.parent = NonExistentNode
canonicalNodes[0] = finalizedNode
canonicalNodesMap[finalizedIndex] = uint64(0)
@@ -621,6 +646,10 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
} else {
// Remove nodes and synced tips that are not part of the finalized branch.
delete(s.nodesIndices, node.root)
_, ok := syncedTips.validatedTips[node.root]
if ok && idx != finalizedTipIndex {
delete(syncedTips.validatedTips, node.root)
}
}
}
s.nodesIndices[finalizedRoot] = uint64(0)
@@ -637,6 +666,7 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
s.nodes = canonicalNodes
prunedCount.Inc()
syncedTipsCount.Set(float64(len(syncedTips.validatedTips)))
return nil
}
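Seen from the synced-tips side, the pruning above reduces to: drop every tip whose block was pruned out of the store, except the tip that still covers the finalized node. A toy version of that filter (string keys, none of the index bookkeeping above):

package main

import "fmt"

// pruneTips drops every tip that is no longer in the surviving node set,
// except the tip that still covers the finalized node.
func pruneTips(tips map[string]uint64, surviving map[string]bool, finalizedTip string) {
    for root := range tips {
        if !surviving[root] && root != finalizedTip {
            delete(tips, root)
        }
    }
}

func main() {
    tips := map[string]uint64{"b": 101, "d": 103, "e": 104}
    // After finalizing f, only f survives in fork choice; e is the finalized branch's tip.
    pruneTips(tips, map[string]bool{"f": true}, "e")
    fmt.Println(tips) // map[e:104]
}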
@@ -644,10 +674,6 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
// Any node with a different finalized or justified epoch than the ones in the fork choice store
// should not be viable for head.
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
if node.status == invalid {
return false, nil
}
var bestDescendantViable bool
bestDescendantIndex := node.bestDescendant
@@ -679,6 +705,20 @@ func (s *Store) viableForHead(node *Node) bool {
return justified && finalized
}
// Returns the list of leaves in the Fork Choice store.
// These are all the nodes that have NonExistentNode as best child.
// This internal method assumes that the caller holds a lock on s.nodesLock.
func (s *Store) leaves() ([]uint64, error) {
var leaves []uint64
for i := uint64(0); i < uint64(len(s.nodes)); i++ {
node := s.nodes[i]
if node.bestChild == NonExistentNode {
leaves = append(leaves, i)
}
}
return leaves, nil
}
// Tips returns all possible chain heads (leaves of the fork choice tree).
// Head roots and head slots are returned.
func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {

View File

@@ -100,7 +100,7 @@ func TestStore_Head_ContextCancelled(t *testing.T) {
func TestStore_Insert_UnknownParent(t *testing.T) {
// The new node does not have a parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, params.BeaconConfig().ZeroHash, 1, 1))
assert.Equal(t, 1, len(s.nodes), "Did not insert block")
assert.Equal(t, 1, len(s.nodesIndices), "Did not insert block")
@@ -113,7 +113,7 @@ func TestStore_Insert_UnknownParent(t *testing.T) {
func TestStore_Insert_KnownParent(t *testing.T) {
// Similar to UnknownParent test, but this time the new node has a valid parent already in store.
// The new node builds on top of the parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s.nodes = []*Node{{}}
p := [32]byte{'B'}
s.nodesIndices[p] = 0
@@ -336,10 +336,11 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
})
s := &Store{nodes: nodes, nodesIndices: indices, pruneThreshold: 100}
syncedTips := &optimisticStore{}
// Finalized root is at index 99 so everything before 99 should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -376,9 +377,10 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
})
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -414,14 +416,15 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
})
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
require.NoError(t, s.prune(context.Background(), indexToHash(10), syncedTips))
assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")
// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
require.NoError(t, s.prune(context.Background(), indexToHash(20), syncedTips))
assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -457,6 +460,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestDescendant: NonExistentNode,
},
}
syncedTips := &optimisticStore{}
s := &Store{
pruneThreshold: 0,
nodes: nodes,
@@ -466,7 +470,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
indexToHash(uint64(2)): 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1)), syncedTips))
require.Equal(t, len(s.nodes), 1)
}
@@ -482,6 +486,9 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
// Synced tips are B, D and E, and we finalize F. All that is left in fork
// choice is F, and the only synced tip left is E, which is now outside of
// fork choice.
func TestStore_PruneSyncedTips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
@@ -498,9 +505,19 @@ func TestStore_PruneSyncedTips(t *testing.T) {
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
syncedTips := &optimisticStore{
validatedTips: map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 104,
},
}
f.syncedTips = syncedTips
f.store.pruneThreshold = 0
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
require.Equal(t, 1, f.NodeCount())
require.Equal(t, 1, len(f.syncedTips.validatedTips))
_, ok := f.syncedTips.validatedTips[[32]byte{'e'}]
require.Equal(t, true, ok)
}
func TestStore_LeadsToViableHead(t *testing.T) {
@@ -529,6 +546,20 @@ func TestStore_LeadsToViableHead(t *testing.T) {
}
}
func TestStore_SetSyncedTips(t *testing.T) {
f := setup(1, 1)
tips := make(map[[32]byte]types.Slot)
require.ErrorIs(t, errInvalidSyncedTips, f.SetSyncedTips(tips))
tips[bytesutil.ToBytes32([]byte{'a'})] = 1
require.NoError(t, f.SetSyncedTips(tips))
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
require.Equal(t, 1, len(f.syncedTips.validatedTips))
slot, ok := f.syncedTips.validatedTips[bytesutil.ToBytes32([]byte{'a'})]
require.Equal(t, true, ok)
require.Equal(t, types.Slot(1), slot)
}
func TestStore_ViableForHead(t *testing.T) {
tests := []struct {
n *Node

View File

@@ -9,10 +9,11 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validators' latest votes and balances.
type ForkChoice struct {
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
syncedTips *optimisticStore
}
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
@@ -27,7 +28,6 @@ type Store struct {
nodes []*Node // list of block nodes, each node is a representation of one block.
nodesIndices map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}
@@ -44,17 +44,15 @@ type Node struct {
weight uint64 // weight of this node.
bestChild uint64 // bestChild index of this node.
bestDescendant uint64 // bestDescendant of this node.
status status // optimistic status of this node
graffiti [fieldparams.RootLength]byte // graffiti of the block node.
}
// enum used as optimistic status of a node
type status uint8
const (
syncing status = iota // the node is optimistic
valid //fully validated node
invalid // invalid execution payload
)
// optimisticStore defines a structure that tracks the tips of the fully
// validated blocks tree.
type optimisticStore struct {
validatedTips map[[32]byte]types.Slot
sync.RWMutex
}
// Vote defines an individual validator's vote.
type Vote struct {

View File

@@ -12,6 +12,7 @@ import (
func TestVotes_CanFindHead(t *testing.T) {
balances := []uint64{1, 1}
f := setup(1, 1)
syncedTips := &optimisticStore{}
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
@@ -248,7 +249,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
@@ -272,7 +273,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)

View File

@@ -18,7 +18,6 @@ go_library(
"//api/gateway:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",

View File

@@ -21,7 +21,6 @@ import (
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
@@ -95,7 +94,6 @@ type BeaconNode struct {
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
depositCache *depositcache.DepositCache
proposerIdsCache *cache.ProposerPayloadIDsCache
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
@@ -154,7 +152,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
proposerIdsCache: cache.NewProposerPayloadIDsCache(),
}
for _, opt := range opts {
@@ -588,7 +585,6 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithStateGen(b.stateGen),
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
blockchain.WithProposerIdsCache(b.proposerIdsCache),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
if err != nil {
@@ -805,7 +801,6 @@ func (b *BeaconNode) registerRPCService() error {
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
ProposerIdsCache: b.proposerIdsCache,
ExecutionEngineCaller: web3Service,
})

View File

@@ -581,7 +581,6 @@ func (s *Service) processBlockHeader(header *gethTypes.Header) {
log.WithFields(logrus.Fields{
"blockNumber": s.latestEth1Data.BlockHeight,
"blockHash": hexutil.Encode(s.latestEth1Data.BlockHash),
"difficulty": header.Difficulty.String(),
}).Debug("Latest eth1 chain event")
}

View File

@@ -424,11 +424,11 @@ type beaconBlockBodyBellatrixJson struct {
type executionPayloadJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
CoinBase string `json:"coinbase" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
ReceiptRoot string `json:"receipt_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
Random string `json:"random" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
@@ -441,11 +441,11 @@ type executionPayloadJson struct {
type executionPayloadHeaderJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
CoinBase string `json:"coinbase" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
ReceiptRoot string `json:"receipt_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
Random string `json:"random" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`

View File

@@ -1024,20 +1024,19 @@ func TestProduceBlockV2(t *testing.T) {
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
}
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)

View File

@@ -693,7 +693,7 @@ func (bs *Server) GetValidatorPerformance(
return nil, err
}
validatorSummary = vp
case version.Altair, version.Bellatrix:
case version.Altair:
vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return nil, err
@@ -871,7 +871,7 @@ func (bs *Server) GetIndividualVotes(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
case version.Altair, version.Bellatrix:
case version.Altair:
v, bal, err = altair.InitializePrecomputeValidators(ctx, st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up altair pre compute instance: %v", err)

View File

@@ -2100,76 +2100,6 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
}
}
func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
ctx := context.Background()
epoch := types.Epoch(1)
headState, _ := util.DeterministicGenesisStateBellatrix(t, 32)
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
defaultBal := params.BeaconConfig().MaxEffectiveBalance
extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
balances := []uint64{defaultBal, extraBal, extraBal + params.BeaconConfig().GweiPerEth}
require.NoError(t, headState.SetBalances(balances))
publicKey1 := bytesutil.ToBytes48([]byte{1})
publicKey2 := bytesutil.ToBytes48([]byte{2})
publicKey3 := bytesutil.ToBytes48([]byte{3})
validators := []*ethpb.Validator{
{
PublicKey: publicKey1[:],
ActivationEpoch: 5,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKey2[:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKey3[:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: nil,
InclusionDistances: nil,
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{101, 102},
BalancesAfterEpochTransition: []uint64{0, 0},
MissingValidators: [][]byte{publicKey1[:]},
InactivityScores: []uint64{0, 0},
}
res, err := bs.GetValidatorPerformance(ctx, &ethpb.ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKey1[:], publicKey3[:], publicKey2[:]},
})
require.NoError(t, err)
if !proto.Equal(want, res) {
t.Errorf("Wanted %v\nReceived %v", want, res)
}
}
func BenchmarkListValidatorBalances(b *testing.B) {
b.StopTimer()
beaconDB := dbTest.SetupDB(b)
@@ -2545,98 +2475,6 @@ func TestServer_GetIndividualVotes_AltairEndOfEpoch(t *testing.T) {
assert.DeepEqual(t, wanted, res, "Unexpected response")
}
func TestServer_GetIndividualVotes_BellatrixEndOfEpoch(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
validators := uint64(32)
beaconState, _ := util.DeterministicGenesisStateBellatrix(t, validators)
startSlot, err := slots.EpochStart(1)
assert.NoError(t, err)
require.NoError(t, beaconState.SetSlot(startSlot))
b := util.NewBeaconBlock()
b.Block.Slot = startSlot
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
gen := stategen.New(beaconDB)
require.NoError(t, gen.SaveState(ctx, gRoot, beaconState))
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Save State at the end of the epoch:
endSlot, err := slots.EpochEnd(1)
assert.NoError(t, err)
beaconState, _ = util.DeterministicGenesisStateBellatrix(t, validators)
require.NoError(t, beaconState.SetSlot(endSlot))
pb, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
for i := range pb {
pb[i] = 0xff
}
require.NoError(t, beaconState.SetCurrentParticipationBits(pb))
require.NoError(t, beaconState.SetPreviousParticipationBits(pb))
b.Block.Slot = endSlot
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, gen.SaveState(ctx, gRoot, beaconState))
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)
res, err := bs.GetIndividualVotes(ctx, &ethpb.IndividualVotesRequest{
Indices: []types.ValidatorIndex{0, 1},
Epoch: 1,
})
require.NoError(t, err)
wanted := &ethpb.IndividualVotesRespond{
IndividualVotes: []*ethpb.IndividualVotesRespond_IndividualVote{
{
ValidatorIndex: 0,
PublicKey: beaconState.Validators()[0].PublicKey,
IsActiveInCurrentEpoch: true,
IsActiveInPreviousEpoch: true,
IsCurrentEpochTargetAttester: true,
IsCurrentEpochAttester: true,
IsPreviousEpochAttester: true,
IsPreviousEpochHeadAttester: true,
IsPreviousEpochTargetAttester: true,
CurrentEpochEffectiveBalanceGwei: params.BeaconConfig().MaxEffectiveBalance,
Epoch: 1,
},
{
ValidatorIndex: 1,
PublicKey: beaconState.Validators()[1].PublicKey,
IsActiveInCurrentEpoch: true,
IsActiveInPreviousEpoch: true,
IsCurrentEpochTargetAttester: true,
IsCurrentEpochAttester: true,
IsPreviousEpochAttester: true,
IsPreviousEpochHeadAttester: true,
IsPreviousEpochTargetAttester: true,
CurrentEpochEffectiveBalanceGwei: params.BeaconConfig().MaxEffectiveBalance,
Epoch: 1,
},
},
}
assert.DeepEqual(t, wanted, res, "Unexpected response")
}
func Test_validatorStatus(t *testing.T) {
tests := []struct {
name string

View File

@@ -79,8 +79,6 @@ go_library(
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -138,7 +138,7 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
// Query the next epoch assignments for committee subnet subscriptions.
nextCommitteeAssignments, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
nextCommitteeAssignments, _, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute next committee assignments: %v", err)
}
@@ -180,16 +180,6 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
nextAssignment.AttesterSlot = ca.AttesterSlot
nextAssignment.CommitteeIndex = ca.CommitteeIndex
}
// Cache proposer assignment for the current epoch.
for _, slot := range proposerIndexToSlots[idx] {
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
}
// Cache proposer assignment for the next epoch.
for _, slot := range nextProposerIndexToSlots[idx] {
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
}
// Prune payload ID cache for any slots before request slot.
vs.ProposerSlotIndexCache.PrunePayloadIDs(epochStartSlot)
} else {
// If the validator isn't in the beacon state, try finding their deposit to determine their status.
vStatus, _ := vs.validatorStatus(ctx, s, pubKey)

View File

@@ -60,10 +60,9 @@ func TestGetDuties_OK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
// Test the first validator in registry.
@@ -145,11 +144,10 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
// Test the first validator in registry.
@@ -183,12 +181,12 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
require.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
assert.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
}
for i := 0; i < len(res.CurrentEpochDuties); i++ {
require.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
assert.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
// Current epoch and next epoch duties should be equal before the sync period epoch boundary.
require.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
assert.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
// Current epoch and next epoch duties should not be equal at the sync period epoch boundary.
@@ -199,7 +197,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
}
@@ -251,11 +249,10 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
// Test the first validator in registry.
@@ -305,7 +302,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
}
@@ -343,12 +340,11 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
require.NoError(t, err)
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
}
unknownPubkey := bytesutil.PadTo([]byte{'u'}, 48)
@@ -403,10 +399,9 @@ func TestGetDuties_CurrentEpoch_ShouldNotFail(t *testing.T) {
State: bState, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
// Test the first validator in registry.
@@ -442,10 +437,9 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
pubkey0 := deposits[0].Data.PublicKey
@@ -509,12 +503,11 @@ func TestStreamDuties_OK(t *testing.T) {
Genesis: time.Now(),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
}
// Test the first validator in registry.
@@ -567,12 +560,11 @@ func TestStreamDuties_OK_ChainReorg(t *testing.T) {
Genesis: time.Now(),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
}
// Test the first validator in registry.

View File

@@ -9,8 +9,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -26,31 +24,9 @@ import (
"github.com/sirupsen/logrus"
)
var (
// payloadIDCacheMiss tracks the number of payload ID requests that aren't present in the cache.
payloadIDCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_miss",
Help: "The number of payload id get requests that aren't present in the cache.",
})
// payloadIDCacheHit tracks the number of payload ID requests that are present in the cache.
payloadIDCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_hit",
Help: "The number of payload id get requests that are present in the cache.",
})
)
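These counters only make sense next to the cache they instrument. As a rough standalone model of that cache (a plain map keyed by slot; the real ProposerPayloadIDsCache has more to it), a hit requires the slot to be present, the proposer to match, and a non-zero payload ID:

package main

import "fmt"

type entry struct {
    proposer  uint64
    payloadID [8]byte
}

// payloadIDCache is a toy slot-keyed cache standing in for the proposer/payload ID cache.
type payloadIDCache struct {
    m map[uint64]entry
}

// get reports a usable hit only when the slot is present, the proposer matches,
// and a payload ID was actually recorded; everything else counts as a miss.
func (c *payloadIDCache) get(slot, proposer uint64) ([8]byte, bool) {
    e, ok := c.m[slot]
    if !ok || e.proposer != proposer || e.payloadID == [8]byte{} {
        return [8]byte{}, false // payload_id_cache_miss
    }
    return e.payloadID, true // payload_id_cache_hit
}

func main() {
    c := &payloadIDCache{m: map[uint64]entry{100: {proposer: 7, payloadID: [8]byte{1}}}}
    fmt.Println(c.get(100, 7)) // hit
    fmt.Println(c.get(100, 8)) // miss: wrong proposer
}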
// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
// The payload is computed given the respective time of the merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex) (*enginev1.ExecutionPayload, error) {
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID cache hit. Fetch and return the payload for the cached ID.
var pid [8]byte
copy(pid[:], payloadId[:])
payloadIDCacheHit.Inc()
return vs.ExecutionEngineCaller.GetPayload(ctx, pid)
}
payloadIDCacheMiss.Inc()
st, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err

View File

@@ -9,7 +9,6 @@ import (
"github.com/holiman/uint256"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
powtesting "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -107,11 +106,6 @@ func TestServer_getExecutionPayload(t *testing.T) {
payloadID: &pb.PayloadIDBytes{0x1},
validatorIndx: 1,
},
{
name: "transition completed, happy case, payload ID cached)",
st: transitionSt,
validatorIndx: 100,
},
{
name: "transition completed, could not prepare payload",
st: transitionSt,
@@ -138,12 +132,10 @@ func TestServer_getExecutionPayload(t *testing.T) {
params.OverrideBeaconConfig(cfg)
vs := &Server{
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)

View File

@@ -10,7 +10,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -2345,8 +2344,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
ExecutionPayload: payload,
},
BeaconDB: db,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
BeaconDB: db,
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)

View File

@@ -41,7 +41,6 @@ import (
type Server struct {
Ctx context.Context
AttestationCache *cache.AttestationCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
FinalizationFetcher blockchain.FinalizationFetcher

View File

@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
@@ -26,7 +25,7 @@ var errPubkeyDoesNotExist = errors.New("pubkey does not exist")
var errOptimisticMode = errors.New("the node is currently optimistic and cannot serve validators")
var nonExistentIndex = types.ValidatorIndex(^uint64(0))
var errParticipation = status.Errorf(codes.Internal, "Failed to obtain epoch participation")
const numStatesToCheck = 2
// ValidatorStatus returns the validator status of the current epoch.
// The status response can be one of the following:
@@ -111,67 +110,44 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
return nil, status.Error(codes.Internal, "Could not get head state")
}
// Return early if we are in phase0.
if headState.Version() == version.Phase0 {
log.Info("Skipping goppelganger check for Phase 0")
resp := &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
DuplicateExists: false,
})
}
return resp, nil
}
headSlot := headState.Slot()
currEpoch := slots.ToEpoch(headSlot)
currEpoch := slots.ToEpoch(headState.Slot())
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
// If all provided keys are recent we skip this check
// as we are unable to effectively determine if a doppelganger
// is active.
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
if isRecent {
return resp, nil
}
// We request a state 32 slots ago. We are guaranteed to have
// currentSlot > 32 since we assume that we are in Altair's fork.
prevState, err := vs.ReplayerBuilder.ReplayerForSlot(headSlot - params.BeaconConfig().SlotsPerEpoch).ReplayBlocks(ctx)
// We walk back from the current head state to the states at the beginning of the previous 2 epochs.
// Denote these states S_i for i := 0,1,2, where i = 0 is the current head state in this epoch.
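// For example, with numStatesToCheck = 2 and the head in epoch 10, S_1 and S_2 below are
// the states retrieved for epochs 9 and 8; near genesis the SafeSub calls simply clamp
// the epochs instead of underflowing.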
previousEpoch, err := currEpoch.SafeSub(1)
if err != nil {
previousEpoch = currEpoch
}
olderEpoch, err := previousEpoch.SafeSub(1)
if err != nil {
olderEpoch = previousEpoch
}
prevState, err := vs.retrieveAfterEpochTransition(ctx, previousEpoch)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get previous state")
}
headCurrentParticipation, err := headState.CurrentEpochParticipation()
olderState, err := vs.retrieveAfterEpochTransition(ctx, olderEpoch)
if err != nil {
return nil, errParticipation
return nil, status.Error(codes.Internal, "Could not get older state")
}
headPreviousParticipation, err := headState.PreviousEpochParticipation()
if err != nil {
return nil, errParticipation
}
prevCurrentParticipation, err := prevState.CurrentEpochParticipation()
if err != nil {
return nil, errParticipation
}
prevPreviousParticipation, err := prevState.PreviousEpochParticipation()
if err != nil {
return nil, errParticipation
}
resp = &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
// If the validator's last recorded epoch was less than 1 epoch
// ago, the current doppelganger check will not be able to
// identify doppelgangers since an attestation can take up to
// 31 slots to be included.
if v.Epoch+1 >= currEpoch {
// If the validator's last recorded epoch was
// less than or equal to `numStatesToCheck` epochs ago, this method will not
// be able to catch duplicates. This is due to how attestation
// inclusion works: an attestation for the current epoch
// can be included in either the current or the next epoch, and
// depending on which epoch it lands in, the balance change is only
// reflected in the following epoch.
if v.Epoch+numStatesToCheck >= currEpoch {
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
@@ -179,15 +155,37 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
})
continue
}
valIndex, ok := prevState.ValidatorIndexByPubkey(bytesutil.ToBytes48(v.PublicKey))
valIndex, ok := olderState.ValidatorIndexByPubkey(bytesutil.ToBytes48(v.PublicKey))
if !ok {
// Ignore if validator pubkey doesn't exist.
continue
}
if (headCurrentParticipation[valIndex] != 0) || (headPreviousParticipation[valIndex] != 0) ||
(prevCurrentParticipation[valIndex] != 0) || (prevPreviousParticipation[valIndex] != 0) {
log.WithField("ValidatorIndex", valIndex).Infof("Participation flag found")
baseBal, err := olderState.BalanceAtIndex(valIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator's balance")
}
nextBal, err := prevState.BalanceAtIndex(valIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator's balance")
}
// If the next epoch's balance is higher, we mark it as an existing
// duplicate.
if nextBal > baseBal {
log.Infof("Current epoch %d, last recorded epoch %d, balance difference %d gwei", currEpoch, v.Epoch, nextBal-baseBal)
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
DuplicateExists: true,
})
continue
}
currBal, err := headState.BalanceAtIndex(valIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get validator's balance")
}
// If the current epoch's balance is higher, we mark it as an existing
// duplicate.
if currBal > nextBal {
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
@@ -376,8 +374,8 @@ func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequ
// Due to how balances are reflected for individual
// validators, we can only effectively determine if a
// validator voted or not if we are able to look
// back more than 1 epoch into the past.
if v.Epoch+1 < headEpoch {
// back more than `numStatesToCheck` epochs into the past.
if v.Epoch+numStatesToCheck < headEpoch {
validatorsAreRecent = false
// Zero out response if we encounter non-recent validators to
// guard against potential misuse.
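The rewritten CheckDoppelGanger above skips recently active keys and then compares balances across three epoch-boundary states. A rough, self-contained sketch of that heuristic follows (hypothetical epochState type and likelyDuplicate helper, not the actual Server method); it bundles the numStatesToCheck recency guard with the two balance comparisons:

package main

import "fmt"

const numStatesToCheck = 2

// epochState is a stand-in for a beacon state at an epoch boundary;
// the real code reads balances with state.BalanceAtIndex.
type epochState map[uint64]uint64 // validator index -> balance in gwei

// likelyDuplicate mirrors the comparisons in CheckDoppelGanger: a
// balance that rises between consecutive epoch boundaries suggests the
// validator kept attesting elsewhere, i.e. a doppelganger.
func likelyDuplicate(older, prev, head epochState, idx, lastEpoch, currEpoch uint64) (duplicate, conclusive bool) {
	// Recency guard: with too little history the check is inconclusive.
	if lastEpoch+numStatesToCheck >= currEpoch {
		return false, false
	}
	baseBal, nextBal, currBal := older[idx], prev[idx], head[idx]
	if nextBal > baseBal {
		return true, true
	}
	return currBal > nextBal, true
}

func main() {
	older := epochState{2: 32_000_000_000}
	prev := epochState{2: 32_000_000_000}
	head := epochState{2: 32_010_000_000} // balance rose in the head epoch
	dup, ok := likelyDuplicate(older, prev, head, 2, 1, 10)
	fmt.Println(dup, ok) // true true
}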

View File

@@ -8,6 +8,7 @@ import (
"github.com/d4l3k/messagediff"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -16,6 +17,7 @@ import (
mockstategen "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen/mock"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/crypto/bls"
@@ -959,15 +961,27 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "normal doppelganger request",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, keys := createStateSetupAltair(t, 3)
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 20)
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 0; i < 3; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 1,000,000,000 gwei (1 ETH) to mock an inactivity leak
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
}
// Older Epoch State
for i := 0; i < 3; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 2,000,000,000 gwei (2 ETH) to mock an inactivity leak
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
}
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: rb,
ReplayerBuilder: builder,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -991,19 +1005,37 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "doppelganger exists current epoch",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, keys := createStateSetupAltair(t, 3)
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 20)
currentIndices := make([]byte, 64)
currentIndices[2] = 1
require.NoError(t, hs.SetCurrentParticipationBits(currentIndices))
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 0; i < 2; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 1,000,000,000 gwei (1 ETH) to mock an inactivity leak
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
}
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Subtract 1,000,000,000 gwei (1 ETH) to mock an actively attesting validator.
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))
// Older Epoch State
for i := 0; i < 2; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 2,000,000,000 gwei (2 ETH) to mock an inactivity leak
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
}
bal, err = os.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Subtract 1,000,000,000 gwei (1 ETH) to mock an actively attesting validator.
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: rb,
ReplayerBuilder: builder,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1038,19 +1070,37 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "doppelganger exists previous epoch",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, keys := createStateSetupAltair(t, 3)
prevIndices := make([]byte, 64)
prevIndices[2] = 1
require.NoError(t, ps.SetPreviousParticipationBits(prevIndices))
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 20)
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 0; i < 2; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 1,000,000,000 gwei (1 ETH) to mock an inactivity leak
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
}
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Subtract 1,000,000,000 gwei (1 ETH) to mock an actively attesting validator.
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))
// Older Epoch State
for i := 0; i < 2; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 2,000,000,000 gwei (2 ETH) to mock an inactivity leak
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
}
bal, err = os.BalanceAtIndex(types.ValidatorIndex(2))
assert.NoError(t, err)
// Subtract 2,000,000,000 gwei (2 ETH) to mock an actively attesting validator.
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-2000000000))
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: rb,
ReplayerBuilder: builder,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1085,26 +1135,29 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "multiple doppelganger exists",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, keys := createStateSetupAltair(t, 3)
currentIndices := make([]byte, 64)
currentIndices[10] = 1
currentIndices[11] = 2
require.NoError(t, hs.SetPreviousParticipationBits(currentIndices))
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 20)
prevIndices := make([]byte, 64)
for i := 12; i < 20; i++ {
prevIndices[i] = 1
hs, ps, os, keys, builder := createStateSetup(t, 4)
// Previous Epoch State
for i := 10; i < 15; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Subtract 1,000,000,000 gwei (1 ETH) so the balance rises toward the head, mocking an actively attesting validator
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-1000000000))
}
// Older Epoch State
for i := 10; i < 15; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Subtract 2,000,000,000 gwei (2 ETH) so the balance rises toward the head, mocking an actively attesting validator
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-2000000000))
}
require.NoError(t, ps.SetCurrentParticipationBits(prevIndices))
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: rb,
ReplayerBuilder: builder,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1122,17 +1175,6 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
DuplicateExists: true,
})
}
for i := 15; i < 20; i++ {
request.ValidatorRequests = append(request.ValidatorRequests, &ethpb.DoppelGangerRequest_ValidatorRequest{
PublicKey: keys[i].PublicKey().Marshal(),
Epoch: 3,
SignedRoot: []byte{'A'},
})
response.Responses = append(response.Responses, &ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: keys[i].PublicKey().Marshal(),
DuplicateExists: false,
})
}
return vs, request, response
},
@@ -1141,16 +1183,14 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
name: "attesters are too recent",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, ps, keys := createStateSetupAltair(t, 3)
rb := mockstategen.NewMockReplayerBuilder()
rb.SetMockStateForSlot(ps, 20)
hs, _, _, keys, _ := createStateSetup(t, 4)
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ReplayerBuilder: rb,
ReplayerBuilder: nil,
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1168,39 +1208,6 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
})
}
return vs, request, response
},
},
{
name: "exit early for Phase 0",
wantErr: false,
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
hs, _, keys := createStateSetupPhase0(t, 3)
vs := &Server{
HeadFetcher: &mockChain.ChainService{
State: hs,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := &ethpb.DoppelGangerRequest{
ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{
{
PublicKey: keys[0].PublicKey().Marshal(),
Epoch: 1,
SignedRoot: []byte{'A'},
},
},
}
response := &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{
{
PublicKey: keys[0].PublicKey().Marshal(),
DuplicateExists: false,
},
},
}
return vs, request, response
},
},
@@ -1221,36 +1228,104 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
}
}
func createStateSetupPhase0(t *testing.T, head types.Epoch) (state.BeaconState,
state.BeaconState, []bls.SecretKey) {
func createStateSetup(t *testing.T, head types.Epoch) (state.BeaconState,
state.BeaconState, state.BeaconState, []bls.SecretKey, *mockstategen.MockReplayerBuilder) {
rb := &mockstategen.MockReplayerBuilder{}
gs, keys := util.DeterministicGenesisState(t, 64)
hs := gs.Copy()
// Head State
headSlot := types.Slot(head)*params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().SlotsPerEpoch/2
headEpoch := head
headSlot := types.Slot(headEpoch) * params.BeaconConfig().SlotsPerEpoch
assert.NoError(t, hs.SetSlot(headSlot))
assingments, _, err := helpers.CommitteeAssignments(context.Background(), hs, headEpoch)
assert.NoError(t, err)
for _, ctr := range assingments {
pendingAtt := &ethpb.PendingAttestation{
AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
Data: &ethpb.AttestationData{
Slot: ctr.AttesterSlot,
CommitteeIndex: ctr.CommitteeIndex,
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, fieldparams.RootLength),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, fieldparams.RootLength),
},
},
InclusionDelay: 1,
ProposerIndex: 10,
}
assert.NoError(t, hs.AppendCurrentEpochAttestations(pendingAtt))
}
rb.SetMockState(hs)
// Previous Epoch State
prevSlot := headSlot - params.BeaconConfig().SlotsPerEpoch
prevEpoch := headEpoch - 1
ps := gs.Copy()
prevSlot, err := slots.EpochEnd(prevEpoch)
assert.NoError(t, err)
assert.NoError(t, ps.SetSlot(prevSlot))
assingments, _, err = helpers.CommitteeAssignments(context.Background(), ps, prevEpoch)
assert.NoError(t, err)
for _, ctr := range assingments {
pendingAtt := &ethpb.PendingAttestation{
AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
Data: &ethpb.AttestationData{
Slot: ctr.AttesterSlot,
CommitteeIndex: ctr.CommitteeIndex,
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, fieldparams.RootLength),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, fieldparams.RootLength),
},
},
InclusionDelay: 1,
ProposerIndex: 10,
}
assert.NoError(t, ps.AppendCurrentEpochAttestations(pendingAtt))
}
rb.SetMockState(ps)
return hs, ps, keys
}
func createStateSetupAltair(t *testing.T, head types.Epoch) (state.BeaconState,
state.BeaconState, []bls.SecretKey) {
gs, keys := util.DeterministicGenesisStateAltair(t, 64)
hs := gs.Copy()
// Head State
headSlot := types.Slot(head)*params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().SlotsPerEpoch/2
assert.NoError(t, hs.SetSlot(headSlot))
// Previous Epoch State
prevSlot := headSlot - params.BeaconConfig().SlotsPerEpoch
ps := gs.Copy()
assert.NoError(t, ps.SetSlot(prevSlot))
return hs, ps, keys
// Older Epoch State
olderEpoch := prevEpoch - 1
os := gs.Copy()
olderSlot, err := slots.EpochEnd(olderEpoch)
assert.NoError(t, err)
assert.NoError(t, os.SetSlot(olderSlot))
assingments, _, err = helpers.CommitteeAssignments(context.Background(), os, olderEpoch)
assert.NoError(t, err)
for _, ctr := range assingments {
attSlot := ctr.AttesterSlot
if attSlot == olderSlot {
continue
}
pendingAtt := &ethpb.PendingAttestation{
AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
Data: &ethpb.AttestationData{
Slot: attSlot,
CommitteeIndex: ctr.CommitteeIndex,
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, fieldparams.RootLength),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, fieldparams.RootLength),
},
},
InclusionDelay: 1,
ProposerIndex: 10,
}
assert.NoError(t, os.AppendCurrentEpochAttestations(pendingAtt))
}
rb.SetMockState(os)
return hs, ps, os, keys, rb
}
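Putting the balance tweaks from the test cases above next to the genesis balance helps show why the "normal doppelganger request" case expects no duplicates. The arithmetic below assumes the 32 ETH (32e9 gwei) balance that DeterministicGenesisState assigns and that the mock replayer hands the tweaked states back unchanged; both are assumptions about test utilities not shown in this diff:

package main

import "fmt"

func main() {
	const genesis = uint64(32_000_000_000) // assumed genesis balance in gwei
	baseBal := genesis + 2_000_000_000     // older state: +2 ETH
	nextBal := genesis + 1_000_000_000     // previous state: +1 ETH
	currBal := genesis                     // head state: untouched
	// Balances fall from epoch to epoch, which reads as an inactivity
	// leak, so neither comparison in CheckDoppelGanger flags a duplicate.
	fmt.Println(nextBal > baseBal || currBal > nextBal) // false
}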

View File

@@ -109,7 +109,6 @@ type Config struct {
StateGen *stategen.State
MaxMsgSize int
ExecutionEngineCaller powchain.EngineCaller
ProposerIdsCache *cache.ProposerPayloadIDsCache
}
// NewService instantiates a new RPC service instance that will
@@ -208,7 +207,6 @@ func (s *Service) Start() {
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,

View File

@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 2e923b42b8e4fcc278301da6506b212334a78169cb32c70e0d66a636435b8925
// Hash: 6de36f732d72b5c4c0c967bc0edcc752b7afdd337e829486954eb6affda84da8
package v1
import (

beacon-chain/state/state-native/v2/generated.ssz.go Normal file → Executable file
View File

@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: ec98b14e43fd11e74e0d9e705a7afe74a77706c3e215d7940b11411859873f4b
// Hash: 6a7886393e8874ccf57ea6c160647da09f5e541234a235ee71f3bf786d56a100
package v2
import (

View File

@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: aa2156293aac4326afe2b8c0ba985a0291c83f20c8d8b92d148bc810a7f442e9
// Hash: a71c6e70ae416774612961057f4c96b97b5c3323270a80167d30ea672ea2f5cd
package v3
import (

View File

@@ -151,15 +151,6 @@ func (e *epochBoundaryState) put(r [32]byte, s state.BeaconState) error {
return nil
}
// delete the state from the epoch boundary state cache.
func (e *epochBoundaryState) delete(r [32]byte) error {
e.lock.Lock()
defer e.lock.Unlock()
return e.rootStateCache.Delete(&rootStateInfo{
root: r,
})
}
// trim the FIFO queue to the maxSize.
func trim(queue *cache.FIFO, maxSize uint64) {
for s := uint64(len(queue.ListKeys())); s > maxSize; s-- {

View File

@@ -19,7 +19,7 @@ func TestEpochBoundaryStateCache_BadRootKey(t *testing.T) {
assert.ErrorContains(t, errNotRootStateInfo.Error(), err, "Did not get wanted error")
}
func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
func TestEpochBoundaryStateCache_CanSave(t *testing.T) {
e := newBoundaryStateCache()
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -46,17 +46,6 @@ func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, true, exists, "Should exist")
assert.DeepSSZEqual(t, s.InnerStateUnsafe(), got.state.InnerStateUnsafe(), "Should have the same state")
require.NoError(t, e.delete(r))
got, exists, err = e.getByRoot([32]byte{'b'})
require.NoError(t, err)
assert.Equal(t, false, exists, "Should not exist")
assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")
got, exists, err = e.getBySlot(1)
require.NoError(t, err)
assert.Equal(t, false, exists, "Should not exist")
assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")
}
func TestEpochBoundaryStateCache_CanTrim(t *testing.T) {

View File

@@ -39,7 +39,7 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool,
return has, nil
}
// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache
// StateByRootIfCached retrieves a state using the input block root only if the state is already in the cache
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
if !s.hotStateCache.has(blockRoot) {
return nil
@@ -146,12 +146,6 @@ func (s *State) RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*e
return nil, errors.New("could not find block in DB")
}
// DeleteStateFromCaches deletes the state from the caches.
func (s *State) DeleteStateFromCaches(_ context.Context, blockRoot [32]byte) error {
s.hotStateCache.delete(blockRoot)
return s.epochBoundaryStateCache.delete(blockRoot)
}
// This loads a beacon state from either the cache or the DB, then replays blocks up to the requested block root.
func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadStateByRoot")
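The comment above summarizes the lookup order: hot cache first, then the DB plus a block replay. A minimal sketch of that shape (hypothetical types, not stategen's actual API) is:

package main

import (
	"errors"
	"fmt"
)

type state string

type stateGen struct {
	hot map[[32]byte]state // in-memory hot state cache
	db  map[[32]byte]state // persisted epoch-boundary states
}

// loadByRoot prefers the in-memory cache, then falls back to the DB
// and (in the real code) replays blocks up to the requested root.
func (s *stateGen) loadByRoot(root [32]byte) (state, error) {
	if st, ok := s.hot[root]; ok {
		return st, nil
	}
	if st, ok := s.db[root]; ok {
		// Real code would replay blocks from this saved state up to `root`.
		return st, nil
	}
	return "", errors.New("no state summary found for root")
}

func main() {
	g := &stateGen{
		hot: map[[32]byte]state{},
		db:  map[[32]byte]state{{0x01}: "epoch-boundary-state"},
	}
	st, err := g.loadByRoot([32]byte{0x01})
	fmt.Println(st, err) // epoch-boundary-state <nil>
}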

View File

@@ -165,35 +165,6 @@ func TestStateByRoot_HotStateCached(t *testing.T) {
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
}
func TestDeleteStateFromCaches(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := New(beaconDB)
beaconState, _ := util.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
require.Equal(t, false, service.hotStateCache.has(r))
_, has, err := service.epochBoundaryStateCache.getByRoot(r)
require.NoError(t, err)
require.Equal(t, false, has)
service.hotStateCache.put(r, beaconState)
require.NoError(t, service.epochBoundaryStateCache.put(r, beaconState))
require.Equal(t, true, service.hotStateCache.has(r))
_, has, err = service.epochBoundaryStateCache.getByRoot(r)
require.NoError(t, err)
require.Equal(t, true, has)
require.NoError(t, service.DeleteStateFromCaches(ctx, r))
require.Equal(t, false, service.hotStateCache.has(r))
_, has, err = service.epochBoundaryStateCache.getByRoot(r)
require.NoError(t, err)
require.Equal(t, false, has)
}
func TestStateByRoot_StateByRootInitialSync(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

View File

@@ -23,7 +23,7 @@ func NewMockService() *MockStateManager {
}
}
// StateByRootIfCachedNoCopy
// StateByRootIfCached
func (_ *MockStateManager) StateByRootIfCachedNoCopy(_ [32]byte) state.BeaconState {
panic("implement me")
}
@@ -124,8 +124,3 @@ func (m *MockStateManager) AddStateForRoot(state state.BeaconState, blockRoot [3
func (m *MockStateManager) AddStateForSlot(state state.BeaconState, slot types.Slot) {
m.StatesBySlot[slot] = state
}
// DeleteStateFromCaches --
func (m *MockStateManager) DeleteStateFromCaches(context.Context, [32]byte) error {
return nil
}

View File

@@ -41,13 +41,6 @@ func (b *MockReplayerBuilder) SetMockState(s state.BeaconState) {
b.forSlot[s.Slot()] = &MockReplayer{State: s}
}
func (b *MockReplayerBuilder) SetMockStateForSlot(s state.BeaconState, slot types.Slot) {
if b.forSlot == nil {
b.forSlot = make(map[types.Slot]*MockReplayer)
}
b.forSlot[slot] = &MockReplayer{State: s}
}
func (b *MockReplayerBuilder) SetMockSlotError(s types.Slot, e error) {
if b.forSlot == nil {
b.forSlot = make(map[types.Slot]*MockReplayer)

View File

@@ -39,7 +39,6 @@ type StateManager interface {
ForceCheckpoint(ctx context.Context, root []byte) error
EnableSaveHotStateToDB(_ context.Context)
DisableSaveHotStateToDB(ctx context.Context) error
DeleteStateFromCaches(ctx context.Context, blockRoot [32]byte) error
}
// State is a concrete implementation of StateManager.

View File

@@ -45,7 +45,7 @@ func (f *blocksFetcher) nonSkippedSlotAfter(ctx context.Context, slot types.Slot
// Exit early if no peers with epoch higher than our known head are found.
if targetEpoch <= headEpoch {
return 0, errors.Wrapf(errSlotIsTooHigh, "no peers with epoch higher than our known head, peer epoch=%d, head=%d", targetEpoch, headEpoch)
return 0, errSlotIsTooHigh
}
// Transform peer list to avoid eclipsing (filter, shuffle, trim).

View File

@@ -2,7 +2,7 @@ package initialsync
import (
"context"
"github.com/pkg/errors"
"errors"
"time"
"github.com/libp2p/go-libp2p-core/peer"
@@ -285,7 +285,7 @@ func (q *blocksQueue) onScheduleEvent(ctx context.Context) eventHandlerFn {
}
if m.start > q.highestExpectedSlot {
m.setState(stateSkipped)
return m.state, errors.Wrapf(errSlotIsTooHigh, "slot=%d", m.start)
return m.state, errSlotIsTooHigh
}
blocksPerRequest := q.blocksFetcher.blocksPerSecond
if err := q.blocksFetcher.scheduleRequest(ctx, m.start, blocksPerRequest); err != nil {
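Both hunks above change whether errSlotIsTooHigh comes back bare or wrapped with context via errors.Wrapf. Callers that match it with errors.Is behave the same either way, because github.com/pkg/errors preserves the unwrap chain; a minimal sketch:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errSlotIsTooHigh = errors.New("slot is too high")

func main() {
	bare := errSlotIsTooHigh
	wrapped := errors.Wrapf(errSlotIsTooHigh, "slot=%d", 42)
	// errors.Is matches the sentinel whether or not context was added.
	fmt.Println(errors.Is(bare, errSlotIsTooHigh))    // true
	fmt.Println(errors.Is(wrapped, errSlotIsTooHigh)) // true
}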

View File

@@ -152,16 +152,14 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
continue
}
err = s.validateBeaconBlock(ctx, b, blkRoot)
switch {
case errors.Is(ErrOptimisticParent, err): // OK to continue processing a block whose parent is an optimistic candidate.
case err != nil:
if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
log.Debugf("Could not validate block from slot %d: %v", b.Block().Slot(), err)
s.setBadBlock(ctx, blkRoot)
tracing.AnnotateError(span, err)
// In the next iteration of the queue, this block will be removed from
// the pending queue as it has been marked as a 'bad' block.
span.End()
continue
default:
}
if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot); err != nil {

View File

@@ -111,84 +111,6 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}
func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) {
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
r := &Service{
cfg: &config{
p2p: p1,
beaconDB: db,
chain: &mock.ChainService{
Optimistic: true,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
},
},
stateGen: stategen.New(db),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
}
r.initCaches()
b0 := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(b0)
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wsb))
b0Root, err := b0.Block.HashTreeRoot()
require.NoError(t, err)
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.ParentRoot = b0Root[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b3)
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wsb))
// Incomplete block link
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = b0Root[:]
b1Root, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b2 := util.NewBeaconBlock()
b2.Block.Slot = 2
b2.Block.ParentRoot = b1Root[:]
b2Root, err := b2.Block.HashTreeRoot()
require.NoError(t, err)
// Add b2 to the cache
wsb, err = wrapper.WrappedSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
// Add b1 to the cache
wsb, err = wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root))
wsb, err = wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wsb))
nBlock := util.NewBeaconBlock()
nBlock.Block.Slot = b1.Block.Slot
nRoot, err := nBlock.Block.HashTreeRoot()
require.NoError(t, err)
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
wsb, err = wrapper.WrappedSignedBeaconBlock(nBlock)
require.NoError(t, err)
require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot))
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}
func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T) {
db := dbtest.SetupDB(t)

View File

@@ -3,7 +3,6 @@ package sync
import (
"bytes"
"context"
"fmt"
"sync"
"time"
@@ -308,7 +307,7 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
return nil
}
if !s.cfg.beaconDB.IsFinalizedBlock(ctx, bytesutil.ToBytes32(msg.FinalizedRoot)) {
return errors.Wrap(p2ptypes.ErrInvalidFinalizedRoot, fmt.Sprintf("root=%#x", msg.FinalizedRoot))
return p2ptypes.ErrInvalidFinalizedRoot
}
blk, err := s.cfg.beaconDB.Block(ctx, bytesutil.ToBytes32(msg.FinalizedRoot))
if err != nil {

View File

@@ -97,15 +97,10 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationIgnore, nil
}
// Check that the block being voted on isn't invalid.
errBadBlockRef := errors.New("bad block referenced in attestation data")
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) {
return pubsub.ValidationReject, errors.Wrapf(errBadBlockRef, "block=BeaconBlockRoot, root=%#x", m.Message.Aggregate.Data.BeaconBlockRoot)
}
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) {
return pubsub.ValidationReject, errors.Wrapf(errBadBlockRef, "block=Target, root=%#x", m.Message.Aggregate.Data.Target.Root)
}
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
return pubsub.ValidationReject, errors.Wrapf(errBadBlockRef, "block=Source, root=%#x", m.Message.Aggregate.Data.Source.Root)
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
}
// Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally.

View File

@@ -189,8 +189,6 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationAccept, nil
}
var errIncorrectProposerIndex = errors.New("incorrect proposer index")
func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "sync.validateBeaconBlock")
defer span.End()
@@ -222,19 +220,13 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaco
if err != nil {
return err
}
sRoot, err := parentState.HashTreeRoot(ctx)
if err != nil {
log.Errorf("that's weird, htr fail")
}
log.Infof("validating block with slot=%d, state.slot=%d, block_root=%#x, state_root=%#x", blk.Block().Slot(), parentState.Slot(), blockRoot, sRoot)
idx, err := helpers.BeaconProposerIndex(ctx, parentState)
if err != nil {
return err
}
log.Infof("got BeaconProposerIndex=%d, block proposer index=%d", idx, blk.Block().ProposerIndex())
if blk.Block().ProposerIndex() != idx {
s.setBadBlock(ctx, blockRoot)
return errors.Wrapf(errIncorrectProposerIndex, "state slot=%d, root=%#x, block_root=%#x", parentState.Slot(), sRoot, blockRoot)
return errors.New("incorrect proposer index")
}
if err = s.validateBellatrixBeaconBlock(ctx, parentState, blk.Block()); err != nil {

View File

@@ -26,15 +26,6 @@ func BeaconConfig() *BeaconChainConfig {
func OverrideBeaconConfig(c *BeaconChainConfig) {
beaconConfigLock.Lock()
defer beaconConfigLock.Unlock()
c.InitializeForkSchedule()
name, ok := reverseConfigNames[c.ConfigName]
// if name collides with an existing config name, override it, because the fork versions probably conflict
if !ok {
// otherwise define it as the special "Dynamic" name, ie for a config loaded from a file at runtime
name = Dynamic
}
KnownConfigs[name] = func() *BeaconChainConfig { return c }
rebuildKnownForkVersions()
beaconConfig = c
}

View File

@@ -19,15 +19,6 @@ func BeaconConfig() *BeaconChainConfig {
// OverrideBeaconConfig(c). Any subsequent calls to params.BeaconConfig() will
// return this new configuration.
func OverrideBeaconConfig(c *BeaconChainConfig) {
c.InitializeForkSchedule()
name, ok := reverseConfigNames[c.ConfigName]
// if name collides with an existing config name, override it, because the fork versions probably conflict
if !ok {
// otherwise define it as the special "Dynamic" name, ie for a config loaded from a file at runtime
name = Dynamic
}
KnownConfigs[name] = func() *BeaconChainConfig { return c }
rebuildKnownForkVersions()
beaconConfig = c
}
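Independent of the registration logic removed here, the documented contract of OverrideBeaconConfig is that later params.BeaconConfig() calls observe the override. A hypothetical usage sketch (the Copy helper is assumed, following the usual pattern in Prysm tests):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/config/params"
)

func main() {
	// Start from a known config, tweak it, and install the override.
	cfg := params.MainnetConfig().Copy() // Copy() assumed to exist
	cfg.ConfigName = "my-devnet"
	cfg.SlotsPerEpoch = 8
	params.OverrideBeaconConfig(cfg)

	// Subsequent reads observe the override.
	fmt.Println(params.BeaconConfig().SlotsPerEpoch) // 8
}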

Some files were not shown because too many files have changed in this diff.