Deprecate store in blockchain pkg (#10903)

* Deprecate store WIP

* fix spectests build

* mock right interface

* sync tests build

* more tests build

* blockchain tests

- TestFinalizedCheckpt_GenesisRootOk
- TestCurrentJustifiedCheckpt_CanRetrieve
- TestJustifiedCheckpt_GenesisRootOk
- TestHeadRoot_CanRetrieve
- TestHeadRoot_UseDB
- TestService_ProcessAttestationsAndUpdateHead
- TestService_VerifyWeakSubjectivityRoot
- TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray
- TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree
- TestVerifyFinalizedConsistency_Ok
- TestStore_OnBlock_ProtoArray
- TestStore_OnBlock_DoublyLinkedTree
- TestStore_OnBlockBatch_ProtoArray
- TestStore_OnBlockBatch_DoublyLinkedTree
- TestStore_OnBlockBatch_NotifyNewPayload
- TestCachedPreState_CanGetFromStateSummary_ProtoArray
- TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree

* more blockchain tests

- TestStore_OnBlockBatch_PruneOK_Protoarray
- TestFillForkChoiceMissingBlocks_CanSave_ProtoArray
- TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree
- TestFillForkChoiceMissingBlocks_RootsMatch_Protoarray
- TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree
- TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray
- TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree
- TestVerifyBlkDescendant
- Test_verifyBlkFinalizedSlot_invalidBlock
- TestChainService_SaveHeadNoDB

* update best justified from genesis

* deal with nil head on saveHead

* initialize prev justified checkpoint

* update finalization correctly

* update finalization logic

* update finalization logic

* track the wall clock on forkchoice spectests

* export copies of checkpoints from blockchain package (see the sketch after the changed-files summary below)

* do not use forkchoice's head on HeadRoot

* Remove debug remains

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* terence's review

* add forkchoice types deps

* wtf

* debugging

* init-sync: update justified and finalized checkpoints on db

* call updateFinalized instead of only DB

* remove debug symbols

* safe copy headroot

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Authored by Potuz on 2022-06-25 00:57:52 -03:00, committed by GitHub.
parent b7463d0070
commit f8b4d8c57a
53 changed files with 771 additions and 1542 deletions
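
The heart of this change: justified and finalized checkpoints are no longer cached in the deprecated blockchain store; they are read from the fork-choice store and exported as defensive copies. The minimal, self-contained sketch below models that pattern; checkpoint, forkChoicer, pbCheckpoint, safeCopyBytes and service are simplified stand-ins, not Prysm's actual types.

package main

import "fmt"

// checkpoint mirrors the shape of a fork-choice checkpoint: an epoch plus a fixed-size root.
type checkpoint struct {
    Epoch uint64
    Root  [32]byte
}

// forkChoicer stands in for the fork-choice store that now owns the canonical checkpoints.
type forkChoicer struct{ finalized checkpoint }

func (f *forkChoicer) FinalizedCheckpoint() checkpoint { return f.finalized }

// pbCheckpoint models the protobuf checkpoint with a byte-slice root.
type pbCheckpoint struct {
    Epoch uint64
    Root  []byte
}

// safeCopyBytes models bytesutil.SafeCopyBytes: the caller gets its own slice.
func safeCopyBytes(b []byte) []byte {
    out := make([]byte, len(b))
    copy(out, b)
    return out
}

type service struct{ fc *forkChoicer }

// FinalizedCheckpt reads from fork choice and returns a defensive copy, with no error path.
func (s *service) FinalizedCheckpt() *pbCheckpoint {
    cp := s.fc.FinalizedCheckpoint()
    return &pbCheckpoint{Epoch: cp.Epoch, Root: safeCopyBytes(cp.Root[:])}
}

func main() {
    s := &service{fc: &forkChoicer{finalized: checkpoint{Epoch: 3, Root: [32]byte{'f'}}}}
    cp := s.FinalizedCheckpt()
    cp.Root[0] = 0                             // mutate the returned copy...
    fmt.Println(s.fc.finalized.Root[0] == 'f') // ...fork-choice state is untouched: prints true
}

Because callers only ever receive a copy, they cannot corrupt shared fork-choice state, and the exported getters can drop their error return since the fork-choice store always yields a checkpoint value once the service is initialized.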

View File

@@ -12,7 +12,6 @@ go_library(
"log.go",
"merge_ascii_art.go",
"metrics.go",
"new_slot.go",
"options.go",
"pow_block.go",
"process_attestation.go",
@@ -35,7 +34,6 @@ go_library(
deps = [
"//async:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain/store:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -113,7 +111,6 @@ go_test(
"log_test.go",
"metrics_test.go",
"mock_test.go",
"new_slot_test.go",
"pow_block_test.go",
"process_attestation_test.go",
"process_block_test.go",
@@ -133,6 +130,7 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
@@ -189,6 +187,7 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",

View File

@@ -4,8 +4,6 @@ import (
"context"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
@@ -81,9 +79,9 @@ type CanonicalFetcher interface {
// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieve finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() (*ethpb.Checkpoint, error)
CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error)
PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error)
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}
@@ -94,47 +92,27 @@ type OptimisticModeFetcher interface {
}
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.FinalizedCheckpt()
if err != nil {
return nil, err
}
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
return ethpb.CopyCheckpoint(cp), nil
// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.JustifiedCheckpt()
if err != nil {
return nil, err
}
return ethpb.CopyCheckpoint(cp), nil
}
// PreviousJustifiedCheckpt returns the previous justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.PrevJustifiedCheckpt()
if err != nil {
return nil, err
}
return ethpb.CopyCheckpoint(cp), nil
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// BestJustifiedCheckpt returns the best justified checkpoint from store.
func (s *Service) BestJustifiedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.BestJustifiedCheckpt()
if err != nil {
// If there is no best justified checkpoint, return the checkpoint with root as zeros to be used for genesis cases.
if errors.Is(err, store.ErrNilCheckpoint) {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}, nil
}
return nil, err
}
return ethpb.CopyCheckpoint(cp), nil
func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().BestJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// HeadSlot returns the slot of the head of the chain.
@@ -154,9 +132,8 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if s.headRoot() != params.BeaconConfig().ZeroHash {
r := s.headRoot()
return r[:], nil
if s.head != nil && s.head.root != params.BeaconConfig().ZeroHash {
return bytesutil.SafeCopyBytes(s.head.root[:]), nil
}
b, err := s.cfg.BeaconDB.HeadBlock(ctx)

View File

@@ -5,11 +5,12 @@ import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -78,81 +79,48 @@ func TestService_ForkChoiceStore(t *testing.T) {
require.Equal(t, types.Epoch(0), p.FinalizedCheckpoint().Epoch)
}
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
beaconDB := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
cp, err := c.FinalizedCheckpt()
require.NoError(t, err)
assert.Equal(t, cp.Epoch, cp.Epoch, "Unexpected finalized epoch")
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
c.originBlockRoot = genesisRoot
cp, err := c.FinalizedCheckpt()
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
cp := service.FinalizedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
cp = service.CurrentJustifiedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
// check that forkchoice has the right genesis root as the node root
root, err := fcs.Head(ctx, []uint64{})
require.NoError(t, err)
require.Equal(t, service.originBlockRoot, root)
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
_, err := c.CurrentJustifiedCheckpt()
require.ErrorIs(t, err, store.ErrNilCheckpoint)
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
jp, err := c.CurrentJustifiedCheckpt()
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(cp))
jp := service.CurrentJustifiedCheckpt()
assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
}
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
c.originBlockRoot = genesisRoot
cp, err := c.CurrentJustifiedCheckpt()
require.NoError(t, err)
assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
}
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
beaconDB := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
_, err := c.PreviousJustifiedCheckpt()
require.ErrorIs(t, err, store.ErrNilCheckpoint)
c.store.SetPrevJustifiedCheckpt(cp)
pcp, err := c.PreviousJustifiedCheckpt()
require.NoError(t, err)
assert.Equal(t, cp.Epoch, pcp.Epoch, "Unexpected previous justified epoch")
}
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'C'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.store.SetPrevJustifiedCheckpt(cp)
c.originBlockRoot = genesisRoot
pcp, err := c.PreviousJustifiedCheckpt()
require.NoError(t, err)
assert.DeepEqual(t, c.originBlockRoot[:], pcp.Root)
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
@@ -164,24 +132,46 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
c := &Service{}
c.head = &head{root: [32]byte{'A'}}
r, err := c.HeadRoot(context.Background())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
r, err := service.HeadRoot(ctx)
require.NoError(t, err)
assert.Equal(t, service.originBlockRoot, bytesutil.ToBytes32(r))
}
func TestHeadRoot_UseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
c := &Service{cfg: &config{BeaconDB: beaconDB}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
r, err := c.HeadRoot(context.Background())
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, br))
r, err := service.HeadRoot(ctx)
require.NoError(t, err)
assert.Equal(t, br, bytesutil.ToBytes32(r))
}

View File

@@ -53,8 +53,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedHash := s.store.FinalizedPayloadBlockHash()
justifiedHash := s.store.JustifiedPayloadBlockHash()
finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: justifiedHash[:],
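
With the store deprecated, the payload hashes fed into the engine's forkchoice state come from the fork-choice store, as the hunk above shows. The small sketch below wires the same fields together with hypothetical stand-in types rather than the real ForkChoicer and enginev1.ForkchoiceState.

package main

import "fmt"

// forkChoicer stands in for the fork-choice store's payload-hash getters used above.
type forkChoicer struct {
    justifiedPayloadHash [32]byte
    finalizedPayloadHash [32]byte
}

func (f *forkChoicer) JustifiedPayloadBlockHash() [32]byte { return f.justifiedPayloadHash }
func (f *forkChoicer) FinalizedPayloadBlockHash() [32]byte { return f.finalizedPayloadHash }

// forkchoiceState mirrors the shape of the engine API argument built in the hunk above.
type forkchoiceState struct {
    HeadBlockHash      []byte
    SafeBlockHash      []byte
    FinalizedBlockHash []byte
}

// buildForkchoiceState puts the justified payload hash into SafeBlockHash and the
// finalized payload hash into FinalizedBlockHash.
func buildForkchoiceState(fc *forkChoicer, headPayloadHash []byte) *forkchoiceState {
    justified := fc.JustifiedPayloadBlockHash()
    finalized := fc.FinalizedPayloadBlockHash()
    return &forkchoiceState{
        HeadBlockHash:      headPayloadHash,
        SafeBlockHash:      justified[:],
        FinalizedBlockHash: finalized[:],
    }
}

func main() {
    fc := &forkChoicer{justifiedPayloadHash: [32]byte{'j'}, finalizedPayloadHash: [32]byte{'f'}}
    st := buildForkchoiceState(fc, []byte{'h'})
    fmt.Printf("safe=%c finalized=%c\n", st.SafeBlockHash[0], st.FinalizedBlockHash[0])
}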

View File

@@ -27,7 +27,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -183,9 +182,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
fc := &ethpb.Checkpoint{Epoch: 0, Root: tt.finalizedRoot[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
arg := &notifyForkchoiceUpdateArg{
headState: st,
headRoot: tt.headRoot,
@@ -325,9 +321,6 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
fc := &ethpb.Checkpoint{Epoch: 0, Root: bra[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
@@ -705,14 +698,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
},
}
for _, tt := range tests {
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckptAndPayloadHash(
&ethpb.Checkpoint{
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
}, [32]byte{'a'})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
err = service.optimisticCandidateBlock(ctx, tt.blk)
@@ -985,7 +971,7 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
require.NoError(t, service.saveInitSyncBlock(ctx, r, wsb))
h, err := service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
@@ -998,7 +984,7 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
require.NoError(t, service.saveInitSyncBlock(ctx, r, wsb))
h, err = service.getPayloadHash(ctx, r[:])
require.NoError(t, err)

View File

@@ -26,10 +26,7 @@ import (
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
// This function is only used in spec-tests, it does save the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
jp, err := s.store.JustifiedCheckpt()
if err != nil {
return err
}
jp := s.CurrentJustifiedCheckpt()
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(jp.Root))
if err != nil {
@@ -66,11 +63,15 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
defer span.End()
// Do nothing if head hasn't changed.
oldHeadroot, err := s.HeadRoot(ctx)
if err != nil {
return err
var oldHeadRoot [32]byte
s.headLock.RLock()
if s.head == nil {
oldHeadRoot = s.originBlockRoot
} else {
oldHeadRoot = s.head.root
}
if newHeadRoot == bytesutil.ToBytes32(oldHeadroot) {
s.headLock.RUnlock()
if newHeadRoot == oldHeadRoot {
return nil
}
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
@@ -88,13 +89,12 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// A chain re-org occurred, so we fire an event notifying the rest of the services.
s.headLock.RLock()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
s.headLock.RUnlock()
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(oldHeadroot) {
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
@@ -118,7 +118,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
},
})
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(oldHeadroot), newHeadRoot); err != nil {
if err := s.saveOrphanedAtts(ctx, oldHeadRoot, newHeadRoot); err != nil {
return err
}
reorgCount.Inc()
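
The saveHead change above replaces the old HeadRoot call with a direct, lock-protected read that tolerates a nil head by falling back to the origin block root, then skips the update when nothing changed. A compact model of that check follows; all types and the helper name shouldUpdateHead are illustrative stand-ins.

package main

import (
    "fmt"
    "sync"
)

type head struct{ root [32]byte }

type service struct {
    headLock        sync.RWMutex
    head            *head
    originBlockRoot [32]byte
}

// shouldUpdateHead returns the previous head root and whether the new root differs from it.
func (s *service) shouldUpdateHead(newHeadRoot [32]byte) ([32]byte, bool) {
    var oldHeadRoot [32]byte
    s.headLock.RLock()
    if s.head == nil {
        oldHeadRoot = s.originBlockRoot // no head set yet: fall back to the origin block root
    } else {
        oldHeadRoot = s.head.root
    }
    s.headLock.RUnlock()
    return oldHeadRoot, newHeadRoot != oldHeadRoot
}

func main() {
    s := &service{originBlockRoot: [32]byte{'o'}}
    old, update := s.shouldUpdateHead([32]byte{'o'})
    fmt.Println(old == s.originBlockRoot, update) // true false: head unchanged, nothing to do
}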

View File

@@ -592,8 +592,6 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
Root: bellatrixBlkRoot[:],
Epoch: 0,
}
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)

View File

@@ -8,11 +8,20 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
)
// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b interfaces.SignedBeaconBlock) {
// This saves a beacon block to the initial sync blocks cache. It rate limits how many blocks
// the cache keeps in memory (2 epochs worth of blocks) and saves them to DB when it hits this limit.
func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks[r] = b
numBlocks := len(s.initSyncBlocks)
s.initSyncBlocksLock.Unlock()
if uint64(numBlocks) > initialSyncBlockCacheSize {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
return nil
}
// This checks if a beacon block exists in the initial sync blocks cache using the root
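
saveInitSyncBlock now takes a context and returns an error because it may flush the cache to the database once it grows past roughly two epochs of blocks. The self-contained model below shows that bounded cache; block, blockDB, memDB, cacheSize and service are hypothetical stand-ins for the real types.

package main

import (
    "context"
    "fmt"
    "sync"
)

const cacheSize = 64 // stand-in for initialSyncBlockCacheSize

type block struct{ slot uint64 }

type blockDB interface {
    SaveBlocks(ctx context.Context, blks []*block) error
}

type memDB struct{ saved int }

func (m *memDB) SaveBlocks(_ context.Context, blks []*block) error {
    m.saved += len(blks)
    return nil
}

type service struct {
    mu     sync.Mutex
    blocks map[[32]byte]*block
    db     blockDB
}

// saveInitSyncBlock caches the block and flushes the whole cache to the DB once it exceeds the bound.
func (s *service) saveInitSyncBlock(ctx context.Context, root [32]byte, b *block) error {
    s.mu.Lock()
    s.blocks[root] = b
    n := len(s.blocks)
    s.mu.Unlock()
    if n <= cacheSize {
        return nil
    }
    s.mu.Lock()
    blks := make([]*block, 0, len(s.blocks))
    for _, blk := range s.blocks {
        blks = append(blks, blk)
    }
    s.blocks = make(map[[32]byte]*block) // reset the cache after snapshotting it
    s.mu.Unlock()
    return s.db.SaveBlocks(ctx, blks)
}

func main() {
    db := &memDB{}
    s := &service{blocks: make(map[[32]byte]*block), db: db}
    for i := 0; i < cacheSize+1; i++ {
        root := [32]byte{byte(i)}
        if err := s.saveInitSyncBlock(context.Background(), root, &block{slot: uint64(i)}); err != nil {
            panic(err)
        }
    }
    fmt.Println(db.saved) // 65: the cache was flushed after crossing the bound
}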

View File

@@ -29,7 +29,7 @@ func TestService_getBlock(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(r1, b)
s.saveInitSyncBlock(ctx, r1, b)
got, err := s.getBlock(ctx, r1)
require.NoError(t, err)
require.DeepEqual(t, b, got)
@@ -59,7 +59,7 @@ func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(r1, b)
s.saveInitSyncBlock(ctx, r1, b)
require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r1))
// block in db

View File

@@ -1,128 +0,0 @@
package blockchain
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestService_newSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
ctx := context.Background()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
bj, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // genesis
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // finalized
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // justified
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // best justified
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // bad
type args struct {
slot types.Slot
finalized *ethpb.Checkpoint
justified *ethpb.Checkpoint
bestJustified *ethpb.Checkpoint
shouldEqual bool
}
tests := []struct {
name string
args args
}{
{
name: "Not epoch boundary. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch + 1,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
shouldEqual: false,
},
},
{
name: "Justified higher than best justified. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 2, Root: bj[:]},
shouldEqual: false,
},
},
{
name: "Best justified not on the same chain as finalized. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'d'}, 32)},
shouldEqual: false,
},
},
{
name: "Best justified on the same chain as finalized. Yes change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
shouldEqual: true,
},
},
}
for _, test := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
s := store.New(test.args.justified, test.args.finalized)
s.SetBestJustifiedCheckpt(test.args.bestJustified)
service.store = s
require.NoError(t, service.NewSlot(ctx, test.args.slot))
if test.args.shouldEqual {
bcp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
cp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, bcp, cp)
} else {
bcp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
cp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
require.DeepNotSSZEqual(t, bcp, cp)
}
}
}

View File

@@ -320,12 +320,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
r = bytesutil.ToBytes32([]byte{'A'})
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
@@ -354,10 +348,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
@@ -490,7 +480,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -521,7 +511,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -546,8 +536,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: r32}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -571,8 +560,6 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]

View File

@@ -130,6 +130,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// save current justified and finalized epochs for future use
currJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
currFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
@@ -167,52 +171,8 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}()
}
// Update justified check point.
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
currJustifiedEpoch := justified.Epoch
psj := postState.CurrentJustifiedCheckpoint()
if psj == nil {
return errNilJustifiedCheckpoint
}
if psj.Epoch > currJustifiedEpoch {
if err := s.updateJustified(ctx, postState); err != nil {
return err
}
}
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedInStore
}
psf := postState.FinalizedCheckpoint()
if psf == nil {
return errNilFinalizedCheckpoint
}
newFinalized := psf.Epoch > finalized.Epoch
if newFinalized {
s.store.SetPrevFinalizedCheckpt(finalized)
h, err := s.getPayloadHash(ctx, psf.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(psf, h)
s.store.SetPrevJustifiedCheckpt(justified)
h, err = s.getPayloadHash(ctx, psj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
@@ -251,19 +211,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}()
// Save justified check point to db.
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
if justified.Epoch > currJustifiedEpoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
return err
}
}
// Update finalized check point.
if newFinalized {
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized.Epoch > currFinalizedEpoch {
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
return err
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
@@ -284,15 +244,13 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
// with a custom deadline, therefore using the background context instead.
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
defer cancel()
if err := s.insertFinalizedDeposits(depCtx, fRoot); err != nil {
if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}
defer reportAttestationInclusion(b)
return s.handleEpochBoundary(ctx, postState)
}
@@ -423,11 +381,29 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
if err := s.saveInitSyncBlock(ctx, blockRoots[i], b); err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: blockRoots[i][:],
}); err != nil {
tracing.AnnotateError(span, err)
return err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
}
// Insert all nodes but the last one to forkchoice
if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
@@ -466,62 +442,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
}
// handles a block after the block's batch has been verified, where we can save blocks
// their state summaries and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interfaces.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: signed.Block().Slot(),
Root: blockRoot[:],
}); err != nil {
return err
}
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
if jCheckpoint.Epoch > justified.Epoch {
if err := s.updateJustifiedInitSync(ctx, jCheckpoint); err != nil {
return err
}
}
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedInStore
}
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if fCheckpoint.Epoch > finalized.Epoch {
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
return err
}
s.store.SetPrevFinalizedCheckpt(finalized)
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: fCheckpoint.Epoch, Root: bytesutil.ToBytes32(fCheckpoint.Root)}); err != nil {
return err
}
}
return nil
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
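
With handleBlockAfterBatchVerify removed, onBlockBatch itself saves a state summary for every block and persists justified or finalized progress whenever a block's checkpoint epoch advances past the previous block's. The schematic below models that loop with hypothetical types and hook functions; it is not the real onBlockBatch.

package main

import (
    "context"
    "fmt"
)

type checkpoint struct{ Epoch uint64 }

type batchBlock struct {
    slot      uint64
    justified checkpoint
    finalized checkpoint
}

// hooks bundles the per-block side effects performed during init sync.
type hooks struct {
    saveStateSummary        func(ctx context.Context, slot uint64) error
    saveJustifiedCheckpoint func(ctx context.Context, cp checkpoint) error
    updateFinalized         func(ctx context.Context, cp checkpoint) error
}

func processBatch(ctx context.Context, blks []batchBlock, h hooks) error {
    for i, b := range blks {
        if err := h.saveStateSummary(ctx, b.slot); err != nil {
            return err
        }
        if i == 0 {
            continue
        }
        if b.justified.Epoch > blks[i-1].justified.Epoch {
            if err := h.saveJustifiedCheckpoint(ctx, b.justified); err != nil {
                return err
            }
        }
        if b.finalized.Epoch > blks[i-1].finalized.Epoch {
            if err := h.updateFinalized(ctx, b.finalized); err != nil {
                return err
            }
        }
    }
    return nil
}

func main() {
    h := hooks{
        saveStateSummary:        func(context.Context, uint64) error { return nil },
        saveJustifiedCheckpoint: func(_ context.Context, cp checkpoint) error { fmt.Println("justified ->", cp.Epoch); return nil },
        updateFinalized:         func(_ context.Context, cp checkpoint) error { fmt.Println("finalized ->", cp.Epoch); return nil },
    }
    blks := []batchBlock{
        {slot: 63, justified: checkpoint{0}, finalized: checkpoint{0}},
        {slot: 64, justified: checkpoint{1}, finalized: checkpoint{0}},
        {slot: 96, justified: checkpoint{2}, finalized: checkpoint{1}},
    }
    _ = processBatch(context.Background(), blks, h)
}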

View File

@@ -95,11 +95,8 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBloc
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
defer span.End()
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
finalized := s.ForkChoicer().FinalizedCheckpoint()
fRoot := s.ensureRootNotZeros(finalized.Root)
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
@@ -125,10 +122,7 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
finalized := s.ForkChoicer().FinalizedCheckpoint()
finalizedSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
@@ -140,108 +134,8 @@ func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
return nil
}
// shouldUpdateCurrentJustified prevents bouncing attack, by only update conflicting justified
// checkpoints in the fork choice if in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
//
// Spec code:
// def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
// """
// To address the bouncing attack, only update conflicting justified
// checkpoints in the fork choice if in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
//
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
// """
// if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
// return True
//
// justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
// if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
// return False
//
// return True
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.shouldUpdateCurrentJustified")
defer span.End()
if slots.SinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
return true, nil
}
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return false, errors.Wrap(err, "could not get justified checkpoint")
}
jSlot, err := slots.EpochStart(justified.Epoch)
if err != nil {
return false, err
}
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
b, err := s.ancestor(ctx, justifiedRoot[:], jSlot)
if err != nil {
return false, err
}
if !bytes.Equal(b, justified.Root) {
return false, nil
}
return true, nil
}
func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateJustified")
defer span.End()
cpt := state.CurrentJustifiedCheckpoint()
bestJustified, err := s.store.BestJustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get best justified checkpoint")
}
if cpt.Epoch > bestJustified.Epoch {
s.store.SetBestJustifiedCheckpt(cpt)
}
canUpdate, err := s.shouldUpdateCurrentJustified(ctx, cpt)
if err != nil {
return err
}
if canUpdate {
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
s.store.SetPrevJustifiedCheckpt(justified)
h, err := s.getPayloadHash(ctx, cpt.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
}
return nil
}
// This caches input checkpoint as justified for the service struct. It rotates current justified to previous justified,
// caches justified checkpoint balances for fork choice and save justified checkpoint in DB.
// This method does not have defense against fork choice bouncing attack, which is why it's only recommend to be used during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
s.store.SetPrevJustifiedCheckpt(justified)
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
return s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cp.Epoch, Root: bytesutil.ToBytes32(cp.Root)})
}
// updateFinalized saves the init sync blocks, finalized checkpoint, migrates
// to cold old states and saves the last validated checkpoint to DB
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
defer span.End()
@@ -347,10 +241,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
// Fork choice only matters from last finalized slot.
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return err
}
finalized := s.ForkChoicer().FinalizedCheckpoint()
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
@@ -376,7 +267,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root)) {
if root != s.ensureRootNotZeros(finalized.Root) {
return errNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
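
Both VerifyFinalizedBlkDescendant and verifyBlkFinalizedSlot now read the finalized checkpoint from the fork-choice store instead of the deprecated blockchain store. The small model below shows the slot check, assuming a fixed slots-per-epoch constant; the types and names here are stand-ins, not the real implementation.

package main

import (
    "errors"
    "fmt"
)

const slotsPerEpoch = 32

// epochStart models slots.EpochStart for a fixed slots-per-epoch value.
func epochStart(epoch uint64) uint64 { return epoch * slotsPerEpoch }

// forkChoicer stands in for the fork-choice store that holds the finalized checkpoint.
type forkChoicer struct{ finalizedEpoch uint64 }

func (f *forkChoicer) FinalizedCheckpointEpoch() uint64 { return f.finalizedEpoch }

var errBlockBelowFinalized = errors.New("block slot is not beyond the finalized checkpoint")

// verifyBlkFinalizedSlot rejects blocks at or below the finalized epoch's start slot.
func verifyBlkFinalizedSlot(fc *forkChoicer, blockSlot uint64) error {
    finalizedSlot := epochStart(fc.FinalizedCheckpointEpoch())
    if blockSlot <= finalizedSlot {
        return errBlockBelowFinalized
    }
    return nil
}

func main() {
    fc := &forkChoicer{finalizedEpoch: 2}
    fmt.Println(verifyBlkFinalizedSlot(fc, 64)) // slot 64 equals the finalized start slot: rejected
    fmt.Println(verifyBlkFinalizedSlot(fc, 65)) // <nil>: accepted
}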

View File

@@ -20,6 +20,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -126,11 +127,8 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
fRoot := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.blk)
@@ -225,11 +223,8 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
fRoot := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.blk)
@@ -247,127 +242,74 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
for i := 1; i < 97; i++ {
for i := 0; i < 97; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
rBlock, err := blks[0].PbPhase0Block()
assert.NoError(t, err)
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
service.originBlockRoot = blkRoots[1]
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
jcp, err := service.store.JustifiedCheckpt()
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
jcp := service.CurrentJustifiedCheckpt()
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
}
func TestStore_OnBlockBatch_PruneOK(t *testing.T) {
func TestStore_OnBlockBatch_PruneOK_Protoarray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
for i := 1; i < 128; i++ {
for i := 0; i < 320; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
if i == 32 {
firstState = bState.Copy()
}
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
for i := 0; i < 32; i++ {
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[i]))
}
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'b'})
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[31], firstState))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, firstState, blkRoots[31]))
err = service.onBlockBatch(ctx, blks[32:], blkRoots[32:])
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
require.Equal(t, 65, service.ForkChoicer().NodeCount())
}
func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
@@ -377,63 +319,37 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
for i := 1; i < 97; i++ {
for i := 0; i < 97; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
rBlock, err := blks[0].PbPhase0Block()
assert.NoError(t, err)
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
service.originBlockRoot = blkRoots[1]
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
jcp, err := service.store.JustifiedCheckpt()
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
jcp := service.CurrentJustifiedCheckpt()
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
@@ -446,258 +362,38 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(doublylinkedtree.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(params.BeaconConfig().ZeroHash[:])
wsb := util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
blkCount := 4
for i := 1; i <= blkCount; i++ {
for i := 0; i <= blkCount; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wsb)
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
rBlock, err := blks[0].PbPhase0Block()
assert.NoError(t, err)
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
service.originBlockRoot = blkRoots[1]
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
}
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = time.Now()
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: make([]byte, 32)})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
newJustifiedBlk := util.NewBeaconBlock()
newJustifiedBlk.Block.Slot = 1
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, newJustifiedBlk)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, lastJustifiedBlk)
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
}
func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New()
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
newJustifiedBlk := util.NewBeaconBlock()
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, newJustifiedBlk)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, lastJustifiedBlk)
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, false, update, "Should not be able to update justified, received true")
}
func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
newJustifiedBlk := util.NewBeaconBlock()
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, newJustifiedBlk)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, lastJustifiedBlk)
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, false, update, "Should not be able to update justified, received true")
}
func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
b.Block.ParentRoot = gRoot[:]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
require.NoError(t, service.verifyBlkPreState(ctx, wb))
}
func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
b.Block.ParentRoot = gRoot[:]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
require.NoError(t, service.verifyBlkPreState(ctx, wb))
}
func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New()
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
service.saveInitSyncBlock(gRoot, wsb)
b := util.NewBeaconBlock()
b.Block.Slot = 1
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
err = service.verifyBlkPreState(ctx, wb)
wanted := "could not reconstruct parent state"
assert.ErrorContains(t, wanted, err)
b.Block.ParentRoot = gRoot[:]
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1})
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
}
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -706,33 +402,46 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
signedBlock := util.NewBeaconBlock()
util.SaveBlock(t, ctx, beaconDB, signedBlock)
r, err := signedBlock.Block.HashTreeRoot()
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), types.Slot(1))
require.NoError(t, err)
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: []byte{'A'}}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
st, err := util.NewBeaconState()
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, st.Copy(), r))
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
// Could update
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: r[:]}))
require.NoError(t, service.updateJustified(context.Background(), s))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: root[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, root, st))
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
}
cp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
assert.Equal(t, s.CurrentJustifiedCheckpoint().Epoch, cp.Epoch, "Incorrect justified epoch in service")
func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Could not update
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}, Epoch: 2})
require.NoError(t, service.updateJustified(context.Background(), s))
cp, err = service.store.BestJustifiedCheckpt()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(doublylinkedtree.New()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
assert.Equal(t, types.Epoch(2), cp.Epoch, "Incorrect justified epoch in service")
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), types.Slot(1))
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: root[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, root, st))
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
}
func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
@@ -747,18 +456,11 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
roots, err := blockTree1(t, beaconDB, service.originBlockRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
@@ -766,13 +468,17 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
r0 := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
// plus 1 node for the genesis block root.
// block 0 is not inserted because its slot is 0, which is invalid
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(service.originBlockRoot), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
@@ -791,33 +497,37 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
roots, err := blockTree1(t, beaconDB, service.originBlockRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
// save a block at slot 0 (otherwise invalid) because the doubly linked tree enforces
// that the parent of each inserted block is already a node in the tree.
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
// plus 1 node for the genesis block root.
assert.Equal(t, 6, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(service.originBlockRoot), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r0), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
@@ -836,16 +546,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
roots, err := blockTree1(t, beaconDB, service.originBlockRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
@@ -856,13 +559,16 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
r0 := bytesutil.ToBytes32(roots[0])
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
// plus the origin block root
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
@@ -883,16 +589,10 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
roots, err := blockTree1(t, beaconDB, service.originBlockRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
@@ -903,15 +603,25 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
// save a block at slot 0 (otherwise invalid) because the doubly linked tree enforces
// that the parent of each inserted block is already a node in the tree.
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
// plus the origin block root
assert.Equal(t, 6, service.cfg.ForkChoiceStore.NodeCount(), "Mismatched nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
@@ -965,7 +675,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
beaconState, _ := util.DeterministicGenesisState(t, 32)
// Set finalized epoch to 2.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
@@ -1022,8 +732,8 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
beaconState, _ := util.DeterministicGenesisState(t, 32)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
// Set finalized epoch to 2.
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
@@ -1065,7 +775,6 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testin
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[1]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.ErrorIs(t, errNotDescendantOfFinalized, err)
@@ -1399,7 +1108,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:], Epoch: 1}, [32]byte{})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: tt.args.finalizedRoot, Epoch: 1}))
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
@@ -1412,38 +1121,6 @@ func TestVerifyBlkDescendant(t *testing.T) {
}
}
func TestUpdateJustifiedInitSync(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gBlk := util.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, gBlk)
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: gRoot[:]}))
beaconState, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.originBlockRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.store.SetJustifiedCheckptAndPayloadHash(currentCp, [32]byte{'a'})
newCp := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
cp, err := service.PreviousJustifiedCheckpt()
require.NoError(t, err)
assert.DeepSSZEqual(t, currentCp, cp, "Incorrect previous justified checkpoint")
cp, err = service.CurrentJustifiedCheckpt()
require.NoError(t, err)
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in cache")
cp, err = service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
}
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsNoDB()
@@ -1490,11 +1167,6 @@ func TestOnBlock_CanFinalize(t *testing.T) {
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1508,23 +1180,19 @@ func TestOnBlock_CanFinalize(t *testing.T) {
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
cp, err := service.CurrentJustifiedCheckpt()
require.NoError(t, err)
cp := service.CurrentJustifiedCheckpt()
require.Equal(t, types.Epoch(3), cp.Epoch)
cp, err = service.FinalizedCheckpt()
require.NoError(t, err)
cp = service.FinalizedCheckpt()
require.Equal(t, types.Epoch(2), cp.Epoch)
// The update should persist in DB.
j, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
require.NoError(t, err)
cp, err = service.CurrentJustifiedCheckpt()
require.NoError(t, err)
cp = service.CurrentJustifiedCheckpt()
require.Equal(t, j.Epoch, cp.Epoch)
f, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
require.NoError(t, err)
cp, err = service.FinalizedCheckpt()
require.NoError(t, err)
cp = service.FinalizedCheckpt()
require.Equal(t, f.Epoch, cp.Epoch)
}
@@ -1565,11 +1233,6 @@ func TestOnBlock_InvalidSignature(t *testing.T) {
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
@@ -1607,12 +1270,6 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
@@ -1638,11 +1295,6 @@ func TestInsertFinalizedDeposits(t *testing.T) {
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
@@ -1677,11 +1329,6 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
@@ -2016,11 +1663,6 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
@@ -2089,7 +1731,7 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{'a'})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
blk := util.HydrateBeaconBlock(&ethpb.BeaconBlock{Slot: 1})
wb, err := wrapper.WrappedBeaconBlock(blk)
require.NoError(t, err)

View File

@@ -71,10 +71,7 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
return nil
}
f, err := s.FinalizedCheckpt()
if err != nil {
return err
}
f := s.FinalizedCheckpt()
ss, err := slots.EpochStart(f.Epoch)
if err != nil {
return err
@@ -123,7 +120,7 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
case <-s.ctx.Done():
return
case <-st.C():
if err := s.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("Could not process new slot")
return
}
@@ -152,11 +149,8 @@ func (s *Service) UpdateHead(ctx context.Context) error {
s.processAttestations(ctx)
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return err
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
return err
}
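The UpdateHead change above is representative of the whole deprecation: the justified checkpoint now comes straight from fork choice, whose checkpoint type already carries a [32]byte root, so the balance cache is keyed by it directly and the old bytesutil.ToBytes32 conversion disappears. A minimal, self-contained sketch of that flow with hypothetical stand-in types (not the Prysm ones):

package main

import (
	"context"
	"fmt"
)

// checkpoint stands in for forkchoicetypes.Checkpoint: its root is already a [32]byte.
type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type forkChoice struct{ justified checkpoint }

func (f *forkChoice) JustifiedCheckpoint() checkpoint { return f.justified }

// balanceCache mimics a justified-balances cache keyed by block root.
type balanceCache struct{ byRoot map[[32]byte][]uint64 }

func (c *balanceCache) get(_ context.Context, root [32]byte) ([]uint64, error) {
	b, ok := c.byRoot[root]
	if !ok {
		return nil, fmt.Errorf("no balances cached for root %x", root)
	}
	return b, nil
}

// updateHead shows the new shape: read justified from fork choice (no error check),
// then fetch balances with the root used as-is.
func updateHead(ctx context.Context, fc *forkChoice, cache *balanceCache) error {
	justified := fc.JustifiedCheckpoint()
	balances, err := cache.get(ctx, justified.Root)
	if err != nil {
		return err
	}
	fmt.Println("justified balances:", balances)
	return nil
}

func main() {
	root := [32]byte{'j'}
	fc := &forkChoice{justified: checkpoint{Epoch: 1, Root: root}}
	cache := &balanceCache{byRoot: map[[32]byte][]uint64{root: {32, 32}}}
	if err := updateHead(context.Background(), fc, cache); err != nil {
		fmt.Println(err)
	}
}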

View File

@@ -139,7 +139,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
require.LogsDoNotContain(t, hook, hookErr)
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
service.saveInitSyncBlock([32]byte{'a'}, gb)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, invalidStateErr)
@@ -156,8 +156,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
require.NoError(t, err)
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(r1, wsb)
finalized := &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
slot: 1,
@@ -166,7 +165,6 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
@@ -177,7 +175,6 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
r1, err = b.Block.HashTreeRoot()
require.NoError(t, err)
finalized = &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
st, _ = util.DeterministicGenesisState(t, 1)
service.head = &head{
slot: 1,
@@ -186,7 +183,6 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)

View File

@@ -59,17 +59,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
}
// Reports on block and fork choice metrics.
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return err
}
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errNilFinalizedInStore
}
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
justified := s.CurrentJustifiedCheckpt()
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
@@ -113,20 +107,14 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
})
// Reports on blockCopy and fork choice metrics.
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
@@ -179,10 +167,7 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality types.Epoch
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return err
}
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
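The checkpoint reads in this file follow the same migration: the error-returning getters on the deprecated store become plain accessors on the service, backed by fork choice, and callers keep only a nil check. A minimal sketch of the new caller shape, using illustrative types rather than the real ethpb.Checkpoint and Service:

package main

import "fmt"

// checkpoint is an illustrative stand-in for *ethpb.Checkpoint.
type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// forkChoice is seeded with checkpoints at startup, which is why the accessor
// below no longer needs an error return.
type forkChoice struct{ finalized *checkpoint }

func (f *forkChoice) FinalizedCheckpoint() *checkpoint { return f.finalized }

type service struct{ fc *forkChoice }

// FinalizedCheckpt mirrors the new shape: value only, no error; callers still
// guard against nil, as the hunks above do with errNilFinalizedInStore.
func (s *service) FinalizedCheckpt() *checkpoint { return s.fc.FinalizedCheckpoint() }

func main() {
	s := &service{fc: &forkChoice{finalized: &checkpoint{Epoch: 2}}}
	// Old call sites: finalized, err := s.store.FinalizedCheckpt(); if err != nil { ... }
	finalized := s.FinalizedCheckpt()
	if finalized == nil {
		fmt.Println("nil finalized checkpoint")
		return
	}
	fmt.Println("finalized epoch:", finalized.Epoch)
}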

View File

@@ -137,12 +137,6 @@ func TestService_ReceiveBlock(t *testing.T) {
s, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
h := [32]byte{'a'}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, h)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -178,11 +172,6 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
s, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
@@ -256,12 +245,6 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -290,7 +273,7 @@ func TestService_HasBlock(t *testing.T) {
}
wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
s.saveInitSyncBlock(r, wsb)
require.NoError(t, s.saveInitSyncBlock(context.Background(), r, wsb))
if !s.HasBlock(context.Background(), r) {
t.Error("Should have block")
}
@@ -309,7 +292,6 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
@@ -322,7 +304,6 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -335,7 +316,6 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))

View File

@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -65,7 +64,6 @@ type Service struct {
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
processAttestationsLock sync.Mutex
}
@@ -103,7 +101,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
cfg: &config{},
store: &store.Store{},
}
for _, opt := range opts {
if err := opt(srv); err != nil {
@@ -204,7 +201,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if finalized == nil {
return errNilFinalizedCheckpoint
}
s.store = store.New(justified, finalized)
var forkChoicer f.ForkChoicer
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
@@ -472,10 +468,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
s.originBlockRoot = genesisBlkRoot
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)
// Finalized checkpoint at genesis is a zero hash.
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
@@ -271,10 +270,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
// Test the start function.
chainService.Start()
cp, err := chainService.store.FinalizedCheckpt()
require.NoError(t, err)
cp := chainService.FinalizedCheckpt()
require.DeepEqual(t, blkRoot[:], cp.Root, "Finalized checkpoint root is incorrect")
cp, err = chainService.store.JustifiedCheckpt()
cp = chainService.CurrentJustifiedCheckpt()
require.NoError(t, err)
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], cp.Root, "Justified Checkpoint root is incorrect")
@@ -459,7 +457,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New()},
}
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
@@ -483,10 +481,8 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
store: &store.Store{},
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -504,10 +500,8 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
store: &store.Store{},
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -535,7 +529,7 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
s.saveInitSyncBlock(r, wsb)
require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}
@@ -577,10 +571,8 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
store: &store.Store{},
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
@@ -600,10 +592,8 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
store: &store.Store{},
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

View File

@@ -1,30 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"new.go",
"setter_getter.go",
"type.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"new_test.go",
"setter_getter_test.go",
],
embed = [":go_default_library"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,4 +0,0 @@
// Package store implements the store object defined in the phase0 fork choice spec.
// It serves as a helpful middleware layer in between blockchain pkg and fork choice protoarray pkg.
// All the getters and setters are concurrent thread safe
package store

View File

@@ -1,16 +0,0 @@
package store
import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// New creates a store object.
func New(justifiedCheckpt *ethpb.Checkpoint, finalizedCheckpt *ethpb.Checkpoint) *Store {
return &Store{
justifiedCheckpt: justifiedCheckpt,
prevJustifiedCheckpt: justifiedCheckpt,
bestJustifiedCheckpt: justifiedCheckpt,
finalizedCheckpt: finalizedCheckpt,
prevFinalizedCheckpt: finalizedCheckpt,
}
}

View File

@@ -1,35 +0,0 @@
package store
import (
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestNew(t *testing.T) {
j := &ethpb.Checkpoint{
Epoch: 0,
Root: []byte("hi"),
}
f := &ethpb.Checkpoint{
Epoch: 0,
Root: []byte("hello"),
}
s := New(j, f)
cp, err := s.JustifiedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, j, cp)
cp, err = s.BestJustifiedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, j, cp)
cp, err = s.PrevJustifiedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, j, cp)
cp, err = s.FinalizedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, f, cp)
cp, err = s.PrevFinalizedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, f, cp)
}

View File

@@ -1,111 +0,0 @@
package store
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
var (
ErrNilCheckpoint = errors.New("nil checkpoint")
)
// PrevJustifiedCheckpt returns the previous justified checkpoint in the Store.
func (s *Store) PrevJustifiedCheckpt() (*ethpb.Checkpoint, error) {
s.RLock()
defer s.RUnlock()
if s.prevJustifiedCheckpt == nil {
return nil, ErrNilCheckpoint
}
return s.prevJustifiedCheckpt, nil
}
// BestJustifiedCheckpt returns the best justified checkpoint in the Store.
func (s *Store) BestJustifiedCheckpt() (*ethpb.Checkpoint, error) {
s.RLock()
defer s.RUnlock()
if s.bestJustifiedCheckpt == nil {
return nil, ErrNilCheckpoint
}
return s.bestJustifiedCheckpt, nil
}
// JustifiedCheckpt returns the justified checkpoint in the Store.
func (s *Store) JustifiedCheckpt() (*ethpb.Checkpoint, error) {
s.RLock()
defer s.RUnlock()
if s.justifiedCheckpt == nil {
return nil, ErrNilCheckpoint
}
return s.justifiedCheckpt, nil
}
// JustifiedPayloadBlockHash returns the justified payload block hash reflecting justified check point.
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.justifiedPayloadBlockHash
}
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
func (s *Store) PrevFinalizedCheckpt() (*ethpb.Checkpoint, error) {
s.RLock()
defer s.RUnlock()
if s.prevFinalizedCheckpt == nil {
return nil, ErrNilCheckpoint
}
return s.prevFinalizedCheckpt, nil
}
// FinalizedCheckpt returns the finalized checkpoint in the Store.
func (s *Store) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
s.RLock()
defer s.RUnlock()
if s.finalizedCheckpt == nil {
return nil, ErrNilCheckpoint
}
return s.finalizedCheckpt, nil
}
// FinalizedPayloadBlockHash returns the finalized payload block hash reflecting finalized check point.
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.finalizedPayloadBlockHash
}
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
defer s.Unlock()
s.prevJustifiedCheckpt = cp
}
// SetBestJustifiedCheckpt sets the best justified checkpoint in the Store.
func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
defer s.Unlock()
s.bestJustifiedCheckpt = cp
}
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and blockhash in the Store.
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.justifiedCheckpt = cp
s.justifiedPayloadBlockHash = h
}
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and blockhash in the Store.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.finalizedCheckpt = cp
s.finalizedPayloadBlockHash = h
}
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.
func (s *Store) SetPrevFinalizedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
defer s.Unlock()
s.prevFinalizedCheckpt = cp
}

View File

@@ -1,72 +0,0 @@
package store
import (
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func Test_store_PrevJustifiedCheckpt(t *testing.T) {
s := &Store{}
var cp *ethpb.Checkpoint
_, err := s.PrevJustifiedCheckpt()
require.ErrorIs(t, ErrNilCheckpoint, err)
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetPrevJustifiedCheckpt(cp)
got, err := s.PrevJustifiedCheckpt()
require.NoError(t, err)
require.Equal(t, cp, got)
}
func Test_store_BestJustifiedCheckpt(t *testing.T) {
s := &Store{}
var cp *ethpb.Checkpoint
_, err := s.BestJustifiedCheckpt()
require.ErrorIs(t, ErrNilCheckpoint, err)
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetBestJustifiedCheckpt(cp)
got, err := s.BestJustifiedCheckpt()
require.NoError(t, err)
require.Equal(t, cp, got)
}
func Test_store_JustifiedCheckpt(t *testing.T) {
s := &Store{}
var cp *ethpb.Checkpoint
_, err := s.JustifiedCheckpt()
require.ErrorIs(t, ErrNilCheckpoint, err)
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
h := [32]byte{'b'}
s.SetJustifiedCheckptAndPayloadHash(cp, h)
got, err := s.JustifiedCheckpt()
require.NoError(t, err)
require.Equal(t, cp, got)
require.Equal(t, h, s.JustifiedPayloadBlockHash())
}
func Test_store_FinalizedCheckpt(t *testing.T) {
s := &Store{}
var cp *ethpb.Checkpoint
_, err := s.FinalizedCheckpt()
require.ErrorIs(t, ErrNilCheckpoint, err)
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
h := [32]byte{'b'}
s.SetFinalizedCheckptAndPayloadHash(cp, h)
got, err := s.FinalizedCheckpt()
require.NoError(t, err)
require.Equal(t, cp, got)
require.Equal(t, h, s.FinalizedPayloadBlockHash())
}
func Test_store_PrevFinalizedCheckpt(t *testing.T) {
s := &Store{}
var cp *ethpb.Checkpoint
_, err := s.PrevFinalizedCheckpt()
require.ErrorIs(t, ErrNilCheckpoint, err)
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetPrevFinalizedCheckpt(cp)
got, err := s.PrevFinalizedCheckpt()
require.NoError(t, err)
require.Equal(t, cp, got)
}

View File

@@ -1,30 +0,0 @@
package store
import (
"sync"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// Store is defined in the fork choice consensus spec for tracking current time and various versions of checkpoints.
//
// Spec code:
// class Store(object):
// time: uint64
// genesis_time: uint64
// justified_checkpoint: Checkpoint
// finalized_checkpoint: Checkpoint
// best_justified_checkpoint: Checkpoint
// proposerBoostRoot: Root
type Store struct {
justifiedCheckpt *ethpb.Checkpoint
justifiedPayloadBlockHash [32]byte
finalizedCheckpt *ethpb.Checkpoint
finalizedPayloadBlockHash [32]byte
bestJustifiedCheckpt *ethpb.Checkpoint
sync.RWMutex
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
// TODO(10094): Consider removing in v3.
prevFinalizedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
}

View File

@@ -283,18 +283,18 @@ func (s *ChainService) CurrentFork() *ethpb.Fork {
}
// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
func (s *ChainService) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
return s.FinalizedCheckPoint, nil
func (s *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
return s.FinalizedCheckPoint
}
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
func (s *ChainService) CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error) {
return s.CurrentJustifiedCheckPoint, nil
func (s *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
return s.CurrentJustifiedCheckPoint
}
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
func (s *ChainService) PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error) {
return s.PreviousJustifiedCheckPoint, nil
func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
return s.PreviousJustifiedCheckPoint
}
// ReceiveAttestation mocks ReceiveAttestation method in chain service.

View File

@@ -5,8 +5,9 @@ import (
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -71,14 +72,13 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
fcs := protoarray.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
store: &store.Store{},
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
wsVerifier: wv,
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
cp, err := s.store.FinalizedCheckpt()
require.NoError(t, err)
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
cp := s.ForkChoicer().FinalizedCheckpoint()
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
if tt.wantErr == nil {
require.NoError(t, err)

View File

@@ -8,6 +8,7 @@ go_library(
"forkchoice.go",
"metrics.go",
"node.go",
"on_tick.go",
"optimistic_sync.go",
"proposer_boost.go",
"store.go",
@@ -46,6 +47,7 @@ go_test(
"forkchoice_test.go",
"no_vote_test.go",
"node_test.go",
"on_tick_test.go",
"optimistic_sync_test.go",
"proposer_boost_test.go",
"store_test.go",

View File

@@ -25,6 +25,7 @@ func New() *ForkChoice {
s := &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
@@ -158,6 +159,7 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
}
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
} else {
@@ -178,6 +180,7 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
return err
}
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: jcRoot}
}
@@ -375,6 +378,20 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams
return node.setNodeAndParentValidated(ctx)
}
// BestJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.bestJustifiedCheckpoint
}
// PreviousJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.prevJustifiedCheckpoint
}
// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
@@ -444,9 +461,10 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) e
}
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = jc
bj := f.store.bestJustifiedCheckpoint
if bj == nil || jc.Epoch > bj.Epoch {
if bj == nil || bj.Root == params.BeaconConfig().ZeroHash || jc.Epoch > bj.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
}
return nil
@@ -533,6 +551,9 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
return err
}
if err := f.updateCheckpoints(ctx, chain[i].JustifiedCheckpoint, chain[i].FinalizedCheckpoint); err != nil {
return err
}
}
return nil
}
@@ -546,3 +567,40 @@ func (f *ForkChoice) SetGenesisTime(genesisTime uint64) {
func (f *ForkChoice) SetOriginRoot(root [32]byte) {
f.store.originRoot = root
}
// CachedHeadRoot returns the last cached head root
func (f *ForkChoice) CachedHeadRoot() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node := f.store.headNode
if node == nil {
return [32]byte{}
}
return f.store.headNode.root
}
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.FinalizedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.JustifiedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
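The two payload-hash accessors added above replace state that the deleted blockchain store used to carry alongside its checkpoints: since fork choice already indexes every node by root, the hash can be resolved on demand. A compact sketch of that lookup, with hypothetical node and store types rather than the package's real ones:

package main

import "fmt"

// node and store are illustrative stand-ins for the doubly linked tree's
// internal types; only the fields needed for the lookup are modeled.
type node struct{ payloadHash [32]byte }

type store struct {
	finalizedRoot [32]byte
	nodeByRoot    map[[32]byte]*node
}

// finalizedPayloadBlockHash resolves the payload hash through the node index,
// returning the zero hash if the checkpoint root is (unexpectedly) unknown.
func (s *store) finalizedPayloadBlockHash() [32]byte {
	n, ok := s.nodeByRoot[s.finalizedRoot]
	if !ok || n == nil {
		return [32]byte{}
	}
	return n.payloadHash
}

func main() {
	root := [32]byte{'f'}
	s := &store{
		finalizedRoot: root,
		nodeByRoot:    map[[32]byte]*node{root: {payloadHash: [32]byte{0xaa}}},
	}
	fmt.Printf("finalized payload hash: %x\n", s.finalizedPayloadBlockHash())
}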

View File

@@ -1,13 +1,10 @@
package blockchain
package doublylinkedtree
import (
"bytes"
"context"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -31,9 +28,9 @@ import (
// ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
// store.justified_checkpoint = store.best_justified_checkpoint
func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
// Reset proposer boost root in fork choice.
if err := s.cfg.ForkChoiceStore.ResetBoostedProposerRoot(ctx); err != nil {
func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
// Reset proposer boost root
if err := f.ResetBoostedProposerRoot(ctx); err != nil {
return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
}
@@ -43,39 +40,29 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
}
// Update store.justified_checkpoint if a better checkpoint exists on the store.finalized_checkpoint chain
bj, err := s.store.BestJustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get best justified checkpoint")
}
j, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
f, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if bj.Epoch > j.Epoch {
finalizedSlot, err := slots.EpochStart(f.Epoch)
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, bj.Root, finalizedSlot)
// We check that the best justified checkpoint is a descendant of the finalized checkpoint.
// This should always hold, since forkchoice enforces that every node is a descendant of the
// finalized checkpoint. The check is kept here for additional safety; consider removing
// this extra ancestor lookup.
r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
if err != nil {
return err
}
if bytes.Equal(r, f.Root) {
h, err := s.getPayloadHash(ctx, bj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: bj.Epoch, Root: bytesutil.ToBytes32(bj.Root)}); err != nil {
return err
}
if r == fcp.Root {
f.store.justifiedCheckpoint = bjcp
}
}
return nil
}
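Leaving aside locking and the proposer-boost reset, the rule NewSlot now applies inside fork choice is the spec's per-slot update quoted above: at an epoch boundary, adopt best_justified as justified only if it is from a later epoch and its ancestor at the finalized slot is the finalized root. A toy, self-contained sketch of just that decision (types and helpers are illustrative, not the Prysm ones):

package main

import "fmt"

type checkpoint struct {
	epoch uint64
	root  [32]byte
}

// ancestorAt stands in for AncestorRoot: walk the parent map until the block's
// slot is at or below the target slot, returning that root.
func ancestorAt(parents map[[32]byte][32]byte, slotOf map[[32]byte]uint64, root [32]byte, slot uint64) [32]byte {
	for slotOf[root] > slot {
		root = parents[root]
	}
	return root
}

// promote applies the epoch-boundary rule from the spec's on_tick handler.
func promote(justified, bestJustified, finalized checkpoint, parents map[[32]byte][32]byte, slotOf map[[32]byte]uint64, slotsPerEpoch uint64) checkpoint {
	if bestJustified.epoch <= justified.epoch {
		return justified
	}
	finalizedSlot := finalized.epoch * slotsPerEpoch
	if ancestorAt(parents, slotOf, bestJustified.root, finalizedSlot) == finalized.root {
		return bestJustified // best justified descends from finalized: adopt it
	}
	return justified
}

func main() {
	g, f, b := [32]byte{'g'}, [32]byte{'f'}, [32]byte{'b'}
	parents := map[[32]byte][32]byte{f: g, b: f}
	slotOf := map[[32]byte]uint64{g: 0, f: 32, b: 96}
	justified := checkpoint{epoch: 2, root: f}
	best := checkpoint{epoch: 3, root: b}
	finalized := checkpoint{epoch: 1, root: f}
	// best justified is newer and descends from the finalized root, so it is adopted.
	fmt.Println(promote(justified, best, finalized, parents, slotOf, 32) == best)
}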

View File

@@ -0,0 +1,105 @@
package doublylinkedtree
import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestStore_NewSlot(t *testing.T) {
ctx := context.Background()
bj := [32]byte{'z'}
type args struct {
slot types.Slot
finalized *forkchoicetypes.Checkpoint
justified *forkchoicetypes.Checkpoint
bestJustified *forkchoicetypes.Checkpoint
shouldEqual bool
}
tests := []struct {
name string
args args
}{
{
name: "Not epoch boundary. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch + 1,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: bj},
shouldEqual: false,
},
},
{
name: "Justified higher than best justified. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: bj},
shouldEqual: false,
},
},
{
name: "Best justified not on the same chain as finalized. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'d'}},
shouldEqual: false,
},
},
{
name: "Best justified on the same chain as finalized. Yes change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: bj},
shouldEqual: true,
},
},
}
for _, test := range tests {
f := setup(test.args.justified.Epoch, test.args.finalized.Epoch)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // genesis
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // finalized
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // justified
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // best justified
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // bad
require.NoError(t, f.UpdateFinalizedCheckpoint(test.args.finalized))
require.NoError(t, f.UpdateJustifiedCheckpoint(test.args.justified))
f.store.bestJustifiedCheckpoint = test.args.bestJustified
require.NoError(t, f.NewSlot(ctx, test.args.slot))
if test.args.shouldEqual {
bcp := f.BestJustifiedCheckpoint()
cp := f.JustifiedCheckpoint()
require.Equal(t, bcp.Epoch, cp.Epoch)
require.Equal(t, bcp.Root, cp.Root)
} else {
bcp := f.BestJustifiedCheckpoint()
cp := f.JustifiedCheckpoint()
epochsEqual := bcp.Epoch == cp.Epoch
rootsEqual := bcp.Root == cp.Root
require.Equal(t, false, epochsEqual && rootsEqual)
}
}
}

View File

@@ -20,6 +20,7 @@ type ForkChoice struct {
type Store struct {
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.

View File

@@ -23,6 +23,7 @@ type ForkChoicer interface {
// HeadRetriever retrieves head root and optimistic info of the current chain.
type HeadRetriever interface {
Head(context.Context, []uint64) ([32]byte, error)
CachedHeadRoot() [32]byte
Tips() ([][32]byte, []types.Slot)
IsOptimistic(root [32]byte) (bool, error)
}
@@ -53,7 +54,11 @@ type Getter interface {
CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
IsCanonical(root [32]byte) bool
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
FinalizedPayloadBlockHash() [32]byte
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
JustifiedPayloadBlockHash() [32]byte
BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint
ForkChoiceNodes() []*ethpb.ForkChoiceNode
NodeCount() int
}
@@ -66,4 +71,5 @@ type Setter interface {
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
SetGenesisTime(uint64)
SetOriginRoot([32]byte)
NewSlot(context.Context, types.Slot) error
}
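
To show the direction of the deprecation, here is a short hypothetical consumer of the expanded interface that reads head and checkpoint data straight from forkchoice instead of the blockchain package's store; checkpointSummary and summarize are illustrative names only.

// checkpointSummary is an illustrative helper type, not part of this change.
type checkpointSummary struct {
    HeadRoot       [32]byte
    JustifiedEpoch types.Epoch
    FinalizedEpoch types.Epoch
}

// summarize pulls the data that used to live in the deprecated blockchain store
// directly from the fork choice implementation.
func summarize(f ForkChoicer) checkpointSummary {
    return checkpointSummary{
        HeadRoot:       f.CachedHeadRoot(),
        JustifiedEpoch: f.JustifiedCheckpoint().Epoch,
        FinalizedEpoch: f.FinalizedCheckpoint().Epoch,
    }
}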

View File

@@ -8,6 +8,7 @@ go_library(
"helpers.go",
"metrics.go",
"node.go",
"on_tick.go",
"optimistic_sync.go",
"proposer_boost.go",
"store.go",
@@ -47,6 +48,7 @@ go_test(
"helpers_test.go",
"no_vote_test.go",
"node_test.go",
"on_tick_test.go",
"optimistic_sync_test.go",
"proposer_boost_test.go",
"store_test.go",

View File

@@ -0,0 +1,68 @@
package protoarray
import (
"context"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/time/slots"
)
// NewSlot mimics the implementation of `on_tick` in the fork choice consensus spec.
// It resets the proposer boost root in fork choice, and it updates the store's justified checkpoint
// if a better checkpoint exists on the store's finalized checkpoint chain.
// This should only be called at the start of every slot interval.
//
// Spec pseudocode definition:
// # Reset store.proposer_boost_root if this is a new slot
// if current_slot > previous_slot:
// store.proposer_boost_root = Root()
//
// # Not a new epoch, return
// if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0):
// return
//
// # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
// if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
// store.justified_checkpoint = store.best_justified_checkpoint
func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
// Reset proposer boost root
if err := f.ResetBoostedProposerRoot(ctx); err != nil {
return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
}
// Return if it's not a new epoch.
if !slots.IsEpochStart(slot) {
return nil
}
// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
return err
}
// We check that the best justified checkpoint is a descendant of the finalized checkpoint.
// This should always happen as forkchoice enforces that every node is a descendant of the
// finalized checkpoint. This check is here for additional security; consider removing the
// extra ancestor lookup.
r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
if err != nil {
return err
}
if r == fcp.Root {
f.store.justifiedCheckpoint = bjcp
}
}
return nil
}
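
A minimal usage sketch, assuming the caller ticks fork choice once per wall-clock slot; the checkpoint promotion above only fires when the slot starts a new epoch. exampleTick is hypothetical.

// exampleTick is illustrative only: it derives the current wall-clock slot the
// same way updateCheckpoints does and forwards it to NewSlot.
func exampleTick(ctx context.Context, f *ForkChoice, genesisTime uint64) error {
    currentSlot := slots.CurrentSlot(genesisTime)
    return f.NewSlot(ctx, currentSlot)
}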

View File

@@ -0,0 +1,105 @@
package protoarray
import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestStore_NewSlot(t *testing.T) {
ctx := context.Background()
bj := [32]byte{'z'}
type args struct {
slot types.Slot
finalized *forkchoicetypes.Checkpoint
justified *forkchoicetypes.Checkpoint
bestJustified *forkchoicetypes.Checkpoint
shouldEqual bool
}
tests := []struct {
name string
args args
}{
{
name: "Not epoch boundary. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch + 1,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: bj},
shouldEqual: false,
},
},
{
name: "Justified higher than best justified. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: bj},
shouldEqual: false,
},
},
{
name: "Best justified not on the same chain as finalized. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'d'}},
shouldEqual: false,
},
},
{
name: "Best justified on the same chain as finalized. Yes change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}},
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'b'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: bj},
shouldEqual: true,
},
},
}
for _, test := range tests {
f := setup(test.args.justified.Epoch, test.args.finalized.Epoch)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // genesis
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // finalized
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // justified
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // best justified
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // bad
require.NoError(t, f.UpdateFinalizedCheckpoint(test.args.finalized))
require.NoError(t, f.UpdateJustifiedCheckpoint(test.args.justified))
f.store.bestJustifiedCheckpoint = test.args.bestJustified
require.NoError(t, f.NewSlot(ctx, test.args.slot))
if test.args.shouldEqual {
bcp := f.BestJustifiedCheckpoint()
cp := f.JustifiedCheckpoint()
require.Equal(t, bcp.Epoch, cp.Epoch)
require.Equal(t, bcp.Root, cp.Root)
} else {
bcp := f.BestJustifiedCheckpoint()
cp := f.JustifiedCheckpoint()
epochsEqual := bcp.Epoch == cp.Epoch
rootsEqual := bcp.Root == cp.Root
require.Equal(t, false, epochsEqual && rootsEqual)
}
}
}

View File

@@ -27,14 +27,12 @@ import (
// before getting pruned upon new finalization.
const defaultPruneThreshold = 256
// This tracks the last reported head root. Used for metrics.
var lastHeadRoot [32]byte
// New initializes a new fork choice store.
func New() *ForkChoice {
s := &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
@@ -167,6 +165,7 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
}
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
} else {
@@ -187,6 +186,7 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
return err
}
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: jcRoot}
}
@@ -319,6 +319,20 @@ func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
}
// BestJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.bestJustifiedCheckpoint
}
// PreviousJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.prevJustifiedCheckpoint
}
// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
@@ -385,11 +399,11 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestNode.justifiedEpoch, s.justifiedCheckpoint.Epoch)
}
// Update metrics.
if bestNode.root != lastHeadRoot {
// Update metrics and the tracked head root.
if bestNode.root != s.lastHeadRoot {
headChangesCount.Inc()
headSlotNumber.Set(float64(bestNode.slot))
lastHeadRoot = bestNode.root
s.lastHeadRoot = bestNode.root
}
// Update canonical mapping given the head root.
@@ -943,9 +957,10 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) e
}
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = jc
bj := f.store.bestJustifiedCheckpoint
if bj == nil || jc.Epoch > bj.Epoch {
if bj == nil || bj.Root == params.BeaconConfig().ZeroHash || jc.Epoch > bj.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
}
return nil
@@ -981,6 +996,9 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
return err
}
if err := f.updateCheckpoints(ctx, chain[i].JustifiedCheckpoint, chain[i].FinalizedCheckpoint); err != nil {
return err
}
}
return nil
}
@@ -994,3 +1012,36 @@ func (f *ForkChoice) SetGenesisTime(genesisTime uint64) {
func (f *ForkChoice) SetOriginRoot(root [32]byte) {
f.store.originRoot = root
}
// CachedHeadRoot returns the last cached head root
func (f *ForkChoice) CachedHeadRoot() [32]byte {
return f.store.lastHeadRoot
}
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.FinalizedCheckpoint().Root
idx := f.store.nodesIndices[root]
if idx >= uint64(len(f.store.nodes)) {
// This should not happen
return [32]byte{}
}
node := f.store.nodes[idx]
return node.payloadHash
}
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.JustifiedCheckpoint().Root
idx := f.store.nodesIndices[root]
if idx >= uint64(len(f.store.nodes)) {
// This should not happen
return [32]byte{}
}
node := f.store.nodes[idx]
return node.payloadHash
}
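
To make the new previous-justified tracking concrete, here is a hedged, test-style sketch of the rotation performed in UpdateJustifiedCheckpoint above; checkpointRotationSketch is illustrative and ignores the returned errors for brevity.

// checkpointRotationSketch shows, informally, that after two justified-checkpoint
// updates the first checkpoint remains reachable as the previous justified one.
func checkpointRotationSketch() {
    f := New()
    _ = f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}})
    _ = f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'b'}})
    _ = f.PreviousJustifiedCheckpoint() // epoch 1, root 'a'
    _ = f.JustifiedCheckpoint()         // epoch 2, root 'b'
}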

View File

@@ -21,6 +21,7 @@ type Store struct {
pruneThreshold uint64 // do not prune tree unless threshold is reached.
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified checkpoint in store.
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized checkpoint in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
@@ -31,6 +32,7 @@ type Store struct {
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
slashedIndices map[types.ValidatorIndex]bool // The list of equivocating validators
originRoot [fieldparams.RootLength]byte // The genesis block root
lastHeadRoot [fieldparams.RootLength]byte // The last cached head block root
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex

View File

@@ -578,10 +578,7 @@ func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (
return nil, status.Errorf(codes.NotFound, "No head root was found")
}
case "finalized":
finalized, err := bs.ChainInfoFetcher.FinalizedCheckpt()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve finalized checkpoint: %v", err)
}
finalized := bs.ChainInfoFetcher.FinalizedCheckpt()
root = finalized.Root
case "genesis":
blk, err := bs.BeaconDB.GenesisBlock(ctx)
@@ -732,10 +729,7 @@ func (bs *Server) blockFromBlockID(ctx context.Context, blockId []byte) (interfa
return nil, errors.Wrap(err, "could not retrieve head block")
}
case "finalized":
finalized, err := bs.ChainInfoFetcher.FinalizedCheckpt()
if err != nil {
return nil, errors.Wrap(err, "could not retrieve finalized checkpoint")
}
finalized := bs.ChainInfoFetcher.FinalizedCheckpt()
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
blk, err = bs.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {

View File

@@ -429,26 +429,17 @@ func (bs *Server) chainHeadRetrieval(ctx context.Context) (*ethpb.ChainHead, err
return nil
}
finalizedCheckpoint, err := bs.FinalizationFetcher.FinalizedCheckpt()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get finalized checkpoint: %v", err)
}
finalizedCheckpoint := bs.FinalizationFetcher.FinalizedCheckpt()
if err := validateCP(finalizedCheckpoint, "finalized"); err != nil {
return nil, err
}
justifiedCheckpoint, err := bs.FinalizationFetcher.CurrentJustifiedCheckpt()
if err != nil {
return nil, err
}
justifiedCheckpoint := bs.FinalizationFetcher.CurrentJustifiedCheckpt()
if err := validateCP(justifiedCheckpoint, "justified"); err != nil {
return nil, err
}
prevJustifiedCheckpoint, err := bs.FinalizationFetcher.PreviousJustifiedCheckpt()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get previous justified checkpoint: %v", err)
}
prevJustifiedCheckpoint := bs.FinalizationFetcher.PreviousJustifiedCheckpt()
if err := validateCP(prevJustifiedCheckpoint, "prev justified"); err != nil {
return nil, err
}
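
The same simplification repeats across the RPC and sync call sites in this change: the checkpoint getters no longer return an error, so the per-call-site error plumbing disappears. A hedged sketch of the resulting pattern; checkpointFetcher and finalizedEpoch are local illustrative names, with the ethpb and types imports assumed from the surrounding files.

// checkpointFetcher is a local illustrative interface, not a Prysm type.
type checkpointFetcher interface {
    FinalizedCheckpt() *ethpb.Checkpoint
}

// finalizedEpoch shows the simplified call pattern: read the checkpoint directly.
func finalizedEpoch(f checkpointFetcher) types.Epoch {
    return f.FinalizedCheckpt().Epoch
}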

View File

@@ -540,10 +540,7 @@ func (bs *Server) GetValidatorParticipation(
default:
return nil, status.Errorf(codes.Internal, "Invalid state type retrieved with a version of %d", beaconState.Version())
}
cp, err := bs.FinalizationFetcher.FinalizedCheckpt()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get finalized checkpoint: %v", err)
}
cp := bs.FinalizationFetcher.FinalizedCheckpt()
p := &ethpb.ValidatorParticipationResponse{
Epoch: requestedEpoch,
Finalized: requestedEpoch <= cp.Epoch,

View File

@@ -112,19 +112,13 @@ func (p *StateProvider) State(ctx context.Context, stateId []byte) (state.Beacon
return nil, errors.Wrap(err, "could not get genesis state")
}
case "finalized":
checkpoint, err := p.ChainInfoFetcher.FinalizedCheckpt()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized checkpoint")
}
checkpoint := p.ChainInfoFetcher.FinalizedCheckpt()
s, err = p.StateGenService.StateByRoot(ctx, bytesutil.ToBytes32(checkpoint.Root))
if err != nil {
return nil, errors.Wrap(err, "could not get finalized state")
}
case "justified":
checkpoint, err := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
if err != nil {
return nil, errors.Wrap(err, "could not get justified checkpoint")
}
checkpoint := p.ChainInfoFetcher.CurrentJustifiedCheckpt()
s, err = p.StateGenService.StateByRoot(ctx, bytesutil.ToBytes32(checkpoint.Root))
if err != nil {
return nil, errors.Wrap(err, "could not get justified state")

View File

@@ -73,10 +73,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
}
var peers []peer.ID
if f.mode == modeStopOnFinalizedEpoch {
cp, err := f.chain.FinalizedCheckpt()
if err != nil {
return nil, err
}
cp := f.chain.FinalizedCheckpt()
headEpoch := cp.Epoch
_, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
} else {

View File

@@ -160,17 +160,14 @@ func (f *blocksFetcher) findFork(ctx context.Context, slot types.Slot) (*forkDat
// The current slot's epoch must be after the finalization epoch;
// triggering backtracking on earlier epochs is unnecessary.
cp, err := f.chain.FinalizedCheckpt()
if err != nil {
return nil, err
}
cp := f.chain.FinalizedCheckpt()
finalizedEpoch := cp.Epoch
epoch := slots.ToEpoch(slot)
if epoch <= finalizedEpoch {
return nil, errors.New("slot is not after the finalized epoch, no backtracking is necessary")
}
// Update slot to the beginning of the current epoch (preserve original slot for comparison).
slot, err = slots.EpochStart(epoch)
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, err
}
@@ -294,11 +291,7 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
// bestFinalizedSlot returns the highest finalized slot of the majority of connected peers.
func (f *blocksFetcher) bestFinalizedSlot() types.Slot {
cp, err := f.chain.FinalizedCheckpt()
if err != nil {
log.WithError(err).Error("Failed to get finalized checkpoint")
return 0
}
cp := f.chain.FinalizedCheckpt()
finalizedEpoch, _ := f.p2p.Peers().BestFinalized(
params.BeaconConfig().MaxPeersToSync, cp.Epoch)
return params.BeaconConfig().SlotsPerEpoch.Mul(uint64(finalizedEpoch))
@@ -315,11 +308,7 @@ func (f *blocksFetcher) bestNonFinalizedSlot() types.Slot {
// epoch. For the latter peers supporting that target epoch are returned as well.
func (f *blocksFetcher) calculateHeadAndTargetEpochs() (headEpoch, targetEpoch types.Epoch, peers []peer.ID) {
if f.mode == modeStopOnFinalizedEpoch {
cp, err := f.chain.FinalizedCheckpt()
if err != nil {
log.WithError(err).Error("Failed to get finalized checkpoint")
return 0, 0, peers
}
cp := f.chain.FinalizedCheckpt()
headEpoch = cp.Epoch
targetEpoch, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
} else {

View File

@@ -1311,8 +1311,7 @@ func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
require.NoError(t, queue.start())
isProcessedBlock := func(ctx context.Context, blk interfaces.SignedBeaconBlock, blkRoot [32]byte) bool {
cp, err := mc.FinalizedCheckpt()
require.NoError(t, err)
cp := mc.FinalizedCheckpt()
finalizedSlot, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false

View File

@@ -308,11 +308,7 @@ func (s *Service) updatePeerScorerStats(pid peer.ID, startSlot types.Slot) {
// isProcessedBlock checks DB and local cache for presence of a given block, to avoid duplicates.
func (s *Service) isProcessedBlock(ctx context.Context, blk interfaces.SignedBeaconBlock, blkRoot [32]byte) bool {
cp, err := s.cfg.Chain.FinalizedCheckpt()
if err != nil {
log.Errorf("could not get finalized checkpoint: %v", err)
return false
}
cp := s.cfg.Chain.FinalizedCheckpt()
finalizedSlot, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false

View File

@@ -173,11 +173,7 @@ func (s *Service) waitForMinimumPeers() {
required = flags.Get().MinimumSyncPeers
}
for {
cp, err := s.cfg.Chain.FinalizedCheckpt()
if err != nil {
log.WithError(err).Error("Could not retrieve finalized checkpoint")
return
}
cp := s.cfg.Chain.FinalizedCheckpt()
_, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, cp.Epoch)
if len(peers) >= required {
break

View File

@@ -214,10 +214,7 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
if len(roots) == 0 {
return nil
}
cp, err := s.cfg.chain.FinalizedCheckpt()
if err != nil {
return err
}
cp := s.cfg.chain.FinalizedCheckpt()
_, bestPeers := s.cfg.p2p.Peers().BestFinalized(maxPeerRequest, cp.Epoch)
if len(bestPeers) == 0 {
return nil
@@ -279,10 +276,7 @@ func (s *Service) validatePendingSlots() error {
defer s.pendingQueueLock.Unlock()
oldBlockRoots := make(map[[32]byte]bool)
cp, err := s.cfg.chain.FinalizedCheckpt()
if err != nil {
return err
}
cp := s.cfg.chain.FinalizedCheckpt()
finalizedEpoch := cp.Epoch
if s.slotToPendingBlocks == nil {
return errors.New("slotToPendingBlocks cache can't be nil")

View File

@@ -131,10 +131,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
if err != nil {
return err
}
cp, err := s.cfg.chain.FinalizedCheckpt()
if err != nil {
return err
}
cp := s.cfg.chain.FinalizedCheckpt()
resp := &pb.Status{
ForkDigest: forkDigest[:],
FinalizedRoot: cp.Root,
@@ -264,10 +261,7 @@ func (s *Service) respondWithStatus(ctx context.Context, stream network.Stream)
if err != nil {
return err
}
cp, err := s.cfg.chain.FinalizedCheckpt()
if err != nil {
return err
}
cp := s.cfg.chain.FinalizedCheckpt()
resp := &pb.Status{
ForkDigest: forkDigest[:],
FinalizedRoot: cp.Root,
@@ -292,10 +286,7 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
return p2ptypes.ErrWrongForkDigestVersion
}
genesis := s.cfg.chain.GenesisTime()
cp, err := s.cfg.chain.FinalizedCheckpt()
if err != nil {
return err
}
cp := s.cfg.chain.FinalizedCheckpt()
finalizedEpoch := cp.Epoch
maxEpoch := slots.EpochsSinceGenesis(genesis)
// It would take a minimum of 2 epochs to finalize a

View File

@@ -132,10 +132,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationIgnore, nil
}
cp, err := s.cfg.chain.FinalizedCheckpt()
if err != nil {
return pubsub.ValidationIgnore, nil
}
cp := s.cfg.chain.FinalizedCheckpt()
startSlot, err := slots.EpochStart(cp.Epoch)
if err != nil {
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Ignored block: could not calculate epoch start slot")

View File

@@ -40,7 +40,7 @@ func (bb *Builder) Tick(t testing.TB, tick int64) {
bb.service.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix() - tick))
if tick > bb.lastTick {
slot := uint64(tick) / params.BeaconConfig().SecondsPerSlot
require.NoError(t, bb.service.NewSlot(context.TODO(), types.Slot(slot)))
require.NoError(t, bb.service.ForkChoicer().NewSlot(context.TODO(), types.Slot(slot)))
bb.lastTick = tick
}
}
@@ -98,8 +98,7 @@ func (bb *Builder) Check(t testing.TB, c *Check) {
Epoch: types.Epoch(c.JustifiedCheckPoint.Epoch),
Root: common.FromHex(c.JustifiedCheckPoint.Root),
}
got, err := bb.service.CurrentJustifiedCheckpt()
require.NoError(t, err)
got := bb.service.CurrentJustifiedCheckpt()
require.DeepEqual(t, cp, got)
}
if c.BestJustifiedCheckPoint != nil {
@@ -107,8 +106,7 @@ func (bb *Builder) Check(t testing.TB, c *Check) {
Epoch: types.Epoch(c.BestJustifiedCheckPoint.Epoch),
Root: common.FromHex(c.BestJustifiedCheckPoint.Root),
}
got, err := bb.service.BestJustifiedCheckpt()
require.NoError(t, err)
got := bb.service.BestJustifiedCheckpt()
require.DeepEqual(t, cp, got)
}
if c.FinalizedCheckPoint != nil {
@@ -116,8 +114,7 @@ func (bb *Builder) Check(t testing.TB, c *Check) {
Epoch: types.Epoch(c.FinalizedCheckPoint.Epoch),
Root: common.FromHex(c.FinalizedCheckPoint.Root),
}
got, err := bb.service.FinalizedCheckpt()
require.NoError(t, err)
got := bb.service.FinalizedCheckpt()
require.DeepSSZEqual(t, cp, got)
}
if c.ProposerBoostRoot != nil {