Embed Config Pattern For Blockchain Service and Update Dependency Names Accordingly (#8618)

Author: kevlu93
Date: 2021-03-17 19:36:56 +01:00 (committed by GitHub)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Commit: ecf25d1284
Parent: 0a73be7389
22 changed files with 241 additions and 276 deletions
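
What the pattern looks like: the blockchain Service stops carrying one struct field per dependency (beaconDB, stateGen, forkChoiceStore, attPool, ...) and instead keeps them all on a single Config stored as cfg, so every call site becomes s.cfg.<Dependency>. A minimal sketch of the resulting shape in Go; the BeaconDB and DepositCache types appear verbatim in the hunks below, while the remaining field types are assumptions for illustration:

    // Config bundles the dependencies the blockchain Service needs.
    type Config struct {
        BeaconDB        db.HeadAccessDatabase      // type shown in the Service struct hunk below
        DepositCache    *depositcache.DepositCache // type shown in the Service struct hunk below
        StateGen        *stategen.State            // assumed type; built via stategen.New(beaconDB)
        ForkChoiceStore forkchoice.ForkChoicer     // assumed type; built via protoarray.New(...)
        StateNotifier   statefeed.Notifier         // assumed type
        // AttPool, ExitPool, SlashingPool, ... follow the same pattern.
    }

    // Service keeps a single pointer to its configuration.
    type Service struct {
        cfg    *Config
        ctx    context.Context
        cancel context.CancelFunc
    }

Call sites then change mechanically, e.g. s.beaconDB.HeadBlock(ctx) becomes s.cfg.BeaconDB.HeadBlock(ctx), which is what the bulk of this diff does.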

@@ -118,7 +118,7 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
return r[:], nil
}
- b, err := s.beaconDB.HeadBlock(ctx)
+ b, err := s.cfg.BeaconDB.HeadBlock(ctx)
if err != nil {
return nil, err
}
@@ -145,7 +145,7 @@ func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, erro
return s.headBlock(), nil
}
- return s.beaconDB.HeadBlock(ctx)
+ return s.cfg.BeaconDB.HeadBlock(ctx)
}
// HeadState returns the head state of the chain.
@@ -164,7 +164,7 @@ func (s *Service) HeadState(ctx context.Context) (iface.BeaconState, error) {
return s.headState(ctx), nil
}
- return s.stateGen.StateByRoot(ctx, s.headRoot())
+ return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
}
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
@@ -215,7 +215,7 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
// ProtoArrayStore returns the proto array store object.
func (s *Service) ProtoArrayStore() *protoarray.Store {
- return s.forkChoiceStore.Store()
+ return s.cfg.ForkChoiceStore.Store()
}
// GenesisTime returns the genesis time of beacon chain.
@@ -252,10 +252,10 @@ func (s *Service) CurrentFork() *pb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
// If the block has been finalized, the block will always be part of the canonical chain.
- if s.beaconDB.IsFinalizedBlock(ctx, blockRoot) {
+ if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
return true, nil
}
// If the block has not been finalized, check fork choice store to see if the block is canonical
- return s.forkChoiceStore.IsCanonical(blockRoot), nil
+ return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
}

@@ -13,7 +13,7 @@ import (
func TestHeadSlot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
- beaconDB: beaconDB,
+ cfg: &Config{BeaconDB: beaconDB},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -24,9 +24,8 @@ func TestHeadSlot_DataRace(t *testing.T) {
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
- beaconDB: beaconDB,
- head: &head{root: [32]byte{'A'}},
- stateGen: stategen.New(beaconDB),
+ cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
+ head: &head{root: [32]byte{'A'}},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -38,9 +37,8 @@ func TestHeadRoot_DataRace(t *testing.T) {
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
- beaconDB: beaconDB,
- head: &head{block: &ethpb.SignedBeaconBlock{}},
- stateGen: stategen.New(beaconDB),
+ cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
+ head: &head{block: &ethpb.SignedBeaconBlock{}},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
@@ -52,8 +50,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
- beaconDB: beaconDB,
- stateGen: stategen.New(beaconDB),
+ cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

@@ -120,7 +120,7 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
func TestHeadRoot_UseDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
- c := &Service{beaconDB: beaconDB}
+ c := &Service{cfg: &Config{BeaconDB: beaconDB}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := testutil.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
@@ -278,7 +278,7 @@ func TestService_HeadGenesisValidatorRoot(t *testing.T) {
}
func TestService_ProtoArrayStore(t *testing.T) {
- c := &Service{forkChoiceStore: protoarray.New(0, 0, [32]byte{})}
+ c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
p := c.ProtoArrayStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
}
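
The test-side payoff is visible above: instead of assigning several private fields, a test now builds the Service around one Config literal. Condensed from the hunks in this file (a sketch of the before/after, not new test code):

    // Before: one private field per dependency.
    s := &Service{
        beaconDB: beaconDB,
        stateGen: stategen.New(beaconDB),
    }

    // After: a single config literal.
    s := &Service{
        cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
    }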

@@ -58,18 +58,18 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
// If the fork choice store is missing justified block info, a node should
// re-initiate fork choice store using the latest justified info.
// This recovers a fatal condition and should not happen in run time.
- if !s.forkChoiceStore.HasNode(headStartRoot) {
- jb, err := s.beaconDB.Block(ctx, headStartRoot)
+ if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
+ jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
if err != nil {
return err
}
- s.forkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
+ s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block, headStartRoot, f, j); err != nil {
return err
}
}
- headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
+ headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
if err != nil {
return err
}
@@ -95,12 +95,12 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// If the head state is not available, just return nil.
// There's nothing to cache
- if !s.beaconDB.HasStateSummary(ctx, headRoot) {
+ if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
return nil
}
// Get the new head block from DB.
- newHeadBlock, err := s.beaconDB.Block(ctx, headRoot)
+ newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
return err
}
@@ -109,7 +109,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
}
// Get the new head state from cached state or DB.
- newHeadState, err := s.stateGen.StateByRoot(ctx, headRoot)
+ newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
@@ -124,7 +124,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
"newSlot": fmt.Sprintf("%d", newHeadBlock.Block.Slot),
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
- s.stateNotifier.StateFeed().Send(&feed.Event{
+ s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &statefeed.ReorgData{
NewSlot: newHeadBlock.Block.Slot,
@@ -139,7 +139,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
s.setHead(headRoot, newHeadBlock, newHeadState)
// Save the new head root to DB.
- if err := s.beaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
+ if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}
@@ -243,7 +243,7 @@ func (s *Service) hasHeadState() bool {
// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
- if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
+ if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
@@ -252,12 +252,12 @@ func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot
var justifiedState iface.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
- justifiedState, err = s.beaconDB.GenesisState(ctx)
+ justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
if err != nil {
return err
}
} else {
- justifiedState, err = s.stateGen.StateByRoot(ctx, justifiedRoot)
+ justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return err
}

@@ -39,14 +39,14 @@ func TestSaveHead_Different(t *testing.T) {
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
- require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
- require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
- require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -73,14 +73,14 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block
- require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
- require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
- require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -101,8 +101,8 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
state, _ := testutil.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
- require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: r[:]}))
- require.NoError(t, service.beaconDB.SaveState(context.Background(), state, r))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: r[:]}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}
@@ -112,7 +112,7 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
b := testutil.NewBeaconBlock()
- require.NoError(t, service.beaconDB.SaveBlock(context.Background(), b))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), b))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

@@ -45,7 +45,7 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
}
}
- nodes := s.forkChoiceStore.Nodes()
+ nodes := s.cfg.ForkChoiceStore.Nodes()
graph := dot.NewGraph(dot.Directed)
graph.Attr("rankdir", "RL")

@@ -35,8 +35,8 @@ func TestService_TreeHandler(t *testing.T) {
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
- require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
- require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
+ require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
+ require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, testutil.NewBeaconBlock(), headState)
rr := httptest.NewRecorder()

@@ -97,7 +97,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error
// We assume trusted attestation in this function has verified signature.
// Update forkchoice store with the new attestation for updating weight.
- s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
+ s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
return nil
}

@@ -33,7 +33,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (ifac
return cachedState, nil
}
- baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
+ baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
}
@@ -62,7 +62,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (ifac
// To avoid sharing the same state across checkpoint state cache and hot state cache,
// we don't add the state to check point cache.
- has, err := s.stateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
+ has, err := s.cfg.StateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, err
}
@@ -93,7 +93,7 @@ func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime u
// verifyBeaconBlock verifies beacon head block is known and not from the future.
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
r := bytesutil.ToBytes32(data.BeaconBlockRoot)
- b, err := s.beaconDB.Block(ctx, r)
+ b, err := s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return err
}

@@ -50,7 +50,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
- require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
BlkWithValidState := testutil.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
@@ -66,7 +66,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
tests := []struct {
name string
@@ -146,8 +146,8 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
copied := genesisState.Copy()
copied, err = state.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, copied, tRoot))
- require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
+ require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
require.NoError(t, service.onAttestation(ctx, att[0]))
}
@@ -175,7 +175,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
err = s.SetBalances([]uint64{0})
require.NoError(t, err)
r := [32]byte{'g'}
- require.NoError(t, service.beaconDB.SaveState(ctx, s, r))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
@@ -184,16 +184,16 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r = bytesutil.ToBytes32([]byte{'A'})
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
- require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
- require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
s2, err := service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
@@ -216,8 +216,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, 32)}
- require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
@@ -237,7 +237,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
epoch := types.Epoch(1)
baseState, _ := testutil.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
- require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -248,7 +248,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
epoch = 2
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), 32)}
- require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := helpers.StartSlot(newCheckpoint.Epoch)
@@ -321,7 +321,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
b := testutil.NewBeaconBlock()
b.Block.Slot = 2
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
@@ -339,7 +339,7 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
b := testutil.NewBeaconBlock()
b.Block.Slot = 2
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
@@ -357,7 +357,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b32))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -366,7 +366,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b33))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -384,7 +384,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b32))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
@@ -393,7 +393,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b33))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -422,10 +422,10 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
- require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
- require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
+ require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
+ require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
- _, err = service.forkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
+ _, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)

@@ -146,7 +146,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
}
// Send notification of the processed block to the state feed.
- s.stateNotifier.StateFeed().Send(&feed.Event{
+ s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: signed.Block.Slot,
@@ -163,7 +163,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
return err
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
- if err := s.forkChoiceStore.Prune(ctx, fRoot); err != nil {
+ if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
if !featureconfig.Get().UpdateHeadTimely {
@@ -205,7 +205,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, nil, err
}
- preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot))
+ preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, nil, err
}
@@ -246,14 +246,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
return nil, nil, errors.New("batch block signature verification failed")
}
for r, st := range boundaries {
- if err := s.stateGen.SaveState(ctx, r, st); err != nil {
+ if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastB := blks[len(blks)-1]
lastBR := blockRoots[len(blockRoots)-1]
- if err := s.stateGen.SaveState(ctx, lastBR, preState); err != nil {
+ if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
@@ -272,7 +272,7 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
- if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
+ if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: signed.Block.Slot,
Root: blockRoot[:],
}); err != nil {
@@ -281,7 +281,7 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
- if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
+ if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
@@ -359,7 +359,7 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
if err != nil {
return err
}
- s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
+ s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
}
return nil
}
@@ -370,7 +370,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.B
return err
}
// Feed in block to fork choice store.
- if err := s.forkChoiceStore.ProcessBlock(ctx,
+ if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
blk.Slot, root, bytesutil.ToBytes32(blk.ParentRoot), bytesutil.ToBytes32(blk.Body.Graffiti),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
@@ -386,10 +386,10 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.Si
defer span.End()
if initSync {
s.saveInitSyncBlock(r, b)
- } else if err := s.beaconDB.SaveBlock(ctx, b); err != nil {
+ } else if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block.Slot)
}
- if err := s.stateGen.SaveState(ctx, r, st); err != nil {
+ if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, st); err != nil {

@@ -35,7 +35,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (i
return nil, err
}
- preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
+ preState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
@@ -65,7 +65,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) e
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// a subset of the block object.
- if !s.beaconDB.HasStateSummary(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
+ if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}
@@ -73,12 +73,12 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) e
return err
}
- has, err := s.stateGen.HasState(ctx, parentRoot)
+ has, err := s.cfg.StateGen.HasState(ctx, parentRoot)
if err != nil {
return err
}
if !has {
- if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
+ if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return errors.Wrap(err, "could not save initial sync blocks")
}
s.clearInitSyncBlocks()
@@ -92,7 +92,7 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
defer span.End()
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
- finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
+ finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
if err != nil {
return err
}
@@ -145,7 +145,7 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
if s.hasInitSyncBlock(justifiedRoot) {
newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
} else {
- newJustifiedBlockSigned, err = s.beaconDB.Block(ctx, justifiedRoot)
+ newJustifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, justifiedRoot)
if err != nil {
return false, err
}
@@ -167,7 +167,7 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
if s.hasInitSyncBlock(cachedJustifiedRoot) {
justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
} else {
- justifiedBlockSigned, err = s.beaconDB.Block(ctx, cachedJustifiedRoot)
+ justifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, cachedJustifiedRoot)
if err != nil {
return false, err
}
@@ -205,7 +205,7 @@ func (s *Service) updateJustified(ctx context.Context, state iface.ReadOnlyBeaco
}
}
- return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
+ return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
// This caches input checkpoint as justified for the service struct. It rotates current justified to previous justified,
@@ -218,18 +218,18 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
return err
}
- return s.beaconDB.SaveJustifiedCheckpoint(ctx, cp)
+ return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
}
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
// Blocks need to be saved so that we can retrieve finalized block from
// DB when migrating states.
- if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
+ if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
- if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
+ if err := s.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
return err
}
if !featureconfig.Get().UpdateHeadTimely {
@@ -238,7 +238,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
}
fRoot := bytesutil.ToBytes32(cp.Root)
- if err := s.stateGen.MigrateToCold(ctx, fRoot); err != nil {
+ if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}
@@ -282,10 +282,10 @@ func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slo
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
defer span.End()
- if !s.forkChoiceStore.HasParent(r) {
+ if !s.cfg.ForkChoiceStore.HasParent(r) {
return nil, errors.New("could not find root in fork choice store")
}
- return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
+ return s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
}
// This retrieves an ancestor root using DB. The look up is recursively looking up DB. Slower than `ancestorByForkChoiceStore`.
@@ -298,7 +298,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot)
return nil, ctx.Err()
}
- signed, err := s.beaconDB.Block(ctx, r)
+ signed, err := s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor block")
}
@@ -377,8 +377,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
}
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
- for !s.forkChoiceStore.HasNode(parentRoot) && s.beaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
- b, err := s.beaconDB.Block(ctx, parentRoot)
+ for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
+ b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
if err != nil {
return err
}
@@ -396,7 +396,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
- if err := s.forkChoiceStore.ProcessBlock(ctx,
+ if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
b.Slot, r, bytesutil.ToBytes32(b.ParentRoot), bytesutil.ToBytes32(b.Body.Graffiti),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
@@ -413,7 +413,7 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
defer span.End()
// Update deposit cache.
- finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
+ finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not fetch finalized state")
}
@@ -421,9 +421,9 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
- s.depositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
+ s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.
- if err = s.depositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
+ if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil
@@ -433,11 +433,11 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
for _, att := range atts {
if helpers.IsAggregated(att) {
- if err := s.attPool.DeleteAggregatedAttestation(att); err != nil {
+ if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
- if err := s.attPool.DeleteUnaggregatedAttestation(att); err != nil {
+ if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}

@@ -48,7 +48,7 @@ func TestStore_OnBlock(t *testing.T) {
require.NoError(t, err)
st, err := testutil.NewBeaconState()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := testutil.NewBeaconBlock()
@@ -57,11 +57,11 @@ func TestStore_OnBlock(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, random))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
- require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
- require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
tests := []struct {
name string
@@ -145,7 +145,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
- service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
+ service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
st, keys := testutil.DeterministicGenesisState(t, 64)
@@ -172,7 +172,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
blks[0].Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
- require.NoError(t, service.stateGen.SaveState(ctx, blkRoots[0], firstState))
+ require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
@@ -200,8 +200,8 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
- require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, newJustifiedBlk))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, lastJustifiedBlk))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -228,8 +228,8 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
- require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, newJustifiedBlk))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, lastJustifiedBlk))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
@@ -262,14 +262,14 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
- service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
+ service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
b.Block.ParentRoot = gRoot[:]
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
- require.NoError(t, service.stateGen.SaveState(ctx, gRoot, s))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
+ require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, b.Block))
}
@@ -292,7 +292,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
- service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
+ service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
b := testutil.NewBeaconBlock()
@@ -305,8 +305,8 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
b.Block.ParentRoot = gRoot[:]
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1})
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
- require.NoError(t, service.stateGen.SaveState(ctx, gRoot, s))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
+ require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, b.Block))
}
@@ -350,7 +350,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
- service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
+ service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
genesisStateRoot := [32]byte{}
@@ -361,7 +361,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
st, err := testutil.NewBeaconState()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
@@ -375,10 +375,10 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
- assert.Equal(t, 5, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
- assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
- assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
- assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
+ assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
+ assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
+ assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
+ assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
}
func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
@@ -388,7 +388,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
- service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
+ service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
genesisStateRoot := [32]byte{}
@@ -399,7 +399,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
st, err := testutil.NewBeaconState()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
@@ -413,12 +413,12 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
- assert.Equal(t, 5, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
+ assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
- assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
- assert.Equal(t, true, service.beaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
+ assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
+ assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
}
}
@@ -429,7 +429,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
- service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
+ service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
@@ -441,24 +441,24 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
st, err := testutil.NewBeaconState()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
// Define a tree branch, slot 63 <- 64 <- 65
b63 := testutil.NewBeaconBlock()
b63.Block.Slot = 63
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b63))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b63))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := testutil.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b64))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b64))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := testutil.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b65))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b65))
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
@@ -466,10 +466,10 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
- assert.Equal(t, 2, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
+ assert.Equal(t, 2, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
- assert.Equal(t, true, service.forkChoiceStore.HasNode(r63), "Didn't save node")
+ assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
}
// blockTree1 constructs the following tree:
@@ -649,7 +649,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
- require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
+ require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
}
r, err := service.ancestor(context.Background(), r200[:], 150)
@@ -689,7 +689,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
}
- require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
+ require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
@@ -758,10 +758,10 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.justifiedCheckpt = test.args.cachedCheckPoint
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := testutil.NewBeaconState()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))
if test.args.diffFinalizedCheckPoint {
b1 := testutil.NewBeaconBlock()
@@ -778,7 +778,7 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
- require.NoError(t, service.beaconDB.SaveBlock(context.Background(), beaconBlock))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), beaconBlock))
}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: []byte{'c'}, Epoch: 1}
service.justifiedCheckpt.Root = r100[:]
@@ -871,11 +871,11 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
gBlk := testutil.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveBlock(ctx, gBlk))
- require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
- require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: gRoot[:]}))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, gBlk))
+ require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: gRoot[:]}))
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
- require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, gRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.genesisRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.justifiedCheckpt = currentCp
@@ -885,7 +885,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
assert.DeepSSZEqual(t, currentCp, service.prevJustifiedCheckpt, "Incorrect previous justified checkpoint")
assert.DeepSSZEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
- cp, err := service.beaconDB.JustifiedCheckpoint(ctx)
+ cp, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
}
@@ -933,7 +933,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
gs, keys := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
- gBlk, err := service.beaconDB.GenesisBlock(ctx)
+ gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
@@ -946,7 +946,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, blk, r))
- testState, err = service.stateGen.StateByRoot(ctx, r)
+ testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
require.Equal(t, types.Epoch(3), service.CurrentJustifiedCheckpt().Epoch)
@@ -969,14 +969,14 @@ func TestInsertFinalizedDeposits(t *testing.T) {
gs, _ := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
- gBlk, err := service.beaconDB.GenesisBlock(ctx)
+ gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
- assert.NoError(t, service.stateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
+ assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))

@@ -76,7 +76,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
// In this case, we could exit early to save on additional computation.
- if s.forkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
+ if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
return nil
}
@@ -100,7 +100,7 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- struct{}) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
- stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
+ stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
subscribedToStateEvents <- struct{}{}
<-stateChannel
stateSub.Unsubscribe()
@@ -121,7 +121,7 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
case <-st.C():
// Continue when there's no fork choice attestation, there's nothing to process and update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
- if s.attPool.ForkchoiceAttestationCount() == 0 {
+ if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
continue
}
s.processAttestations(s.ctx)
@@ -134,7 +134,7 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context) {
- atts := s.attPool.ForkchoiceAttestations()
+ atts := s.cfg.AttPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
@@ -144,13 +144,13 @@ func (s *Service) processAttestations(ctx context.Context) {
continue
}
- hasState := s.beaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
+ hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}
- if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
+ if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}

@@ -43,13 +43,13 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b32))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b33))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -71,13 +71,13 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b32))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
- require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
+ require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b33))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
@@ -112,10 +112,10 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied := genesisState.Copy()
copied, err = state.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
- require.NoError(t, service.beaconDB.SaveState(ctx, copied, tRoot))
- require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
- require.NoError(t, service.attPool.SaveForkchoiceAttestations(atts))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
+ require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
+ require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
- require.Equal(t, 0, len(service.attPool.ForkchoiceAttestations()))
+ require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}

@@ -50,7 +50,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
log.WithError(err).Warn("Could not update head")
}
// Send notification of the processed block to the state feed.
- s.stateNotifier.StateFeed().Send(&feed.Event{
+ s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
@@ -106,7 +106,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
return err
}
// Send notification of the processed block to the state feed.
- s.stateNotifier.StateFeed().Send(&feed.Event{
+ s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
@@ -142,18 +142,18 @@ func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
}
// Add block attestations to the fork choice pool to compute head.
- if err := s.attPool.SaveBlockAttestations(b.Body.Attestations); err != nil {
+ if err := s.cfg.AttPool.SaveBlockAttestations(b.Body.Attestations); err != nil {
log.Errorf("Could not save block attestations for fork choice: %v", err)
return nil
}
// Mark block exits as seen so we don't include same ones in future blocks.
for _, e := range b.Body.VoluntaryExits {
- s.exitPool.MarkIncluded(e)
+ s.cfg.ExitPool.MarkIncluded(e)
}
// Mark attester slashings as seen so we don't include same ones in future blocks.
for _, as := range b.Body.AttesterSlashings {
- s.slashingPool.MarkIncludedAttesterSlashing(as)
+ s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
}
return nil
}
@@ -169,9 +169,9 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
}
if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
- s.stateGen.EnableSaveHotStateToDB(ctx)
+ s.cfg.StateGen.EnableSaveHotStateToDB(ctx)
return nil
}
- return s.stateGen.DisableSaveHotStateToDB(ctx)
+ return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}

@@ -73,7 +73,7 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
if baCount := len(s.attPool.BlockAttestations()); baCount != 2 {
if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
"Got %d but wanted %d", baCount, 2)
}
@@ -93,7 +93,7 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
pending := s.exitPool.PendingExits(genesis, 1, true /* no limit */)
pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
if len(pending) != 0 {
t.Errorf(
"Did not mark the correct number of exits. Got %d pending but wanted %d",
@@ -109,7 +109,7 @@ func TestService_ReceiveBlock(t *testing.T) {
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
@@ -137,7 +137,7 @@ func TestService_ReceiveBlock(t *testing.T) {
s, err := NewService(ctx, cfg)
require.NoError(t, err)
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.beaconDB.GenesisBlock(ctx)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
@@ -178,7 +178,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
s, err := NewService(ctx, cfg)
require.NoError(t, err)
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.beaconDB.GenesisBlock(ctx)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
@@ -192,11 +192,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
wg.Done()
}()
wg.Wait()
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
// Verify fork choice has processed the block. (Genesis block and the new block)
assert.Equal(t, 2, len(s.forkChoiceStore.Nodes()))
assert.Equal(t, 2, len(s.cfg.ForkChoiceStore.Nodes()))
}
func TestService_ReceiveBlockBatch(t *testing.T) {
@@ -234,7 +234,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
@@ -260,7 +260,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.beaconDB.GenesisBlock(ctx)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()

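Several assertions above count events received from the state feed after a block is processed. A minimal sketch of that notify-and-subscribe flow, with a plain buffered channel standing in for Prysm's event feed:

package main

import "fmt"

// event is a hypothetical stand-in for feed.Event.
type event struct{ typ string }

// stateNotifier fans events out to subscribers; buffered channels stand in
// for the real event feed here.
type stateNotifier struct{ subs []chan event }

func (n *stateNotifier) Subscribe() chan event {
	ch := make(chan event, 1)
	n.subs = append(n.subs, ch)
	return ch
}

func (n *stateNotifier) Send(e event) {
	for _, ch := range n.subs {
		ch <- e
	}
}

func main() {
	n := &stateNotifier{}
	sub := n.Subscribe()
	n.Send(event{typ: "BlockProcessed"}) // as after a processed block
	fmt.Println((<-sub).typ)             // BlockProcessed
}
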
View File

@@ -47,22 +47,13 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
attPool attestations.Pool
slashingPool slashings.PoolManager
exitPool voluntaryexits.PoolManager
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int
head *head
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
forkChoiceStore f.ForkChoicer
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
@@ -71,14 +62,10 @@ type Service struct {
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
stateGen *stategen.State
opsService *attestations.Service
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsEpoch types.Epoch
wsRoot []byte
wsVerified bool
}
@@ -106,26 +93,13 @@ type Config struct {
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
return &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
attPool: cfg.AttPool,
exitPool: cfg.ExitPool,
slashingPool: cfg.SlashingPool,
p2p: cfg.P2p,
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
forkChoiceStore: cfg.ForkChoiceStore,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
wsEpoch: cfg.WspEpoch,
wsRoot: cfg.WspBlockRoot,
}, nil
}
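The struct and constructor hunks above are the heart of this commit: the block of dependency fields collapses into a single cfg pointer, and NewService stops copying config fields one by one. A distilled sketch of the pattern, with representative (not exhaustive) field names:

package main

import (
	"context"
	"fmt"
	"sync"
)

// db and pool are hypothetical stand-ins for the real dependencies.
type db struct{}
type pool struct{}

// Config gathers the injected dependencies in one place.
type Config struct {
	BeaconDB *db
	AttPool  *pool
}

// Service keeps only runtime state plus the embedded config; before this
// change it mirrored every Config field as its own struct member.
type Service struct {
	cfg      *Config
	ctx      context.Context
	cancel   context.CancelFunc
	headLock sync.RWMutex
}

// NewService embeds the config wholesale instead of copying field by field.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	return &Service{cfg: cfg, ctx: ctx, cancel: cancel}, nil
}

func main() {
	s, err := NewService(context.Background(), &Config{BeaconDB: &db{}, AttPool: &pool{}})
	if err != nil {
		panic(err)
	}
	defer s.cancel()
	fmt.Println(s.cfg.BeaconDB != nil, s.cfg.AttPool != nil) // true true
}
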
@@ -134,7 +108,7 @@ func (s *Service) Start() {
// For running initial sync with the state cache, in the event of a restart we use
// the last finalized checkpoint as the sync start point instead of the head
// state. This is because we no longer save state every slot during sync.
cp, err := s.beaconDB.FinalizedCheckpoint(s.ctx)
cp, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
@@ -144,7 +118,7 @@ func (s *Service) Start() {
// the finalized root is defined as zero hashes instead of the genesis root hash.
// We want to use the genesis root to retrieve the state.
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
@@ -155,7 +129,7 @@ func (s *Service) Start() {
}
}
}
beaconState, err := s.stateGen.StateByRoot(s.ctx, r)
beaconState, err := s.cfg.StateGen.StateByRoot(s.ctx, r)
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
@@ -167,13 +141,13 @@ func (s *Service) Start() {
if beaconState != nil {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.opsService.SetGenesisTime(beaconState.GenesisTime())
s.cfg.OpsService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(s.ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}
// We start a countdown to genesis, if needed.
gState, err := s.beaconDB.GenesisState(s.ctx)
gState, err := s.cfg.BeaconDB.GenesisState(s.ctx)
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
}
@@ -183,11 +157,11 @@ func (s *Service) Start() {
}
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(s.ctx)
justifiedCheckpoint, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not get justified checkpoint: %v", err)
}
finalizedCheckpoint, err := s.beaconDB.FinalizedCheckpoint(s.ctx)
finalizedCheckpoint, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not get finalized checkpoint: %v", err)
}
@@ -223,7 +197,7 @@ func (s *Service) Start() {
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
}
s.stateNotifier.StateFeed().Send(&feed.Event{
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
@@ -232,13 +206,13 @@ func (s *Service) Start() {
})
} else {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.chainStartFetcher == nil {
if s.cfg.ChainStartFetcher == nil {
log.Fatal("Not configured web3Service for POW chain")
return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
<-attestationProcessorSubscribed
for {
@@ -271,8 +245,8 @@ func (s *Service) Start() {
// processChainStartTime loads the ChainStart deposits from the eth1 deposit
// contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.chainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data())
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
@@ -285,7 +259,7 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.stateNotifier.StateFeed().Send(&feed.Event{
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
@@ -319,7 +293,7 @@ func (s *Service) initializeBeaconChain(
log.Info("Initialized beacon chain genesis state")
// Clear out all pre-genesis data now that the state is initialized.
s.chainStartFetcher.ClearPreGenesisData()
s.cfg.ChainStartFetcher.ClearPreGenesisData()
// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
@@ -329,7 +303,7 @@ func (s *Service) initializeBeaconChain(
return nil, err
}
s.opsService.SetGenesisTime(genesisState.GenesisTime())
s.cfg.OpsService.SetGenesisTime(genesisState.GenesisTime())
return genesisState, nil
}
@@ -338,14 +312,14 @@ func (s *Service) initializeBeaconChain(
func (s *Service) Stop() error {
defer s.cancel()
if s.stateGen != nil && s.head != nil && s.head.state != nil {
if err := s.stateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
return err
}
}
// Save initial sync cached blocks to the DB before stopping.
return s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}
// Status always returns nil unless there is an error condition that causes
@@ -354,7 +328,7 @@ func (s *Service) Status() error {
if s.genesisRoot == params.BeaconConfig().ZeroHash {
return errors.New("genesis state has not been created")
}
if runtime.NumGoroutine() > s.maxRoutines {
if runtime.NumGoroutine() > s.cfg.MaxRoutines {
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
}
return nil
@@ -373,25 +347,25 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState iface.Beacon
}
s.genesisRoot = genesisBlkRoot
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
if err := s.cfg.BeaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
if err := s.cfg.BeaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
}); err != nil {
return err
}
s.stateGen.SaveFinalizedState(0, genesisBlkRoot, genesisState)
s.cfg.StateGen.SaveFinalizedState(0, genesisBlkRoot, genesisState)
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
}
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
if err := s.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis block root")
}
@@ -407,7 +381,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState iface.Beacon
s.finalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
if err := s.forkChoiceStore.ProcessBlock(ctx,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block.Slot,
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
@@ -424,7 +398,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState iface.Beacon
// This gets called to initialize chain info variables using the finalized checkpoint stored in the DB.
func (s *Service) initializeChainInfo(ctx context.Context) error {
genesisBlock, err := s.beaconDB.GenesisBlock(ctx)
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
@@ -437,7 +411,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
}
s.genesisRoot = genesisBlkRoot
finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint from db")
}
@@ -449,13 +423,13 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
var finalizedState iface.BeaconState
finalizedState, err = s.stateGen.Resume(ctx)
finalizedState, err = s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
if flags.Get().HeadSync {
headBlock, err := s.beaconDB.HeadBlock(ctx)
headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
}
@@ -471,13 +445,13 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
finalizedState, err := s.stateGen.Resume(ctx)
finalizedState, err := s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d."+
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block.Slot)
headState, err := s.stateGen.StateByRoot(ctx, headRoot)
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
@@ -490,7 +464,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
}
}
finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}
@@ -507,7 +481,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// information to the fork choice service to initialize the fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
s.forkChoiceStore = store
s.cfg.ForkChoiceStore = store
}
// This returns true if the block has been processed before. Two ways to verify the block has been processed:
@@ -515,9 +489,9 @@ func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *eth
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
if s.forkChoiceStore.HasNode(root) {
if s.cfg.ForkChoiceStore.HasNode(root) {
return true
}
return s.beaconDB.HasBlock(ctx, root)
return s.cfg.BeaconDB.HasBlock(ctx, root)
}

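hasBlock above checks the in-memory fork-choice store before falling back to the database, since the in-memory check is far cheaper (the comment puts it at roughly ten times). A sketch of that two-tier lookup with hypothetical stand-in types:

package main

import "fmt"

type root [32]byte

// forkChoice stands in for the in-memory fork-choice store.
type forkChoice struct{ nodes map[root]bool }

func (f *forkChoice) HasNode(r root) bool { return f.nodes[r] }

// database stands in for the on-disk beacon DB.
type database struct{ blocks map[root]bool }

func (d *database) HasBlock(r root) bool { return d.blocks[r] }

// hasBlock prefers the cheap in-memory check and only hits the DB on a miss.
func hasBlock(fc *forkChoice, db *database, r root) bool {
	if fc.HasNode(r) {
		return true
	}
	return db.HasBlock(r)
}

func main() {
	r := root{'A'}
	fc := &forkChoice{nodes: map[root]bool{}}
	db := &database{blocks: map[root]bool{r: true}}
	fmt.Println(hasBlock(fc, db, r)) // true, via the DB fallback
}
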
View File

@@ -18,7 +18,7 @@ func init() {
func TestChainService_SaveHead_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: beaconDB,
cfg: &Config{BeaconDB: beaconDB},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

View File

@@ -276,7 +276,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
@@ -316,7 +316,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
@@ -373,7 +373,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
Root: finalizedRoot[:],
}))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
@@ -411,8 +411,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB),
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 1
@@ -420,10 +419,10 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
require.NoError(t, err)
newState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.stateGen.SaveState(ctx, r, newState))
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, blk, r, newState))
newB, err := s.beaconDB.HeadBlock(ctx)
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)
if reflect.DeepEqual(newB, blk) {
t.Error("head block should not be equal")
@@ -434,9 +433,8 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
beaconDB: beaconDB,
}
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
@@ -453,25 +451,24 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
ctx: ctx,
cancel: cancel,
beaconDB: beaconDB,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
stateGen: stategen.New(beaconDB),
}
b := testutil.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
s.saveInitSyncBlock(r, b)
require.NoError(t, s.Stop())
require.Equal(t, true, s.beaconDB.HasBlock(ctx, r))
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
stateChannel := make(chan *feed.Event, 1)
stateSub := service.stateNotifier.StateFeed().Subscribe(stateChannel)
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
service.processChainStartTime(context.Background(), time.Now())
@@ -485,16 +482,16 @@ func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
beaconDB: beaconDB,
cfg: &Config{BeaconDB: beaconDB},
}
block := testutil.NewBeaconBlock()
require.NoError(b, s.beaconDB.SaveBlock(ctx, block))
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, block))
r, err := block.Block.HashTreeRoot()
require.NoError(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.beaconDB.HasBlock(ctx, r), "Block is not in DB")
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
}
}
@@ -502,9 +499,8 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
beaconDB: beaconDB,
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
@@ -516,6 +512,6 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.forkChoiceStore.HasNode(r), "Block is not in fork choice store")
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}

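TestServiceStop_SaveCachedBlocks above pins down the shutdown contract: blocks cached during initial sync must be flushed to the DB when the service stops. A sketch of that flush-on-stop behavior with stand-in types:

package main

import "fmt"

type root [32]byte
type block struct{ slot uint64 }

// db stands in for the beacon DB.
type db struct{ saved map[root]*block }

func (d *db) SaveBlocks(blocks map[root]*block) {
	for r, b := range blocks {
		d.saved[r] = b
	}
}

type service struct {
	beaconDB       *db
	initSyncBlocks map[root]*block
}

// Stop flushes blocks cached during initial sync before shutdown,
// mirroring the contract the test above asserts.
func (s *service) Stop() {
	s.beaconDB.SaveBlocks(s.initSyncBlocks)
}

func main() {
	s := &service{
		beaconDB:       &db{saved: map[root]*block{}},
		initSyncBlocks: map[root]*block{{'A'}: {slot: 1}},
	}
	s.Stop()
	fmt.Println(len(s.beaconDB.saved)) // 1: the cached block was persisted
}
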
View File

@@ -14,7 +14,7 @@ import (
// Reference design: https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
// TODO(7342): Remove the following to fully use weak subjectivity in production.
if len(s.wsRoot) == 0 || s.wsEpoch == 0 {
if len(s.cfg.WspBlockRoot) == 0 || s.cfg.WspEpoch == 0 {
return nil
}
@@ -23,28 +23,28 @@ func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
if s.wsVerified {
return nil
}
if s.wsEpoch > s.finalizedCheckpt.Epoch {
if s.cfg.WspEpoch > s.finalizedCheckpt.Epoch {
return nil
}
r := bytesutil.ToBytes32(s.wsRoot)
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.wsEpoch)
r := bytesutil.ToBytes32(s.cfg.WspBlockRoot)
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.cfg.WspEpoch)
// Save initial sync cached blocks to DB.
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
// A node should have the weak subjectivity block in the DB.
if !s.beaconDB.HasBlock(ctx, r) {
if !s.cfg.BeaconDB.HasBlock(ctx, r) {
return fmt.Errorf("node does not have root in DB: %#x", r)
}
startSlot, err := helpers.StartSlot(s.wsEpoch)
startSlot, err := helpers.StartSlot(s.cfg.WspEpoch)
if err != nil {
return err
}
// A node should have the weak subjectivity block corresponding to the correct epoch in the DB.
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(startSlot + params.BeaconConfig().SlotsPerEpoch)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
roots, err := s.cfg.BeaconDB.BlockRoots(ctx, filter)
if err != nil {
return err
}
@@ -56,5 +56,5 @@ func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
}
}
return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.wsEpoch)
return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.cfg.WspEpoch)
}

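VerifyWeakSubjectivityRoot above reduces to: skip the check when unconfigured or when finality has not yet reached the checkpoint epoch, then require the configured root to appear in the DB within that epoch's slot range. A condensed sketch of the decision flow (stand-in types, simplified epoch math):

package main

import (
	"bytes"
	"fmt"
)

const slotsPerEpoch = 32

type db struct {
	// rootsBySlot stands in for the block-roots-by-filter query.
	rootsBySlot map[uint64][][]byte
}

// verifyWeakSubjectivityRoot returns nil when the configured root is found
// at a slot inside the weak subjectivity epoch, mirroring the check above.
func verifyWeakSubjectivityRoot(d *db, wsRoot []byte, wsEpoch, finalizedEpoch uint64) error {
	if len(wsRoot) == 0 || wsEpoch == 0 {
		return nil // not configured; nothing to verify
	}
	if wsEpoch > finalizedEpoch {
		return nil // not finalized past the checkpoint yet; check later
	}
	start := wsEpoch * slotsPerEpoch
	for slot := start; slot <= start+slotsPerEpoch; slot++ {
		for _, r := range d.rootsBySlot[slot] {
			if bytes.Equal(r, wsRoot) {
				return nil
			}
		}
	}
	return fmt.Errorf("node does not have root %#x in epoch %d", wsRoot, wsEpoch)
}

func main() {
	root := []byte{0xAA}
	d := &db{rootsBySlot: map[uint64][][]byte{64: {root}}}
	fmt.Println(verifyWeakSubjectivityRoot(d, root, 2, 3)) // <nil>
}
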
View File

@@ -72,9 +72,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
beaconDB: beaconDB,
wsRoot: tt.wsRoot[:],
wsEpoch: tt.wsEpoch,
cfg: &Config{BeaconDB: beaconDB, WspBlockRoot: tt.wsRoot[:], WspEpoch: tt.wsEpoch},
wsVerified: tt.wsVerified,
finalizedCheckpt: &ethpb.Checkpoint{Epoch: tt.finalizedEpoch},
}