Compare commits


3 Commits

Author          SHA1        Message                 Date
Zahoor Mohamed  c2beb647dd  fixed proto generation  2021-11-30 12:45:06 +05:30
Zahoor Mohamed  35711cac8a  proto debug commit      2021-11-30 11:28:54 +05:30
Zahoor Mohamed  9238e72a91  add growth command      2021-11-17 12:07:09 +05:30
140 changed files with 1213 additions and 5915 deletions

View File

@@ -18,7 +18,6 @@ go_library(
"receive_attestation.go",
"receive_block.go",
"service.go",
"state_balance_cache.go",
"weak_subjectivity_checks.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
@@ -96,7 +95,6 @@ go_test(
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",
@@ -143,7 +141,6 @@ go_test(
"chain_info_norace_test.go",
"checktags_test.go",
"init_test.go",
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
],

View File

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
@@ -41,6 +42,9 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
// ensure head gets its best justified info.
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = s.bestJustifiedCheckpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
// Get head from the fork choice service.
@@ -269,6 +273,57 @@ func (s *Service) hasHeadState() bool {
return s.head != nil && s.head.state != nil
}
// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
var justifiedState state.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
if err != nil {
return err
}
} else {
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return err
}
}
if justifiedState == nil || justifiedState.IsNil() {
return errors.New("justified state can't be nil")
}
epoch := time.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}); err != nil {
return err
}
s.justifiedBalancesLock.Lock()
defer s.justifiedBalancesLock.Unlock()
s.justifiedBalances = justifiedBalances
return nil
}
func (s *Service) getJustifiedBalances() []uint64 {
s.justifiedBalancesLock.RLock()
defer s.justifiedBalancesLock.RUnlock()
return s.justifiedBalances
}
// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(

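The hunks above replace the on-demand state balance cache with a plain slice that is recomputed whenever the justified checkpoint changes and read under an RWMutex. Below is a minimal, self-contained sketch of that locking pattern; balanceHolder and its methods are illustrative stand-ins, not Prysm types.

package main

import (
    "fmt"
    "sync"
)

// balanceHolder is an illustrative stand-in for the fields added to Service above:
// a balances slice plus an RWMutex, written only when the justified checkpoint changes.
type balanceHolder struct {
    mu       sync.RWMutex
    balances []uint64
}

// set plays the role of cacheJustifiedStateBalances: recompute the slice once,
// then swap it in under the write lock.
func (h *balanceHolder) set(b []uint64) {
    h.mu.Lock()
    defer h.mu.Unlock()
    h.balances = b
}

// get plays the role of getJustifiedBalances: hot paths such as updateHead read
// the cached slice under the read lock instead of hitting stategen every time.
func (h *balanceHolder) get() []uint64 {
    h.mu.RLock()
    defer h.mu.RUnlock()
    return h.balances
}

func main() {
    h := &balanceHolder{}
    h.set([]uint64{32, 0, 31}) // effective balances; 0 stands for an inactive validator
    fmt.Println(h.get())
}

The trade-off mirrors the diff: writes happen only on checkpoint updates, so updateHead and the attestation routine can read the balances without touching stategen.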
View File

@@ -123,15 +123,13 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
ctx := context.Background()
state, _ := util.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
balances, err := service.justifiedBalances.get(ctx, r)
require.NoError(t, err)
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {

View File

@@ -25,18 +25,18 @@ func TestService_TreeHandler(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
fcs := protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, opts...)
s, err := NewService(ctx)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)

View File

@@ -130,14 +130,6 @@ var (
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_hit",
Help: "Count the number of state balance cache hits.",
})
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
)
// reportSlotMetrics reports slot related metrics.

View File

@@ -1,50 +0,0 @@
package blockchain
import (
"context"
"errors"
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
}
// warning: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
return []Option{
withStateBalanceCache(satisfactoryStateBalanceCache()),
}
}
type mockStateByRooter struct {
state state.BeaconState
err error
}
var _ stateByRooter = &mockStateByRooter{}
func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
// returns an instance of the state balance cache that can be used
// to satisfy the requirement for one in NewService, but which will
// always return an error if used.
func satisfactoryStateBalanceCache() *stateBalanceCache {
err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
}

View File

@@ -130,13 +130,6 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
}
}
func withStateBalanceCache(c *stateBalanceCache) Option {
return func(s *Service) error {
s.justifiedBalances = c
return nil
}
}
// WithFinalizedStateAtStartUp to store finalized state at start up.
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return func(s *Service) error {

View File

@@ -24,13 +24,14 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
@@ -130,14 +131,14 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -156,12 +157,13 @@ func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -228,12 +230,13 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
epoch := types.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
@@ -265,10 +268,12 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)}))
@@ -276,10 +281,12 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
@@ -287,10 +294,12 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -299,9 +308,12 @@ func TestAttEpoch_NotMatch(t *testing.T) {
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
d := util.HydrateAttestationData(&ethpb.AttestationData{})
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
@@ -309,10 +321,12 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b := util.NewBeaconBlock()
b.Block.Slot = 2
@@ -326,10 +340,12 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b := util.NewBeaconBlock()
b.Block.Slot = 2
@@ -345,14 +361,10 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -375,10 +387,12 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -401,10 +415,12 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32

View File

@@ -151,12 +151,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
s.finalizedCheckpt = postState.FinalizedCheckpoint()
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", s.justifiedCheckpt.Root)
return errors.Wrap(err, msg)
}
if err := s.updateHead(ctx, balances); err != nil {
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.WithError(err).Warn("Could not update head")
}

View File

@@ -193,6 +193,9 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
if canUpdate {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
return nil
@@ -203,13 +206,12 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
// This method does not have defense against the fork choice bouncing attack, which is why it's only recommended to be used during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
s.prevJustifiedCheckpt = s.justifiedCheckpt
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
s.justifiedCheckpt = cp
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
s.justifiedCheckpt = cp
return nil
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
}
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -328,9 +330,7 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
// we don't need to check if the previous justified checkpoint was an ancestor since the new
// finalized checkpoint is overriding it.
return nil
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
}
// Update justified if store justified is not in chain with finalized check point.
@@ -345,6 +345,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
}
if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
}
return nil

View File

@@ -35,17 +35,16 @@ import (
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
@@ -135,12 +134,13 @@ func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -186,12 +186,14 @@ func TestStore_OnBlockBatch(t *testing.T) {
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
service.genesisTime = time.Now()
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -219,13 +221,16 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -250,12 +255,13 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
@@ -283,12 +289,13 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -320,13 +327,10 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
}
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
signedBlock := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
@@ -357,12 +361,10 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
@@ -398,12 +400,10 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
@@ -442,12 +442,10 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
@@ -588,8 +586,7 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
cancel()
@@ -601,14 +598,10 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -649,9 +642,10 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
func TestAncestor_CanUseForkchoice(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -688,14 +682,10 @@ func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -730,9 +720,10 @@ func TestAncestor_CanUseDB(t *testing.T) {
func TestEnsureRootNotZeroHashes(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
cfg := &config{}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
service.genesisRoot = [32]byte{'a'}
r := service.ensureRootNotZeros(params.BeaconConfig().ZeroHash)
@@ -744,12 +735,6 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
ctx := context.Background()
type args struct {
cachedCheckPoint *ethpb.Checkpoint
@@ -791,8 +776,9 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.justifiedCheckpt = test.args.cachedCheckPoint
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := util.NewBeaconState()
@@ -829,12 +815,6 @@ func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
b := util.NewBeaconBlock()
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
@@ -889,8 +869,9 @@ func TestVerifyBlkDescendant(t *testing.T) {
},
}
for _, tt := range tests {
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: tt.args.finalizedRoot[:],
}
@@ -904,10 +885,12 @@ func TestVerifyBlkDescendant(t *testing.T) {
}
func TestUpdateJustifiedInitSync(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
gBlk := util.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
@@ -933,9 +916,10 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
cfg := &config{}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -947,9 +931,10 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
cfg := &config{}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
@@ -961,18 +946,18 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
StateNotifier: &mock.MockStateNotifier{},
}
service, err := NewService(ctx, opts...)
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -1006,12 +991,18 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
beaconDB := testDB.SetupDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

View File

@@ -131,13 +131,7 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
continue
}
s.processAttestations(s.ctx)
balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err)
continue
}
if err := s.updateHead(s.ctx, balances); err != nil {
if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
}
}

View File

@@ -9,7 +9,9 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -40,10 +42,12 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
beaconDB := testDB.SetupDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -67,10 +71,12 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -95,12 +101,18 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
func TestProcessAttestations_Ok(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()))
beaconDB := testDB.SetupDB(t)
service, err := NewService(ctx, opts...)
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
AttPool: attestations.NewPool(),
}
service, err := NewService(ctx)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))

View File

@@ -124,16 +124,21 @@ func TestService_ReceiveBlock(t *testing.T) {
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, opts...)
s, err := NewService(ctx)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -161,17 +166,21 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, opts...)
s, err := NewService(ctx)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -241,14 +250,19 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, opts...)
s, err := NewService(ctx)
require.NoError(t, err)
s.cfg = cfg
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
@@ -273,10 +287,9 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
func TestService_HasInitSyncBlock(t *testing.T) {
opts := testServiceOptsNoDB()
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
s, err := NewService(context.Background(), opts...)
s, err := NewService(context.Background())
require.NoError(t, err)
s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}}
r := [32]byte{'a'}
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
@@ -288,10 +301,11 @@ func TestService_HasInitSyncBlock(t *testing.T) {
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), opts...)
s, err := NewService(context.Background())
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.finalizedCheckpt = &ethpb.Checkpoint{}
@@ -301,10 +315,11 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
}
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
s, err := NewService(context.Background())
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -314,10 +329,11 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
}
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
s, err := NewService(context.Background())
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
s.genesisTime = time.Now()

View File

@@ -62,9 +62,9 @@ type Service struct {
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]block.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
//justifiedBalances []uint64
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
}
// config options for the service.
@@ -97,6 +97,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
cfg: &config{},
}
for _, opt := range opts {
@@ -105,12 +106,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}
var err error
if srv.justifiedBalances == nil {
srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
if err != nil {
return nil, err
}
}
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err
@@ -156,6 +151,9 @@ func (s *Service) Start() {
// Resume fork choice.
s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
log.Fatalf("Could not cache justified state balances: %v", err)
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
@@ -342,6 +340,9 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
return err
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)

View File

@@ -108,24 +108,25 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
depositCache, err := depositcache.New()
require.NoError(t, err)
stateGen := stategen.New(beaconDB)
// Save a state in stategen for purposes of testing a service stop / shutdown.
require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
opts := []Option{
WithDatabase(beaconDB),
WithDepositCache(depositCache),
WithChainStartFetcher(web3Service),
WithAttestationPool(attestations.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
WithAttestationService(attService),
WithStateGen(stateGen),
cfg := &config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
DepositCache: depositCache,
ChainStartFetcher: web3Service,
P2p: &mockBroadcaster{},
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
AttService: attService,
}
chainService, err := NewService(ctx, opts...)
// Save a state in stategen for purposes of testing a service stop / shutdown.
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
chainService, err := NewService(ctx)
require.NoError(t, err, "Unable to setup chain service")
chainService.cfg = cfg
chainService.genesisTime = time.Unix(1, 0) // non-zero time
return chainService

View File

@@ -1,82 +0,0 @@
package blockchain
import (
"context"
"errors"
"sync"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)
var errNilStateFromStategen = errors.New("justified state can't be nil")
type stateBalanceCache struct {
sync.Mutex
balances []uint64
root [32]byte
stateGen stateByRooter
}
type stateByRooter interface {
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}
// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
if sg == nil {
return nil, errors.New("Can't initialize state balance cache without stategen")
}
return &stateBalanceCache{stateGen: sg}, nil
}
// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return nil, err
}
if justifiedState == nil || justifiedState.IsNil() {
return nil, errNilStateFromStategen
}
epoch := time.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}
if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
return nil, err
}
c.balances = justifiedBalances
c.root = justifiedRoot
return c.balances, nil
}
// getBalances takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
c.Lock()
defer c.Unlock()
if justifiedRoot == c.root {
stateBalanceCacheHit.Inc()
return c.balances, nil
}
return c.update(ctx, justifiedRoot)
}
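For contrast with the new approach, the file deleted above implemented a single-entry cache keyed by the justified root: get() returns the cached balances on a root match and otherwise reloads them through stategen. A reduced, self-contained sketch of that shape follows; singleRootCache and its loader are illustrative, not the deleted Prysm type.

package main

import (
    "context"
    "fmt"
    "sync"
)

// singleRootCache is an illustrative reduction of the deleted stateBalanceCache:
// one cached value, keyed by a single root, refreshed through a loader on a key mismatch.
type singleRootCache struct {
    sync.Mutex
    root     [32]byte
    balances []uint64
    load     func(ctx context.Context, root [32]byte) ([]uint64, error)
}

// get mirrors the deleted get()/update() pair: return the cached balances when the
// requested root matches, otherwise reload and replace the single cache entry.
func (c *singleRootCache) get(ctx context.Context, root [32]byte) ([]uint64, error) {
    c.Lock()
    defer c.Unlock()
    if root == c.root {
        return c.balances, nil // cache hit
    }
    b, err := c.load(ctx, root)
    if err != nil {
        return nil, err
    }
    c.root, c.balances = root, b
    return b, nil
}

func main() {
    c := &singleRootCache{load: func(context.Context, [32]byte) ([]uint64, error) {
        return []uint64{32, 32}, nil // stand-in for reading balances out of stategen
    }}
    b, _ := c.get(context.Background(), [32]byte{'a'})
    fmt.Println(b)
}

Because the key is a single root, any change of the justified checkpoint invalidates the whole cache, which is why the stateBalanceCacheHit/Miss counters removed in metrics.go accompanied it.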

View File

@@ -1,225 +0,0 @@
package blockchain
import (
"context"
"encoding/binary"
"errors"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/time/slots"
)
type mockStateByRoot struct {
state state.BeaconState
err error
}
func (m *mockStateByRoot) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
type testStateOpt func(*ethpb.BeaconStateAltair)
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Validators = v
}
}
func testStateWithSlot(slot types.Slot) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Slot = slot
}
}
func testStateFixture(opts ...testStateOpt) state.BeaconState {
a := &ethpb.BeaconStateAltair{}
for _, o := range opts {
o(a)
}
s, _ := v2.InitializeFromProtoUnsafe(a)
return s
}
func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
vs := make([]*ethpb.Validator, count)
var i uint32 = 0
for ; i < uint32(count); i++ {
pk := make([]byte, 48)
binary.LittleEndian.PutUint32(pk, i)
v := &ethpb.Validator{PublicKey: pk}
for _, o := range opts {
o(v)
}
vs[i] = v
}
return vs
}
func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
} else {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
}
}
}
func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
} else {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}
}
func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}
func balanceIsKeyTimes2(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
v.EffectiveBalance = uint64(pki) * 2
}
func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsExpired(types.Slot(99)),
balanceIsKeyTimes2), balances
}
func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsQueued(types.Slot(99)),
balanceIsKeyTimes2), balances
}
func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
return generateTestValidators(10,
allValidatorsValid(types.Slot(99)),
balanceIsKeyTimes2), balances
}
func TestStateBalanceCache(t *testing.T) {
type sbcTestCase struct {
err error
root [32]byte
sbc *stateBalanceCache
balances []uint64
name string
}
sentinelCacheMiss := errors.New("Cache missed, as expected!")
sentinelBalances := []uint64{1, 2, 3, 4, 5}
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
allValidValidators, allValidBalances := testAllValidValidators()
cases := []sbcTestCase{
{
root: bytesutil.ToBytes32([]byte{'A'}),
balances: sentinelBalances,
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
err: sentinelCacheMiss,
},
root: bytesutil.ToBytes32([]byte{'A'}),
balances: sentinelBalances,
},
name: "cache hit",
},
// this works by using a staterooter that returns a known error
// so really we're testing the miss by making sure stategen got called
// this also tells us stategen errors are propagated
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
//state: generateTestValidators(1, testWithBadEpoch),
err: sentinelCacheMiss,
},
root: bytesutil.ToBytes32([]byte{'B'}),
},
err: sentinelCacheMiss,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "cache miss",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{},
root: bytesutil.ToBytes32([]byte{'B'}),
},
err: errNilStateFromStategen,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "error for nil state upon cache miss",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(halfExpiredValidators)),
},
},
balances: halfExpiredBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "test filtering by exit epoch",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(halfQueuedValidators)),
},
},
balances: halfQueuedBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "test filtering by activation epoch",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(allValidValidators)),
},
},
balances: allValidBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "happy path",
},
}
ctx := context.Background()
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cache := c.sbc
cacheRootStart := cache.root
b, err := cache.get(ctx, c.root)
require.ErrorIs(t, err, c.err)
require.DeepEqual(t, c.balances, b)
if c.err != nil {
// if there was an error somewhere, the root should not have changed (unless it already matched)
require.Equal(t, cacheRootStart, cache.root)
} else {
// when successful, the cache should always end with a root matching the request
require.Equal(t, c.root, cache.root)
}
})
}
}

View File

@@ -147,6 +147,7 @@ type Database interface {
io.Closer
backup.BackupExporter
HeadAccessDatabase
DatabasePath() string
ClearDB() error
}

View File

@@ -13,8 +13,8 @@ go_library(
"finalized_block_roots.go",
"genesis.go",
"kv.go",
"light.go",
"log.go",
"migrate.go",
"migration.go",
"migration_archived_index.go",
"migration_block_slot_index.go",
@@ -41,6 +41,7 @@ go_library(
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//cmd:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//container/slice:go_default_library",
@@ -64,6 +65,7 @@ go_library(
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_prombbolt//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@io_etcd_go_bbolt//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

View File

@@ -0,0 +1,241 @@
package kv
import (
"bytes"
"fmt"
"path"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/snappy"
"github.com/prysmaticlabs/prysm/monitoring/progress"
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/urfave/cli/v2"
)
const batchSize = 10
var migrationStateKey = []byte("migration_state")
// MigrateStateDB normalizes the state object to save space.
// It also avoids re-storing elements that are already present in the
// circular buffers.
func MigrateStateDB(cliCtx *cli.Context) error {
dataDir := cliCtx.String(cmd.DataDirFlag.Name)
dbDir := path.Join(dataDir, BeaconNodeDbDirName)
dbNameWithPath := path.Join(dbDir, DatabaseFileName)
// check if the database file exists
if !file.FileExists(dbNameWithPath) {
return fmt.Errorf("database file not found: %s", dbNameWithPath)
}
// open the raw database file. If the file is busy, then exit.
db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
if openErr != nil {
return fmt.Errorf("could not open db: %v", openErr)
}
yes, valErr := isValidatorEntriesAlreadyMigrated(db)
if valErr != nil {
return fmt.Errorf("could not check if validator entries are already migrated: %v", valErr)
}
if yes {
if err := migrateWithValidatorEntries(db); err != nil {
return err
}
} else {
migrateOnlyTheValidatorEntryHashes(db)
}
log.Info("migration completed successfully")
return nil
}
func isValidatorEntriesAlreadyMigrated(db *bolt.DB) (bool, error) {
// if the flag is not enabled, but the migration is over, then
// follow the new code path as if the flag is enabled.
returnFlag := false
if err := db.View(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
b := mb.Get(migrationStateValidatorsKey)
returnFlag = bytes.Equal(b, migrationCompleted)
return nil
}); err != nil {
return returnFlag, err
}
return returnFlag, nil
}
func migrateWithValidatorEntries(db *bolt.DB) error {
// create the new buckets
// - stateValidatorsBucket
// - stateValidatorsHashesBucket
if err := createNewBuckets(db, stateValidatorsBucket, stateValidatorHashesBucket); err != nil {
return err
}
// get all the keys of the state bucket.
// the count is used to size the progress bar.
keys, err := getStateBucketKeys(db)
if err != nil {
return err
}
// prepare the progress bar with the total count of the keys to migrate
bar := progress.InitializeProgressBar(len(keys), "Migrating state bucket to new schema.")
// for each state, in batches:
// - store the unique validator entries in stateValidatorsBucket
// - get the ID for each validator entry
// - compress all the validator entry IDs and store them in the "validators" field of the state
batchNo := 0
for batchIndex := 0; batchIndex < len(keys); batchIndex += batchSize {
if err := db.Update(func(tx *bolt.Tx) error {
// fetch the source and destination buckets
stateBkt := tx.Bucket(stateBucket)
if stateBkt == nil {
return errors.New("could not open \"state\" bucket")
}
valEntryBkt := tx.Bucket(stateValidatorsBucket)
if valEntryBkt == nil {
return errors.New("could not open \"state-validators\" bucket")
}
valHashBkt := tx.Bucket(stateValidatorHashesBucket)
if valHashBkt == nil {
return errors.New("could not open \"state-validator-hashes\" bucket")
}
// migrate the key values for this batch
cursor := stateBkt.Cursor()
count := 0
index := batchIndex
for _, v := cursor.Seek(keys[index]); count < batchSize && index < len(keys); _, v = cursor.Next() {
enc, err := snappy.Decode(nil, v)
if err != nil {
return err
}
protoState := &v1alpha1.BeaconState{}
if err := protoState.UnmarshalSSZ(enc); err != nil {
return errors.Wrap(err, "failed to unmarshal encoding for phase0")
}
// no validators in state to migrate
if len(protoState.Validators) == 0 {
return fmt.Errorf("no validator entries in state key 0x%s", hexutil.Encode(keys[index]))
}
validatorHashes, err := insertValidators(ctx, protoState.Validators, valEntryBkt)
if err != nil {
return err
}
validatorIndexes, err := insertValidatorHashes(ctx, validatorHashes)
if err != nil {
return err
}
// add the validator entry indexes for a given block root.
compValidatorIndexes := snappy.Encode(nil, validatorIndexes)
// zero out the validator entries in the BeaconState object.
protoState.Validators = make([]*v1alpha1.Validator, 0)
rawObj, err := protoState.MarshalSSZ()
if err != nil {
return err
}
stateBytes := snappy.Encode(nil, append(altairKey, rawObj...))
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {
return stateErr
}
// add the entry indexes in the validator field of the BeaconState object.
protoState.Validators = compValidatorIndexes
rawObj, err = protoState.MarshalSSZ()
if err != nil {
return err
}
stateBytes, err = encode(ctx, compValidatorIndexes)
if err != nil {
return err
}
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {
return stateErr
}
count++
index++
if barErr := bar.Add(1); barErr != nil {
return barErr
}
}
return nil
}); err != nil {
return err
}
batchNo++
}
// set the migration entry to done
if err := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if mb == nil {
return nil
}
return mb.Put(migrationStateKey, migrationCompleted)
}); err != nil {
return err
}
log.Infof("migration done for bucket %s.", stateBucket)
return nil
}
func migrateOnlyTheValidatorEntryHashes(db *bolt.DB) {
}
func createNewBuckets(db *bolt.DB, buckets ...[]byte) error {
if err := db.Update(func(tx *bolt.Tx) error {
for _, bucket := range buckets {
if _, err := tx.CreateBucket(bucket); err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
return nil
}
func getStateBucketKeys(db *bolt.DB) ([][]byte, error) {
var keys [][]byte
if err := db.Update(func(tx *bolt.Tx) error {
stateBkt := tx.Bucket(stateBucket)
if stateBkt == nil {
return nil
}
k, err := stateBucketKeys(stateBkt)
if err != nil {
return err
}
keys = k
return nil
}); err != nil {
return nil, err
}
return keys, nil
}
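The stateBucketKeys helper used above is not shown in this diff; a minimal sketch of what it is assumed to do (walk the bucket with a cursor and collect a copy of every key) could look like this:
// stateBucketKeys returns a copy of every key in the bucket, so the migration above
// can iterate them in stable batches. Keys returned by a bolt cursor are only valid
// for the life of the transaction, hence the copy.
func stateBucketKeys(stateBkt *bolt.Bucket) ([][]byte, error) {
var keys [][]byte
c := stateBkt.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
keys = append(keys, append(make([]byte, 0, len(k)), k...))
}
return keys, nil
}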

View File

@@ -35,6 +35,7 @@ var (
attestationTargetEpochIndicesBucket = []byte("attestation-target-epoch-indices")
finalizedBlockRootsIndexBucket = []byte("finalized-block-roots-index")
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
stateValidatorHashesBucket = []byte("state-validator-hashes")
// Specific item keys.
headBlockRootKey = []byte("head-root")

View File

@@ -1,32 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"service.go",
"update_comparison.go",
"updater.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/light",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,209 +0,0 @@
package light
import (
"context"
"fmt"
"sync"
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
syncSrv "github.com/prysmaticlabs/prysm/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/io/file"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
block2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
)
type UpdatesFetcher interface {
BestUpdateForPeriod(ctx context.Context, period uint64) (*ethpb.LightClientUpdate, error)
LatestFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate
LatestNonFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate
}
type Config struct {
StateGen stategen.StateManager
Database iface.Database
HeadFetcher blockchain.HeadFetcher
FinalizationFetcher blockchain.FinalizationFetcher
StateNotifier statefeed.Notifier
TimeFetcher blockchain.TimeFetcher
SyncChecker syncSrv.Checker
}
type Service struct {
cfg *Config
cancelFunc context.CancelFunc
prevHeadData map[[32]byte]*ethpb.SyncAttestedData
lock sync.RWMutex
genesisTime time.Time
finalizedByEpoch map[types.Epoch]*ethpb.LightClientFinalizedCheckpoint
bestUpdateByPeriod map[uint64]*ethpb.LightClientUpdate
latestFinalizedUpdate *ethpb.LightClientUpdate
latestNonFinalizedUpdate *ethpb.LightClientUpdate
}
// New creates a light client update service with the given configuration.
func New(ctx context.Context, cfg *Config) *Service {
return &Service{
cfg: cfg,
prevHeadData: make(map[[32]byte]*ethpb.SyncAttestedData),
finalizedByEpoch: make(map[types.Epoch]*ethpb.LightClientFinalizedCheckpoint),
bestUpdateByPeriod: make(map[uint64]*ethpb.LightClientUpdate),
}
}
func (s *Service) Start() {
go s.run()
}
func (s *Service) Stop() error {
s.cancelFunc()
return nil
}
func (s *Service) Status() error {
return nil
}
func (s *Service) BestUpdateForPeriod(ctx context.Context, period uint64) (*ethpb.LightClientUpdate, error) {
s.lock.RLock()
defer s.lock.RUnlock()
update, ok := s.bestUpdateByPeriod[period]
if !ok {
return nil, fmt.Errorf("no update found for period %d", period)
}
return update, nil
}
func (s *Service) LatestFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate {
s.lock.RLock()
defer s.lock.RUnlock()
return s.latestFinalizedUpdate
}
func (s *Service) LatestNonFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate {
s.lock.RLock()
defer s.lock.RUnlock()
return s.latestNonFinalizedUpdate
}
func (s *Service) run() {
ctx, cancel := context.WithCancel(context.Background())
s.cancelFunc = cancel
s.waitForChainInitialization(ctx)
s.waitForSync(ctx)
// Initialize the service from finalized (state, block) data.
log.Info("Initializing from finalized data")
if err := s.initializeFromFinalizedData(ctx); err != nil {
log.Fatal(err)
}
log.Info("Beginning subscriptions")
// Begin listening for new chain head and finalized checkpoint events.
go s.subscribeHeadEvent(ctx)
go s.subscribeFinalizedEvent(ctx)
}
func (s *Service) waitForChainInitialization(ctx context.Context) {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
defer close(stateChannel)
for {
select {
case stateEvent := <-stateChannel:
// Wait for us to receive the genesis time via a chain started notification.
if stateEvent.Type == statefeed.Initialized {
// Alternatively, if the chain has already started, we then read the genesis
// time value from this data.
data, ok := stateEvent.Data.(*statefeed.InitializedData)
if !ok {
log.Error(
"Could not receive chain start notification, want *statefeed.ChainStartedData",
)
return
}
s.genesisTime = data.StartTime
log.WithField("genesisTime", s.genesisTime).Info(
"Received chain initialization event",
)
return
}
case err := <-stateSub.Err():
log.WithError(err).Error(
"Could not subscribe to state events",
)
return
case <-ctx.Done():
return
}
}
}
func (s *Service) waitForSync(ctx context.Context) {
slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
defer slotTicker.Done()
for {
select {
case <-slotTicker.C():
if slots.ToEpoch(slots.SinceGenesis(s.genesisTime)) < 6 {
continue
}
return
case <-ctx.Done():
return
}
}
}
func (s *Service) finalizedBlockOrGenesis(ctx context.Context, cpt *ethpb.Checkpoint) (block2.SignedBeaconBlock, error) {
checkpointRoot := bytesutil.ToBytes32(cpt.Root)
block, err := s.cfg.Database.Block(ctx, checkpointRoot)
if err != nil {
return nil, err
}
if block == nil || block.IsNil() {
return s.cfg.Database.GenesisBlock(ctx)
}
return block, nil
}
func (s *Service) finalizedStateOrGenesis(ctx context.Context, cpt *ethpb.Checkpoint) (state.BeaconState, error) {
checkpointRoot := bytesutil.ToBytes32(cpt.Root)
st, err := s.cfg.StateGen.StateByRoot(ctx, checkpointRoot)
if err != nil {
return nil, err
}
if st == nil || st.IsNil() {
return s.cfg.Database.GenesisState(ctx)
}
return st, nil
}
func (s *Service) initializeFromFinalizedData(ctx context.Context) error {
cpt, err := s.cfg.Database.FinalizedCheckpoint(ctx)
if err != nil {
return err
}
finalizedBlock, err := s.finalizedBlockOrGenesis(ctx, cpt)
if err != nil {
return err
}
finalizedState, err := s.finalizedStateOrGenesis(ctx, cpt)
if err != nil {
return err
}
enc, err := finalizedState.MarshalSSZ()
if err != nil {
return err
}
if err := file.WriteFile("/tmp/state.ssz", enc); err != nil {
return err
}
return s.onFinalized(ctx, finalizedBlock, finalizedState)
}
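As a point of reference, here is an illustrative consumer of the UpdatesFetcher interface defined above; the fallback behaviour is an assumption for the sketch, not something this package prescribed:
// fetchRecentUpdates asks for the best update of a given sync committee period and
// falls back to the latest finalized update when none is stored for that period yet.
func fetchRecentUpdates(ctx context.Context, f UpdatesFetcher, period uint64) (*ethpb.LightClientUpdate, error) {
update, err := f.BestUpdateForPeriod(ctx, period)
if err == nil {
return update, nil
}
if latest := f.LatestFinalizedUpdate(ctx); latest != nil {
return latest, nil
}
return nil, fmt.Errorf("no light client update available for period %d: %v", period, err)
}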

View File

@@ -1,120 +0,0 @@
package light
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
)
const (
finalizedCheckpointStateIndex = 20
nextSyncCommitteeStateIndex = 23
)
func (s *Service) subscribeFinalizedEvent(ctx context.Context) {
stateChan := make(chan *feed.Event, 1)
sub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChan)
defer sub.Unsubscribe()
for {
select {
case ev := <-stateChan:
if ev.Type == statefeed.FinalizedCheckpoint {
blk, beaconState, err := s.parseFinalizedEvent(ctx, ev.Data)
if err != nil {
log.Error(err)
continue
}
if err := s.onFinalized(ctx, blk, beaconState); err != nil {
log.Error(err)
continue
}
}
case <-sub.Err():
return
case <-ctx.Done():
return
}
}
}
func (s *Service) parseFinalizedEvent(
ctx context.Context, eventData interface{},
) (block.SignedBeaconBlock, state.BeaconState, error) {
finalizedCheckpoint, ok := eventData.(*v1.EventFinalizedCheckpoint)
if !ok {
return nil, nil, errors.New("expected finalized checkpoint event")
}
checkpointRoot := bytesutil.ToBytes32(finalizedCheckpoint.Block)
blk, err := s.cfg.Database.Block(ctx, checkpointRoot)
if err != nil {
return nil, nil, err
}
if blk == nil || blk.IsNil() {
return nil, nil, errors.New("finalized block is nil")
}
st, err := s.cfg.StateGen.StateByRoot(ctx, checkpointRoot)
if err != nil {
return nil, nil, err
}
if st == nil || st.IsNil() {
return nil, nil, errors.New("finalized state is nil")
}
return blk, st, nil
}
func (s *Service) onFinalized(
ctx context.Context, signedBlock block.SignedBeaconBlock, postState state.BeaconStateAltair,
) error {
if _, ok := postState.InnerStateUnsafe().(*ethpb.BeaconStateAltair); !ok {
return errors.New("expected an Altair beacon state")
}
blk := signedBlock.Block()
header, err := block.BeaconBlockHeaderFromBlockInterface(blk)
if err != nil {
return err
}
tb, err := ssz.NewTreeBackedState(postState)
if err != nil {
return err
}
proof, gIndex, err := tb.Proof(nextSyncCommitteeStateIndex)
if err != nil {
return err
}
nextSyncCommittee, err := postState.NextSyncCommittee()
if err != nil {
return err
}
root, err := postState.HashTreeRoot(ctx)
if err != nil {
return err
}
nextSyncCommitteeRoot, err := nextSyncCommittee.HashTreeRoot()
if err != nil {
return err
}
log.Info("On finalized update")
log.Infof("Header state root %#x, state hash tree root %#x", header.StateRoot, root)
log.Infof("Generating proof against root %#x with gindex %d and leaf root %#x", root, gIndex, nextSyncCommitteeRoot)
log.Info("-----")
log.Infof("Proof with length %d", len(proof))
for _, elem := range proof {
log.Infof("%#x", bytesutil.Trunc(elem))
}
log.Info("-----")
s.lock.Lock()
defer s.lock.Unlock()
currentEpoch := slots.ToEpoch(blk.Slot())
s.finalizedByEpoch[currentEpoch] = &ethpb.LightClientFinalizedCheckpoint{
Header: header,
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: proof,
}
return nil
}
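The proof logged above is a standard SSZ single-leaf Merkle branch. A hedged sketch of how a light client would check it, using only the hash-up-the-branch rule (assumes a crypto/sha256 import; this helper is not part of the deleted package):
// verifyMerkleBranch walks a proof from the leaf up to the root. At each level the
// low bit of the generalized index says whether the current node is a right (1) or
// left (0) child, which fixes the concatenation order before hashing.
func verifyMerkleBranch(leaf [32]byte, branch [][]byte, gIndex uint64, root [32]byte) bool {
node := leaf
for _, sibling := range branch {
buf := make([]byte, 0, 64)
if gIndex&1 == 1 {
buf = append(append(buf, sibling...), node[:]...)
} else {
buf = append(append(buf, node[:]...), sibling...)
}
node = sha256.Sum256(buf)
gIndex >>= 1
}
return node == root
}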

View File

@@ -1,154 +0,0 @@
package light
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
"github.com/prysmaticlabs/prysm/network/forks"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
)
func (s *Service) subscribeHeadEvent(ctx context.Context) {
stateChan := make(chan *feed.Event, 1)
sub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChan)
defer sub.Unsubscribe()
for {
select {
case ev := <-stateChan:
if ev.Type == statefeed.NewHead {
head, beaconState, err := s.getChainHeadAndState(ctx)
if err != nil {
log.Error(err)
continue
}
if err := s.onHead(ctx, head, beaconState); err != nil {
log.Error(err)
continue
}
}
case <-sub.Err():
return
case <-ctx.Done():
return
}
}
}
func (s *Service) getChainHeadAndState(ctx context.Context) (block.SignedBeaconBlock, state.BeaconState, error) {
head, err := s.cfg.HeadFetcher.HeadBlock(ctx)
if err != nil {
return nil, nil, err
}
if head == nil || head.IsNil() {
return nil, nil, errors.New("head block is nil")
}
st, err := s.cfg.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, nil, err
}
if st == nil || st.IsNil() {
return nil, nil, errors.New("head state is nil")
}
return head, st, nil
}
func (s *Service) onHead(ctx context.Context, head block.SignedBeaconBlock, postState state.BeaconStateAltair) error {
if _, ok := postState.InnerStateUnsafe().(*ethpb.BeaconStateAltair); !ok {
return errors.New("expected an Altair beacon state")
}
blk := head.Block()
tb, err := ssz.NewTreeBackedState(postState)
if err != nil {
return err
}
header, err := block.BeaconBlockHeaderFromBlockInterface(blk)
if err != nil {
return err
}
finalityBranch, _, err := tb.Proof(finalizedCheckpointStateIndex)
if err != nil {
return err
}
nextSyncCommitteeBranch, gIndex, err := tb.Proof(nextSyncCommitteeStateIndex)
if err != nil {
return err
}
stRoot, err := postState.HashTreeRoot(ctx)
if err != nil {
return err
}
blkRoot, err := blk.HashTreeRoot()
if err != nil {
return err
}
log.Infof("On head, generating sync committee proof for root %#x and index %d, block root %#x, header state root %#x", stRoot[:], gIndex, blkRoot, header.StateRoot)
nextSyncCommittee, err := postState.NextSyncCommittee()
if err != nil {
return err
}
s.lock.Lock()
s.prevHeadData[blkRoot] = &ethpb.SyncAttestedData{
Header: header,
FinalityCheckpoint: postState.FinalizedCheckpoint(),
FinalityBranch: finalityBranch,
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
}
s.lock.Unlock()
syncAttestedBlockRoot, err := helpers.BlockRootAtSlot(postState, postState.Slot()-1)
if err != nil {
return err
}
fork, err := forks.Fork(slots.ToEpoch(blk.Slot()))
if err != nil {
return err
}
syncAggregate, err := blk.Body().SyncAggregate()
if err != nil {
return err
}
sigData := &signatureData{
slot: blk.Slot(),
forkVersion: fork.CurrentVersion,
syncAggregate: syncAggregate,
}
s.lock.Lock()
syncAttestedData, ok := s.prevHeadData[bytesutil.ToBytes32(syncAttestedBlockRoot)]
if !ok {
s.lock.Unlock()
log.Info("Got useless data, skipping")
return nil // Useless data.
}
s.lock.Unlock()
committeePeriodWithFinalized, err := s.persistBestFinalizedUpdate(ctx, syncAttestedData, sigData)
if err != nil {
return err
}
if err := s.persistBestNonFinalizedUpdate(ctx, syncAttestedData, sigData, committeePeriodWithFinalized); err != nil {
return err
}
s.lock.Lock()
if len(s.prevHeadData) > PrevDataMaxSize {
for k := range s.prevHeadData {
delete(s.prevHeadData, k)
if len(s.prevHeadData) <= PrevDataMaxSize {
break
}
}
}
s.lock.Unlock()
return nil
}

View File

@@ -1,50 +0,0 @@
package light
import (
"bytes"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
func isBetterUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
prevIsFinalized := isFinalizedUpdate(prevUpdate)
newIsFinalized := isFinalizedUpdate(newUpdate)
// If newUpdate is finalized and prevUpdate is not, newUpdate is better.
if newIsFinalized && !prevIsFinalized {
return true
}
// If newUpdate is not finalized but prevUpdate is, newUpdate is worse.
if !newIsFinalized && prevIsFinalized {
return false
}
return hasMoreBits(newUpdate, prevUpdate)
}
func isLatestBestFinalizedUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
if newUpdate.FinalityHeader.Slot > prevUpdate.FinalityHeader.Slot {
return true
}
if newUpdate.FinalityHeader.Slot < prevUpdate.FinalityHeader.Slot {
return false
}
return hasMoreBits(newUpdate, prevUpdate)
}
func isLatestBestNonFinalizedUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
if newUpdate.Header.Slot > prevUpdate.Header.Slot {
return true
}
if newUpdate.Header.Slot < prevUpdate.Header.Slot {
return false
}
return hasMoreBits(newUpdate, prevUpdate)
}
func isFinalizedUpdate(update *ethpb.LightClientUpdate) bool {
if update.FinalityHeader == nil {
return false
}
return !bytes.Equal(params.BeaconConfig().ZeroHash[:], update.FinalityHeader.StateRoot)
}
func hasMoreBits(a *ethpb.LightClientUpdate, b *ethpb.LightClientUpdate) bool {
return a.SyncCommitteeBits.Count() > b.SyncCommitteeBits.Count()
}
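To make the ordering concrete, here is a simplified stand-in for the rules above, using plain values instead of the real LightClientUpdate type:
// betterUpdateExample mirrors the decision order implemented above: a finalized
// update always beats a non-finalized one, and ties are broken by the number of
// sync committee bits set.
func betterUpdateExample(prevFinalized, newFinalized bool, prevBits, newBits uint64) bool {
if newFinalized && !prevFinalized {
return true
}
if !newFinalized && prevFinalized {
return false
}
return newBits > prevBits
}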

View File

@@ -1,134 +0,0 @@
package light
import (
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
// Precomputed values for generalized indices.
const (
FinalizedRootIndex = 105
NextSyncCommitteeIndex = 55
PrevDataMaxSize = 64
)
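// For reference, the constants above follow from SSZ generalized-index arithmetic
// (a hedged note, assuming the Altair BeaconState layout of 24 top-level fields
// padded to 32 leaves): field i of the state sits at gindex 32+i, so
// next_sync_committee (field 23) -> 55, finalized_checkpoint (field 20) -> 52,
// and finalized_checkpoint.root -> 52*2+1 = 105 (root is the second of the
// Checkpoint container's two fields). The helper below is illustrative only.
func stateFieldGIndex(fieldIndex uint64) uint64 {
const stateLeaves = 32 // 24 Altair state fields rounded up to the next power of two
return stateLeaves + fieldIndex
}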
var log = logrus.WithField("prefix", "light")
type signatureData struct {
slot types.Slot
forkVersion []byte
syncAggregate *ethpb.SyncAggregate
}
func (s *Service) persistBestFinalizedUpdate(ctx context.Context, syncAttestedData *ethpb.SyncAttestedData, sigData *signatureData) (uint64, error) {
finalizedEpoch := syncAttestedData.FinalityCheckpoint.Epoch
s.lock.RLock()
finalizedData := s.finalizedByEpoch[finalizedEpoch]
s.lock.RUnlock()
if finalizedData == nil {
return 0, nil
}
committeePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(syncAttestedData.Header.Slot))
signaturePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(sigData.slot))
if committeePeriod != signaturePeriod {
return 0, nil
}
newUpdate := &ethpb.LightClientUpdate{
Header: finalizedData.Header,
NextSyncCommittee: finalizedData.NextSyncCommittee,
NextSyncCommitteeBranch: finalizedData.NextSyncCommitteeBranch,
FinalityHeader: syncAttestedData.Header,
FinalityBranch: syncAttestedData.FinalityBranch,
SyncCommitteeBits: sigData.syncAggregate.SyncCommitteeBits,
SyncCommitteeSignature: sigData.syncAggregate.SyncCommitteeSignature,
ForkVersion: sigData.forkVersion,
}
s.lock.RLock()
prevBestUpdate := s.bestUpdateByPeriod[committeePeriod]
s.lock.RUnlock()
if prevBestUpdate == nil || isBetterUpdate(prevBestUpdate, newUpdate) {
s.lock.Lock()
s.bestUpdateByPeriod[committeePeriod] = newUpdate
s.lock.Unlock()
}
s.lock.RLock()
prevLatestUpdate := s.latestFinalizedUpdate
s.lock.RUnlock()
if prevLatestUpdate == nil || isLatestBestFinalizedUpdate(prevLatestUpdate, newUpdate) {
s.lock.Lock()
s.latestFinalizedUpdate = newUpdate
s.lock.Unlock()
log.Info("Putting latest best finalized update")
rt, err := newUpdate.NextSyncCommittee.HashTreeRoot()
if err != nil {
return 0, err
}
log.Infof("Header state root %#x, state hash tree root %#x", newUpdate.Header.StateRoot, newUpdate.Header.StateRoot)
log.Infof("Generating proof against root %#x with gindex %d and leaf root %#x", newUpdate.Header.StateRoot, 55, rt)
log.Info("-----")
log.Infof("Proof with length %d", len(newUpdate.NextSyncCommitteeBranch))
for _, elem := range newUpdate.NextSyncCommitteeBranch {
log.Infof("%#x", bytesutil.Trunc(elem))
}
log.Info("-----")
}
return committeePeriod, nil
}
func (s *Service) persistBestNonFinalizedUpdate(ctx context.Context, syncAttestedData *ethpb.SyncAttestedData, sigData *signatureData, period uint64) error {
committeePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(syncAttestedData.Header.Slot))
signaturePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(sigData.slot))
if committeePeriod != signaturePeriod {
return nil
}
newUpdate := &ethpb.LightClientUpdate{
Header: syncAttestedData.Header,
NextSyncCommittee: syncAttestedData.NextSyncCommittee,
NextSyncCommitteeBranch: syncAttestedData.NextSyncCommitteeBranch,
FinalityHeader: nil,
FinalityBranch: nil,
SyncCommitteeBits: sigData.syncAggregate.SyncCommitteeBits,
SyncCommitteeSignature: sigData.syncAggregate.SyncCommitteeSignature,
ForkVersion: sigData.forkVersion,
}
// Optimization: If there's already a finalized update for this committee period, no need to
// create a non-finalized update.
if committeePeriod != period {
s.lock.RLock()
prevBestUpdate := s.bestUpdateByPeriod[committeePeriod]
s.lock.RUnlock()
if prevBestUpdate == nil || isBetterUpdate(prevBestUpdate, newUpdate) {
s.lock.Lock()
s.bestUpdateByPeriod[committeePeriod] = newUpdate
s.lock.Unlock()
}
}
// Store the latest update overall, regardless of whether it is the best update for its period.
s.lock.RLock()
prevLatestUpdate := s.latestNonFinalizedUpdate
s.lock.RUnlock()
if prevLatestUpdate == nil || isLatestBestNonFinalizedUpdate(prevLatestUpdate, newUpdate) {
// TODO: Don't store nextCommittee, that can be fetched through getBestUpdates()
s.lock.Lock()
s.latestNonFinalizedUpdate = newUpdate
s.lock.Unlock()
}
return nil
}
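For readers unfamiliar with the period arithmetic used throughout this file, a small illustrative helper (the 256 epochs-per-period value is the mainnet constant, assumed here rather than read from params):
// syncCommitteePeriodOf shows the arithmetic behind slots.SyncCommitteePeriod used
// above: a period is the epoch divided by EPOCHS_PER_SYNC_COMMITTEE_PERIOD, so an
// update is only built when the attested header and the signature slot fall inside
// the same 256-epoch window.
func syncCommitteePeriodOf(epoch uint64) uint64 {
const epochsPerSyncCommitteePeriod = 256 // mainnet value, assumed here for illustration
return epoch / epochsPerSyncCommitteePeriod
}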

View File

@@ -4,8 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"metrics.go",
"process_attestation.go",
"process_block.go",
"process_exit.go",
"service.go",
@@ -13,20 +11,10 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -35,24 +23,18 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"process_attestation_test.go",
"process_block_test.go",
"process_exit_test.go",
"service_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -1,70 +0,0 @@
package monitor
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "monitor")
// TODO: The Prometheus gauge vectors and counters in this package deprecate the
// corresponding gauge vectors and counters in the validator client.
// inclusionSlotGauge used to track the attestation inclusion slot
inclusionSlotGauge = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "monitor",
Name: "inclusion_slot",
Help: "Attestations inclusion slot",
},
[]string{
"validator_index",
},
)
// timelyHeadCounter used to track attestation timely head flags
timelyHeadCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "timely_head",
Help: "Attestation timely Head flag",
},
[]string{
"validator_index",
},
)
// timelyTargetCounter used to track attestation timely target flags
timelyTargetCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "timely_target",
Help: "Attestation timely Target flag",
},
[]string{
"validator_index",
},
)
// timelySourceCounter used to track attestation timely source flags
timelySourceCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "timely_source",
Help: "Attestation timely Source flag",
},
[]string{
"validator_index",
},
)
// aggregationCounter used to track aggregations
aggregationCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "aggregations",
Help: "Number of aggregation duties performed",
},
[]string{
"validator_index",
},
)
)

View File

@@ -1,218 +0,0 @@
package monitor
import (
"context"
"fmt"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the
// given slot is different than the last attested slot from this validator.
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool {
if !s.TrackedIndex(types.ValidatorIndex(idx)) {
return false
}
if lp, ok := s.latestPerformance[types.ValidatorIndex(idx)]; ok {
return lp.attestedSlot != slot
}
return false
}
// attestingIndices returns the indices of validators that appear in the
// given aggregated attestation.
func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) {
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
return attestation.AttestingIndices(att.AggregationBits, committee)
}
// logMessageTimelyFlagsForIndex returns the log message with the basic
// performance indicators for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx types.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
return logrus.Fields{
"ValidatorIndex": idx,
"Slot": data.Slot,
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
}
}
// processAttestations logs when an attestation from one of our tracked validators
// is included in a block.
func (s *Service) processAttestations(ctx context.Context, state state.BeaconState, blk block.BeaconBlock) {
if blk == nil || blk.Body() == nil {
return
}
for _, attestation := range blk.Body().Attestations() {
s.processIncludedAttestation(ctx, state, attestation)
}
}
// processIncludedAttestation logs when one of our tracked validators appears in the
// attesting indices of an included attestation that has not been processed before.
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) {
attestingIndices, err := attestingIndices(ctx, state, att)
if err != nil {
log.WithError(err).Error("Could not get attesting indices")
return
}
for _, idx := range attestingIndices {
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
balance, err := state.BalanceAtIndex(types.ValidatorIndex(idx))
if err != nil {
log.WithError(err).Error("Could not get balance")
return
}
aggregatedPerf := s.aggregatedPerformance[types.ValidatorIndex(idx)]
aggregatedPerf.totalAttestedCount++
aggregatedPerf.totalRequestedCount++
latestPerf := s.latestPerformance[types.ValidatorIndex(idx)]
balanceChg := balance - latestPerf.balance
latestPerf.balanceChange = balanceChg
latestPerf.balance = balance
latestPerf.attestedSlot = att.Data.Slot
latestPerf.inclusionSlot = state.Slot()
inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot))
aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot)
if state.Version() == version.Altair {
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
sourceIdx := params.BeaconConfig().TimelySourceFlagIndex
headIdx := params.BeaconConfig().TimelyHeadFlagIndex
var participation []byte
if slots.ToEpoch(latestPerf.inclusionSlot) ==
slots.ToEpoch(latestPerf.attestedSlot) {
participation, err = state.CurrentEpochParticipation()
if err != nil {
log.WithError(err).Error("Could not get current epoch participation")
return
}
} else {
participation, err = state.PreviousEpochParticipation()
if err != nil {
log.WithError(err).Error("Could not get previous epoch participation")
return
}
}
flags := participation[idx]
hasFlag, err := altair.HasValidatorFlag(flags, sourceIdx)
if err != nil {
log.WithError(err).Error("Could not get timely Source flag")
return
}
latestPerf.timelySource = hasFlag
hasFlag, err = altair.HasValidatorFlag(flags, headIdx)
if err != nil {
log.WithError(err).Error("Could not get timely Head flag")
return
}
latestPerf.timelyHead = hasFlag
hasFlag, err = altair.HasValidatorFlag(flags, targetIdx)
if err != nil {
log.WithError(err).Error("Could not get timely Target flag")
return
}
latestPerf.timelyTarget = hasFlag
if latestPerf.timelySource {
timelySourceCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
aggregatedPerf.totalCorrectSource++
}
if latestPerf.timelyHead {
timelyHeadCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
aggregatedPerf.totalCorrectHead++
}
if latestPerf.timelyTarget {
timelyTargetCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
aggregatedPerf.totalCorrectTarget++
}
}
logFields["CorrectHead"] = latestPerf.timelyHead
logFields["CorrectSource"] = latestPerf.timelySource
logFields["CorrectTarget"] = latestPerf.timelyTarget
logFields["InclusionSlot"] = latestPerf.inclusionSlot
logFields["NewBalance"] = balance
logFields["BalanceChange"] = balanceChg
s.latestPerformance[types.ValidatorIndex(idx)] = latestPerf
s.aggregatedPerformance[types.ValidatorIndex(idx)] = aggregatedPerf
log.WithFields(logFields).Info("Attestation included")
}
}
}
// processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our
// tracked validators
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if state == nil {
log.Debug("Skipping unaggregated attestation due to state not found in cache")
return
}
attestingIndices, err := attestingIndices(ctx, state, att)
if err != nil {
log.WithError(err).Error("Could not get attesting indices")
return
}
for _, idx := range attestingIndices {
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
log.WithFields(logFields).Info("Processed unaggregated attestation")
}
}
}
// processAggregatedAttestation logs when one of our tracked validators performs an
// aggregation, or when an aggregated attestation includes one of our tracked validators.
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
if s.TrackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
"ValidatorIndex": att.AggregatorIndex,
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
aggregatedPerf.totalAggregations++
s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf
aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc()
}
var root [32]byte
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if state == nil {
log.Debug("Skipping agregated attestation due to state not found in cache")
return
}
attestingIndices, err := attestingIndices(ctx, state, att.Aggregate)
if err != nil {
log.WithError(err).Error("Could not get attesting indices")
return
}
for _, idx := range attestingIndices {
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Aggregate.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Aggregate.Data)
log.WithFields(logFields).Info("Processed aggregated attestation")
}
}
}
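The timely source/target/head checks above boil down to single-bit tests on the validator's participation byte; a simplified stand-in for altair.HasValidatorFlag, with the spec flag indices noted for context:
// hasParticipationFlag reports whether a participation flag bit is set. In the
// Altair spec the timely source, target and head flags sit at bit indices 0, 1
// and 2 of each validator's participation byte.
func hasParticipationFlag(participationByte byte, flagIndex uint8) bool {
return participationByte&(1<<flagIndex) != 0
}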

View File

@@ -1,287 +0,0 @@
package monitor
import (
"bytes"
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func setupService(t *testing.T) *Service {
beaconDB := testDB.SetupDB(t)
trackedVals := map[types.ValidatorIndex]interface{}{
1: nil,
2: nil,
12: nil,
}
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
1: {
balance: 32000000000,
},
2: {
balance: 32000000000,
},
12: {
balance: 31900000000,
},
}
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
1: {},
2: {},
12: {},
}
return &Service{
config: &ValidatorMonitorConfig{
StateGen: stategen.New(beaconDB),
TrackedValidators: trackedVals,
},
latestPerformance: latestPerformance,
aggregatedPerformance: aggregatedPerformance,
}
}
func TestGetAttestingIndices(t *testing.T) {
ctx := context.Background()
beaconState, _ := util.DeterministicGenesisState(t, 256)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
attestingIndices, err := attestingIndices(ctx, beaconState, att)
require.NoError(t, err)
require.DeepEqual(t, attestingIndices, []uint64{0xc, 0x2})
}
func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processIncludedAttestation(context.Background(), state, att)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
header := state.LatestBlockHeader()
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: header.GetStateRoot(),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processUnaggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "Skipping unaggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
root := [32]byte{}
copy(root[:], "hello-world")
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: root[:],
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processUnaggregatedAttestation(context.Background(), att)
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
header := state.LatestBlockHeader()
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
att := &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: header.GetStateRoot(),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
},
}
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
require.LogsContain(t, hook, "Skipping aggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
func TestProcessAggregatedAttestationStateCached(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
root := [32]byte{}
copy(root[:], "hello-world")
att := &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: root[:],
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
},
},
AggregationBits: bitfield.Bitlist{0b10, 0b1},
},
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
}
func TestProcessAttestations(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
ctx := context.Background()
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
block := &ethpb.BeaconBlockAltair{
Slot: 2,
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: []*ethpb.Attestation{att},
},
}
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
require.NoError(t, err)
s.processAttestations(ctx, state, wrappedBlock)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}

View File

@@ -2,46 +2,24 @@ package monitor
import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/sirupsen/logrus"
)
// ValidatorLatestPerformance keeps track of the latest participation of the validator
type ValidatorLatestPerformance struct {
attestedSlot types.Slot
inclusionSlot types.Slot
timelySource bool
timelyTarget bool
timelyHead bool
balance uint64
balanceChange uint64
}
// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the validator since launch
type ValidatorAggregatedPerformance struct {
totalAttestedCount uint64
totalRequestedCount uint64
totalDistance uint64
totalCorrectSource uint64
totalCorrectTarget uint64
totalCorrectHead uint64
totalAggregations uint64
}
var (
log = logrus.WithField("prefix", "monitor")
)
// ValidatorMonitorConfig contains the list of validator indices that the
// monitor service tracks, as well as the event feed notifier that the
// monitor needs to subscribe.
type ValidatorMonitorConfig struct {
StateGen stategen.StateManager
TrackedValidators map[types.ValidatorIndex]interface{}
}
// Service is the main structure that tracks validators and reports logs and
// metrics of their performances throughout their lifetime.
type Service struct {
config *ValidatorMonitorConfig
latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance
aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance
config *ValidatorMonitorConfig
}
// TrackedIndex returns if the given validator index corresponds to one of the

View File

@@ -26,7 +26,6 @@ go_library(
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",
"//beacon-chain/light:go_default_library",
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",

View File

@@ -28,7 +28,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
"github.com/prysmaticlabs/prysm/beacon-chain/light"
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
@@ -194,10 +193,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
if err := beacon.registerLightClientServer(); err != nil {
return nil, err
}
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}
@@ -686,11 +681,6 @@ func (b *BeaconNode) registerRPCService() error {
return err
}
var lightService *light.Service
if err := b.services.FetchService(&lightService); err != nil {
return err
}
var slasherService *slasher.Service
if features.Get().EnableSlasher {
if err := b.services.FetchService(&slasherService); err != nil {
@@ -767,7 +757,6 @@ func (b *BeaconNode) registerRPCService() error {
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
LightUpdatesFetcher: lightService,
})
return b.services.RegisterService(rpcService)
@@ -877,25 +866,3 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
}
return nil
}
func (b *BeaconNode) registerLightClientServer() error {
var chainService *blockchain.Service
if err := b.services.FetchService(&chainService); err != nil {
return err
}
var syncService *initialsync.Service
if err := b.services.FetchService(&syncService); err != nil {
return err
}
svc := light.New(b.ctx, &light.Config{
Database: b.db,
StateGen: b.stateGen,
HeadFetcher: chainService,
FinalizationFetcher: chainService,
StateNotifier: b,
TimeFetcher: chainService,
SyncChecker: syncService,
})
return b.services.RegisterService(svc)
}

View File

@@ -29,7 +29,6 @@ go_library(
"//beacon-chain/rpc/eth/validator:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/light:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/statefetcher:go_default_library",

View File

@@ -1,17 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["server.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/light",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)

View File

@@ -1,38 +0,0 @@
package light
import (
"context"
"github.com/golang/protobuf/ptypes/empty"
"github.com/prysmaticlabs/prysm/beacon-chain/light"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
type Server struct {
Fetcher light.UpdatesFetcher
}
// BestUpdates GET /eth/v1alpha1/lightclient/best_update/:periods.
func (s *Server) BestUpdates(ctx context.Context, req *ethpb.BestUpdatesRequest) (*ethpb.BestUpdatesResponse, error) {
updates := make([]*ethpb.LightClientUpdate, 0)
for _, period := range req.SyncCommitteePeriods {
update, err := s.Fetcher.BestUpdateForPeriod(ctx, period)
if err != nil {
log.Error(err)
continue
}
updates = append(updates, update)
}
return &ethpb.BestUpdatesResponse{Updates: updates}, nil
}
// LatestUpdateFinalized GET /eth/v1alpha1/lightclient/latest_update_finalized/
func (s *Server) LatestUpdateFinalized(ctx context.Context, _ *empty.Empty) (*ethpb.LightClientUpdate, error) {
return s.Fetcher.LatestFinalizedUpdate(ctx), nil
}
// LatestUpdateNonFinalized GET /eth/v1alpha1/lightclient/latest_update_nonfinalized/
func (s *Server) LatestUpdateNonFinalized(ctx context.Context, _ *empty.Empty) (*ethpb.LightClientUpdate, error) {
return s.Fetcher.LatestNonFinalizedUpdate(ctx), nil
}
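A hedged sketch of calling this server from a gRPC client; the NewLightClientClient constructor name follows the usual generated-code convention and, like the google.golang.org/grpc import, is an assumption here rather than something shown in this diff:
// queryBestUpdates asks the server above for the best updates of the given sync
// committee periods over an existing gRPC connection.
func queryBestUpdates(ctx context.Context, conn *grpc.ClientConn, periods []uint64) (*ethpb.BestUpdatesResponse, error) {
client := ethpb.NewLightClientClient(conn)
return client.BestUpdates(ctx, &ethpb.BestUpdatesRequest{SyncCommitteePeriods: periods})
}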

View File

@@ -94,12 +94,7 @@ func (vs *Server) deposits(
ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
defer span.End()
if vs.MockEth1Votes {
return []*ethpb.Deposit{}, nil
}
if !vs.Eth1InfoFetcher.IsConnectedToETH1() {
log.Warn("not connected to eth1 node, skip pending deposit insertion")
if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
return []*ethpb.Deposit{}, nil
}
// Need to fetch if the deposits up to the state's latest eth1 data matches
@@ -117,7 +112,6 @@ func (vs *Server) deposits(
// If there are no pending deposits, exit early.
allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
if len(allPendingContainers) == 0 {
log.Debug("no pending deposits for inclusion in block")
return []*ethpb.Deposit{}, nil
}
@@ -133,21 +127,21 @@ func (vs *Server) deposits(
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
pendingDeps = append(pendingDeps, dep)
}
// Don't try to pack more than the max allowed in a block
if uint64(len(pendingDeps)) == params.BeaconConfig().MaxDeposits {
break
}
}
for i := range pendingDeps {
// Don't construct merkle proof if the number of deposits is more than max allowed in block.
if uint64(i) == params.BeaconConfig().MaxDeposits {
break
}
pendingDeps[i].Deposit, err = constructMerkleProof(depositTrie, int(pendingDeps[i].Index), pendingDeps[i].Deposit)
if err != nil {
return nil, err
}
}
// Limit the return of pending deposits to not be more than max deposits allowed in block.
var pendingDeposits []*ethpb.Deposit
for i := uint64(0); i < uint64(len(pendingDeps)); i++ {
for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
}
return pendingDeposits, nil

View File

@@ -20,7 +20,6 @@ import (
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/light"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
@@ -34,7 +33,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/validator"
beaconv1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/beacon"
debugv1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/debug"
lightserver "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/light"
nodev1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/node"
validatorv1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/statefetcher"
@@ -110,7 +108,6 @@ type Config struct {
OperationNotifier opfeed.Notifier
StateGen *stategen.State
MaxMsgSize int
LightUpdatesFetcher light.UpdatesFetcher
}
// NewService instantiates a new RPC service instance that will
@@ -282,10 +279,6 @@ func (s *Service) Start() {
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
}
lightClientServer := &lightserver.Server{
Fetcher: s.cfg.LightUpdatesFetcher,
}
ethpbv1alpha1.RegisterLightClientServer(s.grpcServer, lightClientServer)
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)

View File

@@ -12,8 +12,6 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -28,7 +26,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",

View File

@@ -18,7 +18,6 @@ type FieldTrie struct {
field types.FieldIndex
dataType types.DataType
length uint64
numOfElems int
}
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
@@ -27,19 +26,18 @@ type FieldTrie struct {
func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) (*FieldTrie, error) {
if elements == nil {
return &FieldTrie{
field: field,
dataType: dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: 0,
field: field,
dataType: dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
}, nil
}
fieldRoots, err := fieldConverters(field, []uint64{}, elements, true)
if err != nil {
return nil, err
}
if err := validateElements(field, dataType, elements, length); err != nil {
if err := validateElements(field, elements, length); err != nil {
return nil, err
}
switch dataType {
@@ -55,9 +53,8 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: reflect.ValueOf(elements).Len(),
}, nil
case types.CompositeArray, types.CompressedArray:
case types.CompositeArray:
return &FieldTrie{
fieldLayers: stateutil.ReturnTrieLayerVariable(fieldRoots, length),
field: field,
@@ -65,7 +62,6 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: reflect.ValueOf(elements).Len(),
}, nil
default:
return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(dataType).Name())
@@ -96,40 +92,13 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.ValueOf(elements).Len()
return fieldRoot, nil
case types.CompositeArray:
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.ValueOf(elements).Len()
return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
case types.CompressedArray:
numOfElems, err := f.field.ElemsInChunk()
if err != nil {
return [32]byte{}, err
}
// We remove the duplicates here in order to prevent
// duplicated insertions into the trie.
newIndices := []uint64{}
indexExists := make(map[uint64]bool)
newRoots := make([][32]byte, 0, len(fieldRoots)/int(numOfElems))
for i, idx := range indices {
startIdx := idx / numOfElems
if indexExists[startIdx] {
continue
}
newIndices = append(newIndices, startIdx)
indexExists[startIdx] = true
newRoots = append(newRoots, fieldRoots[i])
}
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(newRoots, newIndices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.ValueOf(elements).Len()
return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
}
@@ -140,12 +109,11 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
func (f *FieldTrie) CopyTrie() *FieldTrie {
if f.fieldLayers == nil {
return &FieldTrie{
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
}
}
dstFieldTrie := make([][]*[32]byte, len(f.fieldLayers))
@@ -160,7 +128,6 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
}
}
@@ -172,9 +139,6 @@ func (f *FieldTrie) TrieRoot() ([32]byte, error) {
case types.CompositeArray:
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
return stateutil.AddInMixin(trieRoot, uint64(len(f.fieldLayers[0])))
case types.CompressedArray:
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
return stateutil.AddInMixin(trieRoot, uint64(f.numOfElems))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
}

View File

@@ -1,7 +1,6 @@
package fieldtrie
import (
"encoding/binary"
"fmt"
"reflect"
@@ -9,37 +8,20 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
func (f *FieldTrie) validateIndices(idxs []uint64) error {
length := f.length
if f.dataType == types.CompressedArray {
comLength, err := f.field.ElemsInChunk()
if err != nil {
return err
}
length *= comLength
}
for _, idx := range idxs {
if idx >= length {
return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, length)
if idx >= f.length {
return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, f.length)
}
}
return nil
}
func validateElements(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) error {
if dataType == types.CompressedArray {
comLength, err := field.ElemsInChunk()
if err != nil {
return err
}
length *= comLength
}
func validateElements(field types.FieldIndex, elements interface{}, length uint64) error {
val := reflect.ValueOf(elements)
if val.Len() > int(length) {
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length)
@@ -56,21 +38,21 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([][]byte{}).Name(), reflect.TypeOf(elements).Name())
}
return handleByteArrays(val, indices, convertAll)
return stateutil.HandleByteArrays(val, indices, convertAll)
case types.Eth1DataVotes:
val, ok := elements.([]*ethpb.Eth1Data)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
}
return handleEth1DataSlice(val, indices, convertAll)
return HandleEth1DataSlice(val, indices, convertAll)
case types.Validators:
val, ok := elements.([]*ethpb.Validator)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
}
return handleValidatorSlice(val, indices, convertAll)
return stateutil.HandleValidatorSlice(val, indices, convertAll)
case types.PreviousEpochAttestations, types.CurrentEpochAttestations:
val, ok := elements.([]*ethpb.PendingAttestation)
if !ok {
@@ -78,87 +60,13 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
}
return handlePendingAttestation(val, indices, convertAll)
case types.Balances:
val, ok := elements.([]uint64)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name())
}
return handleBalanceSlice(val, indices, convertAll)
default:
return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name())
}
}
// handleByteArrays computes and returns byte arrays in a slice of root format.
func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
}
roots := make([][32]byte, 0, length)
rootCreator := func(input []byte) {
newRoot := bytesutil.ToBytes32(input)
roots = append(roots, newRoot)
}
if convertAll {
for i := range val {
rootCreator(val[i])
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {
return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val))
}
rootCreator(val[idx])
}
}
return roots, nil
}
// handleValidatorSlice returns the validator indices in a slice of root format.
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
}
roots := make([][32]byte, 0, length)
hasher := hash.CustomSHA256Hasher()
rootCreator := func(input *ethpb.Validator) error {
newRoot, err := stateutil.ValidatorRootWithHasher(hasher, input)
if err != nil {
return err
}
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreator(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {
return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val))
}
err := rootCreator(val[idx])
if err != nil {
return nil, err
}
}
}
return roots, nil
}
// handleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
func handleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
// HandleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
func HandleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
@@ -233,48 +141,3 @@ func handlePendingAttestation(val []*ethpb.PendingAttestation, indices []uint64,
}
return roots, nil
}
func handleBalanceSlice(val []uint64, indices []uint64, convertAll bool) ([][32]byte, error) {
if convertAll {
balancesMarshaling := make([][]byte, 0)
for _, b := range val {
balanceBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(balanceBuf, b)
balancesMarshaling = append(balancesMarshaling, balanceBuf)
}
balancesChunks, err := ssz.PackByChunk(balancesMarshaling)
if err != nil {
return [][32]byte{}, errors.Wrap(err, "could not pack balances into chunks")
}
return balancesChunks, nil
}
if len(val) > 0 {
numOfElems, err := types.Balances.ElemsInChunk()
if err != nil {
return nil, err
}
roots := [][32]byte{}
for _, idx := range indices {
// We split the indices into their relevant groups. Balances
// are compressed at four values per 32-byte chunk.
startIdx := idx / numOfElems
startGroup := startIdx * numOfElems
chunk := [32]byte{}
sizeOfElem := len(chunk) / int(numOfElems)
for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
wantedVal := uint64(0)
// Chunks are filled in sets of 4. If the set sits at the edge of the array,
// the remainder of the chunk must be zeroed out. For example, with 41 indices,
// 41 % 4 = 1, so the final chunk has 3 positions that do not exist yet; those
// positions are written as 'zero' values.
if int(j) < len(val) {
wantedVal = val[j]
}
binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal)
}
roots = append(roots, chunk)
}
return roots, nil
}
return [][32]byte{}, nil
}
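For reference, the removed handleBalanceSlice packed balances four to a 32-byte chunk in little-endian order, zero-padding the final chunk. A minimal standalone sketch of that layout (plain Go, no Prysm imports; the packBalances name is hypothetical):

package main

import (
	"encoding/binary"
	"fmt"
)

// packBalances packs uint64 balances into 32-byte chunks, four values per
// chunk in little-endian order. The final chunk is zero-padded when the
// balance count is not a multiple of four.
func packBalances(balances []uint64) [][32]byte {
	chunks := make([][32]byte, 0, (len(balances)+3)/4)
	for start := 0; start < len(balances); start += 4 {
		var chunk [32]byte
		for i := 0; i < 4 && start+i < len(balances); i++ {
			binary.LittleEndian.PutUint64(chunk[i*8:(i+1)*8], balances[start+i])
		}
		chunks = append(chunks, chunk)
	}
	return chunks
}

func main() {
	// Five balances yield two chunks; the second carries one value and three
	// zeroed slots, matching the expectations of the removed balance tests.
	for _, c := range packBalances([]uint64{5, 2929, 34, 1291, 354305}) {
		fmt.Printf("%x\n", c)
	}
}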

View File

@@ -1,13 +1,8 @@
package fieldtrie
import (
"encoding/binary"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
)
@@ -22,64 +17,7 @@ func Test_handlePendingAttestation_OutOfRange(t *testing.T) {
func Test_handleEth1DataSlice_OutOfRange(t *testing.T) {
items := make([]*ethpb.Eth1Data, 1)
indices := []uint64{3}
_, err := handleEth1DataSlice(items, indices, false)
_, err := HandleEth1DataSlice(items, indices, false)
assert.ErrorContains(t, "index 3 greater than number of items in eth1 data slice 1", err)
}
func Test_handleValidatorSlice_OutOfRange(t *testing.T) {
vals := make([]*ethpb.Validator, 1)
indices := []uint64{3}
_, err := handleValidatorSlice(vals, indices, false)
assert.ErrorContains(t, "index 3 greater than number of validators 1", err)
}
func TestBalancesSlice_CorrectRoots_All(t *testing.T) {
balances := []uint64{5, 2929, 34, 1291, 354305}
roots, err := handleBalanceSlice(balances, []uint64{}, true)
assert.NoError(t, err)
root1 := [32]byte{}
binary.LittleEndian.PutUint64(root1[:8], balances[0])
binary.LittleEndian.PutUint64(root1[8:16], balances[1])
binary.LittleEndian.PutUint64(root1[16:24], balances[2])
binary.LittleEndian.PutUint64(root1[24:32], balances[3])
root2 := [32]byte{}
binary.LittleEndian.PutUint64(root2[:8], balances[4])
assert.DeepEqual(t, roots, [][32]byte{root1, root2})
}
func TestBalancesSlice_CorrectRoots_Some(t *testing.T) {
balances := []uint64{5, 2929, 34, 1291, 354305}
roots, err := handleBalanceSlice(balances, []uint64{2, 3}, false)
assert.NoError(t, err)
root1 := [32]byte{}
binary.LittleEndian.PutUint64(root1[:8], balances[0])
binary.LittleEndian.PutUint64(root1[8:16], balances[1])
binary.LittleEndian.PutUint64(root1[16:24], balances[2])
binary.LittleEndian.PutUint64(root1[24:32], balances[3])
// Returns a root for each index (even if duplicated).
assert.DeepEqual(t, roots, [][32]byte{root1, root1})
}
func TestValidateIndices_CompressedField(t *testing.T) {
fakeTrie := &FieldTrie{
RWMutex: new(sync.RWMutex),
reference: stateutil.NewRef(0),
fieldLayers: nil,
field: types.Balances,
dataType: types.CompressedArray,
length: params.BeaconConfig().ValidatorRegistryLimit / 4,
numOfElems: 0,
}
goodIdx := params.BeaconConfig().ValidatorRegistryLimit - 1
assert.NoError(t, fakeTrie.validateIndices([]uint64{goodIdx}))
badIdx := goodIdx + 1
assert.ErrorContains(t, "invalid index for field balances", fakeTrie.validateIndices([]uint64{badIdx}))
}
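The removed TestValidateIndices_CompressedField relied on validateIndices scaling a compressed field's stored trie length back up by its elements-per-chunk factor. A small sketch of that arithmetic, assuming for illustration the mainnet registry limit of 2^40 validators and the 4-per-chunk balance packing:

package main

import "fmt"

func main() {
	// Assumed values for illustration only.
	const validatorRegistryLimit uint64 = 1 << 40 // mainnet VALIDATOR_REGISTRY_LIMIT
	const elemsPerChunk uint64 = 4                // balances packed four per chunk

	trieLength := validatorRegistryLimit / elemsPerChunk // length stored on the trie
	effectiveLimit := trieLength * elemsPerChunk         // bound used to validate indices

	fmt.Println(validatorRegistryLimit-1 < effectiveLimit) // true: last balance index is valid
	fmt.Println(validatorRegistryLimit < effectiveLimit)   // false: one past the limit is rejected
}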

View File

@@ -37,15 +37,6 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool,
return has, nil
}
// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache.
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
if !s.hotStateCache.has(blockRoot) {
return nil
}
state := s.hotStateCache.getWithoutCopy(blockRoot)
return state
}
// StateByRoot retrieves the state using input block root.
func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot")

View File

@@ -56,44 +56,6 @@ func TestStateByRoot_ColdState(t *testing.T) {
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
}
func TestStateByRootIfCachedNoCopy_HotState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := New(beaconDB)
beaconState, _ := util.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
service.hotStateCache.put(r, beaconState)
loadedState := service.StateByRootIfCachedNoCopy(r)
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
}
func TestStateByRootIfCachedNoCopy_ColdState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := New(beaconDB)
service.finalizedInfo.slot = 2
service.slotsPerArchivedPoint = 1
b := util.NewBeaconBlock()
b.Block.Slot = 1
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, beaconState.SetSlot(1))
require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, bRoot))
require.NoError(t, service.beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
loadedState := service.StateByRootIfCachedNoCopy(bRoot)
require.NoError(t, err)
require.Equal(t, loadedState, nil)
}
func TestStateByRoot_HotStateUsingEpochBoundaryCacheNoReplay(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

View File

@@ -23,11 +23,6 @@ func NewMockService() *MockStateManager {
}
}
// StateByRootIfCachedNoCopy --
func (m *MockStateManager) StateByRootIfCachedNoCopy(_ [32]byte) state.BeaconState { // skipcq: RVV-B0013
panic("implement me")
}
// Resume --
func (m *MockStateManager) Resume(_ context.Context, _ state.BeaconState) (state.BeaconState, error) {
panic("implement me")

View File

@@ -31,7 +31,6 @@ type StateManager interface {
HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, error)
StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState
StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateBySlot(ctx context.Context, slot types.Slot) (state.BeaconState, error)
RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"array_root.go",
"block_header_root.go",
"eth1_root.go",
"participation_bit_root.go",
@@ -48,6 +49,7 @@ go_test(
"state_root_test.go",
"stateutil_test.go",
"trie_helpers_test.go",
"validator_root_test.go",
],
embed = [":go_default_library"],
deps = [

View File

@@ -0,0 +1,35 @@
package stateutil
import (
"fmt"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
// HandleByteArrays computes and returns byte arrays in a slice of root format.
func HandleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
}
roots := make([][32]byte, 0, length)
rootCreator := func(input []byte) {
newRoot := bytesutil.ToBytes32(input)
roots = append(roots, newRoot)
}
if convertAll {
for i := range val {
rootCreator(val[i])
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {
return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val))
}
rootCreator(val[idx])
}
}
return roots, nil
}
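A hedged usage sketch of the relocated helper, assuming the stateutil import path shown elsewhere in this diff: with convertAll set, every element is rooted; otherwise only the requested indices are.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
)

func main() {
	vals := [][]byte{{0x01}, {0x02}, {0x03}}

	// Root every element.
	all, err := stateutil.HandleByteArrays(vals, nil, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(all)) // 3

	// Root only index 2.
	some, err := stateutil.HandleByteArrays(vals, []uint64{2}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(some)) // 1
}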

View File

@@ -2,6 +2,7 @@ package stateutil
import (
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
@@ -126,3 +127,42 @@ func ValidatorEncKey(validator *ethpb.Validator) []byte {
return enc
}
// HandleValidatorSlice returns the validator indices in a slice of root format.
func HandleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
}
roots := make([][32]byte, 0, length)
hasher := hash.CustomSHA256Hasher()
rootCreator := func(input *ethpb.Validator) error {
newRoot, err := ValidatorRootWithHasher(hasher, input)
if err != nil {
return err
}
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreator(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {
return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val))
}
err := rootCreator(val[idx])
if err != nil {
return nil, err
}
}
}
return roots, nil
}

View File

@@ -0,0 +1,15 @@
package stateutil
import (
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
)
func Test_handleValidatorSlice_OutOfRange(t *testing.T) {
vals := make([]*ethpb.Validator, 1)
indices := []uint64{3}
_, err := HandleValidatorSlice(vals, indices, false)
assert.ErrorContains(t, "index 3 greater than number of validators 1", err)
}

View File

@@ -5,10 +5,7 @@ go_library(
srcs = ["types.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/types",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
deps = ["//runtime/version:go_default_library"],
)
go_test(

View File

@@ -1,7 +1,6 @@
package types
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/runtime/version"
)
@@ -19,10 +18,6 @@ const (
// CompositeArray represents a variable length array with
// a non primitive type.
CompositeArray
// CompressedArray represents a variable length array which
// can pack multiple elements into a leaf of the underlying
// trie.
CompressedArray
)
// String returns the name of the field index.
@@ -89,17 +84,6 @@ func (f FieldIndex) String(stateVersion int) string {
}
}
// ElemsInChunk returns the number of elements in the chunk (number of
// elements that are able to be packed).
func (f FieldIndex) ElemsInChunk() (uint64, error) {
switch f {
case Balances:
return 4, nil
default:
return 0, errors.Errorf("field %d doesn't support element compression", f)
}
}
// Below we define a set of useful enum values for the field
// indices of the beacon state. For example, genesisTime is the
// 0th field of the beacon state. This is helpful when we are
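The removed ElemsInChunk returned 4 for the balances field because a trie leaf is one 32-byte chunk and each balance is an 8-byte uint64; a one-line check of that arithmetic:

package main

import "fmt"

func main() {
	const chunkSize = 32  // bytes in one trie leaf
	const balanceSize = 8 // bytes in one uint64 balance
	fmt.Println(chunkSize / balanceSize) // 4 balances per chunk
}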

View File

@@ -87,7 +87,6 @@ go_test(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -5,7 +5,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
@@ -173,10 +172,6 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin
if b.rebuildTrie[index] {
return
}
// Exit early if balance trie computation isn't enabled.
if !features.Get().EnableBalanceTrieComputation && index == balances {
return
}
totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
if totalIndicesLen > indicesLimit {
b.rebuildTrie[index] = true

View File

@@ -103,7 +103,6 @@ func (b *BeaconState) SetBalances(val []uint64) error {
b.state.Balances = val
b.markFieldAsDirty(balances)
b.rebuildTrie[balances] = true
return nil
}
@@ -129,7 +128,6 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64
bals[idx] = val
b.state.Balances = bals
b.markFieldAsDirty(balances)
b.addDirtyIndices(balances, []uint64{uint64(idx)})
return nil
}
@@ -221,8 +219,6 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
}
b.state.Balances = append(bals, bal)
balIdx := len(b.state.Balances) - 1
b.markFieldAsDirty(balances)
b.addDirtyIndices(balances, []uint64{uint64(balIdx)})
return nil
}

View File

@@ -1,13 +1,10 @@
package v1
import (
"context"
"strconv"
"sync"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -107,90 +104,3 @@ func TestStateTrie_IsNil(t *testing.T) {
nonNilState := &BeaconState{state: &ethpb.BeaconState{}}
assert.Equal(t, false, nonNilState.IsNil())
}
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)
bals := make([]uint64, 0, count)
for i := uint64(1); i < count; i++ {
someRoot := [32]byte{}
someKey := [48]byte{}
copy(someRoot[:], strconv.Itoa(int(i)))
copy(someKey[:], strconv.Itoa(int(i)))
vals = append(vals, &ethpb.Validator{
PublicKey: someKey[:],
WithdrawalCredentials: someRoot[:],
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: 1,
ActivationEpoch: 1,
ExitEpoch: 1,
WithdrawableEpoch: 1,
})
bals = append(bals, params.BeaconConfig().MaxEffectiveBalance)
}
zeroHash := params.BeaconConfig().ZeroHash
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockblockRoots); i++ {
mockblockRoots[i] = zeroHash[:]
}
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockstateRoots); i++ {
mockstateRoots[i] = zeroHash[:]
}
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(mockrandaoMixes); i++ {
mockrandaoMixes[i] = zeroHash[:]
}
var pubKeys [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
}
st, err := InitializeFromProto(&ethpb.BeaconState{
Slot: 1,
GenesisValidatorsRoot: make([]byte, 32),
Fork: &ethpb.Fork{
PreviousVersion: make([]byte, 4),
CurrentVersion: make([]byte, 4),
Epoch: 0,
},
LatestBlockHeader: &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Validators: vals,
Balances: bals,
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
BlockRoots: mockblockRoots,
StateRoots: mockstateRoots,
RandaoMixes: mockrandaoMixes,
JustificationBits: bitfield.NewBitvector4(),
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
})
assert.NoError(t, err)
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
for i := 0; i < 100; i++ {
if i%2 == 0 {
assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000))
}
if i%3 == 0 {
assert.NoError(t, st.AppendBalance(1000))
}
}
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances])
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances)
assert.NoError(t, err)
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/slice"
"github.com/prysmaticlabs/prysm/crypto/hash"
@@ -320,20 +319,6 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
}
return b.recomputeFieldTrie(validators, b.state.Validators)
case balances:
if features.Get().EnableBalanceTrieComputation {
if b.rebuildTrie[field] {
maxBalCap := params.BeaconConfig().ValidatorRegistryLimit
elemSize := uint64(8)
balLimit := (maxBalCap*elemSize + 31) / 32
err := b.resetFieldTrie(field, b.state.Balances, balLimit)
if err != nil {
return [32]byte{}, err
}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(balances, b.state.Balances)
}
return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
case randaoMixes:
if b.rebuildTrie[field] {
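The removed balances branch sized its field trie from the registry limit: with 8-byte elements packed into 32-byte leaves, the leaf count is the ceiling of limit*8/32. A standalone sketch of that computation, assuming the mainnet limit of 2^40 for illustration:

package main

import "fmt"

func main() {
	// Assumed mainnet value for illustration.
	maxBalCap := uint64(1) << 40 // ValidatorRegistryLimit
	elemSize := uint64(8)        // bytes per uint64 balance

	// Same ceiling division the removed branch used to size the balance trie.
	balLimit := (maxBalCap*elemSize + 31) / 32

	fmt.Println(balLimit)                // 274877906944 leaves (2^38)
	fmt.Println(balLimit == maxBalCap/4) // true: four balances per 32-byte leaf
}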

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -17,12 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestMain(m *testing.M) {
resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
defer resetCfg()
m.Run()
}
func TestInitializeFromProto(t *testing.T) {
testState, _ := util.DeterministicGenesisState(t, 64)
pbState, err := v1.ProtobufBeaconState(testState.InnerStateUnsafe())

View File

@@ -17,6 +17,7 @@ var _ state.BeaconState = (*BeaconState)(nil)
func init() {
fieldMap = make(map[types.FieldIndex]types.DataType, params.BeaconConfig().BeaconStateFieldCount)
// Initialize the fixed sized arrays.
fieldMap[types.BlockRoots] = types.BasicArray
fieldMap[types.StateRoots] = types.BasicArray
@@ -27,7 +28,6 @@ func init() {
fieldMap[types.Validators] = types.CompositeArray
fieldMap[types.PreviousEpochAttestations] = types.CompositeArray
fieldMap[types.CurrentEpochAttestations] = types.CompositeArray
fieldMap[types.Balances] = types.CompressedArray
}
// fieldMap keeps track of each field

View File

@@ -79,13 +79,11 @@ go_test(
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -5,7 +5,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
@@ -172,10 +171,6 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin
if b.rebuildTrie[index] {
return
}
// Exit early if balance trie computation isn't enabled.
if !features.Get().EnableBalanceTrieComputation && index == balances {
return
}
totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
if totalIndicesLen > indicesLimit {
b.rebuildTrie[index] = true

View File

@@ -2,15 +2,10 @@ package v2
import (
"context"
"strconv"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -62,100 +57,3 @@ func TestAppendBeyondIndicesLimit(t *testing.T) {
assert.Equal(t, true, st.rebuildTrie[validators])
assert.Equal(t, len(st.dirtyIndices[validators]), 0)
}
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)
bals := make([]uint64, 0, count)
for i := uint64(1); i < count; i++ {
someRoot := [32]byte{}
someKey := [48]byte{}
copy(someRoot[:], strconv.Itoa(int(i)))
copy(someKey[:], strconv.Itoa(int(i)))
vals = append(vals, &ethpb.Validator{
PublicKey: someKey[:],
WithdrawalCredentials: someRoot[:],
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: 1,
ActivationEpoch: 1,
ExitEpoch: 1,
WithdrawableEpoch: 1,
})
bals = append(bals, params.BeaconConfig().MaxEffectiveBalance)
}
zeroHash := params.BeaconConfig().ZeroHash
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockblockRoots); i++ {
mockblockRoots[i] = zeroHash[:]
}
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockstateRoots); i++ {
mockstateRoots[i] = zeroHash[:]
}
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(mockrandaoMixes); i++ {
mockrandaoMixes[i] = zeroHash[:]
}
var pubKeys [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
}
st, err := InitializeFromProto(&ethpb.BeaconStateAltair{
Slot: 1,
GenesisValidatorsRoot: make([]byte, 32),
Fork: &ethpb.Fork{
PreviousVersion: make([]byte, 4),
CurrentVersion: make([]byte, 4),
Epoch: 0,
},
LatestBlockHeader: &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
CurrentEpochParticipation: []byte{},
PreviousEpochParticipation: []byte{},
Validators: vals,
Balances: bals,
Eth1Data: &eth.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
BlockRoots: mockblockRoots,
StateRoots: mockstateRoots,
RandaoMixes: mockrandaoMixes,
JustificationBits: bitfield.NewBitvector4(),
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
CurrentSyncCommittee: &ethpb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, 48),
},
NextSyncCommittee: &ethpb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, 48),
},
})
assert.NoError(t, err)
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
for i := 0; i < 100; i++ {
if i%2 == 0 {
assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000))
}
if i%3 == 0 {
assert.NoError(t, st.AppendBalance(1000))
}
}
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances])
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances)
assert.NoError(t, err)
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
}

View File

@@ -102,7 +102,6 @@ func (b *BeaconState) SetBalances(val []uint64) error {
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
b.state.Balances = val
b.rebuildTrie[balances] = true
b.markFieldAsDirty(balances)
return nil
}
@@ -129,7 +128,6 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64
bals[idx] = val
b.state.Balances = bals
b.markFieldAsDirty(balances)
b.addDirtyIndices(balances, []uint64{uint64(idx)})
return nil
}
@@ -221,9 +219,7 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
}
b.state.Balances = append(bals, bal)
balIdx := len(b.state.Balances) - 1
b.markFieldAsDirty(balances)
b.addDirtyIndices(balances, []uint64{uint64(balIdx)})
return nil
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/slice"
"github.com/prysmaticlabs/prysm/crypto/hash"
@@ -325,20 +324,6 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
}
return b.recomputeFieldTrie(validators, b.state.Validators)
case balances:
if features.Get().EnableBalanceTrieComputation {
if b.rebuildTrie[field] {
maxBalCap := params.BeaconConfig().ValidatorRegistryLimit
elemSize := uint64(8)
balLimit := (maxBalCap*elemSize + 31) / 32
err := b.resetFieldTrie(field, b.state.Balances, balLimit)
if err != nil {
return [32]byte{}, err
}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(balances, b.state.Balances)
}
return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
case randaoMixes:
if b.rebuildTrie[field] {

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -14,12 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestMain(m *testing.M) {
resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
defer resetCfg()
m.Run()
}
func TestValidatorMap_DistinctCopy(t *testing.T) {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)

View File

@@ -22,9 +22,6 @@ func init() {
// Initialize the composite arrays.
fieldMap[types.Eth1DataVotes] = types.CompositeArray
fieldMap[types.Validators] = types.CompositeArray
// Initialize Compressed Arrays
fieldMap[types.Balances] = types.CompressedArray
}
// fieldMap keeps track of each field

View File

@@ -1,34 +1,16 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"field_root_eth1.go",
"field_root_validator.go",
"field_root_vector.go",
"field_roots.go",
"types.go",
],
srcs = ["types.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3",
visibility = ["//beacon-chain:__pkg__"],
deps = [
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["field_root_test.go"],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
)

View File

@@ -1,59 +0,0 @@
package v3
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// eth1Root computes the HashTreeRoot Merkleization of
// an Eth1Data struct according to the eth2
// Simple Serialize specification.
func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
if eth1Data == nil {
return [32]byte{}, errors.New("nil eth1 data")
}
enc := stateutil.Eth1DataEncKey(eth1Data)
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(enc), root, 32)
}
return root, nil
}
// eth1DataVotesRoot computes the HashTreeRoot Merkleization of
// a list of Eth1Data structs according to the eth2
// Simple Serialize specification.
func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DatasRoot(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
}
return root, nil
}

View File

@@ -1,23 +0,0 @@
package v3
import (
"testing"
"github.com/prysmaticlabs/prysm/testing/assert"
)
func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) {
_, err := nocachedHasher.arraysRoot([][]byte{}, 1, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 4, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 8, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 10, "testing")
assert.ErrorContains(t, "hash layer is a non power of 2", err)
}
func TestArraysTreeRoot_ZeroLength(t *testing.T) {
_, err := nocachedHasher.arraysRoot([][]byte{}, 0, "testing")
assert.ErrorContains(t, "zero leaves provided", err)
}

View File

@@ -1,89 +0,0 @@
package v3
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
hashKeyElements := make([]byte, len(validators)*32)
roots := make([][32]byte, len(validators))
emptyKey := hash.FastSum256(hashKeyElements)
hasher := hash.CustomSHA256Hasher()
bytesProcessed := 0
for i := 0; i < len(validators); i++ {
val, err := h.validatorRoot(hasher, validators[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
}
copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:])
roots[i] = val
bytesProcessed += 32
}
hashKey := hash.FastSum256(hashKeyElements)
if hashKey != emptyKey && h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok {
return found.([32]byte), nil
}
}
validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute validator registry merkleization")
}
validatorsRootsBuf := new(bytes.Buffer)
if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length")
}
// We need to mix in the length of the slice.
var validatorsRootsBufRoot [32]byte
copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes())
res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:])
if hashKey != emptyKey && h.rootsCache != nil {
h.rootsCache.Set(string(hashKey[:]), res, 32)
}
return res, nil
}
func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) {
if validator == nil {
return [32]byte{}, errors.New("nil validator")
}
enc := stateutil.ValidatorEncKey(validator)
// Check if it exists in cache:
if h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
return found.([32]byte), nil
}
}
valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
h.rootsCache.Set(string(enc), valRoot, 32)
}
return valRoot, nil
}
// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of
// a list of validator structs according to the eth2
// Simple Serialize specification.
func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.validatorRegistryRoot(vals)
}
return nocachedHasher.validatorRegistryRoot(vals)
}
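The removed validatorRegistryRoot finishes by mixing the registry length into the merkleized roots, i.e. hashing the root together with the length encoded as a little-endian 32-byte chunk. A standalone sketch of that mix-in step over plain sha256 (not the Prysm ssz package):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// mixInLength returns hash(root || littleEndian(length) padded to 32 bytes),
// the SSZ length mix-in applied to list roots such as the validator registry.
func mixInLength(root [32]byte, length uint64) [32]byte {
	var lenChunk [32]byte
	binary.LittleEndian.PutUint64(lenChunk[:8], length)
	return sha256.Sum256(append(root[:], lenChunk[:]...))
}

func main() {
	var registryRoot [32]byte // placeholder root for illustration
	fmt.Printf("%x\n", mixInLength(registryRoot, 3))
}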

View File

@@ -1,146 +0,0 @@
package v3
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/ssz"
)
func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) {
lock.Lock()
defer lock.Unlock()
hashFunc := hash.CustomSHA256Hasher()
if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil {
depth := ssz.Depth(length)
layersCache[fieldName] = make([][][32]byte, depth+1)
}
leaves := make([][32]byte, length)
for i, chunk := range input {
copy(leaves[i][:], chunk)
}
bytesProcessed := 0
changedIndices := make([]int, 0)
prevLeaves, ok := leavesCache[fieldName]
if len(prevLeaves) == 0 || h.rootsCache == nil {
prevLeaves = leaves
}
for i := 0; i < len(leaves); i++ {
// We check if any items changed since the roots were last recomputed.
notEqual := leaves[i] != prevLeaves[i]
if ok && h.rootsCache != nil && notEqual {
changedIndices = append(changedIndices, i)
}
bytesProcessed += 32
}
if len(changedIndices) > 0 && h.rootsCache != nil {
var rt [32]byte
var err error
// If indices did change since last computation, we only recompute
// the modified branches in the cached Merkle tree for this state field.
chunks := leaves
// We need to ensure we recompute indices of the Merkle tree which
// changed in-between calls to this function. This check adds an offset
// to the recomputed indices to ensure we do so evenly.
maxChangedIndex := changedIndices[len(changedIndices)-1]
if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 {
changedIndices = append(changedIndices, maxChangedIndex+1)
}
for i := 0; i < len(changedIndices); i++ {
rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc)
if err != nil {
return [32]byte{}, err
}
}
leavesCache[fieldName] = chunks
return rt, nil
}
res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
leavesCache[fieldName] = leaves
}
return res, nil
}
func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
items, ok := layersCache[fieldName]
if !ok {
return [32]byte{}, errors.New("could not recompute root as there was no cache found")
}
if items == nil {
return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache")
}
layers := items
root := chunks[idx]
layers[0] = chunks
// The merkle tree structure looks as follows:
// [[r1, r2, r3, r4], [parent1, parent2], [root]]
// Using information about the index which changed, idx, we recompute
// only its branch up the tree.
currentIndex := idx
for i := 0; i < len(layers)-1; i++ {
isLeft := currentIndex%2 == 0
neighborIdx := currentIndex ^ 1
neighbor := [32]byte{}
if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
neighbor = layers[i][neighborIdx]
}
if isLeft {
parentHash := hasher(append(root[:], neighbor[:]...))
root = parentHash
} else {
parentHash := hasher(append(neighbor[:], root[:]...))
root = parentHash
}
parentIdx := currentIndex / 2
// Update the cached layers at the parent index.
if len(layers[i+1]) == 0 {
layers[i+1] = append(layers[i+1], root)
} else {
layers[i+1][parentIdx] = root
}
currentIndex = parentIdx
}
layersCache[fieldName] = layers
// If there is only a single leaf, we return it (the identity element).
if len(layers[0]) == 1 {
return layers[0][0], nil
}
return root, nil
}
func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
if len(leaves) == 0 {
return [32]byte{}, errors.New("zero leaves provided")
}
if len(leaves) == 1 {
return leaves[0], nil
}
hashLayer := leaves
layers := make([][][32]byte, ssz.Depth(length)+1)
if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil {
if len(items[0]) == len(leaves) {
layers = items
}
}
layers[0] = hashLayer
var err error
layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher)
if err != nil {
return [32]byte{}, err
}
root := hashLayer[0]
if h.rootsCache != nil {
layersCache[fieldName] = layers
}
return root, nil
}
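The removed recomputeRoot walks only the branch of a cached Merkle tree that a changed leaf touches, hashing with the sibling at each layer and updating the parent entry in the layer cache. A standalone sketch of that walk over plain sha256 (not the Prysm hasher or layers cache):

package main

import (
	"crypto/sha256"
	"fmt"
)

// recomputeBranch recomputes only the branch of a cached Merkle tree that a
// single leaf touches. layers[0] holds the leaves; each parent layer is half
// the size of the one below it and is updated in place.
func recomputeBranch(layers [][][32]byte, idx int) [32]byte {
	root := layers[0][idx]
	for i := 0; i < len(layers)-1; i++ {
		sibling := layers[i][idx^1]
		if idx%2 == 0 {
			root = sha256.Sum256(append(root[:], sibling[:]...))
		} else {
			root = sha256.Sum256(append(sibling[:], root[:]...))
		}
		idx /= 2
		layers[i+1][idx] = root
	}
	return root
}

func main() {
	// Four leaves, two parents, one root: [[l0 l1 l2 l3] [p0 p1] [r]].
	leaves := [][32]byte{{1}, {2}, {3}, {4}}
	layers := [][][32]byte{leaves, make([][32]byte, 2), make([][32]byte, 1)}
	// Populate the layer cache by recomputing every leaf's branch once.
	for i := range leaves {
		recomputeBranch(layers, i)
	}
	full := layers[2][0]
	// Change leaf 2 and recompute only its branch.
	layers[0][2] = [32]byte{9}
	fmt.Println(recomputeBranch(layers, 2) != full) // true: root changed
}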

View File

@@ -1,226 +0,0 @@
package v3
import (
"encoding/binary"
"sync"
"github.com/dgraph-io/ristretto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
var (
leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
lock sync.RWMutex
)
const cacheSize = 100000
var nocachedHasher *stateRootHasher
var cachedHasher *stateRootHasher
func init() {
rootsCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cacheSize, // number of keys to track frequency of (1M).
MaxCost: 1 << 22, // maximum cost of cache (3MB).
// 100,000 roots will take up approximately 3 MB in memory.
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
panic(err)
}
// Temporarily disable roots cache until cache issues can be resolved.
cachedHasher = &stateRootHasher{rootsCache: rootsCache}
nocachedHasher = &stateRootHasher{}
}
type stateRootHasher struct {
rootsCache *ristretto.Cache
}
// computeFieldRoots returns the hash tree root computations of every field in
// the beacon state as a list of 32 byte roots.
//nolint:deadcode
func computeFieldRoots(state *ethpb.BeaconStateMerge) ([][]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.computeFieldRootsWithHasher(state)
}
return nocachedHasher.computeFieldRootsWithHasher(state)
}
func (h *stateRootHasher) computeFieldRootsWithHasher(state *ethpb.BeaconStateMerge) ([][]byte, error) {
if state == nil {
return nil, errors.New("nil state")
}
hasher := hash.CustomSHA256Hasher()
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
// Genesis time root.
genesisRoot := ssz.Uint64Root(state.GenesisTime)
fieldRoots[0] = genesisRoot[:]
// Genesis validator root.
r := [32]byte{}
copy(r[:], state.GenesisValidatorsRoot)
fieldRoots[1] = r[:]
// Slot root.
slotRoot := ssz.Uint64Root(uint64(state.Slot))
fieldRoots[2] = slotRoot[:]
// Fork data structure root.
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
if err != nil {
return nil, errors.Wrap(err, "could not compute fork merkleization")
}
fieldRoots[3] = forkHashTreeRoot[:]
// BeaconBlockHeader data structure root.
headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not compute block header merkleization")
}
fieldRoots[4] = headerHashTreeRoot[:]
// BlockRoots array root.
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute block roots merkleization")
}
fieldRoots[5] = blockRootsRoot[:]
// StateRoots array root.
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute state roots merkleization")
}
fieldRoots[6] = stateRootsRoot[:]
// HistoricalRoots slice root.
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
}
fieldRoots[7] = historicalRootsRt[:]
// Eth1Data data structure root.
eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
}
fieldRoots[8] = eth1HashTreeRoot[:]
// Eth1DataVotes slice root.
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
}
fieldRoots[9] = eth1VotesRoot[:]
// Eth1DepositIndex root.
eth1DepositIndexBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
fieldRoots[10] = eth1DepositBuf[:]
// Validators slice root.
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
}
fieldRoots[11] = validatorsRoot[:]
// Balances slice root.
balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
}
fieldRoots[12] = balancesRoot[:]
// RandaoMixes array root.
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
if err != nil {
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
}
fieldRoots[13] = randaoRootsRoot[:]
// Slashings array root.
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
if err != nil {
return nil, errors.Wrap(err, "could not compute slashings merkleization")
}
fieldRoots[14] = slashingsRootsRoot[:]
// PreviousEpochParticipation slice root.
prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization")
}
fieldRoots[15] = prevParticipationRoot[:]
// CurrentEpochParticipation slice root.
currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute current epoch participation merkleization")
}
fieldRoots[16] = currParticipationRoot[:]
// JustificationBits root.
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
fieldRoots[17] = justifiedBitsRoot[:]
// PreviousJustifiedCheckpoint data structure root.
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
}
fieldRoots[18] = prevCheckRoot[:]
// CurrentJustifiedCheckpoint data structure root.
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
}
fieldRoots[19] = currJustRoot[:]
// FinalizedCheckpoint data structure root.
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
}
fieldRoots[20] = finalRoot[:]
// Inactivity scores root.
inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores)
if err != nil {
return nil, errors.Wrap(err, "could not compute inactivityScoreRoot")
}
fieldRoots[21] = inactivityScoresRoot[:]
// Current sync committee root.
currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[22] = currentSyncCommitteeRoot[:]
// Next sync committee root.
nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[23] = nextSyncCommitteeRoot[:]
// Execution payload root.
//TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65
fieldRoots[24] = []byte{}
return fieldRoots, nil
}

View File

@@ -88,29 +88,16 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
continue
}
s.pendingQueueLock.RLock()
inPendingQueue := s.seenPendingBlocks[bytesutil.ToBytes32(b.Block().ParentRoot())]
s.pendingQueueLock.RUnlock()
blkRoot, err := b.Block().HashTreeRoot()
if err != nil {
tracing.AnnotateError(span, err)
span.End()
return err
}
inDB := s.cfg.beaconDB.HasBlock(ctx, blkRoot)
// No need to process the same block twice.
if inDB {
s.pendingQueueLock.Lock()
if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil {
s.pendingQueueLock.Unlock()
return err
}
s.pendingQueueLock.Unlock()
span.End()
continue
}
s.pendingQueueLock.RLock()
inPendingQueue := s.seenPendingBlocks[bytesutil.ToBytes32(b.Block().ParentRoot())]
s.pendingQueueLock.RUnlock()
parentIsBad := s.hasBadBlock(bytesutil.ToBytes32(b.Block().ParentRoot()))
blockIsBad := s.hasBadBlock(blkRoot)
// Check if parent is a bad block.
@@ -130,12 +117,12 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
continue
}
parentInDb := s.cfg.beaconDB.HasBlock(ctx, bytesutil.ToBytes32(b.Block().ParentRoot()))
inDB := s.cfg.beaconDB.HasBlock(ctx, bytesutil.ToBytes32(b.Block().ParentRoot()))
hasPeer := len(pids) != 0
// Only request a missing parent block if it's not in beaconDB, not in the pending cache,
// and there is a peer in the peer list.
if !inPendingQueue && !parentInDb && hasPeer {
if !inPendingQueue && !inDB && hasPeer {
log.WithFields(logrus.Fields{
"currentSlot": b.Block().Slot(),
"parentRoot": hex.EncodeToString(bytesutil.Trunc(b.Block().ParentRoot())),
@@ -146,7 +133,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
continue
}
if !parentInDb {
if !inDB {
span.End()
continue
}
@@ -180,7 +167,6 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
s.pendingQueueLock.Lock()
if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil {
s.pendingQueueLock.Unlock()
return err
}
s.pendingQueueLock.Unlock()
@@ -335,7 +321,6 @@ func (s *Service) deleteBlockFromPendingQueue(slot types.Slot, b block.SignedBea
}
if len(newBlks) == 0 {
s.slotToPendingBlocks.Delete(slotToCacheKey(slot))
delete(s.seenPendingBlocks, r)
return nil
}
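A small sketch of the request predicate after this change: a missing parent is fetched from peers only when it is neither pending nor already in the database and at least one peer is connected. This is a standalone illustration, not the sync service API.

package main

import "fmt"

// shouldRequestParent reports whether a pending block's missing parent should
// be requested from the network.
func shouldRequestParent(inPendingQueue, inDB bool, peerCount int) bool {
	return !inPendingQueue && !inDB && peerCount > 0
}

func main() {
	fmt.Println(shouldRequestParent(false, false, 3)) // true: fetch the parent
	fmt.Println(shouldRequestParent(false, true, 3))  // false: parent already stored
}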

View File

@@ -84,13 +84,9 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b1), b1Root))
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b1)))
nBlock := util.NewBeaconBlock()
nBlock.Block.Slot = b1.Block.Slot
nRoot, err := nBlock.Block.HashTreeRoot()
require.NoError(t, err)
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(nBlock), nRoot))
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), [32]byte{}))
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
@@ -144,46 +140,6 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) {
}
func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) {
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
r := &Service{
cfg: &config{
p2p: p1,
beaconDB: db,
chain: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
},
},
stateGen: stategen.New(db),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
}
r.initCaches()
b0 := util.NewBeaconBlock()
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b0)))
b0Root, err := b0.Block.HashTreeRoot()
require.NoError(t, err)
b3 := util.NewBeaconBlock()
b3.Block.Slot = 3
b3.Block.ParentRoot = b0Root[:]
b3Root, err := b3.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b3)))
// Add b3 to the cache
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b3), b3Root))
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}
// /- b1 - b2 - b5
// b0
// \- b3 - b4
@@ -281,7 +237,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
assert.Equal(t, 3, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
// Add b2 to the cache
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b2), b2Root))
@@ -292,7 +248,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}
func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
@@ -362,7 +318,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
require.NoError(t, r.processPendingBlocks(context.Background()))
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}
func TestService_sortedPendingSlots(t *testing.T) {
@@ -473,7 +429,7 @@ func TestService_BatchRootRequest(t *testing.T) {
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}
func TestService_AddPendingBlockToQueueOverMax(t *testing.T) {
func TestService_AddPeningBlockToQueueOverMax(t *testing.T) {
r := &Service{
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),

View File

@@ -7,6 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//cmd:go_default_library",
"//runtime/tos:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -2,6 +2,7 @@ package db
import (
beacondb "github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/runtime/tos"
"github.com/sirupsen/logrus"
@@ -31,5 +32,19 @@ var Commands = &cli.Command{
return nil
},
},
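// migrate-state runs the kv.MigrateStateDB migration against the beacon node database located under the directory given by cmd.DataDirFlag.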
{
Name: "migrate-state",
Description: `migrate the state database to a new format`,
Flags: cmd.WrapFlags([]cli.Flag{
cmd.DataDirFlag,
}),
Before: tos.VerifyTosAcceptedOrPrompt,
Action: func(cliCtx *cli.Context) error {
if err := kv.MigrateStateDB(cliCtx); err != nil {
log.Fatalf("Could not migrate the database: %v", err)
}
return nil
},
},
},
}

View File

@@ -1,50 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"main.go",
"process_update.go",
"server.go",
"validate_update.go",
],
importpath = "github.com/prysmaticlabs/prysm/cmd/light",
visibility = ["//visibility:private"],
deps = [
"//beacon-chain/core/signing:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls/blst:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],
)
go_binary(
name = "light",
embed = [":go_default_library"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,3 +0,0 @@
package main
type Client struct{}

View File

@@ -1,25 +0,0 @@
package main
import (
"fmt"
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestProveCheckpoint(t *testing.T) {
root := [32]byte{1}
check := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
tr, err := check.GetTree()
require.NoError(t, err)
a, err := tr.Get(0)
require.NoError(t, err)
b, err := tr.Get(1)
require.NoError(t, err)
fmt.Println(a.Hash())
fmt.Println(b.Hash())
}

View File

@@ -1,71 +0,0 @@
package main
import (
"context"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
v2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)
// Precomputed values for generalized indices.
const (
FinalizedRootIndex = 105
FinalizedRootIndexFloorLog2 = 6
NextSyncCommitteeIndex = 55
NextSyncCommitteeIndexFloorLog2 = 5
)
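// For reference: floor(log2(105)) = 6 and floor(log2(55)) = 5, which is where the
// *FloorLog2 values above come from; they are the Merkle branch depths used when
// verifying the finalized root and next sync committee proofs.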
var log = logrus.WithField("prefix", "light")
type LightClientSnapshot struct {
Header *v1.BeaconBlockHeader
CurrentSyncCommittee *v2.SyncCommittee
NextSyncCommittee *v2.SyncCommittee
}
type LightClientUpdate struct {
Header *v1.BeaconBlockHeader
NextSyncCommittee *v2.SyncCommittee
NextSyncCommitteeBranch [NextSyncCommitteeIndexFloorLog2][32]byte
FinalityHeader *v1.BeaconBlockHeader
FinalityBranch [FinalizedRootIndexFloorLog2][32]byte
SyncCommitteeBits bitfield.Bitvector512
SyncCommitteeSignature [96]byte
ForkVersion *v1alpha1.Version
}
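// Store tracks the light client's current snapshot together with updates that have been validated but not yet applied.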
type Store struct {
Snapshot *LightClientSnapshot
ValidUpdates []*LightClientUpdate
}
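// main connects to a local beacon node over gRPC, fetches the latest finalized light client update, and checks the next sync committee Merkle proof against the update's state root.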
func main() {
conn, err := grpc.Dial("localhost:4000", grpc.WithInsecure())
if err != nil {
log.Fatalf("fail to dial: %v", err)
}
ctx := context.Background()
lesClient := v1alpha1.NewLightClientClient(conn)
update, err := lesClient.LatestUpdateFinalized(ctx, &emptypb.Empty{})
if err != nil {
log.Fatalf("could not get latest update: %v", err)
}
// Attempt to verify a merkle proof of the next sync committee branch vs. the state root.
root := bytesutil.ToBytes32(update.Header.StateRoot)
leaf, err := update.NextSyncCommittee.HashTreeRoot()
if err != nil {
log.Fatalf("could not hash tree root: %v", err)
}
log.Infof("Verifying proof with root %#x, leaf %#x", root, leaf)
validProof := ssz.VerifyProof(root, update.NextSyncCommitteeBranch, leaf, NextSyncCommitteeIndex)
if !validProof {
log.Error("could not verify merkle proof")
}
}

View File

@@ -1,54 +0,0 @@
package main
import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/time/slots"
)
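// applyLightClientUpdate folds a validated update into the snapshot, rotating the sync committee when the update crosses into the next sync committee period.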
func applyLightClientUpdate(snapshot *LightClientSnapshot, update *LightClientUpdate) {
snapshotPeriod := slots.ToEpoch(snapshot.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
updatePeriod := slots.ToEpoch(update.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
if updatePeriod == snapshotPeriod+1 {
snapshot.CurrentSyncCommittee = snapshot.NextSyncCommittee
snapshot.NextSyncCommittee = update.NextSyncCommittee
}
snapshot.Header = update.Header
}
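// processLightClientUpdate validates an incoming update and stores it. When at least 2/3 of the
// sync committee participated and the update carries a finality proof, it is applied immediately;
// otherwise, once the update timeout elapses, the stored update with the highest participation is applied.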
func processLightClientUpdate(
store *Store,
update *LightClientUpdate,
currentSlot types.Slot,
genesisValidatorsRoot [32]byte,
) error {
if err := validateLightClientUpdate(store.Snapshot, update, genesisValidatorsRoot); err != nil {
return err
}
store.ValidUpdates = append(store.ValidUpdates, update)
updateTimeout := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)
sumParticipantBits := update.SyncCommitteeBits.Count()
hasQuorum := sumParticipantBits*3 >= update.SyncCommitteeBits.Len()*2
if hasQuorum && !isEmptyBlockHeader(update.FinalityHeader) {
// Apply update if (1) 2/3 quorum is reached and (2) we have a finality proof.
// Note that (2) means that the current light client design needs finality.
// It may be changed to a re-organizable light client design. See the ongoing issue consensus-specs#2182.
applyLightClientUpdate(store.Snapshot, update)
store.ValidUpdates = make([]*LightClientUpdate, 0)
} else if currentSlot > store.Snapshot.Header.Slot.Add(updateTimeout) {
// Forced best update when the update timeout has elapsed
// Use the update that has the highest sum of sync committee bits.
updateWithHighestSumBits := store.ValidUpdates[0]
highestSumBitsUpdate := updateWithHighestSumBits.SyncCommitteeBits.Count()
for _, validUpdate := range store.ValidUpdates {
sumUpdateBits := validUpdate.SyncCommitteeBits.Count()
if sumUpdateBits > highestSumBitsUpdate {
highestSumBitsUpdate = sumUpdateBits
updateWithHighestSumBits = validUpdate
}
}
applyLightClientUpdate(store.Snapshot, updateWithHighestSumBits)
store.ValidUpdates = make([]*LightClientUpdate, 0)
}
return nil
}

View File

@@ -1,3 +0,0 @@
package main
type Server struct{}

View File

@@ -1,151 +0,0 @@
package main
import (
"math"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/crypto/bls/blst"
"github.com/prysmaticlabs/prysm/crypto/bls/common"
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
v2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
"google.golang.org/protobuf/proto"
)
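// validateLightClientUpdate checks that the update is newer than the snapshot, falls in the current
// or next sync committee period, has finality and next-sync-committee branches that either verify
// against the relevant state root or are empty as required, and is signed by a sufficient portion
// of the sync committee.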
func validateLightClientUpdate(
snapshot *LightClientSnapshot,
update *LightClientUpdate,
genesisValidatorsRoot [32]byte,
) error {
if update.Header.Slot <= snapshot.Header.Slot {
return errors.New("update header is not newer than the snapshot header")
}
snapshotPeriod := slots.ToEpoch(snapshot.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
updatePeriod := slots.ToEpoch(update.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
if updatePeriod != snapshotPeriod && updatePeriod != snapshotPeriod+1 {
return errors.New("update is not from the current or next sync committee period")
}
// Verify finality headers.
var signedHeader *v1.BeaconBlockHeader
if isEmptyBlockHeader(update.FinalityHeader) {
signedHeader = update.Header
// Check if branch is empty.
for _, elem := range update.FinalityBranch {
if elem != [32]byte{} {
return errors.New("branch not empty")
}
}
} else {
signedHeader = update.FinalityHeader
leaf, err := update.Header.HashTreeRoot()
if err != nil {
return err
}
depth := FinalizedRootIndexFloorLog2
index := getSubtreeIndex(FinalizedRootIndex)
root := update.FinalityHeader.StateRoot
merkleBranch := make([][]byte, len(update.FinalityBranch))
for i, item := range update.FinalityBranch {
merkleBranch[i] = item[:]
}
if !trie.VerifyMerkleBranch(root, leaf[:], int(index), merkleBranch, uint64(depth)) {
return errors.New("does not verify")
}
}
// Verify update next sync committee if the update period incremented.
var syncCommittee *v2.SyncCommittee
if updatePeriod == snapshotPeriod {
syncCommittee = snapshot.CurrentSyncCommittee
for _, elem := range update.NextSyncCommitteeBranch {
if elem != [32]byte{} {
return errors.New("branch not empty")
}
}
} else {
syncCommittee = snapshot.NextSyncCommittee
v1Sync := &v1alpha1.SyncCommittee{
Pubkeys: syncCommittee.Pubkeys,
AggregatePubkey: syncCommittee.AggregatePubkey,
}
leaf, err := v1Sync.HashTreeRoot()
if err != nil {
return err
}
depth := NextSyncCommitteeIndexFloorLog2
index := getSubtreeIndex(NextSyncCommitteeIndex)
root := update.Header.StateRoot
merkleBranch := make([][]byte, len(update.NextSyncCommitteeBranch))
for i, item := range update.NextSyncCommitteeBranch {
merkleBranch[i] = item[:]
}
if !trie.VerifyMerkleBranch(root, leaf[:], int(index), merkleBranch, uint64(depth)) {
return errors.New("does not verify")
}
}
// Verify sync committee has sufficient participants
if update.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
return errors.New("insufficient participants")
}
// Verify sync committee aggregate signature
participantPubkeys := make([][]byte, 0)
for i, pubKey := range syncCommittee.Pubkeys {
bit := update.SyncCommitteeBits.BitAt(uint64(i))
if bit {
participantPubkeys = append(participantPubkeys, pubKey)
}
}
domain, err := signing.ComputeDomain(
params.BeaconConfig().DomainSyncCommittee,
[]byte(update.ForkVersion.Version),
genesisValidatorsRoot[:],
)
if err != nil {
return err
}
signingRoot, err := signing.ComputeSigningRoot(signedHeader, domain)
if err != nil {
return err
}
sig, err := blst.SignatureFromBytes(update.SyncCommitteeSignature[:])
if err != nil {
return err
}
pubKeys := make([]common.PublicKey, 0)
for _, pubkey := range participantPubkeys {
pk, err := blst.PublicKeyFromBytes(pubkey)
if err != nil {
return err
}
pubKeys = append(pubKeys, pk)
}
if !sig.FastAggregateVerify(pubKeys, signingRoot) {
return errors.New("failed to verify")
}
return nil
}
func isEmptyBlockHeader(header *v1.BeaconBlockHeader) bool {
emptyRoot := params.BeaconConfig().ZeroHash
return proto.Equal(header, &v1.BeaconBlockHeader{
Slot: 0,
ProposerIndex: 0,
ParentRoot: emptyRoot[:],
StateRoot: emptyRoot[:],
BodyRoot: emptyRoot[:],
})
}
func getSubtreeIndex(index uint64) uint64 {
return index % uint64(math.Pow(2, floorLog2(index)))
}
func floorLog2(x uint64) float64 {
return math.Floor(math.Log2(float64(x)))
}
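// Worked example: getSubtreeIndex(FinalizedRootIndex) = 105 % 2^6 = 41 and
// getSubtreeIndex(NextSyncCommitteeIndex) = 55 % 2^5 = 23, matching the indices
// passed to trie.VerifyMerkleBranch above.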

View File

@@ -67,18 +67,6 @@ func exportSlashingProtectionJSON(cliCtx *cli.Context) error {
if err != nil {
return errors.Wrap(err, "could not export slashing protection history")
}
// Check if JSON data is empty and issue a warning about common problems to the user.
if eipJSON == nil || len(eipJSON.Data) == 0 {
log.Fatal(
"No slashing protection data was found in your database. This is likely because an older version of " +
"Prysm would place your validator database in your wallet directory as a validator.db file. Now, " +
"Prysm keeps its validator database inside the direct/ or derived/ folder in your wallet directory. " +
"Try running this command again, but add direct/ or derived/ to the path where your wallet " +
"directory is in and you should obtain your slashing protection history",
)
}
outputDir, err := userprompt.InputDirectory(
cliCtx,
"Enter your desired output directory for your slashing protection history file",

View File

@@ -52,7 +52,6 @@ type Flags struct {
EnableHistoricalSpaceRepresentation bool // EnableHistoricalSpaceRepresentation enables the saving of registry validators in separate buckets to save space
EnableGetBlockOptimizations bool // EnableGetBlockOptimizations optimizes some elements of the GetBlock() function.
EnableBatchVerification bool // EnableBatchVerification enables batch signature verification on gossip messages.
EnableBalanceTrieComputation bool // EnableBalanceTrieComputation enables our beacon state to use balance tries for hash tree root operations.
// Logging related toggles.
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
@@ -224,10 +223,6 @@ func ConfigureBeaconChain(ctx *cli.Context) {
logEnabled(enableBatchGossipVerification)
cfg.EnableBatchVerification = true
}
if ctx.Bool(enableBalanceTrieComputation.Name) {
logEnabled(enableBalanceTrieComputation)
cfg.EnableBalanceTrieComputation = true
}
Init(cfg)
}

View File

@@ -139,10 +139,6 @@ var (
Name: "enable-batch-gossip-verification",
Usage: "This enables batch verification of signatures received over gossip.",
}
enableBalanceTrieComputation = &cli.BoolFlag{
Name: "enable-balance-trie-computation",
Usage: "This enables optimized hash tree root operations for our balance field.",
}
)
// devModeFlags holds list of flags that are set when development mode is on.
@@ -151,7 +147,6 @@ var devModeFlags = []cli.Flag{
forceOptMaxCoverAggregationStategy,
enableGetBlockOptimizations,
enableBatchGossipVerification,
enableBalanceTrieComputation,
}
// ValidatorFlags contains a list of all the feature flags that apply to the validator client.
@@ -197,7 +192,6 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
disableCorrectlyPruneCanonicalAtts,
disableActiveBalanceCache,
enableBatchGossipVerification,
enableBalanceTrieComputation,
}...)
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.

View File

@@ -22,7 +22,7 @@ const (
// Genesis Fork Epoch for the mainnet config.
genesisForkEpoch = 0
// Altair Fork Epoch for mainnet config.
mainnetAltairForkEpoch = 1 // Oct 27, 2021, 10:56:23am UTC
mainnetAltairForkEpoch = 74240 // Oct 27, 2021, 10:56:23am UTC
)
var mainnetNetworkConfig = &NetworkConfig{
@@ -97,8 +97,8 @@ var mainnetBeaconConfig = &BeaconChainConfig{
// Time parameter constants.
MinAttestationInclusionDelay: 1,
SecondsPerSlot: 4,
SlotsPerEpoch: 4,
SecondsPerSlot: 12,
SlotsPerEpoch: 32,
SqrRootSlotsPerEpoch: 5,
MinSeedLookahead: 1,
MaxSeedLookahead: 4,

View File

@@ -8,7 +8,6 @@ import (
"github.com/minio/sha256-simd"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
const bytesPerChunk = 32
@@ -114,53 +113,6 @@ func Pack(serializedItems [][]byte) ([][]byte, error) {
return chunks, nil
}
// PackByChunk pads a given byte array's final chunk with zeroes if needed.
func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) {
emptyChunk := [bytesPerChunk]byte{}
// If there are no items, we return an empty chunk.
if len(serializedItems) == 0 {
return [][bytesPerChunk]byte{emptyChunk}, nil
} else if len(serializedItems[0]) == bytesPerChunk {
// If each item has exactly BYTES_PER_CHUNK length, we return the list of serialized items.
chunks := make([][bytesPerChunk]byte, 0, len(serializedItems))
for _, c := range serializedItems {
chunks = append(chunks, bytesutil.ToBytes32(c))
}
return chunks, nil
}
// We flatten the list in order to pack its items into byte chunks correctly.
var orderedItems []byte
for _, item := range serializedItems {
orderedItems = append(orderedItems, item...)
}
// If all our serialized item slices are length zero, we
// exit early.
if len(orderedItems) == 0 {
return [][bytesPerChunk]byte{emptyChunk}, nil
}
numItems := len(orderedItems)
var chunks [][bytesPerChunk]byte
for i := 0; i < numItems; i += bytesPerChunk {
j := i + bytesPerChunk
// We create our upper bound index of the chunk, if it is greater than numItems,
// we set it as numItems itself.
if j > numItems {
j = numItems
}
// We create chunks from the list of items based on the
// indices determined above.
// Right-pad the last chunk with zero bytes if it does not
// have length bytesPerChunk from the helper.
// The ToBytes32 helper allocates a 32-byte array, before
// copying the ordered items in. This ensures that even if
// the last chunk is != 32 in length, we will right-pad it with
// zero bytes.
chunks = append(chunks, bytesutil.ToBytes32(orderedItems[i:j]))
}
return chunks, nil
}
// MixInLength appends hash length to root
func MixInLength(root [32]byte, length []byte) [32]byte {
var hash [32]byte

View File

@@ -94,22 +94,6 @@ func TestPack(t *testing.T) {
}
}
func TestPackByChunk(t *testing.T) {
byteSlice2D := [][]byte{
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 5, 2, 6, 2, 7},
{1, 1, 2, 3, 5, 8, 13, 21, 34},
}
expected := [][32]byte{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 5, 2, 6, 2, 7, 1, 1},
{2, 3, 5, 8, 13, 21, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
result, err := ssz.PackByChunk(byteSlice2D)
require.NoError(t, err)
assert.Equal(t, len(expected), len(result))
for i, v := range expected {
assert.DeepEqual(t, v, result[i])
}
}
func TestMixInLength(t *testing.T) {
byteSlice := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
length := []byte{1, 2, 3}

View File

@@ -1,197 +0,0 @@
package tree
import (
"bytes"
"sort"
"github.com/pkg/errors"
"github.com/protolambda/ztyp/codec"
"github.com/protolambda/ztyp/tree"
"github.com/protolambda/ztyp/view"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
var (
BLSPubkeyType = view.BasicVectorType(view.ByteType, 48)
ValidatorType = view.ContainerType("Validator", []view.FieldDef{
{"pubkey", BLSPubkeyType},
{"withdrawal_credentials", view.RootType},
{"effective_balance", view.Uint64Type},
{"slashed", view.BoolType},
{"activation_eligibility_epoch", view.Uint64Type},
{"activation_epoch", view.Uint64Type},
{"exit_epoch", view.Uint64Type},
{"withdrawable_epoch", view.Uint64Type},
})
ForkType = view.ContainerType("Fork", []view.FieldDef{
{"previous_version", view.Bytes4Type},
{"current_version", view.Bytes4Type},
{"epoch", view.Uint64Type},
})
BeaconBlockHeaderType = view.ContainerType("BeaconBlockHeader", []view.FieldDef{
{"slot", view.Uint64Type},
{"proposer_index", view.Uint64Type},
{"parent_root", view.RootType},
{"state_root", view.RootType},
{"body_root", view.RootType},
})
BlockRootsType = view.VectorType(view.RootType, 8192)
StateRootsType = view.VectorType(view.RootType, 8192)
HistoricalRootsType = view.ListType(view.RootType, 16777216)
Eth1DataType = view.ContainerType("Eth1Data", []view.FieldDef{
{"deposit_root", view.RootType},
{"deposit_count", view.Uint64Type},
{"block_hash", view.RootType},
})
Eth1DataVotesType = view.ComplexListType(Eth1DataType, 2048)
ValidatorsType = view.ComplexListType(ValidatorType, 1099511627776)
BalancesType = view.BasicListType(view.Uint64Type, 1099511627776)
RandaoMixesType = view.VectorType(view.RootType, 65536)
SlashingsType = view.BasicVectorType(view.Uint64Type, 8192)
ParticipationType = view.BasicListType(view.ByteType, 1099511627776)
JustificationBitsType = view.BitVectorType(4)
CheckpointType = view.ContainerType("Checkpoint", []view.FieldDef{
{"epoch", view.Uint64Type},
{"root", view.RootType},
})
InactivityScoresType = view.BasicListType(view.Uint64Type, 1099511627776)
SyncCommitteeKeysType = view.VectorType(BLSPubkeyType, 512)
SyncCommitteeType = view.ContainerType("SyncCommittee", []view.FieldDef{
{"pubkeys", SyncCommitteeKeysType},
{"aggregate_pubkey", BLSPubkeyType},
})
BeaconStateAltairType = view.ContainerType("BeaconStateAltair", []view.FieldDef{
{"genesis_time", view.Uint64Type},
{"genesis_validators_root", view.RootType},
{"slot", view.Uint64Type},
{"fork", ForkType},
{"latest_block_header", BeaconBlockHeaderType},
{"block_roots", BlockRootsType},
{"state_roots", StateRootsType},
{"historical_roots", HistoricalRootsType},
{"eth1_data", Eth1DataType},
{"eth1_data_votes", Eth1DataVotesType},
{"eth1_deposit_index", view.Uint64Type},
{"validators", ValidatorsType},
{"balances", BalancesType},
{"randao_mixes", RandaoMixesType},
{"slashings", SlashingsType},
{"previous_epoch_participation", ParticipationType},
{"current_epoch_participation", ParticipationType},
{"justification_bits", JustificationBitsType},
{"previous_justified_checkpoint", CheckpointType},
{"current_justified_checkpoint", CheckpointType},
{"finalized_checkpoint", CheckpointType},
{"inactivity_scores", InactivityScoresType},
{"current_sync_committee", SyncCommitteeType},
{"next_sync_committee", SyncCommitteeType},
})
)
type TreeBackedState struct {
beaconState view.View
}
func NewTreeBackedState(beaconState state.BeaconState) (*TreeBackedState, error) {
enc, err := beaconState.MarshalSSZ()
if err != nil {
return nil, err
}
dec := codec.NewDecodingReader(bytes.NewReader(enc), uint64(len(enc)))
treeBacked, err := BeaconStateAltairType.Deserialize(dec)
if err != nil {
return nil, err
}
return &TreeBackedState{beaconState: treeBacked}, nil
}
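// VerifyProof walks the branch from the leaf at the given generalized index up to the root,
// combining with each sibling on the side determined by the current index's parity, and reports
// whether the recomputed root matches.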
func VerifyProof(root [32]byte, proof [][]byte, leaf tree.Root, generalizedIndex tree.Gindex64) bool {
h := leaf
hFn := tree.GetHashFn()
idx := generalizedIndex
for _, elem := range proof {
if idx%2 == 0 {
h = hFn(h, bytesutil.ToBytes32(elem))
} else {
h = hFn(bytesutil.ToBytes32(elem), h)
}
idx = idx / 2
}
return h == root
}
func (tb *TreeBackedState) View() view.View {
return tb.beaconState
}
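// Proof returns a Merkle proof for the beacon state field at fieldIndex: it derives the field's
// generalized index, collects the sibling gindices along the path to the root, and returns their
// roots ordered from the leaf's sibling upward.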
func (tb *TreeBackedState) Proof(
fieldIndex uint64,
) (proof [][]byte, generalizedIdx tree.Gindex64, err error) {
cont, ok := tb.beaconState.(*view.ContainerView)
if !ok {
err = errors.New("not a container")
return
}
depth := tree.CoverDepth(cont.FieldCount())
generalizedIdx, err = tree.ToGindex64(fieldIndex, depth)
if err != nil {
return
}
leaves := make(map[tree.Gindex64]struct{})
leaves[generalizedIdx] = struct{}{}
leavesSorted := make([]tree.Gindex64, 0, len(leaves))
for g := range leaves {
leavesSorted = append(leavesSorted, g)
}
sort.Slice(leavesSorted, func(i, j int) bool {
return leavesSorted[i] < leavesSorted[j]
})
// Mark every gindex that is between the root and the leaves.
interest := make(map[tree.Gindex64]struct{})
for _, g := range leavesSorted {
iter, _ := g.BitIter()
n := tree.Gindex64(1)
for {
right, ok := iter.Next()
if !ok {
break
}
n *= 2
if right {
n += 1
}
interest[n] = struct{}{}
}
}
witness := make(map[tree.Gindex64]struct{})
// For every gindex that is covered, check if the sibling is covered, and if not, it's a witness
for g := range interest {
if _, ok := interest[g^1]; !ok {
witness[g^1] = struct{}{}
}
}
witnessSorted := make([]tree.Gindex64, 0, len(witness))
for g := range witness {
witnessSorted = append(witnessSorted, g)
}
sort.Slice(witnessSorted, func(i, j int) bool {
return witnessSorted[i] < witnessSorted[j]
})
node := tb.beaconState.Backing()
hFn := tree.GetHashFn()
proof = make([][]byte, 0, len(witnessSorted))
for i := len(witnessSorted) - 1; i >= 0; i-- {
g := witnessSorted[i]
n, err2 := node.Getter(g)
if err2 != nil {
err = err2
return
}
root := n.MerkleRoot(hFn)
proof = append(proof, root[:])
}
return
}

View File

@@ -1,71 +0,0 @@
package tree
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/protolambda/ztyp/codec"
"github.com/protolambda/ztyp/tree"
"github.com/protolambda/ztyp/view"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/io/file"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestProof_SimpleField(t *testing.T) {
runProofTest(t, 0 /* genesis time */)
}
func TestProof_FinalizedCheckpoint(t *testing.T) {
runProofTest(t, 20 /* finalized checkpoint */)
}
func runProofTest(t testing.TB, fieldIndex uint64) {
data, err := file.ReadFileAsBytes("/tmp/state.ssz")
require.NoError(t, err)
dec := codec.NewDecodingReader(bytes.NewReader(data), uint64(len(data)))
treeBacked, err := BeaconStateAltairType.Deserialize(dec)
require.NoError(t, err)
tb := &TreeBackedState{beaconState: treeBacked}
// Get a proof of the field.
proof, gIndex, err := tb.Proof(fieldIndex)
require.NoError(t, err)
root := tb.beaconState.HashTreeRoot(tree.GetHashFn())
leaf, err := tb.View().Backing().Getter(gIndex)
require.NoError(t, err)
// Verify the Merkle proof using the state root, the leaf for the requested field,
// and the generalized index of the field in the state.
valid := VerifyProof(root, proof, leaf.MerkleRoot(tree.GetHashFn()), gIndex)
require.Equal(t, true, valid)
}
func TestPrysmSSZComparison(t *testing.T) {
data, err := file.ReadFileAsBytes("/tmp/state.ssz")
require.NoError(t, err)
protoState := &ethpb.BeaconStateAltair{}
require.NoError(t, protoState.UnmarshalSSZ(data))
prysmBeaconState, err := stateAltair.InitializeFromProto(protoState)
require.NoError(t, err)
dec := codec.NewDecodingReader(bytes.NewReader(data), uint64(len(data)))
ztypBeaconState, err := BeaconStateAltairType.Deserialize(dec)
require.NoError(t, err)
hFn := tree.GetHashFn()
ztypItem := ztypBeaconState.(*view.ContainerView)
ztypRoot := ztypItem.HashTreeRoot(hFn)
prysmRoot, err := prysmBeaconState.HashTreeRoot(context.Background())
require.NoError(t, err)
require.Equal(
t,
fmt.Sprintf("%#x", prysmRoot),
ztypRoot.String(),
)
}

1
go.mod
View File

@@ -79,7 +79,6 @@ require (
github.com/prometheus/procfs v0.7.0 // indirect
github.com/prometheus/prom2json v1.3.0
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/protolambda/ztyp v0.1.9 // indirect
github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d
github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c

2
go.sum
View File

@@ -1191,8 +1191,6 @@ github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
github.com/protolambda/ztyp v0.1.9 h1:TEQvOTihEf89grFJMOZA1DrJ4B6M1Cg35U3WRSTdgew=
github.com/protolambda/ztyp v0.1.9/go.mod h1:NAGmX7+zlkxxv7F5ATHrdXwZFtkQjAUinDgg7RAV29k=
github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d h1:1dN7YAqMN3oAJ0LceWcyv/U4jHLh+5urnSnr4br6zg4=
github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d/go.mod h1:kOmQ/zdobQf7HUohDTifDNFEZfNaSCIY5fkONPL+dWU=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=

View File

@@ -87,10 +87,9 @@ func (ImportedKeystoreStatus_Status) EnumDescriptor() ([]byte, []int) {
type DeletedKeystoreStatus_Status int32
const (
DeletedKeystoreStatus_DELETED DeletedKeystoreStatus_Status = 0
DeletedKeystoreStatus_NOT_FOUND DeletedKeystoreStatus_Status = 1
DeletedKeystoreStatus_NOT_ACTIVE DeletedKeystoreStatus_Status = 2
DeletedKeystoreStatus_ERROR DeletedKeystoreStatus_Status = 3
DeletedKeystoreStatus_DELETED DeletedKeystoreStatus_Status = 0
DeletedKeystoreStatus_NOT_FOUND DeletedKeystoreStatus_Status = 1
DeletedKeystoreStatus_ERROR DeletedKeystoreStatus_Status = 2
)
// Enum value maps for DeletedKeystoreStatus_Status.
@@ -98,14 +97,12 @@ var (
DeletedKeystoreStatus_Status_name = map[int32]string{
0: "DELETED",
1: "NOT_FOUND",
2: "NOT_ACTIVE",
3: "ERROR",
2: "ERROR",
}
DeletedKeystoreStatus_Status_value = map[string]int32{
"DELETED": 0,
"NOT_FOUND": 1,
"NOT_ACTIVE": 2,
"ERROR": 3,
"DELETED": 0,
"NOT_FOUND": 1,
"ERROR": 2,
}
)
@@ -628,7 +625,7 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x30, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75,
0x73, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12,
0x0d, 0x0a, 0x09, 0x44, 0x55, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x09,
0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0xbe, 0x01, 0x0a, 0x15, 0x44, 0x65,
0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0xae, 0x01, 0x0a, 0x15, 0x44, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
@@ -636,11 +633,10 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{
0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x3f, 0x0a, 0x06, 0x53, 0x74, 0x61,
0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x00,
0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12,
0x0e, 0x0a, 0x0a, 0x4e, 0x4f, 0x54, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12,
0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x32, 0xb9, 0x03, 0x0a, 0x0d, 0x4b,
0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x32, 0xb9, 0x03, 0x0a, 0x0d, 0x4b,
0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x78, 0x0a, 0x0d,
0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x16, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,

View File

@@ -130,8 +130,7 @@ message DeletedKeystoreStatus {
enum Status {
DELETED = 0;
NOT_FOUND = 1;
NOT_ACTIVE = 2;
ERROR = 3;
ERROR = 2;
}
Status status = 1;
string message = 2;

View File

@@ -0,0 +1,32 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
load("@prysm//tools/go:def.bzl", "go_library")
# gazelle:ignore
proto_library(
name = "proto",
srcs = ["beacon_wrapper.proto"],
visibility = ["//visibility:public"],
deps = [
"//proto/eth/v1:proto",
"//proto/eth/v2:proto",
],
)
go_proto_library(
name = "go_proto",
importpath = "github.com/prysmaticlabs/prysm/proto/eth/wrapper",
proto = ":proto",
visibility = ["//visibility:public"],
deps = [
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
],
)
go_library(
name = "go_default_library",
embed = [":go_proto"],
importpath = "github.com/prysmaticlabs/prysm/proto/eth/wrapper",
visibility = ["//visibility:public"],
)

230
proto/eth/wrapper/beacon_wrapper.pb.go generated Executable file
View File

@@ -0,0 +1,230 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.15.8
// source: proto/eth/wrapper/beacon_wrapper.proto
package ethereum_eth_wrapper
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
v2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type WrappedBeaconState struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Version v2.Version `protobuf:"varint,1001,opt,name=version,proto3,enum=ethereum.eth.v2.Version" json:"version,omitempty"`
ValidatorIndexes []byte `protobuf:"bytes,1002,opt,name=validatorIndexes,proto3" json:"validatorIndexes,omitempty"`
// Types that are assignable to State:
// *WrappedBeaconState_Phase0State
// *WrappedBeaconState_AltairState
State isWrappedBeaconState_State `protobuf_oneof:"state"`
}
func (x *WrappedBeaconState) Reset() {
*x = WrappedBeaconState{}
if protoimpl.UnsafeEnabled {
mi := &file_proto_eth_wrapper_beacon_wrapper_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *WrappedBeaconState) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WrappedBeaconState) ProtoMessage() {}
func (x *WrappedBeaconState) ProtoReflect() protoreflect.Message {
mi := &file_proto_eth_wrapper_beacon_wrapper_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WrappedBeaconState.ProtoReflect.Descriptor instead.
func (*WrappedBeaconState) Descriptor() ([]byte, []int) {
return file_proto_eth_wrapper_beacon_wrapper_proto_rawDescGZIP(), []int{0}
}
func (x *WrappedBeaconState) GetVersion() v2.Version {
if x != nil {
return x.Version
}
return v2.Version_PHASE0
}
func (x *WrappedBeaconState) GetValidatorIndexes() []byte {
if x != nil {
return x.ValidatorIndexes
}
return nil
}
func (m *WrappedBeaconState) GetState() isWrappedBeaconState_State {
if m != nil {
return m.State
}
return nil
}
func (x *WrappedBeaconState) GetPhase0State() *v1.BeaconState {
if x, ok := x.GetState().(*WrappedBeaconState_Phase0State); ok {
return x.Phase0State
}
return nil
}
func (x *WrappedBeaconState) GetAltairState() *v2.BeaconStateV2 {
if x, ok := x.GetState().(*WrappedBeaconState_AltairState); ok {
return x.AltairState
}
return nil
}
type isWrappedBeaconState_State interface {
isWrappedBeaconState_State()
}
type WrappedBeaconState_Phase0State struct {
Phase0State *v1.BeaconState `protobuf:"bytes,1,opt,name=phase0_state,json=phase0State,proto3,oneof"`
}
type WrappedBeaconState_AltairState struct {
AltairState *v2.BeaconStateV2 `protobuf:"bytes,2,opt,name=altair_state,json=altairState,proto3,oneof"`
}
func (*WrappedBeaconState_Phase0State) isWrappedBeaconState_State() {}
func (*WrappedBeaconState_AltairState) isWrappedBeaconState_State() {}
var File_proto_eth_wrapper_beacon_wrapper_proto protoreflect.FileDescriptor
var file_proto_eth_wrapper_beacon_wrapper_proto_rawDesc = []byte{
0x0a, 0x26, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x77, 0x72, 0x61, 0x70,
0x70, 0x65, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70,
0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x1a, 0x1a,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f,
0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x32, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e,
0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x02, 0x0a,
0x12, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74,
0x61, 0x74, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0xe9,
0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x69,
0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0xea, 0x07, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e,
0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x68, 0x61, 0x73, 0x65, 0x30, 0x5f,
0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x74,
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x65,
0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x68, 0x61,
0x73, 0x65, 0x30, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x0c, 0x61, 0x6c, 0x74, 0x61,
0x69, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x32,
0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x56, 0x32, 0x48, 0x00,
0x52, 0x0b, 0x61, 0x6c, 0x74, 0x61, 0x69, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x07, 0x0a,
0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_proto_eth_wrapper_beacon_wrapper_proto_rawDescOnce sync.Once
file_proto_eth_wrapper_beacon_wrapper_proto_rawDescData = file_proto_eth_wrapper_beacon_wrapper_proto_rawDesc
)
func file_proto_eth_wrapper_beacon_wrapper_proto_rawDescGZIP() []byte {
file_proto_eth_wrapper_beacon_wrapper_proto_rawDescOnce.Do(func() {
file_proto_eth_wrapper_beacon_wrapper_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_eth_wrapper_beacon_wrapper_proto_rawDescData)
})
return file_proto_eth_wrapper_beacon_wrapper_proto_rawDescData
}
var file_proto_eth_wrapper_beacon_wrapper_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_proto_eth_wrapper_beacon_wrapper_proto_goTypes = []interface{}{
(*WrappedBeaconState)(nil), // 0: ethereum.eth.wrapper.WrappedBeaconState
(v2.Version)(0), // 1: ethereum.eth.v2.Version
(*v1.BeaconState)(nil), // 2: ethereum.eth.v1.BeaconState
(*v2.BeaconStateV2)(nil), // 3: ethereum.eth.v2.BeaconStateV2
}
var file_proto_eth_wrapper_beacon_wrapper_proto_depIdxs = []int32{
1, // 0: ethereum.eth.wrapper.WrappedBeaconState.version:type_name -> ethereum.eth.v2.Version
2, // 1: ethereum.eth.wrapper.WrappedBeaconState.phase0_state:type_name -> ethereum.eth.v1.BeaconState
3, // 2: ethereum.eth.wrapper.WrappedBeaconState.altair_state:type_name -> ethereum.eth.v2.BeaconStateV2
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_proto_eth_wrapper_beacon_wrapper_proto_init() }
func file_proto_eth_wrapper_beacon_wrapper_proto_init() {
if File_proto_eth_wrapper_beacon_wrapper_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_proto_eth_wrapper_beacon_wrapper_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WrappedBeaconState); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_proto_eth_wrapper_beacon_wrapper_proto_msgTypes[0].OneofWrappers = []interface{}{
(*WrappedBeaconState_Phase0State)(nil),
(*WrappedBeaconState_AltairState)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_eth_wrapper_beacon_wrapper_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_eth_wrapper_beacon_wrapper_proto_goTypes,
DependencyIndexes: file_proto_eth_wrapper_beacon_wrapper_proto_depIdxs,
MessageInfos: file_proto_eth_wrapper_beacon_wrapper_proto_msgTypes,
}.Build()
File_proto_eth_wrapper_beacon_wrapper_proto = out.File
file_proto_eth_wrapper_beacon_wrapper_proto_rawDesc = nil
file_proto_eth_wrapper_beacon_wrapper_proto_goTypes = nil
file_proto_eth_wrapper_beacon_wrapper_proto_depIdxs = nil
}

View File

@@ -0,0 +1,16 @@
syntax = "proto3";
package ethereum.eth.wrapper;
import "proto/eth/v2/version.proto";
import "proto/eth/v1/beacon_state.proto";
import "proto/eth/v2/beacon_state.proto";
message WrappedBeaconState {
v2.Version version = 1001;
bytes validatorIndexes = 1002;
oneof state {
v1.BeaconState phase0_state = 1;
v2.BeaconStateV2 altair_state = 2;
}
}
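A minimal Go sketch of how this wrapper message could be populated, using only the oneof accessors from the generated code above (the helper function name here is illustrative, not part of the change):
package example
import (
	v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
	v2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
	wrapper "github.com/prysmaticlabs/prysm/proto/eth/wrapper"
)
// wrapPhase0State tags a phase0 beacon state with its version and stores it in
// the oneof state field of WrappedBeaconState.
func wrapPhase0State(st *v1.BeaconState) *wrapper.WrappedBeaconState {
	return &wrapper.WrappedBeaconState{
		Version: v2.Version_PHASE0,
		State:   &wrapper.WrappedBeaconState_Phase0State{Phase0State: st},
	}
}
An Altair state would use the WrappedBeaconState_AltairState variant instead.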

Some files were not shown because too many files have changed in this diff.