Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 21:38:05 -05:00.

Compare commits: custom_has ... monitor_sy (38 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 0f6a458b58 | |
| | 815ff6d2ea | |
| | cd01b7a318 | |
| | 6d35aa11f4 | |
| | 9e1cebf20e | |
| | 448fc24643 | |
| | 66a864d3d0 | |
| | dfc62f6815 | |
| | cd1e3f2b3e | |
| | 9b29569e79 | |
| | c488029eb3 | |
| | 6f20d17d15 | |
| | 6ed48c66d8 | |
| | 94fd99f5cd | |
| | 3d2a89f991 | |
| | a6655be50f | |
| | d4a420ddfd | |
| | 838b19e985 | |
| | 788338a004 | |
| | 39c33b82ad | |
| | 905e0f4c1c | |
| | 0ade1f121d | |
| | ee52f8dff3 | |
| | 50159c2e48 | |
| | cae58bbbd8 | |
| | 9b37418761 | |
| | a78cdf86cc | |
| | 1c4ea75a18 | |
| | 6f4c80531c | |
| | e3246922eb | |
| | 5962363847 | |
| | 5e8cf9cd28 | |
| | b5ca09bce6 | |
| | 720ee3f2a4 | |
| | 3d139d35f6 | |
| | ea38969af2 | |
| | f753ce81cc | |
| | 9d678b0c47 | |
@@ -18,6 +18,7 @@ go_library(
        "receive_attestation.go",
        "receive_block.go",
        "service.go",
        "state_balance_cache.go",
        "weak_subjectivity_checks.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
@@ -95,6 +96,7 @@ go_test(
        "init_test.go",
        "log_test.go",
        "metrics_test.go",
        "mock_test.go",
        "process_attestation_test.go",
        "process_block_test.go",
        "receive_attestation_test.go",
@@ -141,6 +143,7 @@ go_test(
        "chain_info_norace_test.go",
        "checktags_test.go",
        "init_test.go",
        "mock_test.go",
        "receive_block_test.go",
        "service_norace_test.go",
    ],
@@ -10,7 +10,6 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/config/features"
@@ -42,9 +41,6 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
    // ensure head gets its best justified info.
    if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
        s.justifiedCheckpt = s.bestJustifiedCheckpt
        if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
            return err
        }
    }

    // Get head from the fork choice service.
@@ -273,57 +269,6 @@ func (s *Service) hasHeadState() bool {
    return s.head != nil && s.head.state != nil
}

// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
    if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
        return err
    }

    s.clearInitSyncBlocks()

    var justifiedState state.BeaconState
    var err error
    if justifiedRoot == s.genesisRoot {
        justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
        if err != nil {
            return err
        }
    } else {
        justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
        if err != nil {
            return err
        }
    }
    if justifiedState == nil || justifiedState.IsNil() {
        return errors.New("justified state can't be nil")
    }

    epoch := time.CurrentEpoch(justifiedState)

    justifiedBalances := make([]uint64, justifiedState.NumValidators())
    if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
        if helpers.IsActiveValidatorUsingTrie(val, epoch) {
            justifiedBalances[idx] = val.EffectiveBalance()
        } else {
            justifiedBalances[idx] = 0
        }
        return nil
    }); err != nil {
        return err
    }

    s.justifiedBalancesLock.Lock()
    defer s.justifiedBalancesLock.Unlock()
    s.justifiedBalances = justifiedBalances
    return nil
}

func (s *Service) getJustifiedBalances() []uint64 {
    s.justifiedBalancesLock.RLock()
    defer s.justifiedBalancesLock.RUnlock()
    return s.justifiedBalances
}

// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(
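Note: with cacheJustifiedStateBalances and getJustifiedBalances removed from the service, callers obtain balances through the new root-keyed stateBalanceCache instead. A minimal sketch of the replacement call pattern, assembled from the onBlock and processAttestationsRoutine hunks further down (error handling abbreviated, so treat it as illustrative rather than the exact code):

```go
// Sketch: read justified balances through the cache, then feed them to updateHead.
root := bytesutil.ToBytes32(s.justifiedCheckpt.Root)
balances, err := s.justifiedBalances.get(ctx, root) // cached if root matches the last lookup
if err != nil {
    return errors.Wrap(err, "could not read justified balances")
}
if err := s.updateHead(ctx, balances); err != nil {
    log.WithError(err).Warn("Could not update head")
}
```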
@@ -123,13 +123,15 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    service := setupBeaconChain(t, beaconDB)
    ctx := context.Background()

    state, _ := util.DeterministicGenesisState(t, 100)
    r := [32]byte{'a'}
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
    require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
    require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
    require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
    balances, err := service.justifiedBalances.get(ctx, r)
    require.NoError(t, err)
    require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
}

func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
@@ -25,18 +25,18 @@ func TestService_TreeHandler(t *testing.T) {
    headState, err := util.NewBeaconState()
    require.NoError(t, err)
    require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
    cfg := &config{
        BeaconDB: beaconDB,
        ForkChoiceStore: protoarray.New(
            0, // justifiedEpoch
            0, // finalizedEpoch
            [32]byte{'a'},
        ),
        StateGen: stategen.New(beaconDB),
    fcs := protoarray.New(
        0, // justifiedEpoch
        0, // finalizedEpoch
        [32]byte{'a'},
    )
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB)),
        WithForkChoiceStore(fcs),
    }
    s, err := NewService(ctx)
    s, err := NewService(ctx, opts...)
    require.NoError(t, err)
    s.cfg = cfg
    require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
    require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
    s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)
@@ -130,6 +130,14 @@ var (
        Name: "sync_head_state_hit",
        Help: "The number of sync head state requests that are present in the cache.",
    })
    stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
        Name: "state_balance_cache_hit",
        Help: "Count the number of state balance cache hits.",
    })
    stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
        Name: "state_balance_cache_miss",
        Help: "Count the number of state balance cache misses.",
    })
)

// reportSlotMetrics reports slot related metrics.
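The hit and miss counters above are incremented inside the new stateBalanceCache (get() counts hits, update() counts misses; see state_balance_cache.go below). As a rough, hypothetical illustration that is not part of this change, their current values can be read with the Prometheus client's testutil package:

```go
package blockchain

import "github.com/prometheus/client_golang/prometheus/testutil"

// stateBalanceCacheHitRatio is a hypothetical helper: it reads the current
// counter values and returns the observed hit ratio of the balance cache.
func stateBalanceCacheHitRatio() float64 {
    hits := testutil.ToFloat64(stateBalanceCacheHit)
    misses := testutil.ToFloat64(stateBalanceCacheMiss)
    if hits+misses == 0 {
        return 0
    }
    return hits / (hits + misses)
}
```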
beacon-chain/blockchain/mock_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package blockchain

import (
    "context"
    "errors"
    "testing"

    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

func testServiceOptsWithDB(t *testing.T) []Option {
    beaconDB := testDB.SetupDB(t)
    fcs := protoarray.New(0, 0, [32]byte{'a'})
    return []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB)),
        WithForkChoiceStore(fcs),
    }
}

// warning: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
    return []Option{
        withStateBalanceCache(satisfactoryStateBalanceCache()),
    }
}

type mockStateByRooter struct {
    state state.BeaconState
    err   error
}

var _ stateByRooter = &mockStateByRooter{}

func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
    return m.state, m.err
}

// returns an instance of the state balance cache that can be used
// to satisfy the requirement for one in NewService, but which will
// always return an error if used.
func satisfactoryStateBalanceCache() *stateBalanceCache {
    err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
    return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
}
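For orientation, the test refactors in the rest of this compare use these helpers roughly as follows. The test name below and the blockchainTesting alias are illustrative; the pattern itself is taken from the hunks further down:

```go
// Hypothetical test sketch combining the helpers above with extra options.
func TestExample_ServiceConstruction(t *testing.T) {
    // DB-backed service for code paths that touch the database.
    withDB, err := NewService(context.Background(), testServiceOptsWithDB(t)...)
    require.NoError(t, err)
    _ = withDB

    // Lightweight service for code paths that never hit the DB; extra options append cleanly.
    opts := append(testServiceOptsNoDB(), WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
    noDB, err := NewService(context.Background(), opts...)
    require.NoError(t, err)
    _ = noDB
}
```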
@@ -130,6 +130,13 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
    }
}

func withStateBalanceCache(c *stateBalanceCache) Option {
    return func(s *Service) error {
        s.justifiedBalances = c
        return nil
    }
}

// WithFinalizedStateAtStartUp to store finalized state at start up.
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
    return func(s *Service) error {
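withStateBalanceCache is unexported because only tests should swap in a stub cache; it has the same shape as the exported With* options used throughout this diff. For readers unfamiliar with the pattern, here is a minimal, generic sketch of functional options (all names in this block are illustrative, not Prysm's):

```go
package example

// Option mutates a Service during construction and may fail.
type Option func(*Service) error

// Database stands in for any dependency being injected.
type Database interface{}

// Service is the object under construction.
type Service struct{ db Database }

// WithDatabase captures its argument in a closure and applies it
// when NewService runs the option.
func WithDatabase(db Database) Option {
    return func(s *Service) error {
        s.db = db
        return nil
    }
}

// NewService applies each option in order; a failing option aborts construction.
func NewService(opts ...Option) (*Service, error) {
    s := &Service{}
    for _, o := range opts {
        if err := o(s); err != nil {
            return nil, err
        }
    }
    return s, nil
}
```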
@@ -24,14 +24,13 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
_, err = blockTree1(t, beaconDB, []byte{'g'})
|
||||
require.NoError(t, err)
|
||||
@@ -131,14 +130,14 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
@@ -157,13 +156,12 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
@@ -230,13 +228,12 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
epoch := types.Epoch(1)
|
||||
baseState, _ := util.DeterministicGenesisState(t, 1)
|
||||
@@ -268,12 +265,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)}))
|
||||
@@ -281,12 +276,10 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
@@ -294,12 +287,10 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)})
|
||||
@@ -308,12 +299,9 @@ func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
d := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
|
||||
@@ -321,12 +309,10 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
@@ -340,12 +326,10 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
@@ -361,10 +345,14 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
@@ -387,12 +375,10 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
|
||||
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
@@ -415,12 +401,10 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
|
||||
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
|
||||
@@ -151,7 +151,12 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
        s.finalizedCheckpt = postState.FinalizedCheckpoint()
    }

    if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
    balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
    if err != nil {
        msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", s.justifiedCheckpt.Root)
        return errors.Wrap(err, msg)
    }
    if err := s.updateHead(ctx, balances); err != nil {
        log.WithError(err).Warn("Could not update head")
    }
@@ -255,12 +260,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo

    jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
    fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
    sigSet := &bls.SignatureSet{
    sigSet := &bls.SignatureBatch{
        Signatures: [][]byte{},
        PublicKeys: []bls.PublicKey{},
        Messages:   [][32]byte{},
    }
    var set *bls.SignatureSet
    var set *bls.SignatureBatch
    boundaries := make(map[[32]byte]state.BeaconState)
    for i, b := range blks {
        set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
@@ -193,9 +193,6 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
    if canUpdate {
        s.prevJustifiedCheckpt = s.justifiedCheckpt
        s.justifiedCheckpt = cpt
        if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
            return err
        }
    }

    return nil
@@ -206,12 +203,13 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
// This method does not have defense against fork choice bouncing attack, which is why it's only recommended to be used during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
    s.prevJustifiedCheckpt = s.justifiedCheckpt
    s.justifiedCheckpt = cp
    if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {

    if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
        return err
    }
    s.justifiedCheckpt = cp

    return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
    return nil
}

func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -330,7 +328,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
    if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
        if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
            s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
            return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
            // we don't need to check if the previous justified checkpoint was an ancestor since the new
            // finalized checkpoint is overriding it.
            return nil
        }

// Update justified if store justified is not in chain with finalized check point.
@@ -345,9 +345,6 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
        }
        if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
            s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
            if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
                return err
            }
        }
    }
    return nil
@@ -35,16 +35,17 @@ import (
|
||||
|
||||
func TestStore_OnBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
|
||||
@@ -134,13 +135,12 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -186,14 +186,12 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
|
||||
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
service.genesisTime = time.Now()
|
||||
|
||||
update, err := service.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: make([]byte, 32)})
|
||||
@@ -221,16 +219,13 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
|
||||
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
|
||||
|
||||
lastJustifiedBlk := util.NewBeaconBlock()
|
||||
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
|
||||
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
|
||||
@@ -255,13 +250,12 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
@@ -289,13 +283,12 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -327,10 +320,13 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
signedBlock := util.NewBeaconBlock()
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
|
||||
@@ -361,10 +357,12 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)}
|
||||
|
||||
@@ -400,10 +398,12 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)}
|
||||
|
||||
@@ -442,10 +442,12 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
// Set finalized epoch to 1.
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1}
|
||||
@@ -586,7 +588,8 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
|
||||
}
|
||||
func TestAncestorByDB_CtxErr(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
cancel()
|
||||
@@ -598,10 +601,14 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
@@ -642,10 +649,9 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
|
||||
func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
@@ -682,10 +688,14 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
@@ -720,10 +730,9 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
|
||||
func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &config{}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
service.genesisRoot = [32]byte{'a'}
|
||||
|
||||
r := service.ensureRootNotZeros(params.BeaconConfig().ZeroHash)
|
||||
@@ -735,6 +744,12 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
|
||||
func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
ctx := context.Background()
|
||||
type args struct {
|
||||
cachedCheckPoint *ethpb.Checkpoint
|
||||
@@ -776,9 +791,8 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service.justifiedCheckpt = test.args.cachedCheckPoint
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
|
||||
genesisState, err := util.NewBeaconState()
|
||||
@@ -815,6 +829,12 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
@@ -869,9 +889,8 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
Root: tt.args.finalizedRoot[:],
|
||||
}
|
||||
@@ -885,12 +904,10 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
cfg := &config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
gBlk := util.NewBeaconBlock()
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
@@ -916,10 +933,9 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
|
||||
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &config{}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
@@ -931,10 +947,9 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
|
||||
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &config{}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsNoDB()
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
s, _ := util.DeterministicGenesisState(t, 1024)
|
||||
service.head = &head{state: s}
|
||||
@@ -946,18 +961,18 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
DepositCache: depositCache,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
@@ -991,18 +1006,12 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
|
||||
func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
DepositCache: depositCache,
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
opts = append(opts, WithDepositCache(depositCache))
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
@@ -131,7 +131,13 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
                continue
            }
            s.processAttestations(s.ctx)
            if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {

            balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
            if err != nil {
                log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err)
                continue
            }
            if err := s.updateHead(s.ctx, balances); err != nil {
                log.Warnf("Resolving fork due to new attestation: %v", err)
            }
        }
@@ -9,9 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -42,12 +40,10 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
|
||||
|
||||
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
@@ -71,12 +67,10 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
|
||||
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg = cfg
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
@@ -101,18 +95,12 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
func TestProcessAttestations_Ok(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
opts = append(opts, WithAttestationPool(attestations.NewPool()))
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
AttPool: attestations.NewPool(),
|
||||
}
|
||||
service, err := NewService(ctx)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
service.cfg = cfg
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
|
||||
@@ -124,21 +124,16 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx)
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = cfg
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -166,21 +161,17 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx)
|
||||
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = cfg
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -250,19 +241,14 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
cfg := &config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
s, err := NewService(ctx)
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = cfg
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
@@ -287,9 +273,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
s, err := NewService(context.Background())
|
||||
opts := testServiceOptsNoDB()
|
||||
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}}
|
||||
r := [32]byte{'a'}
|
||||
if s.HasInitSyncBlock(r) {
|
||||
t.Error("Should not have block")
|
||||
@@ -301,11 +288,10 @@ func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := testServiceOptsWithDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background())
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateGen: stategen.New(beaconDB)}
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
@@ -315,11 +301,10 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background())
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateGen: stategen.New(beaconDB)}
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
s.genesisTime = time.Now()
|
||||
@@ -329,11 +314,10 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background())
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.cfg = &config{StateGen: stategen.New(beaconDB)}
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Epoch: 10000000}
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
|
||||
@@ -62,9 +62,9 @@ type Service struct {
    checkpointStateCache *cache.CheckpointStateCache
    initSyncBlocks       map[[32]byte]block.SignedBeaconBlock
    initSyncBlocksLock   sync.RWMutex
    justifiedBalances     []uint64
    justifiedBalancesLock sync.RWMutex
    wsVerifier            *WeakSubjectivityVerifier
    //justifiedBalances []uint64
    justifiedBalances *stateBalanceCache
    wsVerifier        *WeakSubjectivityVerifier
}

// config options for the service.
@@ -97,7 +97,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
        boundaryRoots:        [][32]byte{},
        checkpointStateCache: cache.NewCheckpointStateCache(),
        initSyncBlocks:       make(map[[32]byte]block.SignedBeaconBlock),
        justifiedBalances:    make([]uint64, 0),
        cfg:                  &config{},
    }
    for _, opt := range opts {
@@ -106,6 +105,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
        }
    }
    var err error
    if srv.justifiedBalances == nil {
        srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
        if err != nil {
            return nil, err
        }
    }
    srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
    if err != nil {
        return nil, err
@@ -151,9 +156,6 @@ func (s *Service) Start() {

    // Resume fork choice.
    s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
    if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
        log.Fatalf("Could not cache justified state balances: %v", err)
    }
    s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
    s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
    s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
@@ -340,9 +342,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
    genesisCheckpoint := genesisState.FinalizedCheckpoint()

    s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
    if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
        return err
    }
    s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
    s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
    s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
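As the NewService hunk shows, the balance cache requirement is satisfied one of two ways: tests may inject a stub via withStateBalanceCache, while normal construction builds the cache from the configured stategen and fails if none was provided. A production-style wiring sketch, with beaconDB, genesisRoot, and ctx assumed to already exist:

```go
// Illustrative wiring only; the option names are the ones used throughout this diff.
sg := stategen.New(beaconDB)
opts := []Option{
    WithDatabase(beaconDB),
    WithStateGen(sg), // without a stategen or an injected cache, NewService returns an error
    WithForkChoiceStore(protoarray.New(0, 0, genesisRoot)),
}
svc, err := NewService(ctx, opts...)
if err != nil {
    log.Fatal(err)
}
_ = svc
```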
@@ -108,25 +108,24 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
    depositCache, err := depositcache.New()
    require.NoError(t, err)

    cfg := &config{
        BeaconBlockBuf:    0,
        BeaconDB:          beaconDB,
        DepositCache:      depositCache,
        ChainStartFetcher: web3Service,
        P2p:               &mockBroadcaster{},
        StateNotifier:     &mockBeaconNode{},
        AttPool:           attestations.NewPool(),
        StateGen:          stategen.New(beaconDB),
        ForkChoiceStore:   protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
        AttService:        attService,
    stateGen := stategen.New(beaconDB)
    // Save a state in stategen for purposes of testing a service stop / shutdown.
    require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))

    opts := []Option{
        WithDatabase(beaconDB),
        WithDepositCache(depositCache),
        WithChainStartFetcher(web3Service),
        WithAttestationPool(attestations.NewPool()),
        WithP2PBroadcaster(&mockBroadcaster{}),
        WithStateNotifier(&mockBeaconNode{}),
        WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
        WithAttestationService(attService),
        WithStateGen(stateGen),
    }

    // Save a state in stategen for purposes of testing a service stop / shutdown.
    require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))

    chainService, err := NewService(ctx)
    chainService, err := NewService(ctx, opts...)
    require.NoError(t, err, "Unable to setup chain service")
    chainService.cfg = cfg
    chainService.genesisTime = time.Unix(1, 0) // non-zero time

    return chainService
beacon-chain/blockchain/state_balance_cache.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package blockchain

import (
    "context"
    "errors"
    "sync"

    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

var errNilStateFromStategen = errors.New("justified state can't be nil")

type stateBalanceCache struct {
    sync.Mutex
    balances []uint64
    root     [32]byte
    stateGen stateByRooter
}

type stateByRooter interface {
    StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}

// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
    if sg == nil {
        return nil, errors.New("Can't initialize state balance cache without stategen")
    }
    return &stateBalanceCache{stateGen: sg}, nil
}

// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
    stateBalanceCacheMiss.Inc()
    justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
    if err != nil {
        return nil, err
    }
    if justifiedState == nil || justifiedState.IsNil() {
        return nil, errNilStateFromStategen
    }
    epoch := time.CurrentEpoch(justifiedState)

    justifiedBalances := make([]uint64, justifiedState.NumValidators())
    var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
        if helpers.IsActiveValidatorUsingTrie(val, epoch) {
            justifiedBalances[idx] = val.EffectiveBalance()
        } else {
            justifiedBalances[idx] = 0
        }
        return nil
    }
    if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
        return nil, err
    }

    c.balances = justifiedBalances
    c.root = justifiedRoot
    return c.balances, nil
}

// get takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
    c.Lock()
    defer c.Unlock()
    if justifiedRoot == c.root {
        stateBalanceCacheHit.Inc()
        return c.balances, nil
    }

    return c.update(ctx, justifiedRoot)
}
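A short usage sketch of the cache above (beaconDB and justifiedCheckpoint are assumed to exist in scope): construction requires a stategen, and get() is keyed by a single root.

```go
ctx := context.Background()
cache, err := newStateBalanceCache(stategen.New(beaconDB))
if err != nil {
    log.Fatal(err)
}
root := bytesutil.ToBytes32(justifiedCheckpoint.Root)
balances, err := cache.get(ctx, root) // miss: loads the state via StateByRoot, increments stateBalanceCacheMiss
if err != nil {
    log.Fatal(err)
}
_, _ = cache.get(ctx, root) // same root: returns the cached slice, increments stateBalanceCacheHit
_ = balances
```

Because only one root is remembered, alternating lookups for two different roots would rebuild the balances on every call; that matches the single caller here, which always asks for the current justified root.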
225
beacon-chain/blockchain/state_balance_cache_test.go
Normal file
225
beacon-chain/blockchain/state_balance_cache_test.go
Normal file
@@ -0,0 +1,225 @@
|
||||
package blockchain

import (
	"context"
	"encoding/binary"
	"errors"
	"testing"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/time/slots"
)

type mockStateByRoot struct {
	state state.BeaconState
	err   error
}

func (m *mockStateByRoot) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
	return m.state, m.err
}

type testStateOpt func(*ethpb.BeaconStateAltair)

func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
	return func(a *ethpb.BeaconStateAltair) {
		a.Validators = v
	}
}

func testStateWithSlot(slot types.Slot) testStateOpt {
	return func(a *ethpb.BeaconStateAltair) {
		a.Slot = slot
	}
}

func testStateFixture(opts ...testStateOpt) state.BeaconState {
	a := &ethpb.BeaconStateAltair{}
	for _, o := range opts {
		o(a)
	}
	s, _ := v2.InitializeFromProtoUnsafe(a)
	return s
}

func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
	vs := make([]*ethpb.Validator, count)
	var i uint32 = 0
	for ; i < uint32(count); i++ {
		pk := make([]byte, 48)
		binary.LittleEndian.PutUint32(pk, i)
		v := &ethpb.Validator{PublicKey: pk}
		for _, o := range opts {
			o(v)
		}
		vs[i] = v
	}
	return vs
}

func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) {
	return func(v *ethpb.Validator) {
		pki := binary.LittleEndian.Uint64(v.PublicKey)
		if pki%2 == 0 {
			v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
		} else {
			v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
		}
	}
}

func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) {
	return func(v *ethpb.Validator) {
		v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
		pki := binary.LittleEndian.Uint64(v.PublicKey)
		if pki%2 == 0 {
			v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
		} else {
			v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
		}
	}
}

func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) {
	return func(v *ethpb.Validator) {
		v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
		v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
	}
}

func balanceIsKeyTimes2(v *ethpb.Validator) {
	pki := binary.LittleEndian.Uint64(v.PublicKey)
	v.EffectiveBalance = uint64(pki) * 2
}

func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
	balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
	return generateTestValidators(10,
		oddValidatorsExpired(types.Slot(99)),
		balanceIsKeyTimes2), balances
}

func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
	balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
	return generateTestValidators(10,
		oddValidatorsQueued(types.Slot(99)),
		balanceIsKeyTimes2), balances
}

func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
	balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
	return generateTestValidators(10,
		allValidatorsValid(types.Slot(99)),
		balanceIsKeyTimes2), balances
}

func TestStateBalanceCache(t *testing.T) {
	type sbcTestCase struct {
		err      error
		root     [32]byte
		sbc      *stateBalanceCache
		balances []uint64
		name     string
	}
	sentinelCacheMiss := errors.New("Cache missed, as expected!")
	sentinelBalances := []uint64{1, 2, 3, 4, 5}
	halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
	halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
	allValidValidators, allValidBalances := testAllValidValidators()
	cases := []sbcTestCase{
		{
			root:     bytesutil.ToBytes32([]byte{'A'}),
			balances: sentinelBalances,
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					err: sentinelCacheMiss,
				},
				root:     bytesutil.ToBytes32([]byte{'A'}),
				balances: sentinelBalances,
			},
			name: "cache hit",
		},
		// this works by using a staterooter that returns a known error
		// so really we're testing the miss by making sure stategen got called
		// this also tells us stategen errors are propagated
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					//state: generateTestValidators(1, testWithBadEpoch),
					err: sentinelCacheMiss,
				},
				root: bytesutil.ToBytes32([]byte{'B'}),
			},
			err:  sentinelCacheMiss,
			root: bytesutil.ToBytes32([]byte{'A'}),
			name: "cache miss",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{},
				root:     bytesutil.ToBytes32([]byte{'B'}),
			},
			err:  errNilStateFromStategen,
			root: bytesutil.ToBytes32([]byte{'A'}),
			name: "error for nil state upon cache miss",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					state: testStateFixture(
						testStateWithSlot(99),
						testStateWithValidators(halfExpiredValidators)),
				},
			},
			balances: halfExpiredBalances,
			root:     bytesutil.ToBytes32([]byte{'A'}),
			name:     "test filtering by exit epoch",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					state: testStateFixture(
						testStateWithSlot(99),
						testStateWithValidators(halfQueuedValidators)),
				},
			},
			balances: halfQueuedBalances,
			root:     bytesutil.ToBytes32([]byte{'A'}),
			name:     "test filtering by activation epoch",
		},
		{
			sbc: &stateBalanceCache{
				stateGen: &mockStateByRooter{
					state: testStateFixture(
						testStateWithSlot(99),
						testStateWithValidators(allValidValidators)),
				},
			},
			balances: allValidBalances,
			root:     bytesutil.ToBytes32([]byte{'A'}),
			name:     "happy path",
		},
	}
	ctx := context.Background()
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			cache := c.sbc
			cacheRootStart := cache.root
			b, err := cache.get(ctx, c.root)
			require.ErrorIs(t, err, c.err)
			require.DeepEqual(t, c.balances, b)
			if c.err != nil {
				// if there was an error somewhere, the root should not have changed (unless it already matched)
				require.Equal(t, cacheRootStart, cache.root)
			} else {
				// when successful, the cache should always end with a root matching the request
				require.Equal(t, c.root, cache.root)
			}
		})
	}
}
@@ -5,7 +5,6 @@
package depositcache

import (
	"bytes"
	"context"
	"encoding/hex"
	"math/big"
@@ -54,6 +53,7 @@ type DepositCache struct {
	pendingDeposits   []*dbpb.DepositContainer
	deposits          []*dbpb.DepositContainer
	finalizedDeposits *FinalizedDeposits
	depositsByKey     map[[48]byte][]*dbpb.DepositContainer
	depositsLock      sync.RWMutex
}

@@ -69,6 +69,7 @@ func New() (*DepositCache, error) {
	return &DepositCache{
		pendingDeposits:   []*dbpb.DepositContainer{},
		deposits:          []*dbpb.DepositContainer{},
		depositsByKey:     map[[48]byte][]*ethpb.DepositContainer{},
		finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
	}, nil
}
@@ -95,10 +96,15 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
	}
	// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
	heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
	depCtr := &dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
	newDeposits := append(
		[]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}},
		[]*dbpb.DepositContainer{depCtr},
		dc.deposits[heightIdx:]...)
	dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
	// Append the deposit to our map, in the event no deposits
	// exist for the pubkey , it is simply added to the map.
	pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
	dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
	historicalDepositsCount.Inc()
	return nil
}
@@ -112,6 +118,13 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbp

	sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
	dc.deposits = ctrs
	for _, c := range ctrs {
		// Use a new value, as the reference
		// of c changes in the next iteration.
		newPtr := c
		pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
		dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
	}
	historicalDepositsCount.Add(float64(len(ctrs)))
}

@@ -202,13 +215,15 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et

	var deposit *ethpb.Deposit
	var blockNum *big.Int
	for _, ctnr := range dc.deposits {
		if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
			deposit = ctnr.Deposit
			blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
			break
		}
	deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
	if !ok || len(deps) == 0 {
		return deposit, blockNum
	}
	// We always return the first deposit if a particular
	// validator key has multiple deposits assigned to
	// it.
	deposit = deps[0].Deposit
	blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
	return deposit, blockNum
}

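A quick illustration of the new pubkey index (not part of this change set; the wrapper function name is an assumption, call shapes mirror the tests below):

// Illustrative sketch only: inserting a deposit now also appends its container to
// dc.depositsByKey under the 48-byte public key, so DepositByPubkey answers from a
// map lookup instead of scanning dc.deposits.
func exampleDepositByPubkey(ctx context.Context, dc *DepositCache) {
	pk := bytesutil.PadTo([]byte("pk0"), 48)
	d := &ethpb.Deposit{Data: &ethpb.Deposit_Data{PublicKey: pk, Amount: 1000}}
	if err := dc.InsertDeposit(ctx, d, 100 /* eth1 block height */, 0 /* index */, [32]byte{} /* deposit root */); err != nil {
		return
	}
	dep, blockNum := dc.DepositByPubkey(ctx, pk) // first matching deposit for pk
	_, _ = dep, blockNum
}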
@@ -44,31 +44,31 @@ func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
	}{
		{
			blkNum:      0,
			deposit:     &ethpb.Deposit{},
			deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'A'}}},
			index:       0,
			expectedErr: "",
		},
		{
			blkNum:      0,
			deposit:     &ethpb.Deposit{},
			deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'B'}}},
			index:       3,
			expectedErr: "wanted deposit with index 1 to be inserted but received 3",
		},
		{
			blkNum:      0,
			deposit:     &ethpb.Deposit{},
			deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'C'}}},
			index:       1,
			expectedErr: "",
		},
		{
			blkNum:      0,
			deposit:     &ethpb.Deposit{},
			deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'D'}}},
			index:       4,
			expectedErr: "wanted deposit with index 2 to be inserted but received 4",
		},
		{
			blkNum:      0,
			deposit:     &ethpb.Deposit{},
			deposit:     &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'E'}}},
			index:       2,
			expectedErr: "",
		},
@@ -316,8 +316,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
	dc, err := New()
	require.NoError(t, err)

	dc.deposits = []*dbpb.DepositContainer{
	ctrs := []*dbpb.DepositContainer{
		{
			Eth1BlockHeight: 9,
			Deposit: &ethpb.Deposit{
@@ -359,6 +358,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
			},
		},
	}
	dc.InsertDepositContainers(context.Background(), ctrs)

	pk1 := bytesutil.PadTo([]byte("pk1"), 48)
	dep, blkNum := dc.DepositByPubkey(context.Background(), pk1)
@@ -626,24 +626,28 @@ func TestPruneProofs_Ok(t *testing.T) {
		index   int64
	}{
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   0,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
			index: 0,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   1,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
			index: 1,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   2,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
			index: 2,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   3,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
			index: 3,
		},
	}

@@ -669,24 +673,26 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
		index   int64
	}{
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: nil},
			index:   0,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
				PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
			index: 0,
		},
		{
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
				PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: nil},
			index:   1,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			deposit: &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
			index:   2,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   3,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
			index: 3,
		},
	}

@@ -709,24 +715,28 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
		index   int64
	}{
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   0,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
			index: 0,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   1,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
			index: 1,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   2,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
			index: 2,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   3,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
			index: 3,
		},
	}

@@ -752,24 +762,28 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
		index   int64
	}{
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   0,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
			index: 0,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   1,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
			index: 1,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   2,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
			index: 2,
		},
		{
			blkNum:  0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof()},
			index:   3,
			blkNum: 0,
			deposit: &ethpb.Deposit{Proof: makeDepositProof(),
				Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
			index: 3,
		},
	}

@@ -785,6 +799,36 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
	assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

func TestDepositMap_WorksCorrectly(t *testing.T) {
	dc, err := New()
	require.NoError(t, err)

	pk0 := bytesutil.PadTo([]byte("pk0"), 48)
	dep, _ := dc.DepositByPubkey(context.Background(), pk0)
	var nilDep *ethpb.Deposit
	assert.DeepEqual(t, nilDep, dep)

	dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 1000}}
	assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{}))

	dep, _ = dc.DepositByPubkey(context.Background(), pk0)
	assert.NotEqual(t, nilDep, dep)
	assert.Equal(t, uint64(1000), dep.Data.Amount)

	dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 10000}}
	assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{}))

	// Make sure we have the same deposit returned over here.
	dep, _ = dc.DepositByPubkey(context.Background(), pk0)
	assert.NotEqual(t, nilDep, dep)
	assert.Equal(t, uint64(1000), dep.Data.Amount)

	// Make sure another key doesn't work.
	pk1 := bytesutil.PadTo([]byte("pk1"), 48)
	dep, _ = dc.DepositByPubkey(context.Background(), pk1)
	assert.DeepEqual(t, nilDep, dep)
}

func makeDepositProof() [][]byte {
	proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
	for i := range proof {
12	beacon-chain/cache/skip_slot_cache.go	vendored
@@ -120,26 +120,22 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {

// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
	if c.disabled {
		return nil
		return
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	delete(c.inProgress, r)
	return nil
}

// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) {
	if c.disabled {
		return nil
		return
	}

	// Copy state so cached value is not mutated.
	c.cache.Add(r, state.Copy())

	return nil
}

4	beacon-chain/cache/skip_slot_cache_test.go	vendored
@@ -28,8 +28,8 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
	})
	require.NoError(t, err)

	require.NoError(t, c.Put(ctx, r, s))
	require.NoError(t, c.MarkNotInProgress(r))
	c.Put(ctx, r, s)
	c.MarkNotInProgress(r)

	res, err := c.Get(ctx, r)
	require.NoError(t, err)
50	beacon-chain/cache/sync_committee_test.go	vendored
@@ -5,8 +5,6 @@ import (

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
@@ -28,10 +26,10 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
	}{
		{
			name: "only current epoch",
			currentSyncCommittee: convertToCommittee([][]byte{
			currentSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
			}),
			nextSyncCommittee: convertToCommittee([][]byte{}),
			nextSyncCommittee: util.ConvertToCommittee([][]byte{}),
			currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
				1: {0},
				2: {1, 3, 4},
@@ -45,8 +43,8 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
		},
		{
			name: "only next epoch",
			currentSyncCommittee: convertToCommittee([][]byte{}),
			nextSyncCommittee: convertToCommittee([][]byte{
			currentSyncCommittee: util.ConvertToCommittee([][]byte{}),
			nextSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
			}),
			currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
@@ -62,14 +60,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
		},
		{
			name: "some current epoch and some next epoch",
			currentSyncCommittee: convertToCommittee([][]byte{
			currentSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[1],
				pubKeys[2],
				pubKeys[3],
				pubKeys[2],
				pubKeys[2],
			}),
			nextSyncCommittee: convertToCommittee([][]byte{
			nextSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[7],
				pubKeys[6],
				pubKeys[5],
@@ -90,14 +88,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
		},
		{
			name: "some current epoch and some next epoch duplicated across",
			currentSyncCommittee: convertToCommittee([][]byte{
			currentSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[1],
				pubKeys[2],
				pubKeys[3],
				pubKeys[2],
				pubKeys[2],
			}),
			nextSyncCommittee: convertToCommittee([][]byte{
			nextSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[2],
				pubKeys[1],
				pubKeys[3],
@@ -117,13 +115,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
		},
		{
			name: "all duplicated",
			currentSyncCommittee: convertToCommittee([][]byte{
			currentSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[100],
				pubKeys[100],
				pubKeys[100],
				pubKeys[100],
			}),
			nextSyncCommittee: convertToCommittee([][]byte{
			nextSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[100],
				pubKeys[100],
				pubKeys[100],
@@ -138,13 +136,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
		},
		{
			name: "unknown keys",
			currentSyncCommittee: convertToCommittee([][]byte{
			currentSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[100],
				pubKeys[100],
				pubKeys[100],
				pubKeys[100],
			}),
			nextSyncCommittee: convertToCommittee([][]byte{
			nextSyncCommittee: util.ConvertToCommittee([][]byte{
				pubKeys[100],
				pubKeys[100],
				pubKeys[100],
@@ -189,13 +187,13 @@ func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
	c := cache.NewSyncCommittee()
	s, _ := util.DeterministicGenesisStateAltair(t, 64)
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{1}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{2}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{3}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
	require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
	require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{4}})))
	require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))

	_, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)
@@ -204,19 +202,3 @@ func TestSyncCommitteeCache_CanRotate(t *testing.T) {
	_, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
	require.NoError(t, err)
}

func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
	var pubKeys [][]byte
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
		if i < uint64(len(inputKeys)) {
			pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
		} else {
			pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
		}
	}

	return &ethpb.SyncCommittee{
		Pubkeys:         pubKeys,
		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
	}
}
@@ -358,7 +358,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
	}

	want := "nil or missing indexed attestation data"
	_, err := blocks.AttestationSignatureSet(context.Background(), beaconState, atts)
	_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
	assert.ErrorContains(t, want, err)

	atts = []*ethpb.Attestation{}
@@ -378,7 +378,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
	}

	want = "expected non-empty attesting indices"
	_, err = blocks.AttestationSignatureSet(context.Background(), beaconState, atts)
	_, err = blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
	assert.ErrorContains(t, want, err)
}

@@ -502,7 +502,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
	}
	att2.Signature = bls.AggregateSignatures(sigs).Marshal()

	set, err := blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2})
	set, err := blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
	require.NoError(t, err)
	verified, err := set.Verify()
	require.NoError(t, err)
@@ -566,6 +566,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
	}
	att2.Signature = bls.AggregateSignatures(sigs).Marshal()

	_, err = blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2})
	_, err = blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
	require.NoError(t, err)
}

@@ -62,7 +62,7 @@ func ProcessAttesterSlashing(
	if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
		return nil, errors.Wrap(err, "could not verify attester slashing")
	}
	slashableIndices := slashableAttesterIndices(slashing)
	slashableIndices := SlashableAttesterIndices(slashing)
	sort.SliceStable(slashableIndices, func(i, j int) bool {
		return slashableIndices[i] < slashableIndices[j]
	})
@@ -152,7 +152,8 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
	return isDoubleVote || isSurroundVote
}

func slashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
func SlashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
	if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
		return nil
	}

@@ -245,7 +245,7 @@ func TestFuzzslashableAttesterIndices_10000(t *testing.T) {

	for i := 0; i < 10000; i++ {
		fuzzer.Fuzz(attesterSlashing)
		slashableAttesterIndices(attesterSlashing)
		SlashableAttesterIndices(attesterSlashing)
	}
}

@@ -321,7 +321,7 @@ func TestBlockSignatureSet_OK(t *testing.T) {
	validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
	err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
	require.NoError(t, err)
	set, err := blocks.BlockSignatureSet(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
	set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
	require.NoError(t, err)

	verified, err := set.Verify()

@@ -82,7 +82,7 @@ func TestRandaoSignatureSet_OK(t *testing.T) {
		},
	}

	set, err := blocks.RandaoSignatureSet(context.Background(), beaconState, block.Body.RandaoReveal)
	set, err := blocks.RandaoSignatureBatch(context.Background(), beaconState, block.Body.RandaoReveal)
	require.NoError(t, err)
	verified, err := set.Verify()
	require.NoError(t, err)
@@ -18,8 +18,8 @@ import (
	"github.com/prysmaticlabs/prysm/time/slots"
)

// retrieves the signature set from the raw data, public key,signature and domain provided.
func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) {
// retrieves the signature batch from the raw data, public key,signature and domain provided.
func signatureBatch(signedData, pub, signature, domain []byte) (*bls.SignatureBatch, error) {
	publicKey, err := bls.PublicKeyFromBytes(pub)
	if err != nil {
		return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -32,7 +32,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,
	if err != nil {
		return nil, errors.Wrap(err, "could not hash container")
	}
	return &bls.SignatureSet{
	return &bls.SignatureBatch{
		Signatures: [][]byte{signature},
		PublicKeys: []bls.PublicKey{publicKey},
		Messages:   [][32]byte{root},
@@ -41,7 +41,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,

// verifies the signature from the raw data, public key and domain provided.
func verifySignature(signedData, pub, signature, domain []byte) error {
	set, err := signatureSet(signedData, pub, signature, domain)
	set, err := signatureBatch(signedData, pub, signature, domain)
	if err != nil {
		return err
	}
@@ -116,11 +116,11 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
	return signing.VerifyBlockSigningRoot(proposerPubKey, blk.Signature(), domain, blk.Block().HashTreeRoot)
}

// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state.
func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
	proposerIndex types.ValidatorIndex,
	sig []byte,
	rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) {
	rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
	currentEpoch := slots.ToEpoch(beaconState.Slot())
	domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
	if err != nil {
@@ -131,21 +131,21 @@ func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
		return nil, err
	}
	proposerPubKey := proposer.PublicKey
	return signing.BlockSignatureSet(proposerPubKey, sig, domain, rootFunc)
	return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}

// RandaoSignatureSet retrieves the relevant randao specific signature set object
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureSet(
func RandaoSignatureBatch(
	ctx context.Context,
	beaconState state.ReadOnlyBeaconState,
	reveal []byte,
) (*bls.SignatureSet, error) {
) (*bls.SignatureBatch, error) {
	buf, proposerPub, domain, err := randaoSigningData(ctx, beaconState)
	if err != nil {
		return nil, err
	}
	set, err := signatureSet(buf, proposerPub, reveal, domain)
	set, err := signatureBatch(buf, proposerPub, reveal, domain)
	if err != nil {
		return nil, err
	}
@@ -171,13 +171,13 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
	return buf, proposerPub[:], domain, nil
}

// Method to break down attestations of the same domain and collect them into a single signature set.
func createAttestationSignatureSet(
// Method to break down attestations of the same domain and collect them into a single signature batch.
func createAttestationSignatureBatch(
	ctx context.Context,
	beaconState state.ReadOnlyBeaconState,
	atts []*ethpb.Attestation,
	domain []byte,
) (*bls.SignatureSet, error) {
) (*bls.SignatureBatch, error) {
	if len(atts) == 0 {
		return nil, nil
	}
@@ -216,16 +216,16 @@ func createAttestationSignatureSet(
		}
		msgs[i] = root
	}
	return &bls.SignatureSet{
	return &bls.SignatureBatch{
		Signatures: sigs,
		PublicKeys: pks,
		Messages:   msgs,
	}, nil
}

// AttestationSignatureSet retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collate it into a signature set object.
func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureSet, error) {
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collate it into a signature batch object.
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureBatch, error) {
	if len(atts) == 0 {
		return bls.NewSet(), nil
	}
@@ -247,12 +247,12 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac
	set := bls.NewSet()

	// Check attestations from before the fork.
	if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and there is valid attestations to create sig set.
	if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and there is valid attestations to create sig batch.
		prevDomain, err := signing.Domain(fork, fork.Epoch-1, dt, gvr)
		if err != nil {
			return nil, err
		}
		aSet, err := createAttestationSignatureSet(ctx, beaconState, preForkAtts, prevDomain)
		aSet, err := createAttestationSignatureBatch(ctx, beaconState, preForkAtts, prevDomain)
		if err != nil {
			return nil, err
		}
@@ -272,7 +272,7 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac
			return nil, err
		}

		aSet, err := createAttestationSignatureSet(ctx, beaconState, postForkAtts, currDomain)
		aSet, err := createAttestationSignatureBatch(ctx, beaconState, postForkAtts, currDomain)
		if err != nil {
			return nil, err
		}
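A SignatureBatch carries parallel slices of signatures, public keys, and message roots and is checked in one call; the diff's tests exercise this through set.Verify(). A minimal, hedged sketch of the verification step (not part of this change set, written against only the fields and the Verify method shown above):

// Illustrative sketch only: verify a collected batch and surface a failure as an error.
func verifyBatch(batch *bls.SignatureBatch) error {
	ok, err := batch.Verify() // single aggregated check over Signatures/PublicKeys/Messages
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("signature batch did not verify")
	}
	return nil
}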
@@ -2,9 +2,9 @@ package signing

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/eth2-types"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/crypto/bls"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// Domain returns the domain version for BLS private key to sign and verify.

@@ -3,9 +3,9 @@ package signing
import (
	"testing"

	"github.com/prysmaticlabs/eth2-types"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
)

@@ -118,11 +118,11 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur

// VerifyBlockSigningRoot verifies the signing root of a block given its public key, signature and domain.
func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) error {
	set, err := BlockSignatureSet(pub, signature, domain, rootFunc)
	set, err := BlockSignatureBatch(pub, signature, domain, rootFunc)
	if err != nil {
		return err
	}
	// We assume only one signature set is returned here.
	// We assume only one signature batch is returned here.
	sig := set.Signatures[0]
	publicKey := set.PublicKeys[0]
	root := set.Messages[0]
@@ -137,9 +137,9 @@ func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32]
	return nil
}

// BlockSignatureSet retrieves the relevant signature, message and pubkey data from a block and collating it
// into a signature set object.
func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) {
// BlockSignatureBatch retrieves the relevant signature, message and pubkey data from a block and collating it
// into a signature batch object.
func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
	publicKey, err := bls.PublicKeyFromBytes(pub)
	if err != nil {
		return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -149,7 +149,7 @@ func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte,
	if err != nil {
		return nil, errors.Wrap(err, "could not compute signing root")
	}
	return &bls.SignatureSet{
	return &bls.SignatureBatch{
		Signatures: [][]byte{signature},
		PublicKeys: []bls.PublicKey{publicKey},
		Messages:   [][32]byte{root},
@@ -214,10 +214,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
		return nil, err
	}
	defer func() {
		if err := SkipSlotCache.MarkNotInProgress(key); err != nil {
			tracing.AnnotateError(span, err)
			log.WithError(err).Error("Failed to mark skip slot no longer in progress")
		}
		SkipSlotCache.MarkNotInProgress(key)
	}()

	for state.Slot() < slot {
@@ -225,7 +222,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
			tracing.AnnotateError(span, ctx.Err())
			// Cache last best value.
			if highestSlot < state.Slot() {
				if err := SkipSlotCache.Put(ctx, key, state); err != nil {
				SkipSlotCache.Put(ctx, key, state)
					log.WithError(err).Error("Failed to put skip slot cache value")
				}
			}
@@ -269,10 +266,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
	}

	if highestSlot < state.Slot() {
		if err := SkipSlotCache.Put(ctx, key, state); err != nil {
			log.WithError(err).Error("Failed to put skip slot cache value")
			tracing.AnnotateError(span, err)
		}
		SkipSlotCache.Put(ctx, key, state)
	}

	return state, nil
@@ -45,7 +45,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
	ctx context.Context,
	state state.BeaconState,
	signed block.SignedBeaconBlock,
) (*bls.SignatureSet, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
	if ctx.Err() != nil {
		return nil, nil, ctx.Err()
	}
@@ -182,7 +182,7 @@ func ProcessBlockNoVerifyAnySig(
	ctx context.Context,
	state state.BeaconState,
	signed block.SignedBeaconBlock,
) (*bls.SignatureSet, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
	defer span.End()
	if err := helpers.BeaconBlockIsNil(signed); err != nil {
@@ -209,17 +209,17 @@ func ProcessBlockNoVerifyAnySig(
		}
	}

	bSet, err := b.BlockSignatureSet(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
	bSet, err := b.BlockSignatureBatch(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
	if err != nil {
		tracing.AnnotateError(span, err)
		return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
	}
	rSet, err := b.RandaoSignatureSet(ctx, state, signed.Block().Body().RandaoReveal())
	rSet, err := b.RandaoSignatureBatch(ctx, state, signed.Block().Body().RandaoReveal())
	if err != nil {
		tracing.AnnotateError(span, err)
		return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
	}
	aSet, err := b.AttestationSignatureSet(ctx, state, signed.Block().Body().Attestations())
	aSet, err := b.AttestationSignatureBatch(ctx, state, signed.Block().Body().Attestations())
	if err != nil {
		return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
	}
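For context, the "NoVerifyAnySig" path hands back a combined batch so the caller can apply the transition first and do all BLS work at the end. A hedged caller sketch (not part of this change set; the wrapper name is an assumption, and only the return shape and Verify method shown in this diff are relied on):

// Illustrative sketch only: run the transition, then verify the returned batch once.
func transitionThenVerify(ctx context.Context, st state.BeaconState, signed block.SignedBeaconBlock) (state.BeaconState, error) {
	batch, postState, err := ExecuteStateTransitionNoVerifyAnySig(ctx, st, signed)
	if err != nil {
		return nil, err
	}
	ok, err := batch.Verify()
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, errors.New("block signatures did not verify")
	}
	return postState, nil
}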
@@ -39,14 +39,6 @@ type ReadOnlyDatabase interface {
	StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
	HasStateSummary(ctx context.Context, blockRoot [32]byte) bool
	HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]state.ReadOnlyBeaconState, error)
	// Slashing operations.
	ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.ProposerSlashing, error)
	AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.AttesterSlashing, error)
	HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool
	HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool
	// Block operations.
	VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*eth.VoluntaryExit, error)
	HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool
	// Checkpoint operations.
	JustifiedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
	FinalizedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
@@ -75,11 +67,6 @@ type NoHeadAccessDatabase interface {
	DeleteStates(ctx context.Context, blockRoots [][32]byte) error
	SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
	SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
	// Slashing operations.
	SaveProposerSlashing(ctx context.Context, slashing *eth.ProposerSlashing) error
	SaveAttesterSlashing(ctx context.Context, slashing *eth.AttesterSlashing) error
	// Block operations.
	SaveVoluntaryExit(ctx context.Context, exit *eth.VoluntaryExit) error
	// Checkpoint operations.
	SaveJustifiedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
	SaveFinalizedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error

@@ -18,10 +18,8 @@ go_library(
        "migration_archived_index.go",
        "migration_block_slot_index.go",
        "migration_state_validators.go",
        "operations.go",
        "powchain.go",
        "schema.go",
        "slashings.go",
        "state.go",
        "state_summary.go",
        "state_summary_cache.go",
@@ -88,9 +86,7 @@ go_test(
        "migration_archived_index_test.go",
        "migration_block_slot_index_test.go",
        "migration_state_validators_test.go",
        "operations_test.go",
        "powchain_test.go",
        "slashings_test.go",
        "state_summary_test.go",
        "state_test.go",
        "utils_test.go",

@@ -150,11 +150,7 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []bloc
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blocksBucket)

		keys, err := blockRootsBySlot(ctx, tx, slot)
		if err != nil {
			return err
		}

		keys := blockRootsBySlot(ctx, tx, slot)
		for i := 0; i < len(keys); i++ {
			encoded := bkt.Get(keys[i])
			blk, err := unmarshalBlock(ctx, encoded)
@@ -174,11 +170,7 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
	defer span.End()
	blockRoots := make([][32]byte, 0)
	err := s.db.View(func(tx *bolt.Tx) error {
		keys, err := blockRootsBySlot(ctx, tx, slot)
		if err != nil {
			return err
		}

		keys := blockRootsBySlot(ctx, tx, slot)
		for i := 0; i < len(keys); i++ {
			blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
		}
@@ -519,7 +511,7 @@ func blockRootsBySlotRange(
}

// blockRootsBySlot retrieves the block roots by slot
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]byte, error) {
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) [][]byte {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
	defer span.End()

@@ -533,7 +525,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]by
			roots = append(roots, v[i:i+32])
		}
	}
	return roots, nil
	return roots
}

// createBlockIndicesFromBlock takes in a beacon block and returns

@@ -1,78 +0,0 @@
package kv

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// VoluntaryExit retrieval by signing root.
func (s *Store) VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*ethpb.VoluntaryExit, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.VoluntaryExit")
	defer span.End()
	enc, err := s.voluntaryExitBytes(ctx, exitRoot)
	if err != nil {
		return nil, err
	}
	if len(enc) == 0 {
		return nil, nil
	}
	exit := &ethpb.VoluntaryExit{}
	if err := decode(ctx, enc, exit); err != nil {
		return nil, err
	}
	return exit, nil
}

// HasVoluntaryExit verifies if a voluntary exit is stored in the db by its signing root.
func (s *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasVoluntaryExit")
	defer span.End()
	enc, err := s.voluntaryExitBytes(ctx, exitRoot)
	if err != nil {
		panic(err)
	}
	return len(enc) > 0
}

// SaveVoluntaryExit to the db by its signing root.
func (s *Store) SaveVoluntaryExit(ctx context.Context, exit *ethpb.VoluntaryExit) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveVoluntaryExit")
	defer span.End()
	exitRoot, err := exit.HashTreeRoot()
	if err != nil {
		return err
	}
	enc, err := encode(ctx, exit)
	if err != nil {
		return err
	}
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(voluntaryExitsBucket)
		return bucket.Put(exitRoot[:], enc)
	})
}

func (s *Store) voluntaryExitBytes(ctx context.Context, exitRoot [32]byte) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.voluntaryExitBytes")
	defer span.End()
	var dst []byte
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(voluntaryExitsBucket)
		dst = bkt.Get(exitRoot[:])
		return nil
	})
	return dst, err
}

// deleteVoluntaryExit clears a voluntary exit from the db by its signing root.
func (s *Store) deleteVoluntaryExit(ctx context.Context, exitRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteVoluntaryExit")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(voluntaryExitsBucket)
		return bucket.Delete(exitRoot[:])
	})
}
@@ -1,31 +0,0 @@
package kv

import (
	"context"
	"testing"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
	"google.golang.org/protobuf/proto"
)

func TestStore_VoluntaryExits_CRUD(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	exit := &ethpb.VoluntaryExit{
		Epoch: 5,
	}
	exitRoot, err := exit.HashTreeRoot()
	require.NoError(t, err)
	retrieved, err := db.VoluntaryExit(ctx, exitRoot)
	require.NoError(t, err)
	assert.Equal(t, (*ethpb.VoluntaryExit)(nil), retrieved, "Expected nil voluntary exit")
	require.NoError(t, db.SaveVoluntaryExit(ctx, exit))
	assert.Equal(t, true, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to exist in the db")
	retrieved, err = db.VoluntaryExit(ctx, exitRoot)
	require.NoError(t, err)
	assert.Equal(t, true, proto.Equal(exit, retrieved), "Wanted %v, received %v", exit, retrieved)
	require.NoError(t, db.deleteVoluntaryExit(ctx, exitRoot))
	assert.Equal(t, false, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to have been deleted from the db")
}
@@ -1,147 +0,0 @@
package kv

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// ProposerSlashing retrieval by slashing root.
func (s *Store) ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.ProposerSlashing, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.ProposerSlashing")
	defer span.End()
	enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
	if err != nil {
		return nil, err
	}
	if len(enc) == 0 {
		return nil, nil
	}
	proposerSlashing := &ethpb.ProposerSlashing{}
	if err := decode(ctx, enc, proposerSlashing); err != nil {
		return nil, err
	}
	return proposerSlashing, nil
}

// HasProposerSlashing verifies if a slashing is stored in the db.
func (s *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasProposerSlashing")
	defer span.End()
	enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
	if err != nil {
		panic(err)
	}
	return len(enc) > 0
}

// SaveProposerSlashing to the db by its hash tree root.
func (s *Store) SaveProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveProposerSlashing")
	defer span.End()
	slashingRoot, err := slashing.HashTreeRoot()
	if err != nil {
		return err
	}
	enc, err := encode(ctx, slashing)
	if err != nil {
		return err
	}
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(proposerSlashingsBucket)
		return bucket.Put(slashingRoot[:], enc)
	})
}

func (s *Store) proposerSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.proposerSlashingBytes")
	defer span.End()
	var dst []byte
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(proposerSlashingsBucket)
		dst = bkt.Get(slashingRoot[:])
		return nil
	})
	return dst, err
}

// deleteProposerSlashing clears a proposer slashing from the db by its hash tree root.
func (s *Store) deleteProposerSlashing(ctx context.Context, slashingRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteProposerSlashing")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(proposerSlashingsBucket)
		return bucket.Delete(slashingRoot[:])
	})
}

// AttesterSlashing retrieval by hash tree root.
func (s *Store) AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.AttesterSlashing, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.AttesterSlashing")
	defer span.End()
	enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
	if err != nil {
		return nil, err
	}
	if len(enc) == 0 {
		return nil, nil
	}
	attSlashing := &ethpb.AttesterSlashing{}
	if err := decode(ctx, enc, attSlashing); err != nil {
		return nil, err
	}
	return attSlashing, nil
}

// HasAttesterSlashing verifies if a slashing is stored in the db.
func (s *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasAttesterSlashing")
	defer span.End()
	enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
	if err != nil {
		panic(err)
	}
	return len(enc) > 0
}

// SaveAttesterSlashing to the db by its hash tree root.
func (s *Store) SaveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttesterSlashing")
	defer span.End()
	slashingRoot, err := slashing.HashTreeRoot()
	if err != nil {
		return err
	}
	enc, err := encode(ctx, slashing)
	if err != nil {
		return err
	}
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(attesterSlashingsBucket)
		return bucket.Put(slashingRoot[:], enc)
	})
}

func (s *Store) attesterSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.attesterSlashingBytes")
	defer span.End()
	var dst []byte
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(attesterSlashingsBucket)
		dst = bkt.Get(slashingRoot[:])
		return nil
	})
	return dst, err
}

// deleteAttesterSlashing clears an attester slashing from the db by its hash tree root.
func (s *Store) deleteAttesterSlashing(ctx context.Context, slashingRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteAttesterSlashing")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(attesterSlashingsBucket)
		return bucket.Delete(slashingRoot[:])
	})
}
@@ -1,67 +0,0 @@
package kv

import (
	"context"
	"testing"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
	"google.golang.org/protobuf/proto"
)

func TestStore_ProposerSlashing_CRUD(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	prop := &ethpb.ProposerSlashing{
		Header_1: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				ProposerIndex: 5,
			},
		}),
		Header_2: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				ProposerIndex: 5,
			},
		}),
	}
	slashingRoot, err := prop.HashTreeRoot()
	require.NoError(t, err)
	retrieved, err := db.ProposerSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, (*ethpb.ProposerSlashing)(nil), retrieved, "Expected nil proposer slashing")
	require.NoError(t, db.SaveProposerSlashing(ctx, prop))
	assert.Equal(t, true, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to exist in the db")
	retrieved, err = db.ProposerSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, true, proto.Equal(prop, retrieved), "Wanted %v, received %v", prop, retrieved)
	require.NoError(t, db.deleteProposerSlashing(ctx, slashingRoot))
	assert.Equal(t, false, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to have been deleted from the db")
}

func TestStore_AttesterSlashing_CRUD(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	att := &ethpb.AttesterSlashing{
		Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
			Data: &ethpb.AttestationData{
				Slot: 5,
			}}),
		Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
			Data: &ethpb.AttestationData{
				Slot: 7,
			}})}
	slashingRoot, err := att.HashTreeRoot()
	require.NoError(t, err)
	retrieved, err := db.AttesterSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, (*ethpb.AttesterSlashing)(nil), retrieved, "Expected nil attester slashing")
	require.NoError(t, db.SaveAttesterSlashing(ctx, att))
	assert.Equal(t, true, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to exist in the db")
	retrieved, err = db.AttesterSlashing(ctx, slashingRoot)
	require.NoError(t, err)
	assert.Equal(t, true, proto.Equal(att, retrieved), "Wanted %v, received %v", att, retrieved)
	require.NoError(t, db.deleteAttesterSlashing(ctx, slashingRoot))
	assert.Equal(t, false, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to have been deleted from the db")
}
beacon-chain/monitor/BUILD.bazel (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"metrics.go",
|
||||
"process_attestation.go",
|
||||
"process_block.go",
|
||||
"process_exit.go",
|
||||
"process_sync_committee.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"process_exit_test.go",
|
||||
"process_sync_committee_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/monitor/doc.go (new file, 7 lines)
@@ -0,0 +1,7 @@
/*
Package monitor defines a runtime service which receives notifications
triggered by events related to the performance of tracked validating keys.
It then logs and emits metrics so that a user can keep finely detailed
performance measures.
*/
package monitor
beacon-chain/monitor/metrics.go (new file, 93 lines)
@@ -0,0 +1,93 @@
package monitor

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/sirupsen/logrus"
)

var (
    log = logrus.WithField("prefix", "monitor")
    // TODO: The Prometheus gauge vectors and counters in this package deprecate the
    // corresponding gauge vectors and counters in the validator client.

    // inclusionSlotGauge used to track attestation inclusion distance
    inclusionSlotGauge = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Namespace: "monitor",
            Name:      "inclusion_slot",
            Help:      "Attestations inclusion slot",
        },
        []string{
            "validator_index",
        },
    )
    // timelyHeadCounter used to track attestation timely head flags
    timelyHeadCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "monitor",
            Name:      "timely_head",
            Help:      "Attestation timely Head flag",
        },
        []string{
            "validator_index",
        },
    )
    // timelyTargetCounter used to track attestation timely target flags
    timelyTargetCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "monitor",
            Name:      "timely_target",
            Help:      "Attestation timely Target flag",
        },
        []string{
            "validator_index",
        },
    )
    // timelySourceCounter used to track attestation timely source flags
    timelySourceCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "monitor",
            Name:      "timely_source",
            Help:      "Attestation timely Source flag",
        },
        []string{
            "validator_index",
        },
    )

    // proposedSlotsCounter used to track proposed blocks
    proposedSlotsCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "monitor",
            Name:      "proposed_slots_total",
            Help:      "Number of proposed blocks included",
        },
        []string{
            "validator_index",
        },
    )
    // aggregationCounter used to track aggregations
    aggregationCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "monitor",
            Name:      "aggregations",
            Help:      "Number of aggregation duties performed",
        },
        []string{
            "validator_index",
        },
    )
    // syncCommitteeContributionCounter used to track sync committee
    // contributions
    syncCommitteeContributionCounter = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "monitor",
            Name:      "sync_committee_contributions_total",
            Help:      "Number of Sync committee contributions performed",
        },
        []string{
            "validator_index",
        },
    )
)
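Because every collector above shares the "monitor" namespace and a single validator_index label, the exported series on the beacon node's metrics endpoint surface under names such as the following (an illustrative sample only; values and label sets depend on runtime activity for the tracked indices):

    monitor_inclusion_slot{validator_index="12"} 34
    monitor_timely_head{validator_index="12"} 3
    monitor_proposed_slots_total{validator_index="12"} 1
    monitor_sync_committee_contributions_total{validator_index="12"} 2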
beacon-chain/monitor/process_attestation.go (new file, 233 lines)
@@ -0,0 +1,233 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the
|
||||
// given slot is different than the last attested slot from this validator.
|
||||
// It assumes that a read lock is held on the monitor service.
|
||||
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool {
|
||||
if !s.trackedIndex(idx) {
|
||||
return false
|
||||
}
|
||||
|
||||
if lp, ok := s.latestPerformance[idx]; ok {
|
||||
return lp.attestedSlot != slot
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// attestingIndices returns the indices of validators that appear in the
// given aggregated attestation.
|
||||
func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
}
|
||||
|
||||
// logMessageTimelyFlagsForIndex returns the log message with the basic
|
||||
// performance indicators for the attestation (head, source, target)
|
||||
func logMessageTimelyFlagsForIndex(idx types.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"ValidatorIndex": idx,
|
||||
"Slot": data.Slot,
|
||||
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
|
||||
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
|
||||
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
|
||||
}
|
||||
}
|
||||
|
||||
// processAttestations logs the event that one of our tracked validators'
|
||||
// attestations was included in a block
|
||||
func (s *Service) processAttestations(ctx context.Context, state state.BeaconState, blk block.BeaconBlock) {
|
||||
if blk == nil || blk.Body() == nil {
|
||||
return
|
||||
}
|
||||
for _, attestation := range blk.Body().Attestations() {
|
||||
s.processIncludedAttestation(ctx, state, attestation)
|
||||
}
|
||||
}
|
||||
|
||||
// processIncludedAttestation logs when one of our tracked validators appears in
// the attesting indices of an included attestation, provided that attestation
// has not been accounted for before.
|
||||
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) {
|
||||
attestingIndices, err := attestingIndices(ctx, state, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
|
||||
balance, err := state.BalanceAtIndex(types.ValidatorIndex(idx))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get balance")
|
||||
return
|
||||
}
|
||||
|
||||
aggregatedPerf := s.aggregatedPerformance[types.ValidatorIndex(idx)]
|
||||
aggregatedPerf.totalAttestedCount++
|
||||
aggregatedPerf.totalRequestedCount++
|
||||
|
||||
latestPerf := s.latestPerformance[types.ValidatorIndex(idx)]
|
||||
balanceChg := int64(balance - latestPerf.balance)
|
||||
latestPerf.balanceChange = balanceChg
|
||||
latestPerf.balance = balance
|
||||
latestPerf.attestedSlot = att.Data.Slot
|
||||
latestPerf.inclusionSlot = state.Slot()
|
||||
inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot))
|
||||
aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot)
|
||||
|
||||
if state.Version() == version.Altair {
|
||||
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
|
||||
sourceIdx := params.BeaconConfig().TimelySourceFlagIndex
|
||||
headIdx := params.BeaconConfig().TimelyHeadFlagIndex
|
||||
|
||||
var participation []byte
|
||||
if slots.ToEpoch(latestPerf.inclusionSlot) ==
|
||||
slots.ToEpoch(latestPerf.attestedSlot) {
|
||||
participation, err = state.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get current epoch participation")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
participation, err = state.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get previous epoch participation")
|
||||
return
|
||||
}
|
||||
}
|
||||
flags := participation[idx]
|
||||
hasFlag, err := altair.HasValidatorFlag(flags, sourceIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Source flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelySource = hasFlag
|
||||
hasFlag, err = altair.HasValidatorFlag(flags, headIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Head flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelyHead = hasFlag
|
||||
hasFlag, err = altair.HasValidatorFlag(flags, targetIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Target flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelyTarget = hasFlag
|
||||
|
||||
if latestPerf.timelySource {
|
||||
timelySourceCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectSource++
|
||||
}
|
||||
if latestPerf.timelyHead {
|
||||
timelyHeadCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectHead++
|
||||
}
|
||||
if latestPerf.timelyTarget {
|
||||
timelyTargetCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectTarget++
|
||||
}
|
||||
}
|
||||
logFields["CorrectHead"] = latestPerf.timelyHead
|
||||
logFields["CorrectSource"] = latestPerf.timelySource
|
||||
logFields["CorrectTarget"] = latestPerf.timelyTarget
|
||||
logFields["InclusionSlot"] = latestPerf.inclusionSlot
|
||||
logFields["NewBalance"] = balance
|
||||
logFields["BalanceChange"] = balanceChg
|
||||
|
||||
s.latestPerformance[types.ValidatorIndex(idx)] = latestPerf
|
||||
s.aggregatedPerformance[types.ValidatorIndex(idx)] = aggregatedPerf
|
||||
log.WithFields(logFields).Info("Attestation included")
|
||||
}
|
||||
}
|
||||
}
|
||||
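The Altair branch above reduces to reading one participation byte for the validator and testing the three timeliness flags against the configured flag indices. A minimal package-internal sketch of that check, reusing the altair and params packages already imported by this file (checkTimeliness is a hypothetical helper, for illustration only):

    // checkTimeliness is a hypothetical helper; the monitor performs these checks inline.
    // The caller must guarantee idx < uint64(len(participation)).
    func checkTimeliness(participation []byte, idx uint64) (source, target, head bool, err error) {
        cfg := params.BeaconConfig()
        flags := participation[idx] // one participation byte per validator
        if source, err = altair.HasValidatorFlag(flags, cfg.TimelySourceFlagIndex); err != nil {
            return false, false, false, err
        }
        if target, err = altair.HasValidatorFlag(flags, cfg.TimelyTargetFlagIndex); err != nil {
            return false, false, false, err
        }
        if head, err = altair.HasValidatorFlag(flags, cfg.TimelyHeadFlagIndex); err != nil {
            return false, false, false, err
        }
        return source, target, head, nil
    }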
|
||||
// processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our
|
||||
// tracked validators
|
||||
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
|
||||
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
|
||||
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
|
||||
if state == nil {
|
||||
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
|
||||
"Skipping unaggregated attestation due to state not found in cache")
|
||||
return
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, state, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
|
||||
log.WithFields(logFields).Info("Processed unaggregated attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processAggregatedAttestation logs when we see an aggregation performed by one of
// our tracked validators, or an aggregated attestation that includes one of them.
|
||||
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.trackedIndex(att.AggregatorIndex) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"ValidatorIndex": att.AggregatorIndex,
|
||||
}).Info("Processed attestation aggregation")
|
||||
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
|
||||
aggregatedPerf.totalAggregations++
|
||||
s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf
|
||||
aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc()
|
||||
}
|
||||
|
||||
var root [32]byte
|
||||
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
|
||||
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
|
||||
if state == nil {
|
||||
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
|
||||
"Skipping agregated attestation due to state not found in cache")
|
||||
return
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, state, att.Aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Aggregate.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Aggregate.Data)
|
||||
log.WithFields(logFields).Info("Processed aggregated attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
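When one of the tracked indices shows up, the resulting log entry carries the fields assembled by logMessageTimelyFlagsForIndex plus the balance and inclusion data; the tests below assert the exact rendering, for example:

    "Attestation included" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor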
beacon-chain/monitor/process_attestation_test.go (new file, 296 lines)
@@ -0,0 +1,296 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func setupService(t *testing.T) *Service {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
trackedVals := map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
12: nil,
|
||||
15: nil,
|
||||
}
|
||||
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
|
||||
1: {
|
||||
balance: 32000000000,
|
||||
},
|
||||
2: {
|
||||
balance: 32000000000,
|
||||
},
|
||||
12: {
|
||||
balance: 31900000000,
|
||||
},
|
||||
15: {
|
||||
balance: 31900000000,
|
||||
},
|
||||
}
|
||||
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
|
||||
1: {},
|
||||
2: {},
|
||||
12: {},
|
||||
15: {},
|
||||
}
|
||||
trackedSyncCommitteeIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {0, 1, 2, 3},
|
||||
12: {4, 5},
|
||||
}
|
||||
return &Service{
|
||||
config: &ValidatorMonitorConfig{
|
||||
StateGen: stategen.New(beaconDB),
|
||||
},
|
||||
trackedValidators: trackedVals,
|
||||
latestPerformance: latestPerformance,
|
||||
aggregatedPerformance: aggregatedPerformance,
|
||||
trackedSyncCommitteeIndices: trackedSyncCommitteeIndices,
|
||||
lastSyncedEpoch: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAttestingIndices(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 256)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, beaconState, att)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, attestingIndices, []uint64{0xc, 0x2})
|
||||
|
||||
}
|
||||
|
||||
func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processIncludedAttestation(context.Background(), state, att)
|
||||
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
header := state.LatestBlockHeader()
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: header.GetStateRoot(),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processUnaggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "Skipping unaggregated attestation due to state not found in cache")
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: root[:],
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
|
||||
s.processUnaggregatedAttestation(context.Background(), att)
|
||||
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
header := state.LatestBlockHeader()
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
att := ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: header.GetStateRoot(),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
},
|
||||
}
|
||||
s.processAggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsContain(t, hook, "Skipping agregated attestation due to state not found in cache")
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
|
||||
func TestProcessAggregatedAttestationStateCached(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
|
||||
att := ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: root[:],
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b10, 0b1},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
|
||||
s.processAggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
|
||||
}
|
||||
|
||||
func TestProcessAttestations(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
ctx := context.Background()
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
|
||||
block := ðpb.BeaconBlockAltair{
|
||||
Slot: 2,
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Attestations: []*ethpb.Attestation{att},
|
||||
},
|
||||
}
|
||||
|
||||
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
s.processAttestations(ctx, state, wrappedBlock)
|
||||
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
|
||||
}
|
||||
beacon-chain/monitor/process_block.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// processBlock handles the cases when
|
||||
// 1) A block was proposed by one of our tracked validators
|
||||
// 2) An attestation by one of our tracked validators was included
|
||||
// 3) An Exit by one of our validators was included
|
||||
// 4) A Slashing by one of our tracked validators was included
|
||||
// 5) A Sync Committee Contribution by one of our tracked validators was included
|
||||
func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) {
|
||||
if b == nil || b.Block() == nil {
|
||||
return
|
||||
}
|
||||
blk := b.Block()
|
||||
|
||||
s.processSlashings(blk)
|
||||
s.processExitsFromBlock(blk)
|
||||
|
||||
root, err := blk.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not compute block's hash tree root")
|
||||
return
|
||||
}
|
||||
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
|
||||
if state == nil {
|
||||
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
|
||||
"Skipping block collection due to state not found in cache")
|
||||
return
|
||||
}
|
||||
|
||||
currEpoch := slots.ToEpoch(blk.Slot())
|
||||
s.RLock()
|
||||
lastSyncedEpoch := s.lastSyncedEpoch
|
||||
s.RUnlock()
|
||||
|
||||
if currEpoch != lastSyncedEpoch &&
|
||||
slots.SyncCommitteePeriod(currEpoch) == slots.SyncCommitteePeriod(lastSyncedEpoch) {
|
||||
s.updateSyncCommitteeTrackedVals(state)
|
||||
}
|
||||
|
||||
s.processSyncAggregate(state, blk)
|
||||
s.processProposedBlock(state, root, blk)
|
||||
s.processAttestations(ctx, state, blk)
|
||||
}
|
||||
|
||||
// processProposedBlock logs the event that one of our tracked validators proposed a block that was included
|
||||
func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, blk block.BeaconBlock) {
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.trackedIndex(blk.ProposerIndex()) {
|
||||
// update metrics
|
||||
proposedSlotsCounter.WithLabelValues(fmt.Sprintf("%d", blk.ProposerIndex())).Inc()
|
||||
|
||||
// update the performance map
|
||||
balance, err := state.BalanceAtIndex(blk.ProposerIndex())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get balance")
|
||||
return
|
||||
}
|
||||
|
||||
latestPerf := s.latestPerformance[blk.ProposerIndex()]
|
||||
balanceChg := int64(balance - latestPerf.balance)
|
||||
latestPerf.balanceChange = balanceChg
|
||||
latestPerf.balance = balance
|
||||
s.latestPerformance[blk.ProposerIndex()] = latestPerf
|
||||
|
||||
aggPerf := s.aggregatedPerformance[blk.ProposerIndex()]
|
||||
aggPerf.totalProposedCount++
|
||||
s.aggregatedPerformance[blk.ProposerIndex()] = aggPerf
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"ProposerIndex": blk.ProposerIndex(),
|
||||
"Slot": blk.Slot(),
|
||||
"Version": blk.Version(),
|
||||
"ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(blk.ParentRoot())),
|
||||
"BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
|
||||
"NewBalance": balance,
|
||||
"BalanceChange": balanceChg,
|
||||
}).Info("Proposed block was included")
|
||||
}
|
||||
}
|
||||
|
||||
// processSlashings logs the event that one of our tracked validators was slashed
|
||||
func (s *Service) processSlashings(blk block.BeaconBlock) {
|
||||
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
for _, slashing := range blk.Body().ProposerSlashings() {
|
||||
idx := slashing.Header_1.Header.ProposerIndex
|
||||
if s.trackedIndex(idx) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"ProposerIndex": idx,
|
||||
"Slot:": blk.Slot(),
|
||||
"SlashingSlot": slashing.Header_1.Header.Slot,
|
||||
"Root1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
|
||||
"Root2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
|
||||
}).Info("Proposer slashing was included")
|
||||
}
|
||||
}
|
||||
|
||||
for _, slashing := range blk.Body().AttesterSlashings() {
|
||||
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
|
||||
if s.trackedIndex(types.ValidatorIndex(idx)) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"AttesterIndex": idx,
|
||||
"Slot:": blk.Slot(),
|
||||
"Slot1": slashing.Attestation_1.Data.Slot,
|
||||
"Root1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
|
||||
"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
|
||||
"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
|
||||
"Slot2": slashing.Attestation_2.Data.Slot,
|
||||
"Root2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
|
||||
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
|
||||
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
|
||||
}).Info("Attester slashing was included")
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
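Feeding a synced block into this path requires the block to be wrapped and its post-state to already sit in the stategen hot cache, otherwise processBlock bails out early with the debug message above. A hedged, package-internal sketch of the call sequence, mirroring TestProcessBlock_AllEventsTrackedVals below (monitorSvc, signedBlk, blockRoot and postState are placeholder names):

    // Illustrative only; names such as monitorSvc, signedBlk, blockRoot and postState are placeholders.
    wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(signedBlk)
    if err != nil {
        log.WithError(err).Error("Could not wrap signed block")
        return
    }
    // processBlock reads the post-state via StateByRootIfCachedNoCopy, so it must be cached first.
    if err := monitorSvc.config.StateGen.SaveState(ctx, blockRoot, postState); err != nil {
        log.WithError(err).Error("Could not save state")
        return
    }
    monitorSvc.processBlock(ctx, wrapped)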
beacon-chain/monitor/process_block_test.go (new file, 231 lines)
@@ -0,0 +1,231 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestProcessSlashings(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
block *ethpb.BeaconBlock
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "Proposer slashing a tracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{
|
||||
{
|
||||
Header_1: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 2,
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
},
|
||||
},
|
||||
Header_2: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 2,
|
||||
Slot: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "\"Proposer slashing was included\" ProposerIndex=2",
|
||||
},
|
||||
{
|
||||
name: "Proposer slashing an untracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{
|
||||
{
|
||||
Header_1: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 3,
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch + 4,
|
||||
},
|
||||
},
|
||||
Header_2: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
ProposerIndex: 3,
|
||||
Slot: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "",
|
||||
},
|
||||
{
|
||||
name: "Attester slashing a tracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1, 5, 6},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "\"Attester slashing was included\" AttesterIndex=1",
|
||||
},
|
||||
{
|
||||
name: "Attester slashing untracked index",
|
||||
block: ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{3, 5, 6},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantedErr: "",
|
||||
}}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := &Service{
|
||||
trackedValidators: map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
},
|
||||
}
|
||||
s.processSlashings(wrapper.WrappedPhase0BeaconBlock(tt.block))
|
||||
if tt.wantedErr != "" {
|
||||
require.LogsContain(t, hook, tt.wantedErr)
|
||||
} else {
|
||||
require.LogsDoNotContain(t, hook, "slashing")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessProposedBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
block *ethpb.BeaconBlock
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "Block proposed by tracked validator",
|
||||
block: ðpb.BeaconBlock{
|
||||
Slot: 6,
|
||||
ProposerIndex: 12,
|
||||
ParentRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
|
||||
},
|
||||
wantedErr: "\"Proposed block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
|
||||
},
|
||||
{
|
||||
name: "Block proposed by untracked validator",
|
||||
block: ðpb.BeaconBlock{
|
||||
Slot: 6,
|
||||
ProposerIndex: 13,
|
||||
ParentRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 256)
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
s.processProposedBlock(beaconState, root, wrapper.WrappedPhase0BeaconBlock(tt.block))
|
||||
if tt.wantedErr != "" {
|
||||
require.LogsContain(t, hook, tt.wantedErr)
|
||||
} else {
|
||||
require.LogsDoNotContain(t, hook, "included")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisStateAltair(t, 64)
|
||||
c, err := altair.NextSyncCommittee(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, genesis.SetCurrentSyncCommittee(c))
|
||||
|
||||
genConfig := util.DefaultBlockGenConfig()
|
||||
genConfig.NumProposerSlashings = 1
|
||||
b, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
|
||||
require.NoError(t, err)
|
||||
s := setupService(t)
|
||||
|
||||
pubKeys := make([][]byte, 3)
|
||||
pubKeys[0] = genesis.Validators()[0].PublicKey
|
||||
pubKeys[1] = genesis.Validators()[1].PublicKey
|
||||
pubKeys[2] = genesis.Validators()[2].PublicKey
|
||||
|
||||
currentSyncCommittee := util.ConvertToCommittee([][]byte{
|
||||
pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
|
||||
})
|
||||
require.NoError(t, genesis.SetCurrentSyncCommittee(currentSyncCommittee))
|
||||
|
||||
idx := b.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex
|
||||
s.RLock()
|
||||
if !s.trackedIndex(idx) {
|
||||
s.trackedValidators[idx] = nil
|
||||
s.latestPerformance[idx] = ValidatorLatestPerformance{
|
||||
balance: 31900000000,
|
||||
}
|
||||
s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{}
|
||||
}
|
||||
s.RUnlock()
|
||||
s.updateSyncCommitteeTrackedVals(genesis)
|
||||
|
||||
root, err := b.GetBlock().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
|
||||
wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
|
||||
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" ProposerIndex=%d Root1=0x000100000000 Root2=0x000200000000 SlashingSlot=0 Slot:=1 prefix=monitor", idx)
|
||||
wanted3 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=3 ExpectedContrib=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
|
||||
wanted4 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
|
||||
wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
s.processBlock(ctx, wrapped)
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
require.LogsContain(t, hook, wanted3)
|
||||
require.LogsContain(t, hook, wanted4)
|
||||
}
|
||||
beacon-chain/monitor/process_exit.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package monitor

import (
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/sirupsen/logrus"
)

// processExitsFromBlock logs when an exit by one of our tracked validators was
// included in a block.
func (s *Service) processExitsFromBlock(blk block.BeaconBlock) {

    s.RLock()
    defer s.RUnlock()

    for _, exit := range blk.Body().VoluntaryExits() {
        idx := exit.Exit.ValidatorIndex
        if s.trackedIndex(idx) {
            log.WithFields(logrus.Fields{
                "ValidatorIndex": idx,
                "Slot":           blk.Slot(),
            }).Info("Voluntary exit was included")
        }
    }
}

// processExit logs when an exit by one of our tracked validators was processed.
func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
    idx := exit.Exit.ValidatorIndex

    s.RLock()
    defer s.RUnlock()

    if s.trackedIndex(idx) {
        log.WithFields(logrus.Fields{
            "ValidatorIndex": idx,
        }).Info("Voluntary exit was processed")
    }
}
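For a tracked index the resulting entry is terse; the tests below pin the exact rendering, for example:

    "Voluntary exit was included" Slot=0 ValidatorIndex=2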
beacon-chain/monitor/process_exit_test.go (new file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := &Service{
|
||||
trackedValidators: map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
},
|
||||
}
|
||||
|
||||
exits := []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: 3,
|
||||
Epoch: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: 2,
|
||||
Epoch: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
block := ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
VoluntaryExits: exits,
|
||||
},
|
||||
}
|
||||
|
||||
s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
|
||||
require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
|
||||
}
|
||||
|
||||
func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := &Service{
|
||||
trackedValidators: map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
},
|
||||
}
|
||||
|
||||
exits := []*ethpb.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: 3,
|
||||
Epoch: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: 4,
|
||||
Epoch: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
block := ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
VoluntaryExits: exits,
|
||||
},
|
||||
}
|
||||
|
||||
s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
|
||||
require.LogsDoNotContain(t, hook, "\"Voluntary exit was included\"")
|
||||
}
|
||||
|
||||
func TestProcessExitP2PTrackedIndices(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := &Service{
|
||||
trackedValidators: map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
},
|
||||
}
|
||||
|
||||
exit := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: 1,
|
||||
Epoch: 1,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
s.processExit(exit)
|
||||
require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
|
||||
}
|
||||
|
||||
func TestProcessExitP2PUntrackedIndices(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := &Service{
|
||||
trackedValidators: map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
},
|
||||
}
|
||||
|
||||
exit := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: 3,
|
||||
Epoch: 1,
|
||||
},
|
||||
}
|
||||
s.processExit(exit)
|
||||
require.LogsDoNotContain(t, hook, "\"Voluntary exit was processed\"")
|
||||
}
|
||||
beacon-chain/monitor/process_sync_committee.go (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// processSyncCommitteeContribution logs the event that one of our tracked
|
||||
// validators' aggregated sync contribution has been processed.
|
||||
// TODO: We do not log if a sync contribution was included in an aggregate (we log them when they are included in blocks)
|
||||
func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedContributionAndProof) {
|
||||
idx := contribution.Message.AggregatorIndex
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.trackedIndex(idx) {
|
||||
aggPerf := s.aggregatedPerformance[idx]
|
||||
aggPerf.totalSyncComitteeAggregations++
|
||||
s.aggregatedPerformance[idx] = aggPerf
|
||||
|
||||
log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
|
||||
}
|
||||
}
|
||||
|
||||
// processSyncAggregate logs the event that one of our tracked validators is a sync-committee member and its
|
||||
// contribution was included
|
||||
func (s *Service) processSyncAggregate(state state.BeaconState, blk block.BeaconBlock) {
|
||||
if blk == nil || blk.Body() == nil {
|
||||
return
|
||||
}
|
||||
bits, err := blk.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Cannot get SyncAggregate")
|
||||
return
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for validatorIdx, committeeIndices := range s.trackedSyncCommitteeIndices {
|
||||
if len(committeeIndices) > 0 {
|
||||
contrib := 0
|
||||
for _, idx := range committeeIndices {
|
||||
if bits.SyncCommitteeBits.BitAt(uint64(idx)) {
|
||||
contrib++
|
||||
}
|
||||
}
|
||||
|
||||
balance, err := state.BalanceAtIndex(validatorIdx)
|
||||
if err != nil {
|
||||
log.Error("Could not get balance")
|
||||
return
|
||||
}
|
||||
|
||||
latestPerf := s.latestPerformance[validatorIdx]
|
||||
balanceChg := int64(balance - latestPerf.balance)
|
||||
latestPerf.balanceChange = balanceChg
|
||||
latestPerf.balance = balance
|
||||
s.latestPerformance[validatorIdx] = latestPerf
|
||||
|
||||
aggPerf := s.aggregatedPerformance[validatorIdx]
|
||||
aggPerf.totalSyncComitteeContributions += uint64(contrib)
|
||||
s.aggregatedPerformance[validatorIdx] = aggPerf
|
||||
|
||||
syncCommitteeContributionCounter.WithLabelValues(
|
||||
fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"ValidatorIndex": validatorIdx,
|
||||
"ExpectedContrib": len(committeeIndices),
|
||||
"Contributions": contrib,
|
||||
"NewBalance": balance,
|
||||
"BalanceChange": balanceChg,
|
||||
}).Info("Sync committee contribution included")
|
||||
}
|
||||
}
|
||||
}
|
||||
beacon-chain/monitor/process_sync_committee_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestProcessSyncCommitteeContribution(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
|
||||
contrib := ðpb.SignedContributionAndProof{
|
||||
Message: ðpb.ContributionAndProof{
|
||||
AggregatorIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
s.processSyncCommitteeContribution(contrib)
|
||||
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
|
||||
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
|
||||
}
|
||||
|
||||
func TestProcessSyncAggregate(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
beaconState, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
|
||||
block := ðpb.BeaconBlockAltair{
|
||||
Slot: 2,
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: bitfield.Bitvector512{
|
||||
0x31, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.processSyncAggregate(beaconState, wrappedBlock)
|
||||
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
|
||||
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 Contributions=2 ExpectedContrib=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
|
||||
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
|
||||
}
|
||||
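The expected contribution counts in TestProcessSyncAggregate follow directly from the tracked sync committee positions seeded in setupService and the first byte of the bitvector: 0x31 is 0b00110001, so committee positions 0, 4 and 5 are set. Validator 1 is tracked at positions {0, 1, 2, 3}, of which only position 0 is set, hence Contributions=1 against ExpectedContrib=4; validator 12 is tracked at positions {4, 5}, both set, hence Contributions=2 against ExpectedContrib=2. The BalanceChange values likewise come from the difference between the 32 ETH genesis balances and the balances seeded in setupService.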
beacon-chain/monitor/service.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package monitor

import (
    "sync"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/time/slots"
)

// ValidatorLatestPerformance keeps track of the latest participation of the validator
type ValidatorLatestPerformance struct {
    attestedSlot  types.Slot
    inclusionSlot types.Slot
    timelySource  bool
    timelyTarget  bool
    timelyHead    bool
    balance       uint64
    balanceChange int64
}

// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the validator since launch
type ValidatorAggregatedPerformance struct {
    totalAttestedCount             uint64
    totalRequestedCount            uint64
    totalDistance                  uint64
    totalCorrectSource             uint64
    totalCorrectTarget             uint64
    totalCorrectHead               uint64
    totalProposedCount             uint64
    totalAggregations              uint64
    totalSyncComitteeContributions uint64
    totalSyncComitteeAggregations  uint64
}

// ValidatorMonitorConfig contains the list of validator indices that the
// monitor service tracks, as well as the event feed notifier that the
// monitor needs to subscribe to.
type ValidatorMonitorConfig struct {
    StateGen stategen.StateManager
}

// Service is the main structure that tracks validators and reports logs and
// metrics of their performances throughout their lifetime.
type Service struct {
    config *ValidatorMonitorConfig

    // Locks access to trackedValidators, latestPerformance, aggregatedPerformance,
    // trackedSyncCommitteeIndices and lastSyncedEpoch.
    sync.RWMutex

    trackedValidators           map[types.ValidatorIndex]interface{}
    latestPerformance           map[types.ValidatorIndex]ValidatorLatestPerformance
    aggregatedPerformance       map[types.ValidatorIndex]ValidatorAggregatedPerformance
    trackedSyncCommitteeIndices map[types.ValidatorIndex][]types.CommitteeIndex
    lastSyncedEpoch             types.Epoch
}

// trackedIndex returns true if the given validator index corresponds to one of the
// validators we follow.
// It assumes the caller holds the service lock.
func (s *Service) trackedIndex(idx types.ValidatorIndex) bool {
    _, ok := s.trackedValidators[idx]
    return ok
}

// updateSyncCommitteeTrackedVals updates the sync committee assignments of our
// tracked validators. It gets called when we sync a block after the sync period changes.
func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
    s.Lock()
    defer s.Unlock()
    for idx := range s.trackedValidators {
        syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
        if err != nil {
            log.WithError(err).WithField("ValidatorIndex", idx).Error(
                "Sync committee assignments will not be reported")
            delete(s.trackedSyncCommitteeIndices, idx)
        } else if len(syncIdx) == 0 {
            delete(s.trackedSyncCommitteeIndices, idx)
        } else {
            s.trackedSyncCommitteeIndices[idx] = syncIdx
        }
    }
    s.lastSyncedEpoch = slots.ToEpoch(state.Slot())
}
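Because all of these fields are unexported, the service is wired up from inside the package; the tests below do it with a plain struct literal. A minimal sketch along those lines (beaconDB stands in for whatever database the caller already holds, as in the test helpers):

    // Package-internal sketch only; mirrors setupService in the tests below.
    svc := &Service{
        config: &ValidatorMonitorConfig{
            StateGen: stategen.New(beaconDB), // beaconDB is assumed to be available
        },
        trackedValidators: map[types.ValidatorIndex]interface{}{
            1: nil,
            2: nil,
        },
        latestPerformance:           map[types.ValidatorIndex]ValidatorLatestPerformance{},
        aggregatedPerformance:       map[types.ValidatorIndex]ValidatorAggregatedPerformance{},
        trackedSyncCommitteeIndices: map[types.ValidatorIndex][]types.CommitteeIndex{},
    }
    if svc.trackedIndex(types.ValidatorIndex(1)) {
        log.Info("validator 1 is being monitored")
    }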
beacon-chain/monitor/service_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package monitor

import (
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/testing/require"
    "github.com/prysmaticlabs/prysm/testing/util"
    logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestTrackedIndex(t *testing.T) {
    s := &Service{
        trackedValidators: map[types.ValidatorIndex]interface{}{
            1: nil,
            2: nil,
        },
    }
    require.Equal(t, s.trackedIndex(types.ValidatorIndex(1)), true)
    require.Equal(t, s.trackedIndex(types.ValidatorIndex(3)), false)
}

func TestUpdateSyncCommitteeTrackedVals(t *testing.T) {
    hook := logTest.NewGlobal()
    s := setupService(t)
    state, _ := util.DeterministicGenesisStateAltair(t, 1024)

    pubKeys := make([][]byte, 3)
    pubKeys[0] = state.Validators()[0].PublicKey
    pubKeys[1] = state.Validators()[1].PublicKey
    pubKeys[2] = state.Validators()[2].PublicKey

    currentSyncCommittee := util.ConvertToCommittee([][]byte{
        pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
    })
    require.NoError(t, state.SetCurrentSyncCommittee(currentSyncCommittee))

    s.updateSyncCommitteeTrackedVals(state)
    require.LogsDoNotContain(t, hook, "Sync committee assignments will not be reported")
    newTrackedSyncIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
        1: {1, 3, 4},
        2: {2},
    }
    require.DeepEqual(t, s.trackedSyncCommitteeIndices, newTrackedSyncIndices)
}
@@ -113,9 +113,9 @@ func configureExecutionSetting(cliCtx *cli.Context) {
        c.TerminalBlockHashActivationEpoch = types.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
        params.OverrideBeaconConfig(c)
    }
    if cliCtx.IsSet(flags.Coinbase.Name) {
    if cliCtx.IsSet(flags.FeeRecipient.Name) {
        c := params.BeaconConfig()
        c.Coinbase = common.HexToAddress(cliCtx.String(flags.Coinbase.Name))
        c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name))
        params.OverrideBeaconConfig(c)
    }
}
|
||||
@@ -79,11 +79,11 @@ func TestConfigureExecutionSetting(t *testing.T) {
|
||||
set.Uint64(flags.TerminalTotalDifficultyOverride.Name, 0, "")
|
||||
set.String(flags.TerminalBlockHashOverride.Name, "", "")
|
||||
set.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name, 0, "")
|
||||
set.String(flags.Coinbase.Name, "", "")
|
||||
set.String(flags.FeeRecipient.Name, "", "")
|
||||
require.NoError(t, set.Set(flags.TerminalTotalDifficultyOverride.Name, strconv.Itoa(100)))
|
||||
require.NoError(t, set.Set(flags.TerminalBlockHashOverride.Name, "0xA"))
|
||||
require.NoError(t, set.Set(flags.TerminalBlockHashActivationEpochOverride.Name, strconv.Itoa(200)))
|
||||
require.NoError(t, set.Set(flags.Coinbase.Name, "0xB"))
|
||||
require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xB"))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
configureExecutionSetting(cliCtx)
|
||||
@@ -91,7 +91,7 @@ func TestConfigureExecutionSetting(t *testing.T) {
|
||||
assert.Equal(t, uint64(100), params.BeaconConfig().TerminalTotalDifficulty)
|
||||
assert.Equal(t, common.HexToHash("0xA"), params.BeaconConfig().TerminalBlockHash)
|
||||
assert.Equal(t, types.Epoch(200), params.BeaconConfig().TerminalBlockHashActivationEpoch)
|
||||
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().Coinbase)
|
||||
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().FeeRecipient)
|
||||
}
|
||||
|
||||
func TestConfigureNetwork(t *testing.T) {
|
||||
|
||||
@@ -854,6 +854,14 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
|
||||
GenesisPath: genesisStatePath,
|
||||
})
|
||||
|
||||
// Register genesis state as start-up state when interop mode.
|
||||
// The start-up state gets reused across services.
|
||||
st, err := b.db.GenesisState(b.ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.finalizedStateAtStartUp = st
|
||||
|
||||
return b.services.RegisterService(svc)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -437,9 +437,9 @@ func TestLogTillGenesis_OK(t *testing.T) {
|
||||
|
||||
func TestInitDepositCache_OK(t *testing.T) {
|
||||
ctrs := []*protodb.DepositContainer{
|
||||
{Index: 0, Eth1BlockHeight: 2, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}}},
|
||||
{Index: 1, Eth1BlockHeight: 4, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("B")}}},
|
||||
{Index: 2, Eth1BlockHeight: 6, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}},
|
||||
{Index: 0, Eth1BlockHeight: 2, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}, Data: ðpb.Deposit_Data{PublicKey: []byte{}}}},
|
||||
{Index: 1, Eth1BlockHeight: 4, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("B")}, Data: ðpb.Deposit_Data{PublicKey: []byte{}}}},
|
||||
{Index: 2, Eth1BlockHeight: 6, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}, Data: ðpb.Deposit_Data{PublicKey: []byte{}}}},
|
||||
}
|
||||
gs, _ := util.DeterministicGenesisState(t, 1)
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
|
||||
@@ -100,7 +100,7 @@ func TestGetSpec(t *testing.T) {
|
||||
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
|
||||
config.TerminalBlockHashActivationEpoch = 72
|
||||
config.TerminalTotalDifficulty = 73
|
||||
config.Coinbase = common.HexToAddress("Coinbase")
|
||||
config.FeeRecipient = common.HexToAddress("FeeRecipient")
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -329,8 +329,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v))
|
||||
case "TERMINAL_TOTAL_DIFFICULTY":
|
||||
assert.Equal(t, "73", v)
|
||||
case "COINBASE":
|
||||
assert.Equal(t, common.HexToAddress("Coinbase"), v)
|
||||
case "FeeRecipient":
|
||||
assert.Equal(t, common.HexToAddress("FeeRecipient"), v)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
}
|
||||
|
||||
@@ -94,7 +94,12 @@ func (vs *Server) deposits(
    ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
    defer span.End()

    if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
    if vs.MockEth1Votes {
        return []*ethpb.Deposit{}, nil
    }

    if !vs.Eth1InfoFetcher.IsConnectedToETH1() {
        log.Warn("not connected to eth1 node, skip pending deposit insertion")
        return []*ethpb.Deposit{}, nil
    }
    // Need to fetch if the deposits up to the state's latest eth1 data matches
@@ -112,6 +117,7 @@ func (vs *Server) deposits(
    // If there are no pending deposits, exit early.
    allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
    if len(allPendingContainers) == 0 {
        log.Debug("no pending deposits for inclusion in block")
        return []*ethpb.Deposit{}, nil
    }

@@ -127,21 +133,21 @@ func (vs *Server) deposits(
        if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
            pendingDeps = append(pendingDeps, dep)
        }
        // Don't try to pack more than the max allowed in a block
        if uint64(len(pendingDeps)) == params.BeaconConfig().MaxDeposits {
            break
        }
    }

    for i := range pendingDeps {
        // Don't construct merkle proof if the number of deposits is more than max allowed in block.
        if uint64(i) == params.BeaconConfig().MaxDeposits {
            break
        }
        pendingDeps[i].Deposit, err = constructMerkleProof(depositTrie, int(pendingDeps[i].Index), pendingDeps[i].Deposit)
        if err != nil {
            return nil, err
        }
    }
    // Limit the return of pending deposits to not be more than max deposits allowed in block.

    var pendingDeposits []*ethpb.Deposit
    for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
    for i := uint64(0); i < uint64(len(pendingDeps)); i++ {
        pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
    }
    return pendingDeposits, nil
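The MaxDeposits checks above all enforce the same consensus rule: a block may carry at most MAX_DEPOSITS deposits. A tiny standalone sketch of that capping rule follows (the constant 16 is the mainnet MAX_DEPOSITS value; capDeposits is an illustrative helper, not the server code):

package main

import "fmt"

const maxDeposits = 16 // mainnet MAX_DEPOSITS

// capDeposits truncates a pending-deposit list to the per-block limit.
func capDeposits(pending []int) []int {
    if len(pending) > maxDeposits {
        return pending[:maxDeposits]
    }
    return pending
}

func main() {
    pending := make([]int, 20)
    fmt.Println(len(capDeposits(pending))) // 16
}
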
|
||||
@@ -12,6 +12,8 @@ go_library(
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -26,6 +28,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -18,6 +18,7 @@ type FieldTrie struct {
|
||||
field types.FieldIndex
|
||||
dataType types.DataType
|
||||
length uint64
|
||||
numOfElems int
|
||||
}
|
||||
|
||||
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
|
||||
@@ -26,18 +27,19 @@ type FieldTrie struct {
|
||||
func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) (*FieldTrie, error) {
|
||||
if elements == nil {
|
||||
return &FieldTrie{
|
||||
field: field,
|
||||
dataType: dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
field: field,
|
||||
dataType: dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
numOfElems: 0,
|
||||
}, nil
|
||||
}
|
||||
fieldRoots, err := fieldConverters(field, []uint64{}, elements, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateElements(field, elements, length); err != nil {
|
||||
if err := validateElements(field, dataType, elements, length); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch dataType {
|
||||
@@ -53,8 +55,9 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
numOfElems: reflect.ValueOf(elements).Len(),
|
||||
}, nil
|
||||
case types.CompositeArray:
|
||||
case types.CompositeArray, types.CompressedArray:
|
||||
return &FieldTrie{
|
||||
fieldLayers: stateutil.ReturnTrieLayerVariable(fieldRoots, length),
|
||||
field: field,
|
||||
@@ -62,6 +65,7 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
numOfElems: reflect.ValueOf(elements).Len(),
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(dataType).Name())
|
||||
@@ -92,13 +96,40 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
        if err != nil {
            return [32]byte{}, err
        }
        f.numOfElems = reflect.ValueOf(elements).Len()
        return fieldRoot, nil
    case types.CompositeArray:
        fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
        if err != nil {
            return [32]byte{}, err
        }
        f.numOfElems = reflect.ValueOf(elements).Len()
        return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
    case types.CompressedArray:
        numOfElems, err := f.field.ElemsInChunk()
        if err != nil {
            return [32]byte{}, err
        }
        // We remove the duplicates here in order to prevent
        // duplicated insertions into the trie.
        newIndices := []uint64{}
        indexExists := make(map[uint64]bool)
        newRoots := make([][32]byte, 0, len(fieldRoots)/int(numOfElems))
        for i, idx := range indices {
            startIdx := idx / numOfElems
            if indexExists[startIdx] {
                continue
            }
            newIndices = append(newIndices, startIdx)
            indexExists[startIdx] = true
            newRoots = append(newRoots, fieldRoots[i])
        }
        fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(newRoots, newIndices, f.fieldLayers)
        if err != nil {
            return [32]byte{}, err
        }
        f.numOfElems = reflect.ValueOf(elements).Len()
        return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems))
    default:
        return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
    }
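A minimal, standalone sketch of the de-duplication step in the CompressedArray case above: element indices are mapped to the chunk that holds them, and each chunk is queued only once for recomputation. The helper name chunkIndices and the value 4 (elements per chunk, as for Balances) are illustrative assumptions.

package main

import "fmt"

// chunkIndices maps element indices to chunk indices and drops duplicates,
// mirroring the dedup loop in the CompressedArray branch.
func chunkIndices(elementIndices []uint64, elemsPerChunk uint64) []uint64 {
    seen := make(map[uint64]bool)
    chunks := make([]uint64, 0, len(elementIndices))
    for _, idx := range elementIndices {
        c := idx / elemsPerChunk // chunk holding this element
        if seen[c] {
            continue // chunk already scheduled, skip the duplicate
        }
        seen[c] = true
        chunks = append(chunks, c)
    }
    return chunks
}

func main() {
    // Balances 0-3 share chunk 0, 4-7 share chunk 1, 8-11 share chunk 2.
    fmt.Println(chunkIndices([]uint64{0, 1, 5, 6, 7, 9}, 4)) // [0 1 2]
}
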
@@ -109,11 +140,12 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
|
||||
func (f *FieldTrie) CopyTrie() *FieldTrie {
|
||||
if f.fieldLayers == nil {
|
||||
return &FieldTrie{
|
||||
field: f.field,
|
||||
dataType: f.dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: f.length,
|
||||
field: f.field,
|
||||
dataType: f.dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: f.length,
|
||||
numOfElems: f.numOfElems,
|
||||
}
|
||||
}
|
||||
dstFieldTrie := make([][]*[32]byte, len(f.fieldLayers))
|
||||
@@ -128,6 +160,7 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: f.length,
|
||||
numOfElems: f.numOfElems,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,6 +172,9 @@ func (f *FieldTrie) TrieRoot() ([32]byte, error) {
|
||||
case types.CompositeArray:
|
||||
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
|
||||
return stateutil.AddInMixin(trieRoot, uint64(len(f.fieldLayers[0])))
|
||||
case types.CompressedArray:
|
||||
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
|
||||
return stateutil.AddInMixin(trieRoot, uint64(f.numOfElems))
|
||||
default:
|
||||
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
package fieldtrie

import (
    "encoding/binary"
    "fmt"
    "reflect"

@@ -8,20 +9,37 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
    "github.com/prysmaticlabs/prysm/crypto/hash"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/encoding/ssz"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/runtime/version"
)

func (f *FieldTrie) validateIndices(idxs []uint64) error {
    length := f.length
    if f.dataType == types.CompressedArray {
        comLength, err := f.field.ElemsInChunk()
        if err != nil {
            return err
        }
        length *= comLength
    }
    for _, idx := range idxs {
        if idx >= f.length {
            return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, f.length)
        if idx >= length {
            return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, length)
        }
    }
    return nil
}

func validateElements(field types.FieldIndex, elements interface{}, length uint64) error {
func validateElements(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) error {
    if dataType == types.CompressedArray {
        comLength, err := field.ElemsInChunk()
        if err != nil {
            return err
        }
        length *= comLength
    }
    val := reflect.ValueOf(elements)
    if val.Len() > int(length) {
        return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length)
@@ -38,21 +56,21 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([][]byte{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return stateutil.HandleByteArrays(val, indices, convertAll)
|
||||
return handleByteArrays(val, indices, convertAll)
|
||||
case types.Eth1DataVotes:
|
||||
val, ok := elements.([]*ethpb.Eth1Data)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return HandleEth1DataSlice(val, indices, convertAll)
|
||||
return handleEth1DataSlice(val, indices, convertAll)
|
||||
case types.Validators:
|
||||
val, ok := elements.([]*ethpb.Validator)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return stateutil.HandleValidatorSlice(val, indices, convertAll)
|
||||
return handleValidatorSlice(val, indices, convertAll)
|
||||
case types.PreviousEpochAttestations, types.CurrentEpochAttestations:
|
||||
val, ok := elements.([]*ethpb.PendingAttestation)
|
||||
if !ok {
|
||||
@@ -60,13 +78,87 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
|
||||
reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return handlePendingAttestation(val, indices, convertAll)
|
||||
case types.Balances:
|
||||
val, ok := elements.([]uint64)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return handleBalanceSlice(val, indices, convertAll)
|
||||
default:
|
||||
return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name())
|
||||
}
|
||||
}
|
||||
|
||||
// HandleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
|
||||
func HandleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
// handleByteArrays computes and returns byte arrays in a slice of root format.
|
||||
func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
rootCreator := func(input []byte) {
|
||||
newRoot := bytesutil.ToBytes32(input)
|
||||
roots = append(roots, newRoot)
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
rootCreator(val[i])
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val))
|
||||
}
|
||||
rootCreator(val[idx])
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
// handleValidatorSlice returns the validator indices in a slice of root format.
|
||||
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
rootCreator := func(input *ethpb.Validator) error {
|
||||
newRoot, err := stateutil.ValidatorRootWithHasher(hasher, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
roots = append(roots, newRoot)
|
||||
return nil
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
err := rootCreator(val[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val))
|
||||
}
|
||||
err := rootCreator(val[idx])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
// handleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
|
||||
func handleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
@@ -141,3 +233,48 @@ func handlePendingAttestation(val []*ethpb.PendingAttestation, indices []uint64,
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
func handleBalanceSlice(val []uint64, indices []uint64, convertAll bool) ([][32]byte, error) {
    if convertAll {
        balancesMarshaling := make([][]byte, 0)
        for _, b := range val {
            balanceBuf := make([]byte, 8)
            binary.LittleEndian.PutUint64(balanceBuf, b)
            balancesMarshaling = append(balancesMarshaling, balanceBuf)
        }
        balancesChunks, err := ssz.PackByChunk(balancesMarshaling)
        if err != nil {
            return [][32]byte{}, errors.Wrap(err, "could not pack balances into chunks")
        }
        return balancesChunks, nil
    }
    if len(val) > 0 {
        numOfElems, err := types.Balances.ElemsInChunk()
        if err != nil {
            return nil, err
        }
        roots := [][32]byte{}
        for _, idx := range indices {
            // We split the indexes into their relevant groups. Balances
            // are compressed according to 4 values -> 1 chunk.
            startIdx := idx / numOfElems
            startGroup := startIdx * numOfElems
            chunk := [32]byte{}
            sizeOfElem := len(chunk) / int(numOfElems)
            for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
                wantedVal := uint64(0)
                // Chunks are filled in sets of 4; if the set sits at the edge of the
                // array, the rest of the chunk has to be zeroed out. Ex: with 41
                // indexes, 41 % 4 = 1, so 3 indexes do not exist yet but still have
                // to be added as part of the root. These 3 indexes are given a
                // 'zero' value.
                if int(j) < len(val) {
                    wantedVal = val[j]
                }
                binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal)
            }
            roots = append(roots, chunk)
        }
        return roots, nil
    }
    return [][32]byte{}, nil
}
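For reference, a small self-contained illustration of the packing performed above (not the package's code; ssz.PackByChunk and the trie machinery are left out): four little-endian uint64 balances fill one 32-byte chunk, and a trailing partial group is zero-padded.

package main

import (
    "encoding/binary"
    "fmt"
)

// packBalances packs balances four per 32-byte chunk, zero-padding the last chunk.
func packBalances(balances []uint64) [][32]byte {
    chunks := make([][32]byte, 0, (len(balances)+3)/4)
    for start := 0; start < len(balances); start += 4 {
        var chunk [32]byte
        for i := 0; i < 4 && start+i < len(balances); i++ {
            binary.LittleEndian.PutUint64(chunk[i*8:(i+1)*8], balances[start+i])
        }
        chunks = append(chunks, chunk)
    }
    return chunks
}

func main() {
    chunks := packBalances([]uint64{5, 2929, 34, 1291, 354305})
    fmt.Println(len(chunks)) // 2: one full chunk and one zero-padded chunk
}
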
|
||||
@@ -1,8 +1,13 @@
|
||||
package fieldtrie
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
)
|
||||
@@ -17,7 +22,64 @@ func Test_handlePendingAttestation_OutOfRange(t *testing.T) {
|
||||
func Test_handleEth1DataSlice_OutOfRange(t *testing.T) {
|
||||
items := make([]*ethpb.Eth1Data, 1)
|
||||
indices := []uint64{3}
|
||||
_, err := HandleEth1DataSlice(items, indices, false)
|
||||
_, err := handleEth1DataSlice(items, indices, false)
|
||||
assert.ErrorContains(t, "index 3 greater than number of items in eth1 data slice 1", err)
|
||||
|
||||
}
|
||||
|
||||
func Test_handleValidatorSlice_OutOfRange(t *testing.T) {
|
||||
vals := make([]*ethpb.Validator, 1)
|
||||
indices := []uint64{3}
|
||||
_, err := handleValidatorSlice(vals, indices, false)
|
||||
assert.ErrorContains(t, "index 3 greater than number of validators 1", err)
|
||||
}
|
||||
|
||||
func TestBalancesSlice_CorrectRoots_All(t *testing.T) {
|
||||
balances := []uint64{5, 2929, 34, 1291, 354305}
|
||||
roots, err := handleBalanceSlice(balances, []uint64{}, true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
root1 := [32]byte{}
|
||||
binary.LittleEndian.PutUint64(root1[:8], balances[0])
|
||||
binary.LittleEndian.PutUint64(root1[8:16], balances[1])
|
||||
binary.LittleEndian.PutUint64(root1[16:24], balances[2])
|
||||
binary.LittleEndian.PutUint64(root1[24:32], balances[3])
|
||||
|
||||
root2 := [32]byte{}
|
||||
binary.LittleEndian.PutUint64(root2[:8], balances[4])
|
||||
|
||||
assert.DeepEqual(t, roots, [][32]byte{root1, root2})
|
||||
}
|
||||
|
||||
func TestBalancesSlice_CorrectRoots_Some(t *testing.T) {
|
||||
balances := []uint64{5, 2929, 34, 1291, 354305}
|
||||
roots, err := handleBalanceSlice(balances, []uint64{2, 3}, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
root1 := [32]byte{}
|
||||
binary.LittleEndian.PutUint64(root1[:8], balances[0])
|
||||
binary.LittleEndian.PutUint64(root1[8:16], balances[1])
|
||||
binary.LittleEndian.PutUint64(root1[16:24], balances[2])
|
||||
binary.LittleEndian.PutUint64(root1[24:32], balances[3])
|
||||
|
||||
// Returns a root for each index (even if duplicated)
|
||||
assert.DeepEqual(t, roots, [][32]byte{root1, root1})
|
||||
}
|
||||
|
||||
func TestValidateIndices_CompressedField(t *testing.T) {
|
||||
fakeTrie := &FieldTrie{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
reference: stateutil.NewRef(0),
|
||||
fieldLayers: nil,
|
||||
field: types.Balances,
|
||||
dataType: types.CompressedArray,
|
||||
length: params.BeaconConfig().ValidatorRegistryLimit / 4,
|
||||
numOfElems: 0,
|
||||
}
|
||||
goodIdx := params.BeaconConfig().ValidatorRegistryLimit - 1
|
||||
assert.NoError(t, fakeTrie.validateIndices([]uint64{goodIdx}))
|
||||
|
||||
badIdx := goodIdx + 1
|
||||
assert.ErrorContains(t, "invalid index for field balances", fakeTrie.validateIndices([]uint64{badIdx}))
|
||||
|
||||
}
|
||||
|
||||
@@ -148,7 +148,6 @@ type WriteOnlyBlockRoots interface {
|
||||
|
||||
// WriteOnlyStateRoots defines a struct which only has write access to state roots methods.
|
||||
type WriteOnlyStateRoots interface {
|
||||
SetStateRoots(val [][]byte) error
|
||||
UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error
|
||||
}
|
||||
|
||||
|
||||
@@ -37,6 +37,15 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool,
|
||||
return has, nil
|
||||
}
|
||||
|
||||
// StateByRootIfCachedNoCopy retrieves a state, without copying it, using the input block root,
// but only if the state is already in the hot state cache.
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
    if !s.hotStateCache.has(blockRoot) {
        return nil
    }
    state := s.hotStateCache.getWithoutCopy(blockRoot)
    return state
}
|
||||
|
||||
// StateByRoot retrieves the state using input block root.
|
||||
func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot")
|
||||
|
||||
@@ -56,6 +56,44 @@ func TestStateByRoot_ColdState(t *testing.T) {
|
||||
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
|
||||
}
|
||||
|
||||
func TestStateByRootIfCachedNoCopy_HotState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
service := New(beaconDB)
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
r := [32]byte{'A'}
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]}))
|
||||
service.hotStateCache.put(r, beaconState)
|
||||
|
||||
loadedState := service.StateByRootIfCachedNoCopy(r)
|
||||
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
|
||||
}
|
||||
|
||||
func TestStateByRootIfCachedNoCopy_ColdState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
service := New(beaconDB)
|
||||
service.finalizedInfo.slot = 2
|
||||
service.slotsPerArchivedPoint = 1
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
bRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
|
||||
loadedState := service.StateByRootIfCachedNoCopy(bRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, loadedState, nil)
|
||||
}
|
||||
|
||||
func TestStateByRoot_HotStateUsingEpochBoundaryCacheNoReplay(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
@@ -23,6 +23,11 @@ func NewMockService() *MockStateManager {
|
||||
}
|
||||
}
|
||||
|
||||
// StateByRootIfCachedNoCopy --
func (m *MockStateManager) StateByRootIfCachedNoCopy(_ [32]byte) state.BeaconState { // skipcq: RVV-B0013
    panic("implement me")
}
|
||||
|
||||
// Resume --
|
||||
func (m *MockStateManager) Resume(_ context.Context, _ state.BeaconState) (state.BeaconState, error) {
|
||||
panic("implement me")
|
||||
|
||||
@@ -31,6 +31,7 @@ type StateManager interface {
|
||||
HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
|
||||
HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, error)
|
||||
StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState
|
||||
StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
StateBySlot(ctx context.Context, slot types.Slot) (state.BeaconState, error)
|
||||
RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
|
||||
|
||||
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"array_root.go",
|
||||
"block_header_root.go",
|
||||
"eth1_root.go",
|
||||
"participation_bit_root.go",
|
||||
@@ -49,7 +48,6 @@ go_test(
|
||||
"state_root_test.go",
|
||||
"stateutil_test.go",
|
||||
"trie_helpers_test.go",
|
||||
"validator_root_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
package stateutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
)
|
||||
|
||||
// HandleByteArrays computes and returns byte arrays in a slice of root format.
|
||||
func HandleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
rootCreator := func(input []byte) {
|
||||
newRoot := bytesutil.ToBytes32(input)
|
||||
roots = append(roots, newRoot)
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
rootCreator(val[i])
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val))
|
||||
}
|
||||
rootCreator(val[idx])
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package stateutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -127,42 +126,3 @@ func ValidatorEncKey(validator *ethpb.Validator) []byte {
|
||||
|
||||
return enc
|
||||
}
|
||||
|
||||
// HandleValidatorSlice returns the validator indices in a slice of root format.
|
||||
func HandleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
rootCreator := func(input *ethpb.Validator) error {
|
||||
newRoot, err := ValidatorRootWithHasher(hasher, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
roots = append(roots, newRoot)
|
||||
return nil
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
err := rootCreator(val[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val))
|
||||
}
|
||||
err := rootCreator(val[idx])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
package stateutil
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
)
|
||||
|
||||
func Test_handleValidatorSlice_OutOfRange(t *testing.T) {
|
||||
vals := make([]*ethpb.Validator, 1)
|
||||
indices := []uint64{3}
|
||||
_, err := HandleValidatorSlice(vals, indices, false)
|
||||
assert.ErrorContains(t, "index 3 greater than number of validators 1", err)
|
||||
}
|
||||
@@ -5,7 +5,10 @@ go_library(
|
||||
srcs = ["types.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/types",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = ["//runtime/version:go_default_library"],
|
||||
deps = [
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
)
|
||||
|
||||
@@ -18,6 +19,10 @@ const (
|
||||
// CompositeArray represents a variable length array with
|
||||
// a non primitive type.
|
||||
CompositeArray
|
||||
// CompressedArray represents a variable length array which
|
||||
// can pack multiple elements into a leaf of the underlying
|
||||
// trie.
|
||||
CompressedArray
|
||||
)
|
||||
|
||||
// String returns the name of the field index.
|
||||
@@ -54,12 +59,12 @@ func (f FieldIndex) String(stateVersion int) string {
|
||||
case Slashings:
|
||||
return "slashings"
|
||||
case PreviousEpochAttestations:
|
||||
if version.Altair == stateVersion {
|
||||
if version.Altair == stateVersion || version.Merge == stateVersion {
|
||||
return "previousEpochParticipationBits"
|
||||
}
|
||||
return "previousEpochAttestations"
|
||||
case CurrentEpochAttestations:
|
||||
if version.Altair == stateVersion {
|
||||
if version.Altair == stateVersion || version.Merge == stateVersion {
|
||||
return "currentEpochParticipationBits"
|
||||
}
|
||||
return "currentEpochAttestations"
|
||||
@@ -77,11 +82,24 @@ func (f FieldIndex) String(stateVersion int) string {
|
||||
return "currentSyncCommittee"
|
||||
case NextSyncCommittee:
|
||||
return "nextSyncCommittee"
|
||||
case LatestExecutionPayloadHeader:
|
||||
return "latestExecutionPayloadHeader"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// ElemsInChunk returns the number of elements in the chunk (number of
// elements that are able to be packed).
func (f FieldIndex) ElemsInChunk() (uint64, error) {
    switch f {
    case Balances:
        return 4, nil
    default:
        return 0, errors.Errorf("field %d doesn't support element compression", f)
    }
}
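A short worked example of why Balances packs four elements per chunk and how that interacts with validateIndices in the fieldtrie helpers: a trie leaf is 32 bytes and each balance is an 8-byte uint64, so 32 / 8 = 4 elements fit per leaf, and for a CompressedArray the valid element-index range is the trie length times ElemsInChunk. The trie length used below is purely illustrative.

package main

import "fmt"

func main() {
    const leafSize = 32   // bytes per trie leaf (chunk)
    const balanceSize = 8 // bytes per little-endian uint64 balance
    elemsPerChunk := leafSize / balanceSize
    fmt.Println(elemsPerChunk) // 4, matching ElemsInChunk for Balances

    // For a compressed field, validateIndices scales the trie length by
    // ElemsInChunk, so a trie of length L accepts element indices up to L*4 - 1.
    trieLength := uint64(1024) // hypothetical trie length
    fmt.Println(trieLength*uint64(elemsPerChunk) - 1) // 4095
}
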
|
||||
// Below we define a set of useful enum values for the field
|
||||
// indices of the beacon state. For example, genesisTime is the
|
||||
// 0th field of the beacon state. This is helpful when we are
|
||||
@@ -114,12 +132,13 @@ const (
|
||||
InactivityScores
|
||||
CurrentSyncCommittee
|
||||
NextSyncCommittee
|
||||
// State fields added in Merge.
|
||||
LatestExecutionPayloadHeader
|
||||
)
|
||||
|
||||
// Altair fields which replaced previous phase 0 fields.
|
||||
const (
|
||||
// Epoch Attestations is switched with participation bits in
|
||||
// Altair.
|
||||
// Epoch Attestations is switched with participation bits in Altair.
|
||||
PreviousEpochParticipationBits = PreviousEpochAttestations
|
||||
CurrentEpochParticipationBits = CurrentEpochAttestations
|
||||
)
|
||||
|
||||
@@ -87,6 +87,7 @@ go_test(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -172,6 +173,10 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin
|
||||
if b.rebuildTrie[index] {
|
||||
return
|
||||
}
|
||||
// Exit early if balance trie computation isn't enabled.
|
||||
if !features.Get().EnableBalanceTrieComputation && index == balances {
|
||||
return
|
||||
}
|
||||
totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
|
||||
if totalIndicesLen > indicesLimit {
|
||||
b.rebuildTrie[index] = true
|
||||
|
||||
@@ -5,24 +5,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
)
|
||||
|
||||
// SetStateRoots for the beacon state. Updates the state roots
|
||||
// to a new value by overwriting the previous value.
|
||||
func (b *BeaconState) SetStateRoots(val [][]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[stateRoots].MinusRef()
|
||||
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
|
||||
|
||||
b.state.StateRoots = val
|
||||
b.markFieldAsDirty(stateRoots)
|
||||
b.rebuildTrie[stateRoots] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateStateRootAtIndex for the beacon state. Updates the state root
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error {
|
||||
|
||||
@@ -103,6 +103,7 @@ func (b *BeaconState) SetBalances(val []uint64) error {
|
||||
|
||||
b.state.Balances = val
|
||||
b.markFieldAsDirty(balances)
|
||||
b.rebuildTrie[balances] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -128,6 +129,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64
|
||||
bals[idx] = val
|
||||
b.state.Balances = bals
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(idx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -219,6 +221,8 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
|
||||
}
|
||||
|
||||
b.state.Balances = append(bals, bal)
|
||||
balIdx := len(b.state.Balances) - 1
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(balIdx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -104,3 +107,90 @@ func TestStateTrie_IsNil(t *testing.T) {
|
||||
nonNilState := &BeaconState{state: ðpb.BeaconState{}}
|
||||
assert.Equal(t, false, nonNilState.IsNil())
|
||||
}
|
||||
|
||||
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
|
||||
count := uint64(100)
|
||||
vals := make([]*ethpb.Validator, 0, count)
|
||||
bals := make([]uint64, 0, count)
|
||||
for i := uint64(1); i < count; i++ {
|
||||
someRoot := [32]byte{}
|
||||
someKey := [48]byte{}
|
||||
copy(someRoot[:], strconv.Itoa(int(i)))
|
||||
copy(someKey[:], strconv.Itoa(int(i)))
|
||||
vals = append(vals, ðpb.Validator{
|
||||
PublicKey: someKey[:],
|
||||
WithdrawalCredentials: someRoot[:],
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1,
|
||||
ActivationEpoch: 1,
|
||||
ExitEpoch: 1,
|
||||
WithdrawableEpoch: 1,
|
||||
})
|
||||
bals = append(bals, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
zeroHash := params.BeaconConfig().ZeroHash
|
||||
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockblockRoots); i++ {
|
||||
mockblockRoots[i] = zeroHash[:]
|
||||
}
|
||||
|
||||
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockstateRoots); i++ {
|
||||
mockstateRoots[i] = zeroHash[:]
|
||||
}
|
||||
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(mockrandaoMixes); i++ {
|
||||
mockrandaoMixes[i] = zeroHash[:]
|
||||
}
|
||||
var pubKeys [][]byte
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
|
||||
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
|
||||
}
|
||||
st, err := InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: make([]byte, 4),
|
||||
CurrentVersion: make([]byte, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
BlockRoots: mockblockRoots,
|
||||
StateRoots: mockstateRoots,
|
||||
RandaoMixes: mockrandaoMixes,
|
||||
JustificationBits: bitfield.NewBitvector4(),
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
if i%2 == 0 {
|
||||
assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000))
|
||||
}
|
||||
if i%3 == 0 {
|
||||
assert.NoError(t, st.AppendBalance(1000))
|
||||
}
|
||||
}
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances])
|
||||
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
@@ -319,6 +320,20 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
        }
        return b.recomputeFieldTrie(validators, b.state.Validators)
    case balances:
        if features.Get().EnableBalanceTrieComputation {
            if b.rebuildTrie[field] {
                maxBalCap := params.BeaconConfig().ValidatorRegistryLimit
                elemSize := uint64(8)
                balLimit := (maxBalCap*elemSize + 31) / 32
                err := b.resetFieldTrie(field, b.state.Balances, balLimit)
                if err != nil {
                    return [32]byte{}, err
                }
                delete(b.rebuildTrie, field)
                return b.stateFieldLeaves[field].TrieRoot()
            }
            return b.recomputeFieldTrie(balances, b.state.Balances)
        }
        return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
    case randaoMixes:
        if b.rebuildTrie[field] {

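A worked example of the balLimit computation in the balances case above, assuming the mainnet ValidatorRegistryLimit of 2^40: each balance occupies 8 bytes and a leaf holds 32 bytes, so the trie needs ceil(limit*8/32) leaves.

package main

import "fmt"

func main() {
    const maxBalCap = uint64(1) << 40 // mainnet VALIDATOR_REGISTRY_LIMIT
    const elemSize = uint64(8)        // bytes per uint64 balance
    balLimit := (maxBalCap*elemSize + 31) / 32
    fmt.Println(balLimit) // 274877906944 leaves, i.e. 2^38
}
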
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -16,6 +17,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
|
||||
defer resetCfg()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func TestInitializeFromProto(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisState(t, 64)
|
||||
pbState, err := v1.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
|
||||
@@ -17,7 +17,6 @@ var _ state.BeaconState = (*BeaconState)(nil)
|
||||
|
||||
func init() {
|
||||
fieldMap = make(map[types.FieldIndex]types.DataType, params.BeaconConfig().BeaconStateFieldCount)
|
||||
|
||||
// Initialize the fixed sized arrays.
|
||||
fieldMap[types.BlockRoots] = types.BasicArray
|
||||
fieldMap[types.StateRoots] = types.BasicArray
|
||||
@@ -28,6 +27,7 @@ func init() {
|
||||
fieldMap[types.Validators] = types.CompositeArray
|
||||
fieldMap[types.PreviousEpochAttestations] = types.CompositeArray
|
||||
fieldMap[types.CurrentEpochAttestations] = types.CompositeArray
|
||||
fieldMap[types.Balances] = types.CompressedArray
|
||||
}
|
||||
|
||||
// fieldMap keeps track of each field
|
||||
|
||||
@@ -29,3 +29,8 @@ func (b *BeaconState) CurrentSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
func (b *BeaconState) NextSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
return nil, errors.New("NextSyncCommittee is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
// LatestExecutionPayloadHeader is not supported for phase 0 beacon state.
|
||||
func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
|
||||
return nil, errors.New("LatestExecutionPayloadHeader is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
@@ -44,3 +44,8 @@ func (b *BeaconState) SetCurrentParticipationBits(val []byte) error {
|
||||
func (b *BeaconState) SetInactivityScores(val []uint64) error {
|
||||
return errors.New("SetInactivityScores is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
// SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state.
|
||||
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
|
||||
return errors.New("SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
@@ -79,11 +79,13 @@ go_test(
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -14,3 +14,8 @@ func (b *BeaconState) PreviousEpochAttestations() ([]*ethpb.PendingAttestation,
|
||||
func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
|
||||
return nil, errors.New("CurrentEpochAttestations is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
// LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state.
|
||||
func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
|
||||
return nil, errors.New("LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
@@ -29,3 +29,8 @@ func (b *BeaconState) AppendPreviousEpochAttestations(val *ethpb.PendingAttestat
|
||||
func (b *BeaconState) RotateAttestations() error {
|
||||
return errors.New("RotateAttestations is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
// SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state.
|
||||
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
|
||||
return errors.New("SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -171,6 +172,10 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin
|
||||
if b.rebuildTrie[index] {
|
||||
return
|
||||
}
|
||||
// Exit early if balance trie computation isn't enabled.
|
||||
if !features.Get().EnableBalanceTrieComputation && index == balances {
|
||||
return
|
||||
}
|
||||
totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
|
||||
if totalIndicesLen > indicesLimit {
|
||||
b.rebuildTrie[index] = true
|
||||
|
||||
@@ -5,24 +5,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
)
|
||||
|
||||
// SetStateRoots for the beacon state. Updates the state roots
|
||||
// to a new value by overwriting the previous value.
|
||||
func (b *BeaconState) SetStateRoots(val [][]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[stateRoots].MinusRef()
|
||||
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
|
||||
|
||||
b.state.StateRoots = val
|
||||
b.markFieldAsDirty(stateRoots)
|
||||
b.rebuildTrie[stateRoots] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateStateRootAtIndex for the beacon state. Updates the state root
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error {
|
||||
|
||||
@@ -2,10 +2,15 @@ package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -57,3 +62,100 @@ func TestAppendBeyondIndicesLimit(t *testing.T) {
|
||||
assert.Equal(t, true, st.rebuildTrie[validators])
|
||||
assert.Equal(t, len(st.dirtyIndices[validators]), 0)
|
||||
}
|
||||
|
||||
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
|
||||
count := uint64(100)
|
||||
vals := make([]*ethpb.Validator, 0, count)
|
||||
bals := make([]uint64, 0, count)
|
||||
for i := uint64(1); i < count; i++ {
|
||||
someRoot := [32]byte{}
|
||||
someKey := [48]byte{}
|
||||
copy(someRoot[:], strconv.Itoa(int(i)))
|
||||
copy(someKey[:], strconv.Itoa(int(i)))
|
||||
vals = append(vals, ðpb.Validator{
|
||||
PublicKey: someKey[:],
|
||||
WithdrawalCredentials: someRoot[:],
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1,
|
||||
ActivationEpoch: 1,
|
||||
ExitEpoch: 1,
|
||||
WithdrawableEpoch: 1,
|
||||
})
|
||||
bals = append(bals, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
zeroHash := params.BeaconConfig().ZeroHash
|
||||
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockblockRoots); i++ {
|
||||
mockblockRoots[i] = zeroHash[:]
|
||||
}
|
||||
|
||||
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockstateRoots); i++ {
|
||||
mockstateRoots[i] = zeroHash[:]
|
||||
}
|
||||
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(mockrandaoMixes); i++ {
|
||||
mockrandaoMixes[i] = zeroHash[:]
|
||||
}
|
||||
var pubKeys [][]byte
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
|
||||
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
|
||||
}
|
||||
st, err := InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Slot: 1,
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: make([]byte, 4),
|
||||
CurrentVersion: make([]byte, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
CurrentEpochParticipation: []byte{},
|
||||
PreviousEpochParticipation: []byte{},
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
BlockRoots: mockblockRoots,
|
||||
StateRoots: mockstateRoots,
|
||||
RandaoMixes: mockrandaoMixes,
|
||||
JustificationBits: bitfield.NewBitvector4(),
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
CurrentSyncCommittee: ðpb.SyncCommittee{
|
||||
Pubkeys: pubKeys,
|
||||
AggregatePubkey: make([]byte, 48),
|
||||
},
|
||||
NextSyncCommittee: ðpb.SyncCommittee{
|
||||
Pubkeys: pubKeys,
|
||||
AggregatePubkey: make([]byte, 48),
|
||||
},
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
if i%2 == 0 {
|
||||
assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000))
|
||||
}
|
||||
if i%3 == 0 {
|
||||
assert.NoError(t, st.AppendBalance(1000))
|
||||
}
|
||||
}
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances])
|
||||
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
|
||||
}
|
||||
|
||||
@@ -102,6 +102,7 @@ func (b *BeaconState) SetBalances(val []uint64) error {
|
||||
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
|
||||
|
||||
b.state.Balances = val
|
||||
b.rebuildTrie[balances] = true
|
||||
b.markFieldAsDirty(balances)
|
||||
return nil
|
||||
}
|
||||
@@ -128,6 +129,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64
|
||||
bals[idx] = val
|
||||
b.state.Balances = bals
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(idx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -219,7 +221,9 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
|
||||
}
|
||||
|
||||
b.state.Balances = append(bals, bal)
|
||||
balIdx := len(b.state.Balances) - 1
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(balIdx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
	"github.com/prysmaticlabs/prysm/config/features"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/container/slice"
	"github.com/prysmaticlabs/prysm/crypto/hash"
@@ -324,6 +325,20 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
		}
		return b.recomputeFieldTrie(validators, b.state.Validators)
	case balances:
		if features.Get().EnableBalanceTrieComputation {
			if b.rebuildTrie[field] {
				maxBalCap := params.BeaconConfig().ValidatorRegistryLimit
				elemSize := uint64(8)
				balLimit := (maxBalCap*elemSize + 31) / 32
				err := b.resetFieldTrie(field, b.state.Balances, balLimit)
				if err != nil {
					return [32]byte{}, err
				}
				delete(b.rebuildTrie, field)
				return b.stateFieldLeaves[field].TrieRoot()
			}
			return b.recomputeFieldTrie(balances, b.state.Balances)
		}
		return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
	case randaoMixes:
		if b.rebuildTrie[field] {
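The balLimit computed in the balances case is simply the number of 32-byte SSZ chunks needed to hold ValidatorRegistryLimit eight-byte balances, i.e. ceil(limit*8/32). A standalone sketch of that arithmetic, assuming the mainnet registry limit of 2^40 for illustration:

package main

import "fmt"

// chunkCount returns how many 32-byte SSZ chunks are needed to hold `limit`
// uint64 values of eight bytes each, i.e. ceil(limit*8/32). This mirrors the
// balLimit expression in the balances case above.
func chunkCount(limit uint64) uint64 {
	const elemSize = uint64(8)
	return (limit*elemSize + 31) / 32
}

func main() {
	// Assuming the mainnet ValidatorRegistryLimit of 2^40 for illustration,
	// the balances trie is capped at 2^38 leaf chunks.
	const validatorRegistryLimit = uint64(1) << 40
	fmt.Println(chunkCount(validatorRegistryLimit)) // 274877906944 (2^38)
}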
@@ -6,6 +6,7 @@ import (
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	"github.com/prysmaticlabs/prysm/config/features"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -13,6 +14,12 @@ import (
	"github.com/prysmaticlabs/prysm/testing/require"
)

func TestMain(m *testing.M) {
	resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
	defer resetCfg()
	m.Run()
}

func TestValidatorMap_DistinctCopy(t *testing.T) {
	count := uint64(100)
	vals := make([]*ethpb.Validator, 0, count)
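TestMain enables EnableBalanceTrieComputation for every test in the package; the same InitWithReset helper could also scope the flag to a single test if package-wide enablement were not wanted. A hypothetical per-test variant (test name and package are placeholders, only features.InitWithReset comes from the diff above):

package v2_test // hypothetical external test package

import (
	"testing"

	"github.com/prysmaticlabs/prysm/config/features"
)

func TestBalancesRoot_TrieEnabled(t *testing.T) {
	// InitWithReset returns a restore function, so deferring it keeps the
	// flag override local to this one test rather than the whole package.
	resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
	defer resetCfg()
	// ... build a state here and compare incremental vs. recomputed balance roots ...
}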
@@ -22,6 +22,9 @@ func init() {
	// Initialize the composite arrays.
	fieldMap[types.Eth1DataVotes] = types.CompositeArray
	fieldMap[types.Validators] = types.CompositeArray

	// Initialize Compressed Arrays
	fieldMap[types.Balances] = types.CompressedArray
}

// fieldMap keeps track of each field
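For context, a CompressedArray field is one whose primitive elements are packed several per 32-byte trie leaf rather than one element per leaf; for uint64 balances that is four values per chunk, so a changed element at index i falls into leaf i/4 of the packed representation. A rough, self-contained illustration of that packing (not the fieldtrie implementation itself):

package main

import (
	"encoding/binary"
	"fmt"
)

// packBalances packs uint64 balances little-endian, four per 32-byte chunk,
// zero-padding the final chunk -- the leaf layout a packed ("compressed")
// uint64 list uses in SSZ.
func packBalances(balances []uint64) [][32]byte {
	chunks := make([][32]byte, (len(balances)+3)/4)
	for i, b := range balances {
		binary.LittleEndian.PutUint64(chunks[i/4][(i%4)*8:], b)
	}
	return chunks
}

func main() {
	chunks := packBalances([]uint64{1, 2, 3, 4, 5})
	fmt.Println(len(chunks))   // 2 chunks for 5 balances
	fmt.Println(chunks[1][:8]) // little-endian encoding of the fifth balance (5)
}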
beacon-chain/state/v3/BUILD.bazel (new file, 78 lines)
@@ -0,0 +1,78 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "deprecated_getters.go",
        "deprecated_setters.go",
        "field_root_eth1.go",
        "field_root_validator.go",
        "field_root_vector.go",
        "field_roots.go",
        "getters_block.go",
        "getters_checkpoint.go",
        "getters_eth1.go",
        "getters_misc.go",
        "getters_participation.go",
        "getters_payload_header.go",
        "getters_randao.go",
        "getters_state.go",
        "getters_sync_committee.go",
        "getters_validator.go",
        "setters_block.go",
        "setters_checkpoint.go",
        "setters_eth1.go",
        "setters_misc.go",
        "setters_participation.go",
        "setters_payload_header.go",
        "setters_randao.go",
        "setters_state.go",
        "setters_sync_committee.go",
        "setters_validator.go",
        "state_trie.go",
        "types.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3",
    visibility = ["//beacon-chain:__pkg__"],
    deps = [
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/fieldtrie:go_default_library",
        "//beacon-chain/state/stateutil:go_default_library",
        "//beacon-chain/state/types:go_default_library",
        "//beacon-chain/state/v1:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_dgraph_io_ristretto//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "deprecated_getters_test.go",
        "deprecated_setters_test.go",
        "field_root_test.go",
        "getters_block_test.go",
        "getters_test.go",
        "getters_validator_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/state/v1:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
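Assuming a standard Bazel workspace for this repository, the targets declared above would be exercised with a command along the lines of `bazel test //beacon-chain/state/v3:go_default_test`, and the library built with `bazel build //beacon-chain/state/v3:go_default_library`.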
beacon-chain/state/v3/deprecated_getters.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package v3

import (
	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// PreviousEpochAttestations is not supported for Merge beacon state.
func (b *BeaconState) PreviousEpochAttestations() ([]*ethpb.PendingAttestation, error) {
	return nil, errors.New("PreviousEpochAttestations is not supported for version Merge beacon state")
}

// CurrentEpochAttestations is not supported for Merge beacon state.
func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
	return nil, errors.New("CurrentEpochAttestations is not supported for version Merge beacon state")
}
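Callers holding the generic state interface are expected to treat these errors as a signal to use the Altair-style participation getters instead. A hypothetical helper, assuming the shared interface exposes both PreviousEpochAttestations and PreviousEpochParticipation (a sketch, not an API guarantee):

package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// previousEpochVotes sketches version-aware access: phase0 states expose
// pending attestations, while Altair/Merge states return an error from that
// getter and expose participation flags instead.
func previousEpochVotes(st state.BeaconState) (interface{}, error) {
	if atts, err := st.PreviousEpochAttestations(); err == nil {
		return atts, nil // phase0 path: pending attestations
	}
	return st.PreviousEpochParticipation() // Altair/Merge path: participation flags
}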
beacon-chain/state/v3/deprecated_getters_test.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package v3

import (
	"testing"

	"github.com/prysmaticlabs/prysm/testing/require"
)

func TestBeaconState_CurrentEpochAttestations(t *testing.T) {
	s := &BeaconState{}
	_, err := s.CurrentEpochAttestations()
	require.ErrorContains(t, "CurrentEpochAttestations is not supported for version Merge beacon state", err)
}

func TestBeaconState_PreviousEpochAttestations(t *testing.T) {
	s := &BeaconState{}
	_, err := s.PreviousEpochAttestations()
	require.ErrorContains(t, "PreviousEpochAttestations is not supported for version Merge beacon state", err)
}
beacon-chain/state/v3/deprecated_setters.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package v3

import (
	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// SetPreviousEpochAttestations is not supported for Merge beacon state.
func (b *BeaconState) SetPreviousEpochAttestations(val []*ethpb.PendingAttestation) error {
	return errors.New("SetPreviousEpochAttestations is not supported for version Merge beacon state")
}

// SetCurrentEpochAttestations is not supported for Merge beacon state.
func (b *BeaconState) SetCurrentEpochAttestations(val []*ethpb.PendingAttestation) error {
	return errors.New("SetCurrentEpochAttestations is not supported for version Merge beacon state")
}

// AppendCurrentEpochAttestations is not supported for Merge beacon state.
func (b *BeaconState) AppendCurrentEpochAttestations(val *ethpb.PendingAttestation) error {
	return errors.New("AppendCurrentEpochAttestations is not supported for version Merge beacon state")
}

// AppendPreviousEpochAttestations is not supported for Merge beacon state.
func (b *BeaconState) AppendPreviousEpochAttestations(val *ethpb.PendingAttestation) error {
	return errors.New("AppendPreviousEpochAttestations is not supported for version Merge beacon state")
}

// RotateAttestations is not supported for Merge beacon state.
func (b *BeaconState) RotateAttestations() error {
	return errors.New("RotateAttestations is not supported for version Merge beacon state")
}
beacon-chain/state/v3/deprecated_setters_test.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package v3

import (
	"testing"

	"github.com/prysmaticlabs/prysm/testing/require"
)

func TestBeaconState_AppendCurrentEpochAttestations(t *testing.T) {
	s := &BeaconState{}
	require.ErrorContains(t, "AppendCurrentEpochAttestations is not supported for version Merge beacon state", s.AppendCurrentEpochAttestations(nil))
}

func TestBeaconState_AppendPreviousEpochAttestations(t *testing.T) {
	s := &BeaconState{}
	require.ErrorContains(t, "AppendPreviousEpochAttestations is not supported for version Merge beacon state", s.AppendPreviousEpochAttestations(nil))
}

func TestBeaconState_SetCurrentEpochAttestations(t *testing.T) {
	s := &BeaconState{}
	require.ErrorContains(t, "SetCurrentEpochAttestations is not supported for version Merge beacon state", s.SetCurrentEpochAttestations(nil))
}

func TestBeaconState_SetPreviousEpochAttestations(t *testing.T) {
	s := &BeaconState{}
	require.ErrorContains(t, "SetPreviousEpochAttestations is not supported for version Merge beacon state", s.SetPreviousEpochAttestations(nil))
}
Some files were not shown because too many files have changed in this diff.