Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)
Compare commits
77 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 4aa372a65f | |
| | f89b7624cf | |
| | b854b51e21 | |
| | 6832f79cf2 | |
| | f3e6c83d66 | |
| | 82453961ee | |
| | b1d3ea83bd | |
| | 98ab265b6b | |
| | 8e115672c5 | |
| | 4778ec7434 | |
| | fac8526eb6 | |
| | 93fc666f83 | |
| | b5bd461627 | |
| | 788338a004 | |
| | 8fecc5af7f | |
| | 03e40edf2c | |
| | 39c33b82ad | |
| | 905e0f4c1c | |
| | 0ade1f121d | |
| | ee52f8dff3 | |
| | 50159c2e48 | |
| | cae58bbbd8 | |
| | 9b37418761 | |
| | a78cdf86cc | |
| | 1c4ea75a18 | |
| | 6f4c80531c | |
| | 7a2b8e4e6a | |
| | e13cdf493e | |
| | fdd9c535b4 | |
| | ba728d4929 | |
| | cd5eb0a2ef | |
| | 6d44428e9c | |
| | cefb5cec55 | |
| | 815debee38 | |
| | fc6c17cc75 | |
| | df0e9fa3d7 | |
| | 0287bc65c7 | |
| | 35c3225579 | |
| | f238f872a1 | |
| | 24f105b804 | |
| | f98354f59f | |
| | 2aea4e49f4 | |
| | a8e8338973 | |
| | ed78d15ed6 | |
| | d5cf0a2e54 | |
| | b89bb3fa30 | |
| | be722f2c5c | |
| | 09c99b25bc | |
| | 24ff40fbf5 | |
| | d96491ffa9 | |
| | 5121a50bb4 | |
| | 1c6d914ea1 | |
| | 71adada879 | |
| | 5790aa66e0 | |
| | 8e6bb39d2f | |
| | ad06914f45 | |
| | 259e07d5c9 | |
| | f6cf77acd8 | |
| | 40a36fb02d | |
| | 2e65be12b8 | |
| | 4a237e11bc | |
| | 4d9eafe110 | |
| | 7b1b9a564b | |
| | 341cced53f | |
| | a3ad254b78 | |
| | a7fc25f2e0 | |
| | 9551a6c4b8 | |
| | d39113af60 | |
| | d27334746b | |
| | d00c7a0ce8 | |
| | c7826856a5 | |
| | 1bec9ae9e6 | |
| | 17ed9356ff | |
| | c493290027 | |
| | 30c07a8a1a | |
| | b7fb8a8dcd | |
| | bd108c3244 | |
@@ -18,6 +18,7 @@ go_library(
"receive_attestation.go",
"receive_block.go",
"service.go",
"state_balance_cache.go",
"weak_subjectivity_checks.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
@@ -95,6 +96,7 @@ go_test(
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",
@@ -141,6 +143,7 @@ go_test(
"chain_info_norace_test.go",
"checktags_test.go",
"init_test.go",
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
],

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
@@ -42,9 +41,6 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
// ensure head gets its best justified info.
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = s.bestJustifiedCheckpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}

// Get head from the fork choice service.
@@ -273,57 +269,6 @@ func (s *Service) hasHeadState() bool {
return s.head != nil && s.head.state != nil
}

// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}

s.clearInitSyncBlocks()

var justifiedState state.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
if err != nil {
return err
}
} else {
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return err
}
}
if justifiedState == nil || justifiedState.IsNil() {
return errors.New("justified state can't be nil")
}

epoch := time.CurrentEpoch(justifiedState)

justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}); err != nil {
return err
}

s.justifiedBalancesLock.Lock()
defer s.justifiedBalancesLock.Unlock()
s.justifiedBalances = justifiedBalances
return nil
}

func (s *Service) getJustifiedBalances() []uint64 {
s.justifiedBalancesLock.RLock()
defer s.justifiedBalancesLock.RUnlock()
return s.justifiedBalances
}

// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(

@@ -123,13 +123,15 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
ctx := context.Background()

state, _ := util.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
balances, err := service.justifiedBalances.get(ctx, r)
require.NoError(t, err)
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
}

func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {

@@ -25,18 +25,18 @@ func TestService_TreeHandler(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
),
StateGen: stategen.New(beaconDB),
fcs := protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
s, err := NewService(ctx)
s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)

@@ -130,6 +130,14 @@ var (
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_hit",
Help: "Count the number of state balance cache hits.",
})
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
)

// reportSlotMetrics reports slot related metrics.
beacon-chain/blockchain/mock_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package blockchain

import (
"context"
"errors"
"testing"

testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
}

// warning: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
return []Option{
withStateBalanceCache(satisfactoryStateBalanceCache()),
}
}

type mockStateByRooter struct {
state state.BeaconState
err error
}

var _ stateByRooter = &mockStateByRooter{}

func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return m.state, m.err
}

// returns an instance of the state balance cache that can be used
// to satisfy the requirement for one in NewService, but which will
// always return an error if used.
func satisfactoryStateBalanceCache() *stateBalanceCache {
err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
}
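As a quick illustration of how these helpers are meant to be combined, a hypothetical test (not part of this change; the test name and body below are assumptions for illustration) could build a service without any database setup:

    func TestExample_NoDatabase(t *testing.T) {
        // testServiceOptsNoDB injects a stateBalanceCache whose stategen always
        // errors, so only exercise code paths that never read balances or hit the DB.
        opts := testServiceOptsNoDB()
        service, err := NewService(context.Background(), opts...)
        require.NoError(t, err)
        _ = service // call only DB-free methods here
    }

Tests that do touch the database or stategen should use testServiceOptsWithDB instead, as the updated tests below do.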
@@ -130,6 +130,13 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
}
}

func withStateBalanceCache(c *stateBalanceCache) Option {
return func(s *Service) error {
s.justifiedBalances = c
return nil
}
}

// WithFinalizedStateAtStartUp to store finalized state at start up.
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return func(s *Service) error {

@@ -24,14 +24,13 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
@@ -131,14 +130,14 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -157,13 +156,12 @@ func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -230,13 +228,12 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

epoch := types.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
@@ -268,12 +265,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) {

func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)}))
@@ -281,12 +276,10 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {

func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
@@ -294,12 +287,10 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {

func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -308,12 +299,9 @@ func TestAttEpoch_NotMatch(t *testing.T) {

func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

d := util.HydrateAttestationData(&ethpb.AttestationData{})
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
@@ -321,12 +309,10 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {

func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b := util.NewBeaconBlock()
b.Block.Slot = 2
@@ -340,12 +326,10 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {

func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b := util.NewBeaconBlock()
b.Block.Slot = 2
@@ -361,10 +345,14 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -387,12 +375,10 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {

func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -415,12 +401,10 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {

func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32

@@ -151,7 +151,12 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
s.finalizedCheckpt = postState.FinalizedCheckpoint()
}

if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", s.justifiedCheckpt.Root)
return errors.Wrap(err, msg)
}
if err := s.updateHead(ctx, balances); err != nil {
log.WithError(err).Warn("Could not update head")
}

@@ -193,9 +193,6 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
if canUpdate {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}

return nil
@@ -206,12 +203,13 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
// This method does not have defense against fork choice bouncing attack, which is why it's only recommend to be used during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cp
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {

if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
s.justifiedCheckpt = cp

return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
return nil
}

func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -330,7 +328,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
// we don't need to check if the previous justified checkpoint was an ancestor since the new
// finalized checkpoint is overriding it.
return nil
}

// Update justified if store justified is not in chain with finalized check point.
@@ -345,9 +345,6 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
}
if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
}
return nil

@@ -35,16 +35,17 @@ import (

func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx)

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
@@ -134,13 +135,12 @@ func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -186,14 +186,12 @@ func TestStore_OnBlockBatch(t *testing.T) {

func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.genesisTime = time.Now()

update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -221,16 +219,13 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {

func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})

lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -255,13 +250,12 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
@@ -289,13 +283,12 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -327,10 +320,13 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

signedBlock := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
@@ -361,10 +357,12 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

@@ -400,10 +398,12 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

@@ -442,10 +442,12 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
@@ -586,7 +588,8 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)

cancel()
@@ -598,10 +601,14 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -642,10 +649,9 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {

func TestAncestor_CanUseForkchoice(t *testing.T) {
ctx := context.Background()
cfg := &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -682,10 +688,14 @@ func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -720,10 +730,9 @@ func TestAncestor_CanUseDB(t *testing.T) {

func TestEnsureRootNotZeroHashes(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.genesisRoot = [32]byte{'a'}

r := service.ensureRootNotZeros(params.BeaconConfig().ZeroHash)
@@ -735,6 +744,12 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {

func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
ctx := context.Background()
type args struct {
cachedCheckPoint *ethpb.Checkpoint
@@ -776,9 +791,8 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.justifiedCheckpt = test.args.cachedCheckPoint
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := util.NewBeaconState()
@@ -815,6 +829,12 @@ func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()

fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
b := util.NewBeaconBlock()
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
@@ -869,9 +889,8 @@ func TestVerifyBlkDescendant(t *testing.T) {
},
}
for _, tt := range tests {
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: tt.args.finalizedRoot[:],
}
@@ -885,12 +904,10 @@ func TestVerifyBlkDescendant(t *testing.T) {
}

func TestUpdateJustifiedInitSync(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

gBlk := util.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
@@ -916,10 +933,9 @@ func TestUpdateJustifiedInitSync(t *testing.T) {

func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -931,10 +947,9 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {

func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
@@ -946,18 +961,18 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
StateNotifier: &mock.MockStateNotifier{},
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -991,18 +1006,12 @@ func TestOnBlock_CanFinalize(t *testing.T) {

func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
}
service, err := NewService(ctx)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

@@ -131,7 +131,13 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
continue
}
s.processAttestations(s.ctx)
if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {

balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err)
continue
}
if err := s.updateHead(s.ctx, balances); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
}
}

@@ -9,9 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -42,12 +40,10 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {

func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -71,12 +67,10 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {

func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -101,18 +95,12 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
func TestProcessAttestations_Ok(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()))

cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
AttPool: attestations.NewPool(),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))

@@ -124,21 +124,16 @@ func TestService_ReceiveBlock(t *testing.T) {
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))

cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx)
s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -166,21 +161,17 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx)

s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -250,19 +241,14 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx)
s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
@@ -287,9 +273,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}

func TestService_HasInitSyncBlock(t *testing.T) {
s, err := NewService(context.Background())
opts := testServiceOptsNoDB()
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}}
r := [32]byte{'a'}
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
@@ -301,11 +288,10 @@ func TestService_HasInitSyncBlock(t *testing.T) {
}

func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.finalizedCheckpt = &ethpb.Checkpoint{}
@@ -315,11 +301,10 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
}

func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -329,11 +314,10 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
}

func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
s.genesisTime = time.Now()

@@ -62,9 +62,9 @@ type Service struct {
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]block.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
//justifiedBalances []uint64
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
}

// config options for the service.
@@ -97,7 +97,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
cfg: &config{},
}
for _, opt := range opts {
@@ -106,6 +105,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}
var err error
if srv.justifiedBalances == nil {
srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
if err != nil {
return nil, err
}
}
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err
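A sketch of what the constructor change above means for callers, assuming ctx and an open beaconDB are in scope (illustrative only, not part of the diff): when no explicit cache is injected via withStateBalanceCache, NewService builds one from the configured stategen, so omitting WithStateGen makes construction fail with the newStateBalanceCache error.

    sg := stategen.New(beaconDB)
    service, err := NewService(ctx,
        WithDatabase(beaconDB),
        WithStateGen(sg), // needed here: the default balance cache is built from stategen
    )
    if err != nil {
        log.Fatalf("Could not create blockchain service: %v", err)
    }
    _ = service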
@@ -151,9 +156,6 @@ func (s *Service) Start() {

// Resume fork choice.
s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
log.Fatalf("Could not cache justified state balances: %v", err)
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
@@ -340,9 +342,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()

s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
return err
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)

@@ -108,25 +108,24 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
depositCache, err := depositcache.New()
require.NoError(t, err)

cfg := &config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
DepositCache: depositCache,
ChainStartFetcher: web3Service,
P2p: &mockBroadcaster{},
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
AttService: attService,
stateGen := stategen.New(beaconDB)
// Safe a state in stategen to purposes of testing a service stop / shutdown.
require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))

opts := []Option{
WithDatabase(beaconDB),
WithDepositCache(depositCache),
WithChainStartFetcher(web3Service),
WithAttestationPool(attestations.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
WithAttestationService(attService),
WithStateGen(stateGen),
}

// Safe a state in stategen to purposes of testing a service stop / shutdown.
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))

chainService, err := NewService(ctx)
chainService, err := NewService(ctx, opts...)
require.NoError(t, err, "Unable to setup chain service")
chainService.cfg = cfg
chainService.genesisTime = time.Unix(1, 0) // non-zero time

return chainService
beacon-chain/blockchain/state_balance_cache.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package blockchain

import (
"context"
"errors"
"sync"

"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

var errNilStateFromStategen = errors.New("justified state can't be nil")

type stateBalanceCache struct {
sync.Mutex
balances []uint64
root [32]byte
stateGen stateByRooter
}

type stateByRooter interface {
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}

// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
if sg == nil {
return nil, errors.New("Can't initialize state balance cache without stategen")
}
return &stateBalanceCache{stateGen: sg}, nil
}

// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return nil, err
}
if justifiedState == nil || justifiedState.IsNil() {
return nil, errNilStateFromStategen
}
epoch := time.CurrentEpoch(justifiedState)

justifiedBalances := make([]uint64, justifiedState.NumValidators())
var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}
if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
return nil, err
}

c.balances = justifiedBalances
c.root = justifiedRoot
return c.balances, nil
}

// getBalances takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
c.Lock()
defer c.Unlock()
if justifiedRoot == c.root {
stateBalanceCacheHit.Inc()
return c.balances, nil
}

return c.update(ctx, justifiedRoot)
}
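To summarize the read path above: construct the cache from a stategen instance once, then call get with the current justified root on every use. A minimal sketch, assuming sg, ctx, and justifiedRoot already exist in the caller's scope:

    cache, err := newStateBalanceCache(sg) // errors if sg is nil
    if err != nil {
        return err
    }
    // Hit: the root matches the single cached root and the balances are returned directly.
    // Miss: update() loads the justified state, recomputes active-validator balances,
    // and overwrites the one cached entry.
    balances, err := cache.get(ctx, justifiedRoot)
    if err != nil {
        return err
    }
    _ = balances // typically passed to updateHead for fork choice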
beacon-chain/blockchain/state_balance_cache_test.go (new file, 225 lines)
@@ -0,0 +1,225 @@
package blockchain

import (
"context"
"encoding/binary"
"errors"
"testing"

types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/time/slots"
)

type mockStateByRoot struct {
state state.BeaconState
err error
}

func (m *mockStateByRoot) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
return m.state, m.err
}

type testStateOpt func(*ethpb.BeaconStateAltair)

func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Validators = v
}
}

func testStateWithSlot(slot types.Slot) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Slot = slot
}
}

func testStateFixture(opts ...testStateOpt) state.BeaconState {
a := &ethpb.BeaconStateAltair{}
for _, o := range opts {
o(a)
}
s, _ := v2.InitializeFromProtoUnsafe(a)
return s
}

func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
vs := make([]*ethpb.Validator, count)
var i uint32 = 0
for ; i < uint32(count); i++ {
pk := make([]byte, 48)
binary.LittleEndian.PutUint32(pk, i)
v := &ethpb.Validator{PublicKey: pk}
for _, o := range opts {
o(v)
}
vs[i] = v
}
return vs
}

func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
} else {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
}
}
}

func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
} else {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}
}

func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}

func balanceIsKeyTimes2(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
v.EffectiveBalance = uint64(pki) * 2
}

func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsExpired(types.Slot(99)),
balanceIsKeyTimes2), balances
}

func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsQueued(types.Slot(99)),
balanceIsKeyTimes2), balances
}

func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
return generateTestValidators(10,
allValidatorsValid(types.Slot(99)),
balanceIsKeyTimes2), balances
}
|
||||
func TestStateBalanceCache(t *testing.T) {
|
||||
type sbcTestCase struct {
|
||||
err error
|
||||
root [32]byte
|
||||
sbc *stateBalanceCache
|
||||
balances []uint64
|
||||
name string
|
||||
}
|
||||
sentinelCacheMiss := errors.New("Cache missed, as expected!")
|
||||
sentinelBalances := []uint64{1, 2, 3, 4, 5}
|
||||
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
|
||||
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
|
||||
allValidValidators, allValidBalances := testAllValidValidators()
|
||||
cases := []sbcTestCase{
|
||||
{
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
balances: sentinelBalances,
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
err: sentinelCacheMiss,
|
||||
},
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
balances: sentinelBalances,
|
||||
},
|
||||
name: "cache hit",
|
||||
},
|
||||
// This case uses a stateRooter that returns a known error, so the miss is
// detected by confirming that stategen was called; it also shows that
// stategen errors are propagated to the caller.
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
//state: generateTestValidators(1, testWithBadEpoch),
|
||||
err: sentinelCacheMiss,
|
||||
},
|
||||
root: bytesutil.ToBytes32([]byte{'B'}),
|
||||
},
|
||||
err: sentinelCacheMiss,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "cache miss",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{},
|
||||
root: bytesutil.ToBytes32([]byte{'B'}),
|
||||
},
|
||||
err: errNilStateFromStategen,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "error for nil state upon cache miss",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(halfExpiredValidators)),
|
||||
},
|
||||
},
|
||||
balances: halfExpiredBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "test filtering by exit epoch",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(halfQueuedValidators)),
|
||||
},
|
||||
},
|
||||
balances: halfQueuedBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "test filtering by activation epoch",
|
||||
},
|
||||
{
|
||||
sbc: &stateBalanceCache{
|
||||
stateGen: &mockStateByRooter{
|
||||
state: testStateFixture(
|
||||
testStateWithSlot(99),
|
||||
testStateWithValidators(allValidValidators)),
|
||||
},
|
||||
},
|
||||
balances: allValidBalances,
|
||||
root: bytesutil.ToBytes32([]byte{'A'}),
|
||||
name: "happy path",
|
||||
},
|
||||
}
|
||||
ctx := context.Background()
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
cache := c.sbc
|
||||
cacheRootStart := cache.root
|
||||
b, err := cache.get(ctx, c.root)
|
||||
require.ErrorIs(t, err, c.err)
|
||||
require.DeepEqual(t, c.balances, b)
|
||||
if c.err != nil {
|
||||
// if there was an error somewhere, the root should not have changed (unless it already matched)
|
||||
require.Equal(t, cacheRootStart, cache.root)
|
||||
} else {
|
||||
// when successful, the cache should always end with a root matching the request
|
||||
require.Equal(t, c.root, cache.root)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -147,7 +147,6 @@ type Database interface {
|
||||
io.Closer
|
||||
backup.BackupExporter
|
||||
HeadAccessDatabase
|
||||
|
||||
DatabasePath() string
|
||||
ClearDB() error
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
"finalized_block_roots.go",
|
||||
"genesis.go",
|
||||
"kv.go",
|
||||
"light.go",
|
||||
"log.go",
|
||||
"migration.go",
|
||||
"migration_archived_index.go",
|
||||
|
||||
beacon-chain/light/BUILD.bazel (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"service.go",
|
||||
"update_comparison.go",
|
||||
"updater.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/light",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/light/service.go (new file, 209 lines)
@@ -0,0 +1,209 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
syncSrv "github.com/prysmaticlabs/prysm/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
block2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
type UpdatesFetcher interface {
|
||||
BestUpdateForPeriod(ctx context.Context, period uint64) (*ethpb.LightClientUpdate, error)
|
||||
LatestFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate
|
||||
LatestNonFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate
|
||||
}
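A hedged sketch (not part of the diff) of a consumer of this interface; later in this diff the RPC service receives the light service through a LightUpdatesFetcher field, and a handler along these illustrative lines could serve updates without depending on this package's internals.

func latestUpdates(ctx context.Context, f UpdatesFetcher, period uint64) (*ethpb.LightClientUpdate, *ethpb.LightClientUpdate, error) {
	best, err := f.BestUpdateForPeriod(ctx, period)
	if err != nil {
		return nil, nil, err // no update stored yet for this sync committee period
	}
	return best, f.LatestFinalizedUpdate(ctx), nil
}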
|
||||
|
||||
type Config struct {
|
||||
StateGen stategen.StateManager
|
||||
Database iface.Database
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
StateNotifier statefeed.Notifier
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker syncSrv.Checker
|
||||
}
|
||||
|
||||
type Service struct {
|
||||
cfg *Config
|
||||
cancelFunc context.CancelFunc
|
||||
prevHeadData map[[32]byte]*ethpb.SyncAttestedData
|
||||
lock sync.RWMutex
|
||||
genesisTime time.Time
|
||||
finalizedByEpoch map[types.Epoch]*ethpb.LightClientFinalizedCheckpoint
|
||||
bestUpdateByPeriod map[uint64]*ethpb.LightClientUpdate
|
||||
latestFinalizedUpdate *ethpb.LightClientUpdate
|
||||
latestNonFinalizedUpdate *ethpb.LightClientUpdate
|
||||
}
|
||||
|
||||
// New creates a light client update service from the given configuration.
|
||||
func New(ctx context.Context, cfg *Config) *Service {
|
||||
return &Service{
|
||||
cfg: cfg,
|
||||
prevHeadData: make(map[[32]byte]*ethpb.SyncAttestedData),
|
||||
finalizedByEpoch: make(map[types.Epoch]*ethpb.LightClientFinalizedCheckpoint),
|
||||
bestUpdateByPeriod: make(map[uint64]*ethpb.LightClientUpdate),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) Start() {
|
||||
go s.run()
|
||||
}
|
||||
|
||||
func (s *Service) Stop() error {
|
||||
s.cancelFunc()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) BestUpdateForPeriod(ctx context.Context, period uint64) (*ethpb.LightClientUpdate, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
update, ok := s.bestUpdateByPeriod[period]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no update found for period %d", period)
|
||||
}
|
||||
return update, nil
|
||||
}
|
||||
|
||||
func (s *Service) LatestFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
return s.latestFinalizedUpdate
|
||||
}
|
||||
|
||||
func (s *Service) LatestNonFinalizedUpdate(ctx context.Context) *ethpb.LightClientUpdate {
s.lock.RLock()
defer s.lock.RUnlock()
return s.latestNonFinalizedUpdate
}
|
||||
|
||||
func (s *Service) run() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s.cancelFunc = cancel
|
||||
s.waitForChainInitialization(ctx)
|
||||
s.waitForSync(ctx)
|
||||
// Initialize the service from finalized (state, block) data.
|
||||
log.Info("Initializing from finalized data")
|
||||
if err := s.initializeFromFinalizedData(ctx); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Info("Beginning subscriptions")
|
||||
// Begin listening for new chain head and finalized checkpoint events.
|
||||
go s.subscribeHeadEvent(ctx)
|
||||
go s.subscribeFinalizedEvent(ctx)
|
||||
}
|
||||
|
||||
func (s *Service) waitForChainInitialization(ctx context.Context) {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
defer close(stateChannel)
|
||||
for {
|
||||
select {
|
||||
case stateEvent := <-stateChannel:
|
||||
// Wait for us to receive the genesis time via a chain started notification.
|
||||
if stateEvent.Type == statefeed.Initialized {
|
||||
// Alternatively, if the chain has already started, we then read the genesis
|
||||
// time value from this data.
|
||||
data, ok := stateEvent.Data.(*statefeed.InitializedData)
|
||||
if !ok {
|
||||
log.Error(
|
||||
"Could not receive chain start notification, want *statefeed.ChainStartedData",
|
||||
)
|
||||
return
|
||||
}
|
||||
s.genesisTime = data.StartTime
|
||||
log.WithField("genesisTime", s.genesisTime).Info(
|
||||
"Received chain initialization event",
|
||||
)
|
||||
return
|
||||
}
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error(
|
||||
"Could not subscribe to state events",
|
||||
)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) waitForSync(ctx context.Context) {
|
||||
slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
defer slotTicker.Done()
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker.C():
|
||||
if slots.ToEpoch(slots.SinceGenesis(s.genesisTime)) < 6 {
|
||||
continue
|
||||
}
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) finalizedBlockOrGenesis(ctx context.Context, cpt *ethpb.Checkpoint) (block2.SignedBeaconBlock, error) {
|
||||
checkpointRoot := bytesutil.ToBytes32(cpt.Root)
|
||||
block, err := s.cfg.Database.Block(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if block == nil || block.IsNil() {
|
||||
return s.cfg.Database.GenesisBlock(ctx)
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func (s *Service) finalizedStateOrGenesis(ctx context.Context, cpt *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
checkpointRoot := bytesutil.ToBytes32(cpt.Root)
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if st == nil || st.IsNil() {
|
||||
return s.cfg.Database.GenesisState(ctx)
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (s *Service) initializeFromFinalizedData(ctx context.Context) error {
|
||||
cpt, err := s.cfg.Database.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalizedBlock, err := s.finalizedBlockOrGenesis(ctx, cpt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalizedState, err := s.finalizedStateOrGenesis(ctx, cpt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enc, err := finalizedState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := file.WriteFile("/tmp/state.ssz", enc); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.onFinalized(ctx, finalizedBlock, finalizedState)
|
||||
}
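A hedged wiring sketch (not shown in the diff) of constructing and starting the service from a node's existing components; the variable names beaconDB, chainService, stateGen, notifier and syncService are placeholders for whatever the node already provides.

svc := New(ctx, &Config{
	StateGen:            stateGen,
	Database:            beaconDB,
	HeadFetcher:         chainService,
	FinalizationFetcher: chainService,
	StateNotifier:       notifier,
	TimeFetcher:         chainService,
	SyncChecker:         syncService,
})
svc.Start() // waits for chain initialization and sync, then subscribes to head and finalized events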
|
||||
beacon-chain/light/subscribe_finalized_event.go (new file, 120 lines)
@@ -0,0 +1,120 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
const (
|
||||
finalizedCheckpointStateIndex = 20
|
||||
nextSyncCommitteeStateIndex = 23
|
||||
)
|
||||
|
||||
func (s *Service) subscribeFinalizedEvent(ctx context.Context) {
|
||||
stateChan := make(chan *feed.Event, 1)
|
||||
sub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChan)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case ev := <-stateChan:
|
||||
if ev.Type == statefeed.FinalizedCheckpoint {
|
||||
blk, beaconState, err := s.parseFinalizedEvent(ctx, ev.Data)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if err := s.onFinalized(ctx, blk, beaconState); err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
}
case <-sub.Err():
return
case <-ctx.Done():
return
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) parseFinalizedEvent(
|
||||
ctx context.Context, eventData interface{},
|
||||
) (block.SignedBeaconBlock, state.BeaconState, error) {
|
||||
finalizedCheckpoint, ok := eventData.(*v1.EventFinalizedCheckpoint)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("expected finalized checkpoint event")
|
||||
}
|
||||
checkpointRoot := bytesutil.ToBytes32(finalizedCheckpoint.Block)
|
||||
blk, err := s.cfg.Database.Block(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if blk == nil || blk.IsNil() {
|
||||
return nil, nil, errors.New("nil block for finalized checkpoint root")
|
||||
}
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, checkpointRoot)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if st == nil || st.IsNil() {
|
||||
return nil, nil, errors.New("nil state for finalized checkpoint root")
|
||||
}
|
||||
return blk, st, nil
|
||||
}
|
||||
|
||||
func (s *Service) onFinalized(
|
||||
ctx context.Context, signedBlock block.SignedBeaconBlock, postState state.BeaconStateAltair,
|
||||
) error {
|
||||
if _, ok := postState.InnerStateUnsafe().(*ethpb.BeaconStateAltair); !ok {
|
||||
return errors.New("expected an Altair beacon state")
|
||||
}
|
||||
blk := signedBlock.Block()
|
||||
header, err := block.BeaconBlockHeaderFromBlockInterface(blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tb, err := ssz.NewTreeBackedState(postState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proof, gIndex, err := tb.Proof(nextSyncCommitteeStateIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextSyncCommittee, err := postState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := postState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextSyncCommitteeRoot, err := nextSyncCommittee.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("On finalized update")
|
||||
log.Infof("Header state root %#x, state hash tree root %#x", header.StateRoot, root)
|
||||
log.Infof("Generating proof against root %#x with gindex %d and leaf root %#x", root, gIndex, nextSyncCommitteeRoot)
|
||||
log.Info("-----")
|
||||
log.Infof("Proof with length %d", len(proof))
|
||||
for _, elem := range proof {
|
||||
log.Infof("%#x", bytesutil.Trunc(elem))
|
||||
}
|
||||
log.Info("-----")
|
||||
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
currentEpoch := slots.ToEpoch(blk.Slot())
|
||||
s.finalizedByEpoch[currentEpoch] = &ethpb.LightClientFinalizedCheckpoint{
|
||||
Header: header,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: proof,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
beacon-chain/light/subscribe_head_event.go (new file, 154 lines)
@@ -0,0 +1,154 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
"github.com/prysmaticlabs/prysm/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func (s *Service) subscribeHeadEvent(ctx context.Context) {
|
||||
stateChan := make(chan *feed.Event, 1)
|
||||
sub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChan)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case ev := <-stateChan:
|
||||
if ev.Type == statefeed.NewHead {
|
||||
head, beaconState, err := s.getChainHeadAndState(ctx)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if err := s.onHead(ctx, head, beaconState); err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
case <-sub.Err():
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) getChainHeadAndState(ctx context.Context) (block.SignedBeaconBlock, state.BeaconState, error) {
|
||||
head, err := s.cfg.HeadFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if head == nil || head.IsNil() {
|
||||
return nil, nil, errors.New("head block is nil")
|
||||
}
|
||||
st, err := s.cfg.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("head state is nil")
|
||||
}
|
||||
if st == nil || st.IsNil() {
|
||||
return nil, nil, errors.New("head state is nil")
|
||||
}
|
||||
return head, st, nil
|
||||
}
|
||||
|
||||
func (s *Service) onHead(ctx context.Context, head block.SignedBeaconBlock, postState state.BeaconStateAltair) error {
|
||||
if _, ok := postState.InnerStateUnsafe().(*ethpb.BeaconStateAltair); !ok {
|
||||
return errors.New("expected an Altair beacon state")
|
||||
}
|
||||
blk := head.Block()
|
||||
tb, err := ssz.NewTreeBackedState(postState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
header, err := block.BeaconBlockHeaderFromBlockInterface(blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalityBranch, _, err := tb.Proof(finalizedCheckpointStateIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nextSyncCommitteeBranch, gIndex, err := tb.Proof(nextSyncCommitteeStateIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stRoot, err := postState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blkRoot, err := blk.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("On head, generating sync committee proof for root %#x and index %d, block root %#x, header state root %#x", stRoot[:], gIndex, blkRoot, header.StateRoot)
|
||||
nextSyncCommittee, err := postState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
s.prevHeadData[blkRoot] = &ethpb.SyncAttestedData{
|
||||
Header: header,
|
||||
FinalityCheckpoint: postState.FinalizedCheckpoint(),
|
||||
FinalityBranch: finalityBranch,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
}
|
||||
s.lock.Unlock()
|
||||
syncAttestedBlockRoot, err := helpers.BlockRootAtSlot(postState, postState.Slot()-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fork, err := forks.Fork(slots.ToEpoch(blk.Slot()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
syncAggregate, err := blk.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigData := &signatureData{
|
||||
slot: blk.Slot(),
|
||||
forkVersion: fork.CurrentVersion,
|
||||
syncAggregate: syncAggregate,
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
syncAttestedData, ok := s.prevHeadData[bytesutil.ToBytes32(syncAttestedBlockRoot)]
|
||||
if !ok {
|
||||
s.lock.Unlock()
|
||||
log.Info("Got useless data, skipping")
|
||||
return nil // Useless data.
|
||||
}
|
||||
s.lock.Unlock()
|
||||
committeePeriodWithFinalized, err := s.persistBestFinalizedUpdate(ctx, syncAttestedData, sigData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.persistBestNonFinalizedUpdate(ctx, syncAttestedData, sigData, committeePeriodWithFinalized); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
if len(s.prevHeadData) > PrevDataMaxSize {
|
||||
for k := range s.prevHeadData {
|
||||
delete(s.prevHeadData, k)
|
||||
if len(s.prevHeadData) <= PrevDataMaxSize {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
s.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
beacon-chain/light/update_comparison.go (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func isBetterUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
|
||||
prevIsFinalized := isFinalizedUpdate(prevUpdate)
|
||||
newIsFinalized := isFinalizedUpdate(newUpdate)
|
||||
// newUpdate becomes finalized, it's better.
|
||||
if newIsFinalized && !prevIsFinalized {
|
||||
return true
|
||||
}
|
||||
// newUpdate is no longer finalized, it's worse.
|
||||
if !newIsFinalized && prevIsFinalized {
|
||||
return false
|
||||
}
|
||||
return hasMoreBits(newUpdate, prevUpdate)
|
||||
}
|
||||
|
||||
func isLatestBestFinalizedUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
|
||||
if newUpdate.FinalityHeader.Slot > prevUpdate.FinalityHeader.Slot {
|
||||
return true
|
||||
}
|
||||
if newUpdate.FinalityHeader.Slot < prevUpdate.FinalityHeader.Slot {
|
||||
return false
|
||||
}
|
||||
return hasMoreBits(newUpdate, prevUpdate)
|
||||
}
|
||||
|
||||
func isLatestBestNonFinalizedUpdate(prevUpdate *ethpb.LightClientUpdate, newUpdate *ethpb.LightClientUpdate) bool {
|
||||
if newUpdate.Header.Slot > prevUpdate.Header.Slot {
|
||||
return true
|
||||
}
|
||||
if newUpdate.Header.Slot < prevUpdate.Header.Slot {
|
||||
return false
|
||||
}
|
||||
return hasMoreBits(newUpdate, prevUpdate)
|
||||
}
|
||||
|
||||
func isFinalizedUpdate(update *ethpb.LightClientUpdate) bool {
|
||||
return !bytes.Equal(params.BeaconConfig().ZeroHash[:], update.FinalityHeader.StateRoot)
|
||||
}
|
||||
|
||||
func hasMoreBits(a *ethpb.LightClientUpdate, b *ethpb.LightClientUpdate) bool {
|
||||
return a.SyncCommitteeBits.Count() > b.SyncCommitteeBits.Count()
|
||||
}
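The precedence these helpers encode can be summarized with a small hedged example (not part of the diff); manyBits and fewBits stand for sync committee bitfields whose Count() is 400 and 300, someRoot is any non-zero 32-byte root, and the *ethpb.BeaconBlockHeader type for FinalityHeader is an assumption.

zero := params.BeaconConfig().ZeroHash
nonFinalized := &ethpb.LightClientUpdate{
	FinalityHeader:    &ethpb.BeaconBlockHeader{StateRoot: zero[:]}, // zero state root => not finalized
	SyncCommitteeBits: manyBits,
}
finalized := &ethpb.LightClientUpdate{
	FinalityHeader:    &ethpb.BeaconBlockHeader{StateRoot: someRoot[:]}, // non-zero => finalized
	SyncCommitteeBits: fewBits,
}
_ = isBetterUpdate(nonFinalized, finalized) // true: finality outranks participation count
_ = isBetterUpdate(finalized, nonFinalized) // false, for the same reason
// Only when both updates agree on finality does hasMoreBits break the tie.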
|
||||
beacon-chain/light/updater.go (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
package light
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Precomputed values for generalized indices.
|
||||
const (
|
||||
FinalizedRootIndex = 105
|
||||
NextSyncCommitteeIndex = 55
|
||||
PrevDataMaxSize = 64
|
||||
)
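For readers checking these constants against the state indices used in subscribe_finalized_event.go, the generalized-index arithmetic works out as follows (an editorial note, not code from the diff). The Altair BeaconState has 24 top-level fields, padded to 32 leaves, so a field with index i has generalized index 32 + i:
  nextSyncCommitteeStateIndex   = 23  ->  32 + 23 = 55  = NextSyncCommitteeIndex
  finalizedCheckpointStateIndex = 20  ->  32 + 20 = 52
finalized_checkpoint.root is child 1 of the Checkpoint container (epoch = 0, root = 1), so its generalized index is 52*2 + 1 = 105 = FinalizedRootIndex.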
|
||||
|
||||
var log = logrus.WithField("prefix", "light")
|
||||
|
||||
type signatureData struct {
|
||||
slot types.Slot
|
||||
forkVersion []byte
|
||||
syncAggregate *ethpb.SyncAggregate
|
||||
}
|
||||
|
||||
func (s *Service) persistBestFinalizedUpdate(ctx context.Context, syncAttestedData *ethpb.SyncAttestedData, sigData *signatureData) (uint64, error) {
|
||||
finalizedEpoch := syncAttestedData.FinalityCheckpoint.Epoch
|
||||
|
||||
s.lock.RLock()
|
||||
finalizedData := s.finalizedByEpoch[finalizedEpoch]
|
||||
s.lock.RUnlock()
|
||||
|
||||
if finalizedData == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
committeePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(syncAttestedData.Header.Slot))
|
||||
signaturePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(sigData.slot))
|
||||
if committeePeriod != signaturePeriod {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
newUpdate := &ethpb.LightClientUpdate{
|
||||
Header: finalizedData.Header,
|
||||
NextSyncCommittee: finalizedData.NextSyncCommittee,
|
||||
NextSyncCommitteeBranch: finalizedData.NextSyncCommitteeBranch,
|
||||
FinalityHeader: syncAttestedData.Header,
|
||||
FinalityBranch: syncAttestedData.FinalityBranch,
|
||||
SyncCommitteeBits: sigData.syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: sigData.syncAggregate.SyncCommitteeSignature,
|
||||
ForkVersion: sigData.forkVersion,
|
||||
}
|
||||
|
||||
s.lock.RLock()
|
||||
prevBestUpdate := s.bestUpdateByPeriod[committeePeriod]
|
||||
s.lock.RUnlock()
|
||||
|
||||
if prevBestUpdate == nil || isBetterUpdate(prevBestUpdate, newUpdate) {
|
||||
s.lock.Lock()
|
||||
s.bestUpdateByPeriod[committeePeriod] = newUpdate
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
s.lock.RLock()
|
||||
prevLatestUpdate := s.latestFinalizedUpdate
|
||||
s.lock.RUnlock()
|
||||
|
||||
if prevLatestUpdate == nil || isLatestBestFinalizedUpdate(prevLatestUpdate, newUpdate) {
|
||||
s.lock.Lock()
|
||||
s.latestFinalizedUpdate = newUpdate
|
||||
s.lock.Unlock()
|
||||
log.Info("Putting latest best finalized update")
|
||||
rt, err := newUpdate.NextSyncCommittee.HashTreeRoot()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
log.Infof("Header state root %#x, state hash tree root %#x", newUpdate.Header.StateRoot, newUpdate.Header.StateRoot)
|
||||
log.Infof("Generating proof against root %#x with gindex %d and leaf root %#x", newUpdate.Header.StateRoot, 55, rt)
|
||||
log.Info("-----")
|
||||
log.Infof("Proof with length %d", len(newUpdate.NextSyncCommitteeBranch))
|
||||
for _, elem := range newUpdate.NextSyncCommitteeBranch {
|
||||
log.Infof("%#x", bytesutil.Trunc(elem))
|
||||
}
|
||||
log.Info("-----")
|
||||
}
|
||||
return committeePeriod, nil
|
||||
}
|
||||
|
||||
func (s *Service) persistBestNonFinalizedUpdate(ctx context.Context, syncAttestedData *ethpb.SyncAttestedData, sigData *signatureData, period uint64) error {
|
||||
committeePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(syncAttestedData.Header.Slot))
|
||||
signaturePeriod := slots.SyncCommitteePeriod(slots.ToEpoch(sigData.slot))
|
||||
if committeePeriod != signaturePeriod {
|
||||
return nil
|
||||
}
|
||||
|
||||
newUpdate := &ethpb.LightClientUpdate{
|
||||
Header: syncAttestedData.Header,
|
||||
NextSyncCommittee: syncAttestedData.NextSyncCommittee,
|
||||
NextSyncCommitteeBranch: syncAttestedData.NextSyncCommitteeBranch,
|
||||
FinalityHeader: nil,
|
||||
FinalityBranch: nil,
|
||||
SyncCommitteeBits: sigData.syncAggregate.SyncCommitteeBits,
|
||||
SyncCommitteeSignature: sigData.syncAggregate.SyncCommitteeSignature,
|
||||
ForkVersion: sigData.forkVersion,
|
||||
}
|
||||
|
||||
// Optimization: If there's already a finalized update for this committee period, no need to
|
||||
// create a non-finalized update.
|
||||
if committeePeriod != period {
|
||||
s.lock.RLock()
|
||||
prevBestUpdate := s.bestUpdateByPeriod[committeePeriod]
|
||||
s.lock.RUnlock()
|
||||
if prevBestUpdate == nil || isBetterUpdate(prevBestUpdate, newUpdate) {
|
||||
s.lock.Lock()
|
||||
s.bestUpdateByPeriod[committeePeriod] = newUpdate
|
||||
s.lock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Store the overall latest update here; we only check that it is newer, not that it is the best for its period.
|
||||
s.lock.RLock()
|
||||
prevLatestUpdate := s.latestNonFinalizedUpdate
|
||||
s.lock.RUnlock()
|
||||
|
||||
if prevLatestUpdate == nil || isLatestBestNonFinalizedUpdate(prevLatestUpdate, newUpdate) {
|
||||
// TODO: Don't store nextCommittee, that can be fetched through getBestUpdates()
|
||||
s.lock.Lock()
|
||||
s.latestNonFinalizedUpdate = newUpdate
|
||||
s.lock.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -4,6 +4,8 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"metrics.go",
|
||||
"process_attestation.go",
|
||||
"process_block.go",
|
||||
"process_exit.go",
|
||||
"service.go",
|
||||
@@ -11,10 +13,20 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -23,18 +35,24 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"process_exit_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/monitor/metrics.go (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
log = logrus.WithField("prefix", "monitor")
|
||||
// TODO: The Prometheus gauge vectors and counters in this package deprecate the
|
||||
// corresponding gauge vectors and counters in the validator client.
|
||||
|
||||
// inclusionSlotGauge used to track attestation inclusion distance
|
||||
inclusionSlotGauge = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "inclusion_slot",
|
||||
Help: "Attestations inclusion slot",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
// timelyHeadCounter used to track attestation timely head flags
|
||||
timelyHeadCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "timely_head",
|
||||
Help: "Attestation timely Head flag",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
// timelyTargetCounter used to track attestation timely target flags
|
||||
timelyTargetCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "timely_target",
|
||||
Help: "Attestation timely Target flag",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
// timelySourceCounter used to track attestation timely source flags
|
||||
timelySourceCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "timely_source",
|
||||
Help: "Attestation timely Source flag",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
|
||||
// aggregationCounter used to track aggregations
|
||||
aggregationCounter = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "monitor",
|
||||
Name: "aggregations",
|
||||
Help: "Number of aggregation duties performed",
|
||||
},
|
||||
[]string{
|
||||
"validator_index",
|
||||
},
|
||||
)
|
||||
)
|
||||
beacon-chain/monitor/process_attestation.go (new file, 218 lines)
@@ -0,0 +1,218 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the
|
||||
// given slot is different than the last attested slot from this validator.
|
||||
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool {
|
||||
if !s.TrackedIndex(types.ValidatorIndex(idx)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if lp, ok := s.latestPerformance[types.ValidatorIndex(idx)]; ok {
|
||||
return lp.attestedSlot != slot
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// attestingIndices returns the indices of validators that appear in the
|
||||
// given aggregated attestation.
|
||||
func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return attestation.AttestingIndices(att.AggregationBits, committee)
|
||||
}
|
||||
|
||||
// logMessageTimelyFlagsForIndex returns the log message with the basic
|
||||
// performance indicators for the attestation (head, source, target)
|
||||
func logMessageTimelyFlagsForIndex(idx types.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"ValidatorIndex": idx,
|
||||
"Slot": data.Slot,
|
||||
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
|
||||
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
|
||||
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
|
||||
}
|
||||
}
|
||||
|
||||
// processAttestations logs the event that one of our tracked validators'
|
||||
// attestations was included in a block
|
||||
func (s *Service) processAttestations(ctx context.Context, state state.BeaconState, blk block.BeaconBlock) {
|
||||
if blk == nil || blk.Body() == nil {
|
||||
return
|
||||
}
|
||||
for _, attestation := range blk.Body().Attestations() {
|
||||
s.processIncludedAttestation(ctx, state, attestation)
|
||||
}
|
||||
}
|
||||
|
||||
// processIncludedAttestation logs when one of our tracked validators
// appears in the attesting indices and this included attestation was not
// processed before.
|
||||
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) {
|
||||
attestingIndices, err := attestingIndices(ctx, state, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
|
||||
balance, err := state.BalanceAtIndex(types.ValidatorIndex(idx))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get balance")
|
||||
return
|
||||
}
|
||||
|
||||
aggregatedPerf := s.aggregatedPerformance[types.ValidatorIndex(idx)]
|
||||
aggregatedPerf.totalAttestedCount++
|
||||
aggregatedPerf.totalRequestedCount++
|
||||
|
||||
latestPerf := s.latestPerformance[types.ValidatorIndex(idx)]
|
||||
balanceChg := balance - latestPerf.balance
|
||||
latestPerf.balanceChange = balanceChg
|
||||
latestPerf.balance = balance
|
||||
latestPerf.attestedSlot = att.Data.Slot
|
||||
latestPerf.inclusionSlot = state.Slot()
|
||||
inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot))
|
||||
aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot)
|
||||
|
||||
if state.Version() == version.Altair {
|
||||
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
|
||||
sourceIdx := params.BeaconConfig().TimelySourceFlagIndex
|
||||
headIdx := params.BeaconConfig().TimelyHeadFlagIndex
|
||||
|
||||
var participation []byte
|
||||
if slots.ToEpoch(latestPerf.inclusionSlot) ==
|
||||
slots.ToEpoch(latestPerf.attestedSlot) {
|
||||
participation, err = state.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get current epoch participation")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
participation, err = state.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get previous epoch participation")
|
||||
return
|
||||
}
|
||||
}
|
||||
flags := participation[idx]
|
||||
hasFlag, err := altair.HasValidatorFlag(flags, sourceIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Source flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelySource = hasFlag
|
||||
hasFlag, err = altair.HasValidatorFlag(flags, headIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Head flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelyHead = hasFlag
|
||||
hasFlag, err = altair.HasValidatorFlag(flags, targetIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timely Target flag")
|
||||
return
|
||||
}
|
||||
latestPerf.timelyTarget = hasFlag
|
||||
|
||||
if latestPerf.timelySource {
|
||||
timelySourceCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectSource++
|
||||
}
|
||||
if latestPerf.timelyHead {
|
||||
timelyHeadCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectHead++
|
||||
}
|
||||
if latestPerf.timelyTarget {
|
||||
timelyTargetCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
|
||||
aggregatedPerf.totalCorrectTarget++
|
||||
}
|
||||
}
|
||||
logFields["CorrectHead"] = latestPerf.timelyHead
|
||||
logFields["CorrectSource"] = latestPerf.timelySource
|
||||
logFields["CorrectTarget"] = latestPerf.timelyTarget
|
||||
logFields["InclusionSlot"] = latestPerf.inclusionSlot
|
||||
logFields["NewBalance"] = balance
|
||||
logFields["BalanceChange"] = balanceChg
|
||||
|
||||
s.latestPerformance[types.ValidatorIndex(idx)] = latestPerf
|
||||
s.aggregatedPerformance[types.ValidatorIndex(idx)] = aggregatedPerf
|
||||
log.WithFields(logFields).Info("Attestation included")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our
|
||||
// tracked validators
|
||||
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
|
||||
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
|
||||
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
|
||||
if state == nil {
|
||||
log.Debug("Skipping unaggregated attestation due to state not found in cache")
|
||||
return
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, state, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
|
||||
log.WithFields(logFields).Info("Processed unaggregated attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processAggregatedAttestation logs when we see an aggregation from one of our tracked validators or an aggregated
|
||||
// attestation from one of our tracked validators
|
||||
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
|
||||
if s.TrackedIndex(att.AggregatorIndex) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"ValidatorIndex": att.AggregatorIndex,
|
||||
}).Info("Processed attestation aggregation")
|
||||
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
|
||||
aggregatedPerf.totalAggregations++
|
||||
s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf
|
||||
aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc()
|
||||
}
|
||||
|
||||
var root [32]byte
|
||||
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
|
||||
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
|
||||
if state == nil {
|
||||
log.Debug("Skipping agregated attestation due to state not found in cache")
|
||||
return
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, state, att.Aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attesting indices")
|
||||
return
|
||||
}
|
||||
for _, idx := range attestingIndices {
|
||||
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Aggregate.Data.Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Aggregate.Data)
|
||||
log.WithFields(logFields).Info("Processed aggregated attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
beacon-chain/monitor/process_attestation_test.go (new file, 287 lines)
@@ -0,0 +1,287 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func setupService(t *testing.T) *Service {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
trackedVals := map[types.ValidatorIndex]interface{}{
|
||||
1: nil,
|
||||
2: nil,
|
||||
12: nil,
|
||||
}
|
||||
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
|
||||
1: {
|
||||
balance: 32000000000,
|
||||
},
|
||||
2: {
|
||||
balance: 32000000000,
|
||||
},
|
||||
12: {
|
||||
balance: 31900000000,
|
||||
},
|
||||
}
|
||||
|
||||
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
|
||||
1: {},
|
||||
2: {},
|
||||
12: {},
|
||||
}
|
||||
|
||||
return &Service{
|
||||
config: &ValidatorMonitorConfig{
|
||||
StateGen: stategen.New(beaconDB),
|
||||
TrackedValidators: trackedVals,
|
||||
},
|
||||
latestPerformance: latestPerformance,
|
||||
aggregatedPerformance: aggregatedPerformance,
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAttestingIndices(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 256)
|
||||
att := &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
attestingIndices, err := attestingIndices(ctx, beaconState, att)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, attestingIndices, []uint64{0xc, 0x2})
|
||||
|
||||
}
|
||||
|
||||
func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
|
||||
|
||||
att := &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
Source: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processIncludedAttestation(context.Background(), state, att)
|
||||
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
header := state.LatestBlockHeader()
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
att := &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: header.GetStateRoot(),
|
||||
Source: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
s.processUnaggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "Skipping unaggregated attestation due to state not found in cache")
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
|
||||
func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
|
||||
att := &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: root[:],
|
||||
},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
|
||||
s.processUnaggregatedAttestation(context.Background(), att)
|
||||
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
}
|
||||
|
||||
func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
header := state.LatestBlockHeader()
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
att := &ethpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: header.GetStateRoot(),
|
||||
Source: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
},
|
||||
}
|
||||
s.processAggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsContain(t, hook, "Skipping agregated attestation due to state not found in cache")
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
}
|
||||
|
||||
func TestProcessAggregatedAttestationStateCached(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
s := setupService(t)
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
require.NoError(t, state.SetCurrentParticipationBits(participation))
|
||||
|
||||
root := [32]byte{}
|
||||
copy(root[:], "hello-world")
|
||||
|
||||
att := &ethpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: root[:],
|
||||
},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b10, 0b1},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
|
||||
s.processAggregatedAttestation(ctx, att)
|
||||
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
|
||||
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
|
||||
}
|
||||
|
||||
func TestProcessAttestations(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
ctx := context.Background()
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
|
||||
|
||||
att := &ethpb.Attestation{
|
||||
Data: &ethpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
Source: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
Target: &ethpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("hello-world"), 32),
|
||||
},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b11, 0b1},
|
||||
}
|
||||
|
||||
block := &ethpb.BeaconBlockAltair{
|
||||
Slot: 2,
|
||||
Body: &ethpb.BeaconBlockBodyAltair{
|
||||
Attestations: []*ethpb.Attestation{att},
|
||||
},
|
||||
}
|
||||
|
||||
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
s.processAttestations(ctx, state, wrappedBlock)
|
||||
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
|
||||
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
|
||||
require.LogsContain(t, hook, wanted1)
|
||||
require.LogsContain(t, hook, wanted2)
|
||||
|
||||
}
|
||||
@@ -2,24 +2,46 @@ package monitor

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/sirupsen/logrus"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)

var (
	log = logrus.WithField("prefix", "monitor")
)

// ValidatorLatestPerformance keeps track of the latest participation of the validator.
type ValidatorLatestPerformance struct {
	attestedSlot  types.Slot
	inclusionSlot types.Slot
	timelySource  bool
	timelyTarget  bool
	timelyHead    bool
	balance       uint64
	balanceChange uint64
}

// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the validator since launch.
type ValidatorAggregatedPerformance struct {
	totalAttestedCount  uint64
	totalRequestedCount uint64
	totalDistance       uint64
	totalCorrectSource  uint64
	totalCorrectTarget  uint64
	totalCorrectHead    uint64
	totalAggregations   uint64
}

// ValidatorMonitorConfig contains the list of validator indices that the
// monitor service tracks, as well as the event feed notifier that the
// monitor needs to subscribe.
type ValidatorMonitorConfig struct {
	StateGen          stategen.StateManager
	TrackedValidators map[types.ValidatorIndex]interface{}
}

// Service is the main structure that tracks validators and reports logs and
// metrics of their performances throughout their lifetime.
type Service struct {
	config                *ValidatorMonitorConfig
	latestPerformance     map[types.ValidatorIndex]ValidatorLatestPerformance
	aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance
}

// TrackedIndex returns if the given validator index corresponds to one of the

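For orientation, this is roughly how a caller would assemble the configuration above. It is a sketch only: the `ValidatorMonitorConfig` and `TrackedValidators` fields come from the diff, while `stategenService` and the surrounding wiring are assumptions, since the monitor constructor is not part of this excerpt.

	// Hypothetical wiring sketch: track validators 2 and 12 in the monitor.
	tracked := map[types.ValidatorIndex]interface{}{
		2:  nil,
		12: nil,
	}
	cfg := &ValidatorMonitorConfig{
		StateGen:          stategenService, // assumed: an existing stategen.StateManager
		TrackedValidators: tracked,
	}
	_ = cfg // passed to the monitor service constructor (not shown in this diff)
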
@@ -26,6 +26,7 @@ go_library(
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/gateway:go_default_library",
|
||||
"//beacon-chain/light:go_default_library",
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/light"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
@@ -193,6 +194,10 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := beacon.registerLightClientServer(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := beacon.registerSlasherService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -681,6 +686,11 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
return err
|
||||
}
|
||||
|
||||
var lightService *light.Service
|
||||
if err := b.services.FetchService(&lightService); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var slasherService *slasher.Service
|
||||
if features.Get().EnableSlasher {
|
||||
if err := b.services.FetchService(&slasherService); err != nil {
|
||||
@@ -757,6 +767,7 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
StateGen: b.stateGen,
|
||||
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
|
||||
MaxMsgSize: maxMsgSize,
|
||||
LightUpdatesFetcher: lightService,
|
||||
})
|
||||
|
||||
return b.services.RegisterService(rpcService)
|
||||
@@ -866,3 +877,25 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
	}
	return nil
}

func (b *BeaconNode) registerLightClientServer() error {
	var chainService *blockchain.Service
	if err := b.services.FetchService(&chainService); err != nil {
		return err
	}
	var syncService *initialsync.Service
	if err := b.services.FetchService(&syncService); err != nil {
		return err
	}
	svc := light.New(b.ctx, &light.Config{
		Database:            b.db,
		StateGen:            b.stateGen,
		HeadFetcher:         chainService,
		FinalizationFetcher: chainService,
		StateNotifier:       b,
		TimeFetcher:         chainService,
		SyncChecker:         syncService,
	})

	return b.services.RegisterService(svc)
}

@@ -29,6 +29,7 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/validator:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/beacon:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/debug:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/light:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
|
||||
beacon-chain/rpc/prysm/v1alpha1/light/BUILD.bazel (new file, 17 lines)
@@ -0,0 +1,17 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["server.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/light",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/db/iface:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_bazel_rules_go//proto/wkt:empty_go_proto",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
    ],
)

beacon-chain/rpc/prysm/v1alpha1/light/server.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package light

import (
	"context"

	"github.com/golang/protobuf/ptypes/empty"
	"github.com/prysmaticlabs/prysm/beacon-chain/light"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	log "github.com/sirupsen/logrus"
)

type Server struct {
	Fetcher light.UpdatesFetcher
}

// BestUpdates GET /eth/v1alpha1/lightclient/best_update/:periods.
func (s *Server) BestUpdates(ctx context.Context, req *ethpb.BestUpdatesRequest) (*ethpb.BestUpdatesResponse, error) {
	updates := make([]*ethpb.LightClientUpdate, 0)
	for _, period := range req.SyncCommitteePeriods {
		update, err := s.Fetcher.BestUpdateForPeriod(ctx, period)
		if err != nil {
			log.Error(err)
			continue
		}
		updates = append(updates, update)
	}
	return &ethpb.BestUpdatesResponse{Updates: updates}, nil
}

// LatestUpdateFinalized GET /eth/v1alpha1/lightclient/latest_update_finalized/
func (s *Server) LatestUpdateFinalized(ctx context.Context, _ *empty.Empty) (*ethpb.LightClientUpdate, error) {
	return s.Fetcher.LatestFinalizedUpdate(ctx), nil
}

// LatestUpdateNonFinalized /eth/v1alpha1/lightclient/latest_update_nonfinalized/
func (s *Server) LatestUpdateNonFinalized(ctx context.Context, _ *empty.Empty) (*ethpb.LightClientUpdate, error) {
	return s.Fetcher.LatestNonFinalizedUpdate(ctx), nil
}

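For illustration, a gRPC consumer could query these endpoints roughly as follows. This is a sketch, not code from the PR: it assumes the generated stub follows the usual protoc naming (`ethpb.NewLightClientClient`) and that a beacon node is serving gRPC on localhost:4000.

	package main

	import (
		"context"
		"fmt"
		"log"

		ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
		"google.golang.org/grpc"
	)

	func main() {
		conn, err := grpc.Dial("localhost:4000", grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		// NewLightClientClient is the conventional protoc-generated stub name for
		// the LightClient service registered above; treat it as an assumption here.
		client := ethpb.NewLightClientClient(conn)
		resp, err := client.BestUpdates(context.Background(), &ethpb.BestUpdatesRequest{
			SyncCommitteePeriods: []uint64{10, 11}, // field name taken from the handler above
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, update := range resp.Updates {
			fmt.Println(update)
		}
	}
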
@@ -94,7 +94,12 @@ func (vs *Server) deposits(
	ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
	defer span.End()

-	if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
+	if vs.MockEth1Votes {
		return []*ethpb.Deposit{}, nil
	}
+
+	if !vs.Eth1InfoFetcher.IsConnectedToETH1() {
+		log.Warn("not connected to eth1 node, skip pending deposit insertion")
+		return []*ethpb.Deposit{}, nil
+	}
	// Need to fetch if the deposits up to the state's latest eth1 data matches
@@ -112,6 +117,7 @@ func (vs *Server) deposits(
	// If there are no pending deposits, exit early.
	allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
	if len(allPendingContainers) == 0 {
+		log.Debug("no pending deposits for inclusion in block")
		return []*ethpb.Deposit{}, nil
	}

@@ -127,21 +133,21 @@ func (vs *Server) deposits(
		if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
			pendingDeps = append(pendingDeps, dep)
		}
+		// Don't try to pack more than the max allowed in a block
+		if uint64(len(pendingDeps)) == params.BeaconConfig().MaxDeposits {
+			break
+		}
	}

	for i := range pendingDeps {
-		// Don't construct merkle proof if the number of deposits is more than max allowed in block.
-		if uint64(i) == params.BeaconConfig().MaxDeposits {
-			break
-		}
		pendingDeps[i].Deposit, err = constructMerkleProof(depositTrie, int(pendingDeps[i].Index), pendingDeps[i].Deposit)
		if err != nil {
			return nil, err
		}
	}
	// Limit the return of pending deposits to not be more than max deposits allowed in block.

	var pendingDeposits []*ethpb.Deposit
-	for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
+	for i := uint64(0); i < uint64(len(pendingDeps)); i++ {
		pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
	}
	return pendingDeposits, nil

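The behavioral point of this hunk is that the MaxDeposits cap is now enforced while pending deposits are being collected, so Merkle proofs are only ever constructed for deposits that can actually fit in the block. A minimal standalone sketch of that collect-with-cap pattern (hypothetical values, not PR code):

	package main

	import "fmt"

	func main() {
		const maxDeposits = 16 // stand-in for params.BeaconConfig().MaxDeposits
		pending := make([]int, 0, maxDeposits)
		for candidate := 0; candidate < 100; candidate++ {
			pending = append(pending, candidate)
			// Stop collecting as soon as the per-block limit is reached,
			// mirroring the break added inside the first loop above.
			if len(pending) == maxDeposits {
				break
			}
		}
		fmt.Println(len(pending)) // 16
	}
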
@@ -20,6 +20,7 @@ import (
|
||||
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/light"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
@@ -33,6 +34,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/validator"
|
||||
beaconv1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/beacon"
|
||||
debugv1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/debug"
|
||||
lightserver "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/light"
|
||||
nodev1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/node"
|
||||
validatorv1alpha1 "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/statefetcher"
|
||||
@@ -108,6 +110,7 @@ type Config struct {
|
||||
OperationNotifier opfeed.Notifier
|
||||
StateGen *stategen.State
|
||||
MaxMsgSize int
|
||||
LightUpdatesFetcher light.UpdatesFetcher
|
||||
}
|
||||
|
||||
// NewService instantiates a new RPC service instance that will
|
||||
@@ -279,6 +282,10 @@ func (s *Service) Start() {
|
||||
VoluntaryExitsPool: s.cfg.ExitPool,
|
||||
V1Alpha1ValidatorServer: validatorServer,
|
||||
}
|
||||
lightClientServer := &lightserver.Server{
|
||||
Fetcher: s.cfg.LightUpdatesFetcher,
|
||||
}
|
||||
ethpbv1alpha1.RegisterLightClientServer(s.grpcServer, lightClientServer)
|
||||
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
|
||||
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
|
||||
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
|
||||
|
||||
@@ -12,6 +12,8 @@ go_library(
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -26,6 +28,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -18,6 +18,7 @@ type FieldTrie struct {
|
||||
field types.FieldIndex
|
||||
dataType types.DataType
|
||||
length uint64
|
||||
numOfElems int
|
||||
}
|
||||
|
||||
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
|
||||
@@ -26,18 +27,19 @@ type FieldTrie struct {
|
||||
func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) (*FieldTrie, error) {
|
||||
if elements == nil {
|
||||
return &FieldTrie{
|
||||
field: field,
|
||||
dataType: dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
field: field,
|
||||
dataType: dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
numOfElems: 0,
|
||||
}, nil
|
||||
}
|
||||
fieldRoots, err := fieldConverters(field, []uint64{}, elements, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateElements(field, elements, length); err != nil {
|
||||
if err := validateElements(field, dataType, elements, length); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch dataType {
|
||||
@@ -53,8 +55,9 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
numOfElems: reflect.ValueOf(elements).Len(),
|
||||
}, nil
|
||||
case types.CompositeArray:
|
||||
case types.CompositeArray, types.CompressedArray:
|
||||
return &FieldTrie{
|
||||
fieldLayers: stateutil.ReturnTrieLayerVariable(fieldRoots, length),
|
||||
field: field,
|
||||
@@ -62,6 +65,7 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: length,
|
||||
numOfElems: reflect.ValueOf(elements).Len(),
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(dataType).Name())
|
||||
@@ -92,13 +96,40 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
		if err != nil {
			return [32]byte{}, err
		}
		f.numOfElems = reflect.ValueOf(elements).Len()
		return fieldRoot, nil
	case types.CompositeArray:
		fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
		if err != nil {
			return [32]byte{}, err
		}
		f.numOfElems = reflect.ValueOf(elements).Len()
		return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
	case types.CompressedArray:
		numOfElems, err := f.field.ElemsInChunk()
		if err != nil {
			return [32]byte{}, err
		}
		// We remove the duplicates here in order to prevent
		// duplicated insertions into the trie.
		newIndices := []uint64{}
		indexExists := make(map[uint64]bool)
		newRoots := make([][32]byte, 0, len(fieldRoots)/int(numOfElems))
		for i, idx := range indices {
			startIdx := idx / numOfElems
			if indexExists[startIdx] {
				continue
			}
			newIndices = append(newIndices, startIdx)
			indexExists[startIdx] = true
			newRoots = append(newRoots, fieldRoots[i])
		}
		fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(newRoots, newIndices, f.fieldLayers)
		if err != nil {
			return [32]byte{}, err
		}
		f.numOfElems = reflect.ValueOf(elements).Len()
		return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems))
	default:
		return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
	}

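The dedup loop above maps element indices onto chunk indices (idx / ElemsInChunk) and keeps only the first root seen per chunk, so a leaf that holds several dirty balances is reinserted into the trie once. A small standalone sketch of that mapping, using the 4-elements-per-chunk figure that ElemsInChunk returns for balances:

	package main

	import "fmt"

	func main() {
		const elemsPerChunk = 4 // balances pack 4 uint64 values per 32-byte leaf
		indices := []uint64{0, 1, 2, 3, 4, 9}

		seen := make(map[uint64]bool)
		chunkIndices := []uint64{}
		for _, idx := range indices {
			chunk := idx / elemsPerChunk
			if seen[chunk] {
				continue // indices 1, 2, 3 collapse into the same leaf as index 0
			}
			seen[chunk] = true
			chunkIndices = append(chunkIndices, chunk)
		}
		fmt.Println(chunkIndices) // [0 1 2]
	}
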
@@ -109,11 +140,12 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
|
||||
func (f *FieldTrie) CopyTrie() *FieldTrie {
|
||||
if f.fieldLayers == nil {
|
||||
return &FieldTrie{
|
||||
field: f.field,
|
||||
dataType: f.dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: f.length,
|
||||
field: f.field,
|
||||
dataType: f.dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: f.length,
|
||||
numOfElems: f.numOfElems,
|
||||
}
|
||||
}
|
||||
dstFieldTrie := make([][]*[32]byte, len(f.fieldLayers))
|
||||
@@ -128,6 +160,7 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
|
||||
reference: stateutil.NewRef(1),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
length: f.length,
|
||||
numOfElems: f.numOfElems,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,6 +172,9 @@ func (f *FieldTrie) TrieRoot() ([32]byte, error) {
|
||||
case types.CompositeArray:
|
||||
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
|
||||
return stateutil.AddInMixin(trieRoot, uint64(len(f.fieldLayers[0])))
|
||||
case types.CompressedArray:
|
||||
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
|
||||
return stateutil.AddInMixin(trieRoot, uint64(f.numOfElems))
|
||||
default:
|
||||
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
package fieldtrie

import (
	"encoding/binary"
	"fmt"
	"reflect"

@@ -8,20 +9,37 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
	"github.com/prysmaticlabs/prysm/crypto/hash"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/encoding/ssz"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/runtime/version"
)

func (f *FieldTrie) validateIndices(idxs []uint64) error {
	length := f.length
	if f.dataType == types.CompressedArray {
		comLength, err := f.field.ElemsInChunk()
		if err != nil {
			return err
		}
		length *= comLength
	}
	for _, idx := range idxs {
-		if idx >= f.length {
-			return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, f.length)
+		if idx >= length {
+			return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, length)
		}
	}
	return nil
}

-func validateElements(field types.FieldIndex, elements interface{}, length uint64) error {
+func validateElements(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) error {
	if dataType == types.CompressedArray {
		comLength, err := field.ElemsInChunk()
		if err != nil {
			return err
		}
		length *= comLength
	}
	val := reflect.ValueOf(elements)
	if val.Len() > int(length) {
		return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length)

@@ -38,21 +56,21 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([][]byte{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return stateutil.HandleByteArrays(val, indices, convertAll)
|
||||
return handleByteArrays(val, indices, convertAll)
|
||||
case types.Eth1DataVotes:
|
||||
val, ok := elements.([]*ethpb.Eth1Data)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return HandleEth1DataSlice(val, indices, convertAll)
|
||||
return handleEth1DataSlice(val, indices, convertAll)
|
||||
case types.Validators:
|
||||
val, ok := elements.([]*ethpb.Validator)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return stateutil.HandleValidatorSlice(val, indices, convertAll)
|
||||
return handleValidatorSlice(val, indices, convertAll)
|
||||
case types.PreviousEpochAttestations, types.CurrentEpochAttestations:
|
||||
val, ok := elements.([]*ethpb.PendingAttestation)
|
||||
if !ok {
|
||||
@@ -60,13 +78,87 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
|
||||
reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return handlePendingAttestation(val, indices, convertAll)
|
||||
case types.Balances:
|
||||
val, ok := elements.([]uint64)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Wanted type of %v but got %v",
|
||||
reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name())
|
||||
}
|
||||
return handleBalanceSlice(val, indices, convertAll)
|
||||
default:
|
||||
return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name())
|
||||
}
|
||||
}
|
||||
|
||||
// HandleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
|
||||
func HandleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
// handleByteArrays computes and returns byte arrays in a slice of root format.
|
||||
func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
rootCreator := func(input []byte) {
|
||||
newRoot := bytesutil.ToBytes32(input)
|
||||
roots = append(roots, newRoot)
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
rootCreator(val[i])
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val))
|
||||
}
|
||||
rootCreator(val[idx])
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
// handleValidatorSlice returns the validator indices in a slice of root format.
|
||||
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
rootCreator := func(input *ethpb.Validator) error {
|
||||
newRoot, err := stateutil.ValidatorRootWithHasher(hasher, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
roots = append(roots, newRoot)
|
||||
return nil
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
err := rootCreator(val[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val))
|
||||
}
|
||||
err := rootCreator(val[idx])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
// handleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
|
||||
func handleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
@@ -141,3 +233,48 @@ func handlePendingAttestation(val []*ethpb.PendingAttestation, indices []uint64,
	}
	return roots, nil
}

func handleBalanceSlice(val []uint64, indices []uint64, convertAll bool) ([][32]byte, error) {
	if convertAll {
		balancesMarshaling := make([][]byte, 0)
		for _, b := range val {
			balanceBuf := make([]byte, 8)
			binary.LittleEndian.PutUint64(balanceBuf, b)
			balancesMarshaling = append(balancesMarshaling, balanceBuf)
		}
		balancesChunks, err := ssz.PackByChunk(balancesMarshaling)
		if err != nil {
			return [][32]byte{}, errors.Wrap(err, "could not pack balances into chunks")
		}
		return balancesChunks, nil
	}
	if len(val) > 0 {
		numOfElems, err := types.Balances.ElemsInChunk()
		if err != nil {
			return nil, err
		}
		roots := [][32]byte{}
		for _, idx := range indices {
			// We split the indexes into their relevant groups. Balances
			// are compressed according to 4 values -> 1 chunk.
			startIdx := idx / numOfElems
			startGroup := startIdx * numOfElems
			chunk := [32]byte{}
			sizeOfElem := len(chunk) / int(numOfElems)
			for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
				wantedVal := uint64(0)
				// Chunks are filled in sets of 4. If a set runs past the end of the
				// balance list (e.g. 41 balances, 41 % 4 = 1), the remaining slots in
				// the chunk do not exist yet and are zero-filled.
				if int(j) < len(val) {
					wantedVal = val[j]
				}
				binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal)
			}
			roots = append(roots, chunk)
		}
		return roots, nil
	}
	return [][32]byte{}, nil
}

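As the non-convertAll branch above shows, each 32-byte leaf holds four little-endian uint64 balances, and a leaf touched near the end of the list is zero-padded. A self-contained sketch of that byte layout (the balance values are illustrative only):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		balances := []uint64{5, 2929, 34, 1291, 354305}
		const elemsPerChunk = 4
		const elemSize = 8 // 32-byte chunk / 4 elements

		numChunks := (len(balances) + elemsPerChunk - 1) / elemsPerChunk
		for c := 0; c < numChunks; c++ {
			chunk := [32]byte{}
			for i := 0; i < elemsPerChunk; i++ {
				j := c*elemsPerChunk + i
				val := uint64(0) // positions past the end of the list stay zero
				if j < len(balances) {
					val = balances[j]
				}
				binary.LittleEndian.PutUint64(chunk[i*elemSize:(i+1)*elemSize], val)
			}
			fmt.Printf("chunk %d: %x\n", c, chunk)
		}
	}
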
@@ -1,8 +1,13 @@
|
||||
package fieldtrie
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
)
|
||||
@@ -17,7 +22,64 @@ func Test_handlePendingAttestation_OutOfRange(t *testing.T) {
|
||||
func Test_handleEth1DataSlice_OutOfRange(t *testing.T) {
|
||||
items := make([]*ethpb.Eth1Data, 1)
|
||||
indices := []uint64{3}
|
||||
_, err := HandleEth1DataSlice(items, indices, false)
|
||||
_, err := handleEth1DataSlice(items, indices, false)
|
||||
assert.ErrorContains(t, "index 3 greater than number of items in eth1 data slice 1", err)
|
||||
|
||||
}
|
||||
|
||||
func Test_handleValidatorSlice_OutOfRange(t *testing.T) {
|
||||
vals := make([]*ethpb.Validator, 1)
|
||||
indices := []uint64{3}
|
||||
_, err := handleValidatorSlice(vals, indices, false)
|
||||
assert.ErrorContains(t, "index 3 greater than number of validators 1", err)
|
||||
}
|
||||
|
||||
func TestBalancesSlice_CorrectRoots_All(t *testing.T) {
|
||||
balances := []uint64{5, 2929, 34, 1291, 354305}
|
||||
roots, err := handleBalanceSlice(balances, []uint64{}, true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
root1 := [32]byte{}
|
||||
binary.LittleEndian.PutUint64(root1[:8], balances[0])
|
||||
binary.LittleEndian.PutUint64(root1[8:16], balances[1])
|
||||
binary.LittleEndian.PutUint64(root1[16:24], balances[2])
|
||||
binary.LittleEndian.PutUint64(root1[24:32], balances[3])
|
||||
|
||||
root2 := [32]byte{}
|
||||
binary.LittleEndian.PutUint64(root2[:8], balances[4])
|
||||
|
||||
assert.DeepEqual(t, roots, [][32]byte{root1, root2})
|
||||
}
|
||||
|
||||
func TestBalancesSlice_CorrectRoots_Some(t *testing.T) {
|
||||
balances := []uint64{5, 2929, 34, 1291, 354305}
|
||||
roots, err := handleBalanceSlice(balances, []uint64{2, 3}, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
root1 := [32]byte{}
|
||||
binary.LittleEndian.PutUint64(root1[:8], balances[0])
|
||||
binary.LittleEndian.PutUint64(root1[8:16], balances[1])
|
||||
binary.LittleEndian.PutUint64(root1[16:24], balances[2])
|
||||
binary.LittleEndian.PutUint64(root1[24:32], balances[3])
|
||||
|
||||
// Returns root for each indice(even if duplicated)
|
||||
assert.DeepEqual(t, roots, [][32]byte{root1, root1})
|
||||
}
|
||||
|
||||
func TestValidateIndices_CompressedField(t *testing.T) {
|
||||
fakeTrie := &FieldTrie{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
reference: stateutil.NewRef(0),
|
||||
fieldLayers: nil,
|
||||
field: types.Balances,
|
||||
dataType: types.CompressedArray,
|
||||
length: params.BeaconConfig().ValidatorRegistryLimit / 4,
|
||||
numOfElems: 0,
|
||||
}
|
||||
goodIdx := params.BeaconConfig().ValidatorRegistryLimit - 1
|
||||
assert.NoError(t, fakeTrie.validateIndices([]uint64{goodIdx}))
|
||||
|
||||
badIdx := goodIdx + 1
|
||||
assert.ErrorContains(t, "invalid index for field balances", fakeTrie.validateIndices([]uint64{badIdx}))
|
||||
|
||||
}
|
||||
|
||||
@@ -37,6 +37,15 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool,
	return has, nil
}

// StateByRootIfCachedNoCopy retrieves a state, without copying it, using the input
// block root, but only if the state is already present in the hot state cache.
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
	if !s.hotStateCache.has(blockRoot) {
		return nil
	}
	state := s.hotStateCache.getWithoutCopy(blockRoot)
	return state
}

// StateByRoot retrieves the state using input block root.
func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot")

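Because this accessor returns nil on a cache miss and hands back the cached object without copying it, callers are expected to nil-check the result and treat the returned state as read-only. A hedged usage fragment (the `stateGen`, `blockRoot`, and `ctx` identifiers and the enclosing error-returning function are assumed, not part of the diff):

	// st is nil when the block root is not in the hot state cache.
	if st := stateGen.StateByRootIfCachedNoCopy(blockRoot); st != nil {
		// Fast path: read-only use of the shared, uncopied state.
		_ = st.Slot()
	} else {
		// Slow path: fall back to the copying accessor, which may replay blocks.
		st, err := stateGen.StateByRoot(ctx, blockRoot)
		if err != nil {
			return err
		}
		_ = st
	}
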
@@ -56,6 +56,44 @@ func TestStateByRoot_ColdState(t *testing.T) {
|
||||
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
|
||||
}
|
||||
|
||||
func TestStateByRootIfCachedNoCopy_HotState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
service := New(beaconDB)
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
r := [32]byte{'A'}
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]}))
|
||||
service.hotStateCache.put(r, beaconState)
|
||||
|
||||
loadedState := service.StateByRootIfCachedNoCopy(r)
|
||||
require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
|
||||
}
|
||||
|
||||
func TestStateByRootIfCachedNoCopy_ColdState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
service := New(beaconDB)
|
||||
service.finalizedInfo.slot = 2
|
||||
service.slotsPerArchivedPoint = 1
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
bRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
|
||||
loadedState := service.StateByRootIfCachedNoCopy(bRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, loadedState, nil)
|
||||
}
|
||||
|
||||
func TestStateByRoot_HotStateUsingEpochBoundaryCacheNoReplay(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
@@ -23,6 +23,11 @@ func NewMockService() *MockStateManager {
	}
}

// StateByRootIfCachedNoCopy --
func (m *MockStateManager) StateByRootIfCachedNoCopy(_ [32]byte) state.BeaconState { // skipcq: RVV-B0013
	panic("implement me")
}

// Resume --
func (m *MockStateManager) Resume(_ context.Context, _ state.BeaconState) (state.BeaconState, error) {
	panic("implement me")

@@ -31,6 +31,7 @@ type StateManager interface {
	HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
	HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, error)
	StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
	StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState
	StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
	StateBySlot(ctx context.Context, slot types.Slot) (state.BeaconState, error)
	RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"array_root.go",
|
||||
"block_header_root.go",
|
||||
"eth1_root.go",
|
||||
"participation_bit_root.go",
|
||||
@@ -49,7 +48,6 @@ go_test(
|
||||
"state_root_test.go",
|
||||
"stateutil_test.go",
|
||||
"trie_helpers_test.go",
|
||||
"validator_root_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
package stateutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
)
|
||||
|
||||
// HandleByteArrays computes and returns byte arrays in a slice of root format.
|
||||
func HandleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
rootCreator := func(input []byte) {
|
||||
newRoot := bytesutil.ToBytes32(input)
|
||||
roots = append(roots, newRoot)
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
rootCreator(val[i])
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val))
|
||||
}
|
||||
rootCreator(val[idx])
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package stateutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -127,42 +126,3 @@ func ValidatorEncKey(validator *ethpb.Validator) []byte {
|
||||
|
||||
return enc
|
||||
}
|
||||
|
||||
// HandleValidatorSlice returns the validator indices in a slice of root format.
|
||||
func HandleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
|
||||
length := len(indices)
|
||||
if convertAll {
|
||||
length = len(val)
|
||||
}
|
||||
roots := make([][32]byte, 0, length)
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
rootCreator := func(input *ethpb.Validator) error {
|
||||
newRoot, err := ValidatorRootWithHasher(hasher, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
roots = append(roots, newRoot)
|
||||
return nil
|
||||
}
|
||||
if convertAll {
|
||||
for i := range val {
|
||||
err := rootCreator(val[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
if len(val) > 0 {
|
||||
for _, idx := range indices {
|
||||
if idx > uint64(len(val))-1 {
|
||||
return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val))
|
||||
}
|
||||
err := rootCreator(val[idx])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
package stateutil
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
)
|
||||
|
||||
func Test_handleValidatorSlice_OutOfRange(t *testing.T) {
|
||||
vals := make([]*ethpb.Validator, 1)
|
||||
indices := []uint64{3}
|
||||
_, err := HandleValidatorSlice(vals, indices, false)
|
||||
assert.ErrorContains(t, "index 3 greater than number of validators 1", err)
|
||||
}
|
||||
@@ -5,7 +5,10 @@ go_library(
|
||||
srcs = ["types.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/types",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = ["//runtime/version:go_default_library"],
|
||||
deps = [
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
|
||||
@@ -1,6 +1,7 @@
package types

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/runtime/version"
)

@@ -18,6 +19,10 @@ const (
	// CompositeArray represents a variable length array with
	// a non primitive type.
	CompositeArray
	// CompressedArray represents a variable length array which
	// can pack multiple elements into a leaf of the underlying
	// trie.
	CompressedArray
)

// String returns the name of the field index.
@@ -84,6 +89,17 @@ func (f FieldIndex) String(stateVersion int) string {
	}
}

// ElemsInChunk returns the number of elements in the chunk (number of
// elements that are able to be packed).
func (f FieldIndex) ElemsInChunk() (uint64, error) {
	switch f {
	case Balances:
		return 4, nil
	default:
		return 0, errors.Errorf("field %d doesn't support element compression", f)
	}
}

// Below we define a set of useful enum values for the field
// indices of the beacon state. For example, genesisTime is the
// 0th field of the beacon state. This is helpful when we are

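With 4 balances packed per 32-byte leaf, the balance trie needs ValidatorRegistryLimit/4 leaves; the rootSelector hunks later in this diff compute the same bound as (limit*8 + 31) / 32. A quick arithmetic check, using the mainnet registry limit of 2^40 (a known spec constant, not taken from this diff):

	package main

	import "fmt"

	func main() {
		const validatorRegistryLimit = uint64(1) << 40 // 1,099,511,627,776
		const elemSize = uint64(8)                     // bytes per uint64 balance

		balLimit := (validatorRegistryLimit*elemSize + 31) / 32 // leaves in the balance trie
		fmt.Println(balLimit)                                   // 274877906944 == 1<<38
		fmt.Println(validatorRegistryLimit / 4)                 // same bound, 4 balances per leaf
	}
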
@@ -87,6 +87,7 @@ go_test(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -172,6 +173,10 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin
|
||||
if b.rebuildTrie[index] {
|
||||
return
|
||||
}
|
||||
// Exit early if balance trie computation isn't enabled.
|
||||
if !features.Get().EnableBalanceTrieComputation && index == balances {
|
||||
return
|
||||
}
|
||||
totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
|
||||
if totalIndicesLen > indicesLimit {
|
||||
b.rebuildTrie[index] = true
|
||||
|
||||
@@ -103,6 +103,7 @@ func (b *BeaconState) SetBalances(val []uint64) error {
|
||||
|
||||
b.state.Balances = val
|
||||
b.markFieldAsDirty(balances)
|
||||
b.rebuildTrie[balances] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -128,6 +129,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64
|
||||
bals[idx] = val
|
||||
b.state.Balances = bals
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(idx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -219,6 +221,8 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
|
||||
}
|
||||
|
||||
b.state.Balances = append(bals, bal)
|
||||
balIdx := len(b.state.Balances) - 1
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(balIdx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -104,3 +107,90 @@ func TestStateTrie_IsNil(t *testing.T) {
|
||||
nonNilState := &BeaconState{state: ðpb.BeaconState{}}
|
||||
assert.Equal(t, false, nonNilState.IsNil())
|
||||
}
|
||||
|
||||
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
|
||||
count := uint64(100)
|
||||
vals := make([]*ethpb.Validator, 0, count)
|
||||
bals := make([]uint64, 0, count)
|
||||
for i := uint64(1); i < count; i++ {
|
||||
someRoot := [32]byte{}
|
||||
someKey := [48]byte{}
|
||||
copy(someRoot[:], strconv.Itoa(int(i)))
|
||||
copy(someKey[:], strconv.Itoa(int(i)))
|
||||
vals = append(vals, ðpb.Validator{
|
||||
PublicKey: someKey[:],
|
||||
WithdrawalCredentials: someRoot[:],
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1,
|
||||
ActivationEpoch: 1,
|
||||
ExitEpoch: 1,
|
||||
WithdrawableEpoch: 1,
|
||||
})
|
||||
bals = append(bals, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
zeroHash := params.BeaconConfig().ZeroHash
|
||||
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockblockRoots); i++ {
|
||||
mockblockRoots[i] = zeroHash[:]
|
||||
}
|
||||
|
||||
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockstateRoots); i++ {
|
||||
mockstateRoots[i] = zeroHash[:]
|
||||
}
|
||||
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(mockrandaoMixes); i++ {
|
||||
mockrandaoMixes[i] = zeroHash[:]
|
||||
}
|
||||
var pubKeys [][]byte
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
|
||||
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
|
||||
}
|
||||
st, err := InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: make([]byte, 4),
|
||||
CurrentVersion: make([]byte, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
BlockRoots: mockblockRoots,
|
||||
StateRoots: mockstateRoots,
|
||||
RandaoMixes: mockrandaoMixes,
|
||||
JustificationBits: bitfield.NewBitvector4(),
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
if i%2 == 0 {
|
||||
assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000))
|
||||
}
|
||||
if i%3 == 0 {
|
||||
assert.NoError(t, st.AppendBalance(1000))
|
||||
}
|
||||
}
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances])
|
||||
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
@@ -319,6 +320,20 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
}
|
||||
return b.recomputeFieldTrie(validators, b.state.Validators)
|
||||
case balances:
|
||||
if features.Get().EnableBalanceTrieComputation {
|
||||
if b.rebuildTrie[field] {
|
||||
maxBalCap := params.BeaconConfig().ValidatorRegistryLimit
|
||||
elemSize := uint64(8)
|
||||
balLimit := (maxBalCap*elemSize + 31) / 32
|
||||
err := b.resetFieldTrie(field, b.state.Balances, balLimit)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
delete(b.rebuildTrie, field)
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
return b.recomputeFieldTrie(balances, b.state.Balances)
|
||||
}
|
||||
return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
|
||||
case randaoMixes:
|
||||
if b.rebuildTrie[field] {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -16,6 +17,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
|
||||
defer resetCfg()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func TestInitializeFromProto(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisState(t, 64)
|
||||
pbState, err := v1.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
|
||||
@@ -17,7 +17,6 @@ var _ state.BeaconState = (*BeaconState)(nil)
|
||||
|
||||
func init() {
|
||||
fieldMap = make(map[types.FieldIndex]types.DataType, params.BeaconConfig().BeaconStateFieldCount)
|
||||
|
||||
// Initialize the fixed sized arrays.
|
||||
fieldMap[types.BlockRoots] = types.BasicArray
|
||||
fieldMap[types.StateRoots] = types.BasicArray
|
||||
@@ -28,6 +27,7 @@ func init() {
|
||||
fieldMap[types.Validators] = types.CompositeArray
|
||||
fieldMap[types.PreviousEpochAttestations] = types.CompositeArray
|
||||
fieldMap[types.CurrentEpochAttestations] = types.CompositeArray
|
||||
fieldMap[types.Balances] = types.CompressedArray
|
||||
}
|
||||
|
||||
// fieldMap keeps track of each field
|
||||
|
||||
@@ -79,11 +79,13 @@ go_test(
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -171,6 +172,10 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin
|
||||
if b.rebuildTrie[index] {
|
||||
return
|
||||
}
|
||||
// Exit early if balance trie computation isn't enabled.
|
||||
if !features.Get().EnableBalanceTrieComputation && index == balances {
|
||||
return
|
||||
}
|
||||
totalIndicesLen := len(b.dirtyIndices[index]) + len(indices)
|
||||
if totalIndicesLen > indicesLimit {
|
||||
b.rebuildTrie[index] = true
|
||||
|
||||
@@ -2,10 +2,15 @@ package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -57,3 +62,100 @@ func TestAppendBeyondIndicesLimit(t *testing.T) {
|
||||
assert.Equal(t, true, st.rebuildTrie[validators])
|
||||
assert.Equal(t, len(st.dirtyIndices[validators]), 0)
|
||||
}
|
||||
|
||||
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
|
||||
count := uint64(100)
|
||||
vals := make([]*ethpb.Validator, 0, count)
|
||||
bals := make([]uint64, 0, count)
|
||||
for i := uint64(1); i < count; i++ {
|
||||
someRoot := [32]byte{}
|
||||
someKey := [48]byte{}
|
||||
copy(someRoot[:], strconv.Itoa(int(i)))
|
||||
copy(someKey[:], strconv.Itoa(int(i)))
|
||||
vals = append(vals, ðpb.Validator{
|
||||
PublicKey: someKey[:],
|
||||
WithdrawalCredentials: someRoot[:],
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1,
|
||||
ActivationEpoch: 1,
|
||||
ExitEpoch: 1,
|
||||
WithdrawableEpoch: 1,
|
||||
})
|
||||
bals = append(bals, params.BeaconConfig().MaxEffectiveBalance)
|
||||
}
|
||||
zeroHash := params.BeaconConfig().ZeroHash
|
||||
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockblockRoots); i++ {
|
||||
mockblockRoots[i] = zeroHash[:]
|
||||
}
|
||||
|
||||
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(mockstateRoots); i++ {
|
||||
mockstateRoots[i] = zeroHash[:]
|
||||
}
|
||||
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(mockrandaoMixes); i++ {
|
||||
mockrandaoMixes[i] = zeroHash[:]
|
||||
}
|
||||
var pubKeys [][]byte
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
|
||||
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
|
||||
}
|
||||
st, err := InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Slot: 1,
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: make([]byte, 4),
|
||||
CurrentVersion: make([]byte, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
CurrentEpochParticipation: []byte{},
|
||||
PreviousEpochParticipation: []byte{},
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
BlockRoots: mockblockRoots,
|
||||
StateRoots: mockstateRoots,
|
||||
RandaoMixes: mockrandaoMixes,
|
||||
JustificationBits: bitfield.NewBitvector4(),
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
CurrentSyncCommittee: ðpb.SyncCommittee{
|
||||
Pubkeys: pubKeys,
|
||||
AggregatePubkey: make([]byte, 48),
|
||||
},
|
||||
NextSyncCommittee: ðpb.SyncCommittee{
|
||||
Pubkeys: pubKeys,
|
||||
AggregatePubkey: make([]byte, 48),
|
||||
},
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
if i%2 == 0 {
|
||||
assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000))
|
||||
}
|
||||
if i%3 == 0 {
|
||||
assert.NoError(t, st.AppendBalance(1000))
|
||||
}
|
||||
}
|
||||
_, err = st.HashTreeRoot(context.Background())
|
||||
assert.NoError(t, err)
|
||||
newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances])
|
||||
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
|
||||
}
|
||||
|
||||
@@ -102,6 +102,7 @@ func (b *BeaconState) SetBalances(val []uint64) error {
|
||||
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
|
||||
|
||||
b.state.Balances = val
|
||||
b.rebuildTrie[balances] = true
|
||||
b.markFieldAsDirty(balances)
|
||||
return nil
|
||||
}
|
||||
@@ -128,6 +129,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64
|
||||
bals[idx] = val
|
||||
b.state.Balances = bals
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(idx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -219,7 +221,9 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
|
||||
}
|
||||
|
||||
b.state.Balances = append(bals, bal)
|
||||
balIdx := len(b.state.Balances) - 1
|
||||
b.markFieldAsDirty(balances)
|
||||
b.addDirtyIndices(balances, []uint64{uint64(balIdx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
@@ -324,6 +325,20 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
}
|
||||
return b.recomputeFieldTrie(validators, b.state.Validators)
|
||||
case balances:
|
||||
if features.Get().EnableBalanceTrieComputation {
|
||||
if b.rebuildTrie[field] {
|
||||
maxBalCap := params.BeaconConfig().ValidatorRegistryLimit
|
||||
elemSize := uint64(8)
|
||||
balLimit := (maxBalCap*elemSize + 31) / 32
|
||||
err := b.resetFieldTrie(field, b.state.Balances, balLimit)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
delete(b.rebuildTrie, field)
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
return b.recomputeFieldTrie(balances, b.state.Balances)
|
||||
}
|
||||
return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
|
||||
case randaoMixes:
|
||||
if b.rebuildTrie[field] {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -13,6 +14,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
|
||||
defer resetCfg()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func TestValidatorMap_DistinctCopy(t *testing.T) {
|
||||
count := uint64(100)
|
||||
vals := make([]*ethpb.Validator, 0, count)
|
||||
|
||||
@@ -22,6 +22,9 @@ func init() {
|
||||
// Initialize the composite arrays.
|
||||
fieldMap[types.Eth1DataVotes] = types.CompositeArray
|
||||
fieldMap[types.Validators] = types.CompositeArray
|
||||
|
||||
// Initialize Compressed Arrays
|
||||
fieldMap[types.Balances] = types.CompressedArray
|
||||
}
|
||||
|
||||
// fieldMap keeps track of each field
|
||||
|
||||
@@ -1,16 +1,34 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["types.go"],
|
||||
srcs = [
|
||||
"field_root_eth1.go",
|
||||
"field_root_validator.go",
|
||||
"field_root_vector.go",
|
||||
"field_roots.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3",
|
||||
visibility = ["//beacon-chain:__pkg__"],
|
||||
deps = [
|
||||
"//beacon-chain/state/fieldtrie:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_dgraph_io_ristretto//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["field_root_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["//testing/assert:go_default_library"],
|
||||
)
|
||||
|
||||
59
beacon-chain/state/v3/field_root_eth1.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// eth1Root computes the HashTreeRoot Merkleization of
|
||||
// a BeaconBlockHeader struct according to the eth2
|
||||
// Simple Serialize specification.
|
||||
func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
|
||||
if eth1Data == nil {
|
||||
return [32]byte{}, errors.New("nil eth1 data")
|
||||
}
|
||||
|
||||
enc := stateutil.Eth1DataEncKey(eth1Data)
|
||||
if features.Get().EnableSSZCache {
|
||||
if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
|
||||
return found.([32]byte), nil
|
||||
}
|
||||
}
|
||||
|
||||
root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
if features.Get().EnableSSZCache {
|
||||
cachedHasher.rootsCache.Set(string(enc), root, 32)
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// eth1DataVotesRoot computes the HashTreeRoot Merkleization of
|
||||
// a list of Eth1Data structs according to the eth2
|
||||
// Simple Serialize specification.
|
||||
func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
|
||||
hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
if features.Get().EnableSSZCache {
|
||||
if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
|
||||
return found.([32]byte), nil
|
||||
}
|
||||
}
|
||||
root, err := stateutil.Eth1DatasRoot(eth1DataVotes)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
if features.Get().EnableSSZCache {
|
||||
cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
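Both helpers above follow the same cache-or-compute shape: derive an encoding key, return a cached root when EnableSSZCache is on, otherwise compute the root and store it. A stripped-down sketch of that pattern (illustrative only, with a plain map standing in for the ristretto cache):

package main

import (
	"crypto/sha256"
	"fmt"
)

var rootsCache = map[string][32]byte{}

// cachedRoot returns the memoized root for enc, computing and storing it on a miss.
func cachedRoot(enc []byte, compute func() ([32]byte, error)) ([32]byte, error) {
	if r, ok := rootsCache[string(enc)]; ok {
		return r, nil
	}
	r, err := compute()
	if err != nil {
		return [32]byte{}, err
	}
	rootsCache[string(enc)] = r
	return r, nil
}

func main() {
	enc := []byte("eth1-data-encoding-key")
	r, _ := cachedRoot(enc, func() ([32]byte, error) { return sha256.Sum256(enc), nil })
	fmt.Printf("%x\n", r)
}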
23
beacon-chain/state/v3/field_root_test.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
)
|
||||
|
||||
func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) {
|
||||
_, err := nocachedHasher.arraysRoot([][]byte{}, 1, "testing")
|
||||
assert.NoError(t, err)
|
||||
_, err = nocachedHasher.arraysRoot([][]byte{}, 4, "testing")
|
||||
assert.NoError(t, err)
|
||||
_, err = nocachedHasher.arraysRoot([][]byte{}, 8, "testing")
|
||||
assert.NoError(t, err)
|
||||
_, err = nocachedHasher.arraysRoot([][]byte{}, 10, "testing")
|
||||
assert.ErrorContains(t, "hash layer is a non power of 2", err)
|
||||
}
|
||||
|
||||
func TestArraysTreeRoot_ZeroLength(t *testing.T) {
|
||||
_, err := nocachedHasher.arraysRoot([][]byte{}, 0, "testing")
|
||||
assert.ErrorContains(t, "zero leaves provided", err)
|
||||
}
|
||||
89
beacon-chain/state/v3/field_root_validator.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
|
||||
hashKeyElements := make([]byte, len(validators)*32)
|
||||
roots := make([][32]byte, len(validators))
|
||||
emptyKey := hash.FastSum256(hashKeyElements)
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
bytesProcessed := 0
|
||||
for i := 0; i < len(validators); i++ {
|
||||
val, err := h.validatorRoot(hasher, validators[i])
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
|
||||
}
|
||||
copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:])
|
||||
roots[i] = val
|
||||
bytesProcessed += 32
|
||||
}
|
||||
|
||||
hashKey := hash.FastSum256(hashKeyElements)
|
||||
if hashKey != emptyKey && h.rootsCache != nil {
|
||||
if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok {
|
||||
return found.([32]byte), nil
|
||||
}
|
||||
}
|
||||
|
||||
validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not compute validator registry merkleization")
|
||||
}
|
||||
validatorsRootsBuf := new(bytes.Buffer)
|
||||
if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length")
|
||||
}
|
||||
// We need to mix in the length of the slice.
|
||||
var validatorsRootsBufRoot [32]byte
|
||||
copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes())
|
||||
res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:])
|
||||
if hashKey != emptyKey && h.rootsCache != nil {
|
||||
h.rootsCache.Set(string(hashKey[:]), res, 32)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) {
|
||||
if validator == nil {
|
||||
return [32]byte{}, errors.New("nil validator")
|
||||
}
|
||||
|
||||
enc := stateutil.ValidatorEncKey(validator)
|
||||
// Check if it exists in cache:
|
||||
if h.rootsCache != nil {
|
||||
if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
|
||||
return found.([32]byte), nil
|
||||
}
|
||||
}
|
||||
|
||||
valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
if h.rootsCache != nil {
|
||||
h.rootsCache.Set(string(enc), valRoot, 32)
|
||||
}
|
||||
return valRoot, nil
|
||||
}
|
||||
|
||||
// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of
|
||||
// a list of validator structs according to the eth2
|
||||
// Simple Serialize specification.
|
||||
func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
|
||||
if features.Get().EnableSSZCache {
|
||||
return cachedHasher.validatorRegistryRoot(vals)
|
||||
}
|
||||
return nocachedHasher.validatorRegistryRoot(vals)
|
||||
}
|
||||
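The length mix-in near the end of validatorRegistryRoot is the standard SSZ step for list types: the Merkle root of the contents is hashed together with the little-endian list length padded to 32 bytes. A minimal sketch of just that step (not Prysm's ssz.MixInLength, which the diff already uses):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// mixInLength hashes a contents root with the 32-byte, little-endian length chunk.
func mixInLength(contentsRoot [32]byte, length uint64) [32]byte {
	var lengthChunk [32]byte
	binary.LittleEndian.PutUint64(lengthChunk[:8], length)
	return sha256.Sum256(append(contentsRoot[:], lengthChunk[:]...))
}

func main() {
	var contentsRoot [32]byte // placeholder for the merkleized validator roots
	fmt.Printf("%x\n", mixInLength(contentsRoot, 3))
}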
146
beacon-chain/state/v3/field_root_vector.go
Normal file
@@ -0,0 +1,146 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
)
|
||||
|
||||
func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
hashFunc := hash.CustomSHA256Hasher()
|
||||
if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil {
|
||||
depth := ssz.Depth(length)
|
||||
layersCache[fieldName] = make([][][32]byte, depth+1)
|
||||
}
|
||||
|
||||
leaves := make([][32]byte, length)
|
||||
for i, chunk := range input {
|
||||
copy(leaves[i][:], chunk)
|
||||
}
|
||||
bytesProcessed := 0
|
||||
changedIndices := make([]int, 0)
|
||||
prevLeaves, ok := leavesCache[fieldName]
|
||||
if len(prevLeaves) == 0 || h.rootsCache == nil {
|
||||
prevLeaves = leaves
|
||||
}
|
||||
|
||||
for i := 0; i < len(leaves); i++ {
|
||||
// We check if any items changed since the roots were last recomputed.
|
||||
notEqual := leaves[i] != prevLeaves[i]
|
||||
if ok && h.rootsCache != nil && notEqual {
|
||||
changedIndices = append(changedIndices, i)
|
||||
}
|
||||
bytesProcessed += 32
|
||||
}
|
||||
if len(changedIndices) > 0 && h.rootsCache != nil {
|
||||
var rt [32]byte
|
||||
var err error
|
||||
// If indices did change since last computation, we only recompute
|
||||
// the modified branches in the cached Merkle tree for this state field.
|
||||
chunks := leaves
|
||||
|
||||
// We need to ensure we recompute indices of the Merkle tree which
|
||||
// changed in-between calls to this function. This check adds an offset
|
||||
// to the recomputed indices to ensure we do so evenly.
|
||||
maxChangedIndex := changedIndices[len(changedIndices)-1]
|
||||
if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 {
|
||||
changedIndices = append(changedIndices, maxChangedIndex+1)
|
||||
}
|
||||
for i := 0; i < len(changedIndices); i++ {
|
||||
rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
}
|
||||
leavesCache[fieldName] = chunks
|
||||
return rt, nil
|
||||
}
|
||||
|
||||
res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
if h.rootsCache != nil {
|
||||
leavesCache[fieldName] = leaves
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
|
||||
items, ok := layersCache[fieldName]
|
||||
if !ok {
|
||||
return [32]byte{}, errors.New("could not recompute root as there was no cache found")
|
||||
}
|
||||
if items == nil {
|
||||
return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache")
|
||||
}
|
||||
layers := items
|
||||
root := chunks[idx]
|
||||
layers[0] = chunks
|
||||
// The merkle tree structure looks as follows:
|
||||
// [[r1, r2, r3, r4], [parent1, parent2], [root]]
|
||||
// Using information about the index which changed, idx, we recompute
|
||||
// only its branch up the tree.
|
||||
currentIndex := idx
|
||||
for i := 0; i < len(layers)-1; i++ {
|
||||
isLeft := currentIndex%2 == 0
|
||||
neighborIdx := currentIndex ^ 1
|
||||
|
||||
neighbor := [32]byte{}
|
||||
if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
|
||||
neighbor = layers[i][neighborIdx]
|
||||
}
|
||||
if isLeft {
|
||||
parentHash := hasher(append(root[:], neighbor[:]...))
|
||||
root = parentHash
|
||||
} else {
|
||||
parentHash := hasher(append(neighbor[:], root[:]...))
|
||||
root = parentHash
|
||||
}
|
||||
parentIdx := currentIndex / 2
|
||||
// Update the cached layers at the parent index.
|
||||
if len(layers[i+1]) == 0 {
|
||||
layers[i+1] = append(layers[i+1], root)
|
||||
} else {
|
||||
layers[i+1][parentIdx] = root
|
||||
}
|
||||
currentIndex = parentIdx
|
||||
}
|
||||
layersCache[fieldName] = layers
|
||||
// If there is only a single leaf, we return it (the identity element).
|
||||
if len(layers[0]) == 1 {
|
||||
return layers[0][0], nil
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
|
||||
fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
|
||||
if len(leaves) == 0 {
|
||||
return [32]byte{}, errors.New("zero leaves provided")
|
||||
}
|
||||
if len(leaves) == 1 {
|
||||
return leaves[0], nil
|
||||
}
|
||||
hashLayer := leaves
|
||||
layers := make([][][32]byte, ssz.Depth(length)+1)
|
||||
if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil {
|
||||
if len(items[0]) == len(leaves) {
|
||||
layers = items
|
||||
}
|
||||
}
|
||||
layers[0] = hashLayer
|
||||
var err error
|
||||
layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
root := hashLayer[0]
|
||||
if h.rootsCache != nil {
|
||||
layersCache[fieldName] = layers
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
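The recomputeRoot/merkleizeWithCache pair above implements an incremental Merkle update: when only a few leaves change, just their branches are rehashed against cached siblings. A self-contained sketch of that idea (illustrative only, using crypto/sha256 rather than Prysm's hashers):

package main

import (
	"crypto/sha256"
	"fmt"
)

// hashPair concatenates two 32-byte roots and hashes them.
func hashPair(a, b [32]byte) [32]byte {
	return sha256.Sum256(append(a[:], b[:]...))
}

// recomputeBranch updates one leaf and rehashes only its path to the root,
// reusing the cached sibling at every level, which is the idea behind recomputeRoot.
func recomputeBranch(layers [][][32]byte, idx int, newLeaf [32]byte) [32]byte {
	layers[0][idx] = newLeaf
	root := newLeaf
	for depth := 0; depth < len(layers)-1; depth++ {
		sibling := layers[depth][idx^1] // untouched neighbor from the cache
		if idx%2 == 0 {
			root = hashPair(root, sibling)
		} else {
			root = hashPair(sibling, root)
		}
		idx /= 2
		layers[depth+1][idx] = root // refresh the cached parent
	}
	return root
}

func main() {
	// Four leaves -> three cached layers: leaves, parents, root.
	leaves := make([][32]byte, 4)
	parents := make([][32]byte, 2)
	layers := [][][32]byte{leaves, parents, make([][32]byte, 1)}
	for i := 0; i < 4; i += 2 {
		parents[i/2] = hashPair(leaves[i], leaves[i+1])
	}
	layers[2][0] = hashPair(parents[0], parents[1])
	// Change leaf 2 and recompute only its branch.
	fmt.Printf("%x\n", recomputeBranch(layers, 2, [32]byte{0xaa}))
}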
226
beacon-chain/state/v3/field_roots.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"github.com/dgraph-io/ristretto"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
|
||||
layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
|
||||
lock sync.RWMutex
|
||||
)
|
||||
|
||||
const cacheSize = 100000
|
||||
|
||||
var nocachedHasher *stateRootHasher
|
||||
var cachedHasher *stateRootHasher
|
||||
|
||||
func init() {
rootsCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cacheSize, // number of keys to track frequency of (100k).
MaxCost: 1 << 22, // maximum cost of cache (about 4MB).
// 100,000 roots will take up approximately 3 MB in memory.
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
panic(err)
}
// The cached hasher reuses the ristretto roots cache; the nocached hasher recomputes roots every time.
cachedHasher = &stateRootHasher{rootsCache: rootsCache}
nocachedHasher = &stateRootHasher{}
}
|
||||
|
||||
type stateRootHasher struct {
|
||||
rootsCache *ristretto.Cache
|
||||
}
|
||||
|
||||
// computeFieldRoots returns the hash tree root computations of every field in
|
||||
// the beacon state as a list of 32 byte roots.
|
||||
//nolint:deadcode
|
||||
func computeFieldRoots(state *ethpb.BeaconStateMerge) ([][]byte, error) {
|
||||
if features.Get().EnableSSZCache {
|
||||
return cachedHasher.computeFieldRootsWithHasher(state)
|
||||
}
|
||||
return nocachedHasher.computeFieldRootsWithHasher(state)
|
||||
}
|
||||
|
||||
func (h *stateRootHasher) computeFieldRootsWithHasher(state *ethpb.BeaconStateMerge) ([][]byte, error) {
|
||||
if state == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
|
||||
|
||||
// Genesis time root.
|
||||
genesisRoot := ssz.Uint64Root(state.GenesisTime)
|
||||
fieldRoots[0] = genesisRoot[:]
|
||||
|
||||
// Genesis validator root.
|
||||
r := [32]byte{}
|
||||
copy(r[:], state.GenesisValidatorsRoot)
|
||||
fieldRoots[1] = r[:]
|
||||
|
||||
// Slot root.
|
||||
slotRoot := ssz.Uint64Root(uint64(state.Slot))
|
||||
fieldRoots[2] = slotRoot[:]
|
||||
|
||||
// Fork data structure root.
|
||||
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute fork merkleization")
|
||||
}
|
||||
fieldRoots[3] = forkHashTreeRoot[:]
|
||||
|
||||
// BeaconBlockHeader data structure root.
|
||||
headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute block header merkleization")
|
||||
}
|
||||
fieldRoots[4] = headerHashTreeRoot[:]
|
||||
|
||||
// BlockRoots array root.
|
||||
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute block roots merkleization")
|
||||
}
|
||||
fieldRoots[5] = blockRootsRoot[:]
|
||||
|
||||
// StateRoots array root.
|
||||
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute state roots merkleization")
|
||||
}
|
||||
fieldRoots[6] = stateRootsRoot[:]
|
||||
|
||||
// HistoricalRoots slice root.
|
||||
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
|
||||
}
|
||||
fieldRoots[7] = historicalRootsRt[:]
|
||||
|
||||
// Eth1Data data structure root.
|
||||
eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
|
||||
}
|
||||
fieldRoots[8] = eth1HashTreeRoot[:]
|
||||
|
||||
// Eth1DataVotes slice root.
|
||||
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
|
||||
}
|
||||
fieldRoots[9] = eth1VotesRoot[:]
|
||||
|
||||
// Eth1DepositIndex root.
|
||||
eth1DepositIndexBuf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
|
||||
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
|
||||
fieldRoots[10] = eth1DepositBuf[:]
|
||||
|
||||
// Validators slice root.
|
||||
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
|
||||
}
|
||||
fieldRoots[11] = validatorsRoot[:]
|
||||
|
||||
// Balances slice root.
|
||||
balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
|
||||
}
|
||||
fieldRoots[12] = balancesRoot[:]
|
||||
|
||||
// RandaoMixes array root.
|
||||
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
|
||||
}
|
||||
fieldRoots[13] = randaoRootsRoot[:]
|
||||
|
||||
// Slashings array root.
|
||||
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute slashings merkleization")
|
||||
}
|
||||
fieldRoots[14] = slashingsRootsRoot[:]
|
||||
|
||||
// PreviousEpochParticipation slice root.
|
||||
prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization")
|
||||
}
|
||||
fieldRoots[15] = prevParticipationRoot[:]
|
||||
|
||||
// CurrentEpochParticipation slice root.
|
||||
currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute current epoch participation merkleization")
|
||||
}
|
||||
fieldRoots[16] = currParticipationRoot[:]
|
||||
|
||||
// JustificationBits root.
|
||||
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
|
||||
fieldRoots[17] = justifiedBitsRoot[:]
|
||||
|
||||
// PreviousJustifiedCheckpoint data structure root.
|
||||
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
|
||||
}
|
||||
fieldRoots[18] = prevCheckRoot[:]
|
||||
|
||||
// CurrentJustifiedCheckpoint data structure root.
|
||||
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
|
||||
}
|
||||
fieldRoots[19] = currJustRoot[:]
|
||||
|
||||
// FinalizedCheckpoint data structure root.
|
||||
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
|
||||
}
|
||||
fieldRoots[20] = finalRoot[:]
|
||||
|
||||
// Inactivity scores root.
|
||||
inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute inactivityScoreRoot")
|
||||
}
|
||||
fieldRoots[21] = inactivityScoresRoot[:]
|
||||
|
||||
// Current sync committee root.
|
||||
currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
|
||||
}
|
||||
fieldRoots[22] = currentSyncCommitteeRoot[:]
|
||||
|
||||
// Next sync committee root.
|
||||
nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
|
||||
}
|
||||
fieldRoots[23] = nextSyncCommitteeRoot[:]
|
||||
|
||||
// Execution payload root.
|
||||
//TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65
|
||||
fieldRoots[24] = []byte{}
|
||||
|
||||
return fieldRoots, nil
|
||||
}
|
||||
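computeFieldRoots stops at the per-field roots; the caller is expected to merkleize that 25-entry vector (padded to 32 leaves) into the final state root. A hedged sketch of that last step, using crypto/sha256 instead of Prysm's hashers:

package main

import (
	"crypto/sha256"
	"fmt"
)

// merkleize pads the field roots to the next power of two with zero chunks
// and hashes pairwise until a single root remains.
func merkleize(leaves [][32]byte) [32]byte {
	n := 1
	for n < len(leaves) {
		n *= 2
	}
	layer := make([][32]byte, n)
	copy(layer, leaves)
	for len(layer) > 1 {
		next := make([][32]byte, len(layer)/2)
		for i := range next {
			next[i] = sha256.Sum256(append(layer[2*i][:], layer[2*i+1][:]...))
		}
		layer = next
	}
	return layer[0]
}

func main() {
	fieldRoots := make([][32]byte, 25) // one root per field of the Merge state, zeroed here
	fmt.Printf("%x\n", merkleize(fieldRoots))
}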
@@ -88,16 +88,29 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
s.pendingQueueLock.RLock()
|
||||
inPendingQueue := s.seenPendingBlocks[bytesutil.ToBytes32(b.Block().ParentRoot())]
|
||||
s.pendingQueueLock.RUnlock()
|
||||
|
||||
blkRoot, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
span.End()
|
||||
return err
|
||||
}
|
||||
inDB := s.cfg.beaconDB.HasBlock(ctx, blkRoot)
|
||||
// No need to process the same block twice.
|
||||
if inDB {
|
||||
s.pendingQueueLock.Lock()
|
||||
if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil {
|
||||
s.pendingQueueLock.Unlock()
|
||||
return err
|
||||
}
|
||||
s.pendingQueueLock.Unlock()
|
||||
span.End()
|
||||
continue
|
||||
}
|
||||
|
||||
s.pendingQueueLock.RLock()
|
||||
inPendingQueue := s.seenPendingBlocks[bytesutil.ToBytes32(b.Block().ParentRoot())]
|
||||
s.pendingQueueLock.RUnlock()
|
||||
|
||||
parentIsBad := s.hasBadBlock(bytesutil.ToBytes32(b.Block().ParentRoot()))
|
||||
blockIsBad := s.hasBadBlock(blkRoot)
|
||||
// Check if parent is a bad block.
|
||||
@@ -117,12 +130,12 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
inDB := s.cfg.beaconDB.HasBlock(ctx, bytesutil.ToBytes32(b.Block().ParentRoot()))
|
||||
parentInDb := s.cfg.beaconDB.HasBlock(ctx, bytesutil.ToBytes32(b.Block().ParentRoot()))
|
||||
hasPeer := len(pids) != 0
|
||||
|
||||
// Only request for missing parent block if it's not in beaconDB, not in pending cache
|
||||
// and has peer in the peer list.
|
||||
if !inPendingQueue && !inDB && hasPeer {
|
||||
if !inPendingQueue && !parentInDb && hasPeer {
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentSlot": b.Block().Slot(),
|
||||
"parentRoot": hex.EncodeToString(bytesutil.Trunc(b.Block().ParentRoot())),
|
||||
@@ -133,7 +146,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
if !inDB {
|
||||
if !parentInDb {
|
||||
span.End()
|
||||
continue
|
||||
}
|
||||
@@ -167,6 +180,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
|
||||
s.pendingQueueLock.Lock()
|
||||
if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil {
|
||||
s.pendingQueueLock.Unlock()
|
||||
return err
|
||||
}
|
||||
s.pendingQueueLock.Unlock()
|
||||
@@ -321,6 +335,7 @@ func (s *Service) deleteBlockFromPendingQueue(slot types.Slot, b block.SignedBea
|
||||
}
|
||||
if len(newBlks) == 0 {
|
||||
s.slotToPendingBlocks.Delete(slotToCacheKey(slot))
|
||||
delete(s.seenPendingBlocks, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -84,9 +84,13 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b1), b1Root))
|
||||
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b1)))
|
||||
|
||||
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), [32]byte{}))
|
||||
nBlock := util.NewBeaconBlock()
|
||||
nBlock.Block.Slot = b1.Block.Slot
|
||||
nRoot, err := nBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
|
||||
require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(nBlock), nRoot))
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
|
||||
|
||||
@@ -140,6 +144,46 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p1,
|
||||
beaconDB: db,
|
||||
chain: &mock.ChainService{
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
},
|
||||
},
|
||||
stateGen: stategen.New(db),
|
||||
},
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
r.initCaches()
|
||||
|
||||
b0 := util.NewBeaconBlock()
|
||||
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b0)))
|
||||
b0Root, err := b0.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b3 := util.NewBeaconBlock()
|
||||
b3.Block.Slot = 3
|
||||
b3.Block.ParentRoot = b0Root[:]
|
||||
b3Root, err := b3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b3)))
|
||||
|
||||
// Add b3 to the cache
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b3), b3Root))
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
// /- b1 - b2 - b5
|
||||
// b0
|
||||
// \- b3 - b4
|
||||
@@ -237,7 +281,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
|
||||
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 3, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
|
||||
// Add b2 to the cache
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b2), b2Root))
|
||||
@@ -248,7 +292,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
|
||||
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
@@ -318,7 +362,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
func TestService_sortedPendingSlots(t *testing.T) {
|
||||
@@ -429,7 +473,7 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
func TestService_AddPeningBlockToQueueOverMax(t *testing.T) {
|
||||
func TestService_AddPendingBlockToQueueOverMax(t *testing.T) {
|
||||
r := &Service{
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
|
||||
50
cmd/light/BUILD.bazel
Normal file
@@ -0,0 +1,50 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"client.go",
|
||||
"main.go",
|
||||
"process_update.go",
|
||||
"server.go",
|
||||
"validate_update.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/cmd/light",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "light",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["client_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
3
cmd/light/client.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package main
|
||||
|
||||
type Client struct{}
|
||||
25
cmd/light/client_test.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestProveCheckpoint(t *testing.T) {
|
||||
root := [32]byte{1}
|
||||
check := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
}
|
||||
tr, err := check.GetTree()
|
||||
require.NoError(t, err)
|
||||
a, err := tr.Get(0)
|
||||
require.NoError(t, err)
|
||||
b, err := tr.Get(1)
|
||||
require.NoError(t, err)
|
||||
fmt.Println(a.Hash())
|
||||
fmt.Println(b.Hash())
|
||||
}
|
||||
71
cmd/light/main.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
|
||||
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// Precomputed values for generalized indices.
|
||||
const (
|
||||
FinalizedRootIndex = 105
|
||||
FinalizedRootIndexFloorLog2 = 6
|
||||
NextSyncCommitteeIndex = 55
|
||||
NextSyncCommitteeIndexFloorLog2 = 5
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "light")
|
||||
|
||||
type LightClientSnapshot struct {
|
||||
Header *v1.BeaconBlockHeader
|
||||
CurrentSyncCommittee *v2.SyncCommittee
|
||||
NextSyncCommittee *v2.SyncCommittee
|
||||
}
|
||||
|
||||
type LightClientUpdate struct {
|
||||
Header *v1.BeaconBlockHeader
|
||||
NextSyncCommittee *v2.SyncCommittee
|
||||
NextSyncCommitteeBranch [NextSyncCommitteeIndexFloorLog2][32]byte
|
||||
FinalityHeader *v1.BeaconBlockHeader
|
||||
FinalityBranch [FinalizedRootIndexFloorLog2][32]byte
|
||||
SyncCommitteeBits bitfield.Bitvector512
|
||||
SyncCommitteeSignature [96]byte
|
||||
ForkVersion *v1alpha1.Version
|
||||
}
|
||||
|
||||
type Store struct {
|
||||
Snapshot *LightClientSnapshot
|
||||
ValidUpdates []*LightClientUpdate
|
||||
}
|
||||
|
||||
func main() {
|
||||
conn, err := grpc.Dial("localhost:4000", grpc.WithInsecure())
|
||||
if err != nil {
|
||||
log.Fatalf("fail to dial: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
lesClient := v1alpha1.NewLightClientClient(conn)
|
||||
update, err := lesClient.LatestUpdateFinalized(ctx, &emptypb.Empty{})
|
||||
if err != nil {
|
||||
log.Fatalf("could not get latest update: %v", err)
|
||||
}
|
||||
// Attempt to verify a merkle proof of the next sync committee branch vs. the state root.
|
||||
root := bytesutil.ToBytes32(update.Header.StateRoot)
|
||||
leaf, err := update.NextSyncCommittee.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.Fatalf("could not hash tree root: %v", err)
|
||||
}
|
||||
log.Infof("Verifying proof with root %#x, leaf %#x", root, leaf)
|
||||
validProof := ssz.VerifyProof(root, update.NextSyncCommitteeBranch, leaf, NextSyncCommitteeIndex)
|
||||
if !validProof {
|
||||
log.Error("could not verify merkle proof")
|
||||
}
|
||||
}
|
||||
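The precomputed constants at the top of main.go can be rederived from the Altair state layout: with 24 top-level fields the container has depth 5, so a field's generalized index is 32 plus its position, and finalized_checkpoint.root is the second child of field 20. A small sketch of that derivation (field positions are taken from the Altair spec, not from this diff):

package main

import "fmt"

// fieldGindex returns the generalized index of a top-level container field,
// given the container's tree depth (ceil(log2(field count))).
func fieldGindex(fieldIndex, depth uint64) uint64 {
	return (uint64(1) << depth) + fieldIndex
}

func main() {
	finalizedCheckpoint := fieldGindex(20, 5) // Altair field #20 -> gindex 52
	nextSyncCommittee := fieldGindex(23, 5)   // Altair field #23 -> gindex 55 (NextSyncCommitteeIndex)
	// finalized_checkpoint.root is the second child (index 1) of a two-field container.
	finalizedRoot := finalizedCheckpoint*2 + 1 // 105 (FinalizedRootIndex)
	fmt.Println(finalizedCheckpoint, nextSyncCommittee, finalizedRoot)
}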
54
cmd/light/process_update.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func applyLightClientUpdate(snapshot *LightClientSnapshot, update *LightClientUpdate) {
|
||||
snapshotPeriod := slots.ToEpoch(snapshot.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
updatePeriod := slots.ToEpoch(update.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
if updatePeriod == snapshotPeriod+1 {
|
||||
snapshot.CurrentSyncCommittee = snapshot.NextSyncCommittee
|
||||
} else {
|
||||
snapshot.Header = update.Header
|
||||
}
|
||||
}
|
||||
|
||||
func processLightClientUpdate(
|
||||
store *Store,
|
||||
update *LightClientUpdate,
|
||||
currentSlot types.Slot,
|
||||
genesisValidatorsRoot [32]byte,
|
||||
) error {
|
||||
if err := validateLightClientUpdate(store.Snapshot, update, genesisValidatorsRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
store.ValidUpdates = append(store.ValidUpdates, update)
|
||||
updateTimeout := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)
|
||||
sumParticipantBits := update.SyncCommitteeBits.Count()
|
||||
hasQuorum := sumParticipantBits*3 >= uint64(len(update.SyncCommitteeBits))*2
|
||||
if hasQuorum && !isEmptyBlockHeader(update.FinalityHeader) {
|
||||
// Apply update if (1) 2/3 quorum is reached and (2) we have a finality proof.
|
||||
// Note that (2) means that the current light client design needs finality.
|
||||
// It may be changed to re-organizable light client design. See the on-going issue consensus-specs#2182.
|
||||
applyLightClientUpdate(store.Snapshot, update)
|
||||
store.ValidUpdates = make([]*LightClientUpdate, 0)
|
||||
} else if currentSlot > store.Snapshot.Header.Slot.Add(updateTimeout) {
|
||||
// Forced best update when the update timeout has elapsed
|
||||
// Use the update that has the highest sum of sync committee bits.
|
||||
updateWithHighestSumBits := store.ValidUpdates[0]
|
||||
highestSumBitsUpdate := updateWithHighestSumBits.SyncCommitteeBits.Count()
|
||||
for _, validUpdate := range store.ValidUpdates {
|
||||
sumUpdateBits := validUpdate.SyncCommitteeBits.Count()
|
||||
if sumUpdateBits > highestSumBitsUpdate {
|
||||
highestSumBitsUpdate = sumUpdateBits
|
||||
updateWithHighestSumBits = validUpdate
|
||||
}
|
||||
}
|
||||
applyLightClientUpdate(store.Snapshot, updateWithHighestSumBits)
|
||||
store.ValidUpdates = make([]*LightClientUpdate, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
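The quorum test in processLightClientUpdate is the usual two-thirds rule over the sync committee bits. A tiny sketch of the boundary, assuming the mainnet committee size of 512 (note the diff compares against len() of the bitvector, which in Go counts the backing bytes rather than bits, so its threshold differs from this spec-style check):

package main

import "fmt"

// hasSupermajority reports whether participants form at least 2/3 of the committee.
func hasSupermajority(participants, committeeSize uint64) bool {
	return participants*3 >= committeeSize*2
}

func main() {
	fmt.Println(hasSupermajority(341, 512)) // false: 1023 < 1024
	fmt.Println(hasSupermajority(342, 512)) // true: 1026 >= 1024
}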
3
cmd/light/server.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package main
|
||||
|
||||
type Server struct{}
|
||||
151
cmd/light/validate_update.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls/blst"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls/common"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
|
||||
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func validateLightClientUpdate(
|
||||
snapshot *LightClientSnapshot,
|
||||
update *LightClientUpdate,
|
||||
genesisValidatorsRoot [32]byte,
|
||||
) error {
|
||||
if update.Header.Slot <= snapshot.Header.Slot {
|
||||
return errors.New("wrong")
|
||||
}
|
||||
snapshotPeriod := slots.ToEpoch(snapshot.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
updatePeriod := slots.ToEpoch(update.Header.Slot) / params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
if updatePeriod != snapshotPeriod && updatePeriod != snapshotPeriod+1 {
|
||||
return errors.New("unwanted")
|
||||
}
|
||||
|
||||
// Verify finality headers.
|
||||
var signedHeader *v1.BeaconBlockHeader
|
||||
if isEmptyBlockHeader(update.FinalityHeader) {
|
||||
signedHeader = update.Header
|
||||
// Check if branch is empty.
|
||||
for _, elem := range update.FinalityBranch {
|
||||
if elem != [32]byte{} {
|
||||
return errors.New("branch not empty")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
leaf, err := update.Header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
depth := FinalizedRootIndexFloorLog2
|
||||
index := getSubtreeIndex(FinalizedRootIndex)
|
||||
root := update.FinalityHeader.StateRoot
|
||||
merkleBranch := make([][]byte, len(update.FinalityBranch))
|
||||
for i, item := range update.FinalityBranch {
|
||||
merkleBranch[i] = item[:]
|
||||
}
|
||||
if !trie.VerifyMerkleBranch(root, leaf[:], int(index), merkleBranch, uint64(depth)) {
|
||||
return errors.New("does not verify")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify update next sync committee if the update period incremented.
|
||||
var syncCommittee *v2.SyncCommittee
|
||||
if updatePeriod == snapshotPeriod {
|
||||
syncCommittee = snapshot.CurrentSyncCommittee
|
||||
for _, elem := range update.NextSyncCommitteeBranch {
|
||||
if elem != [32]byte{} {
|
||||
return errors.New("branch not empty")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
syncCommittee = snapshot.NextSyncCommittee
|
||||
v1Sync := &v1alpha1.SyncCommittee{
|
||||
Pubkeys: syncCommittee.Pubkeys,
|
||||
AggregatePubkey: syncCommittee.AggregatePubkey,
|
||||
}
|
||||
leaf, err := v1Sync.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
depth := NextSyncCommitteeIndexFloorLog2
|
||||
index := getSubtreeIndex(NextSyncCommitteeIndex)
|
||||
root := update.Header.StateRoot
|
||||
merkleBranch := make([][]byte, len(update.NextSyncCommitteeBranch))
|
||||
for i, item := range update.NextSyncCommitteeBranch {
|
||||
merkleBranch[i] = item[:]
|
||||
}
|
||||
if !trie.VerifyMerkleBranch(root, leaf[:], int(index), merkleBranch, uint64(depth)) {
|
||||
return errors.New("does not verify")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify sync committee has sufficient participants
|
||||
if update.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
|
||||
return errors.New("insufficient participants")
|
||||
}
|
||||
|
||||
// Verify sync committee aggregate signature
|
||||
participantPubkeys := make([][]byte, 0)
|
||||
for i, pubKey := range syncCommittee.Pubkeys {
|
||||
bit := update.SyncCommitteeBits.BitAt(uint64(i))
|
||||
if bit {
|
||||
participantPubkeys = append(participantPubkeys, pubKey)
|
||||
}
|
||||
}
|
||||
domain, err := signing.ComputeDomain(
|
||||
params.BeaconConfig().DomainSyncCommittee,
|
||||
[]byte(update.ForkVersion.Version),
|
||||
genesisValidatorsRoot[:],
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signingRoot, err := signing.ComputeSigningRoot(signedHeader, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sig, err := blst.SignatureFromBytes(update.SyncCommitteeSignature[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pubKeys := make([]common.PublicKey, 0)
|
||||
for _, pubkey := range participantPubkeys {
|
||||
pk, err := blst.PublicKeyFromBytes(pubkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pubKeys = append(pubKeys, pk)
|
||||
}
|
||||
if !sig.FastAggregateVerify(pubKeys, signingRoot) {
|
||||
return errors.New("failed to verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isEmptyBlockHeader(header *v1.BeaconBlockHeader) bool {
|
||||
emptyRoot := params.BeaconConfig().ZeroHash
|
||||
return proto.Equal(header, &v1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: emptyRoot[:],
|
||||
StateRoot: emptyRoot[:],
|
||||
BodyRoot: emptyRoot[:],
|
||||
})
|
||||
}
|
||||
|
||||
func getSubtreeIndex(index uint64) uint64 {
|
||||
return index % uint64(math.Pow(2, floorLog2(index)))
|
||||
}
|
||||
|
||||
func floorLog2(x uint64) float64 {
|
||||
return math.Floor(math.Log2(float64(x)))
|
||||
}
|
||||
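getSubtreeIndex and floorLog2 above strip the leading bit of a generalized index, leaving the leaf's position inside its subtree. Worked through for the constants in main.go: 105 mod 2^6 = 41 and 55 mod 2^5 = 23. A float-free sketch of the same computation:

package main

import "fmt"

// getSubtreeIndex computes index mod 2^floor(log2(index)) without floating point.
func getSubtreeIndex(index uint64) uint64 {
	p := uint64(1)
	for p*2 <= index {
		p *= 2
	}
	return index % p
}

func main() {
	fmt.Println(getSubtreeIndex(105)) // 41, paired with the finality branch
	fmt.Println(getSubtreeIndex(55))  // 23, paired with the next-sync-committee branch
}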
@@ -67,6 +67,18 @@ func exportSlashingProtectionJSON(cliCtx *cli.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not export slashing protection history")
|
||||
}
|
||||
|
||||
// Check if JSON data is empty and issue a warning about common problems to the user.
|
||||
if eipJSON == nil || len(eipJSON.Data) == 0 {
|
||||
log.Fatal(
|
||||
"No slashing protection data was found in your database. This is likely because an older version of " +
|
||||
"Prysm would place your validator database in your wallet directory as a validator.db file. Now, " +
|
||||
"Prysm keeps its validator database inside the direct/ or derived/ folder in your wallet directory. " +
|
||||
"Try running this command again, but add direct/ or derived/ to the path where your wallet " +
|
||||
"directory is in and you should obtain your slashing protection history",
|
||||
)
|
||||
}
|
||||
|
||||
outputDir, err := userprompt.InputDirectory(
|
||||
cliCtx,
|
||||
"Enter your desired output directory for your slashing protection history file",
|
||||
|
||||
@@ -52,6 +52,7 @@ type Flags struct {
|
||||
EnableHistoricalSpaceRepresentation bool // EnableHistoricalSpaceRepresentation enables the saving of registry validators in separate buckets to save space
|
||||
EnableGetBlockOptimizations bool // EnableGetBlockOptimizations optimizes some elements of the GetBlock() function.
|
||||
EnableBatchVerification bool // EnableBatchVerification enables batch signature verification on gossip messages.
|
||||
EnableBalanceTrieComputation bool // EnableBalanceTrieComputation enables our beacon state to use balance tries for hash tree root operations.
|
||||
// Logging related toggles.
|
||||
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
|
||||
|
||||
@@ -223,6 +224,10 @@ func ConfigureBeaconChain(ctx *cli.Context) {
|
||||
logEnabled(enableBatchGossipVerification)
|
||||
cfg.EnableBatchVerification = true
|
||||
}
|
||||
if ctx.Bool(enableBalanceTrieComputation.Name) {
|
||||
logEnabled(enableBalanceTrieComputation)
|
||||
cfg.EnableBalanceTrieComputation = true
|
||||
}
|
||||
Init(cfg)
|
||||
}
|
||||
|
||||
|
||||
@@ -139,6 +139,10 @@ var (
|
||||
Name: "enable-batch-gossip-verification",
|
||||
Usage: "This enables batch verification of signatures received over gossip.",
|
||||
}
|
||||
enableBalanceTrieComputation = &cli.BoolFlag{
|
||||
Name: "enable-balance-trie-computation",
|
||||
Usage: "This enables optimized hash tree root operations for our balance field.",
|
||||
}
|
||||
)
|
||||
|
||||
// devModeFlags holds list of flags that are set when development mode is on.
|
||||
@@ -147,6 +151,7 @@ var devModeFlags = []cli.Flag{
|
||||
forceOptMaxCoverAggregationStategy,
|
||||
enableGetBlockOptimizations,
|
||||
enableBatchGossipVerification,
|
||||
enableBalanceTrieComputation,
|
||||
}
|
||||
|
||||
// ValidatorFlags contains a list of all the feature flags that apply to the validator client.
|
||||
@@ -192,6 +197,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
disableCorrectlyPruneCanonicalAtts,
|
||||
disableActiveBalanceCache,
|
||||
enableBatchGossipVerification,
|
||||
enableBalanceTrieComputation,
|
||||
}...)
|
||||
|
||||
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.
|
||||
|
||||
@@ -22,7 +22,7 @@ const (
|
||||
// Genesis Fork Epoch for the mainnet config.
|
||||
genesisForkEpoch = 0
|
||||
// Altair Fork Epoch for mainnet config.
|
||||
mainnetAltairForkEpoch = 74240 // Oct 27, 2021, 10:56:23am UTC
|
||||
mainnetAltairForkEpoch = 1 // Oct 27, 2021, 10:56:23am UTC
|
||||
)
|
||||
|
||||
var mainnetNetworkConfig = &NetworkConfig{
|
||||
@@ -97,8 +97,8 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
|
||||
// Time parameter constants.
|
||||
MinAttestationInclusionDelay: 1,
|
||||
SecondsPerSlot: 12,
|
||||
SlotsPerEpoch: 32,
|
||||
SecondsPerSlot: 4,
|
||||
SlotsPerEpoch: 4,
|
||||
SqrRootSlotsPerEpoch: 5,
|
||||
MinSeedLookahead: 1,
|
||||
MaxSeedLookahead: 4,
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/minio/sha256-simd"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
)
|
||||
|
||||
const bytesPerChunk = 32
|
||||
@@ -113,6 +114,53 @@ func Pack(serializedItems [][]byte) ([][]byte, error) {
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// PackByChunk packs a list of serialized items into 32-byte chunks, right-padding the final chunk with zeroes if needed.
|
||||
func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) {
|
||||
emptyChunk := [bytesPerChunk]byte{}
|
||||
// If there are no items, we return an empty chunk.
|
||||
if len(serializedItems) == 0 {
|
||||
return [][bytesPerChunk]byte{emptyChunk}, nil
|
||||
} else if len(serializedItems[0]) == bytesPerChunk {
|
||||
// If each item has exactly BYTES_PER_CHUNK length, we return the list of serialized items.
|
||||
chunks := make([][bytesPerChunk]byte, 0, len(serializedItems))
|
||||
for _, c := range serializedItems {
|
||||
chunks = append(chunks, bytesutil.ToBytes32(c))
|
||||
}
|
||||
return chunks, nil
|
||||
}
|
||||
// We flatten the list in order to pack its items into byte chunks correctly.
|
||||
var orderedItems []byte
|
||||
for _, item := range serializedItems {
|
||||
orderedItems = append(orderedItems, item...)
|
||||
}
|
||||
// If all our serialized item slices are length zero, we
|
||||
// exit early.
|
||||
if len(orderedItems) == 0 {
|
||||
return [][bytesPerChunk]byte{emptyChunk}, nil
|
||||
}
|
||||
numItems := len(orderedItems)
|
||||
var chunks [][bytesPerChunk]byte
|
||||
for i := 0; i < numItems; i += bytesPerChunk {
|
||||
j := i + bytesPerChunk
|
||||
// We create our upper bound index of the chunk, if it is greater than numItems,
|
||||
// we set it as numItems itself.
|
||||
if j > numItems {
|
||||
j = numItems
|
||||
}
|
||||
// We create chunks from the list of items based on the
|
||||
// indices determined above.
|
||||
// Right-pad the last chunk with zero bytes if it does not
|
||||
// have length bytesPerChunk from the helper.
|
||||
// The ToBytes32 helper allocates a 32-byte array, before
|
||||
// copying the ordered items in. This ensures that even if
|
||||
// the last chunk is != 32 in length, we will right-pad it with
|
||||
// zero bytes.
|
||||
chunks = append(chunks, bytesutil.ToBytes32(orderedItems[i:j]))
|
||||
}
|
||||
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// MixInLength appends hash length to root
|
||||
func MixInLength(root [32]byte, length []byte) [32]byte {
|
||||
var hash [32]byte
|
||||
|
||||
@@ -94,6 +94,22 @@ func TestPack(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackByChunk(t *testing.T) {
|
||||
byteSlice2D := [][]byte{
|
||||
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 5, 2, 6, 2, 7},
|
||||
{1, 1, 2, 3, 5, 8, 13, 21, 34},
|
||||
}
|
||||
expected := [][32]byte{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 5, 2, 6, 2, 7, 1, 1},
|
||||
{2, 3, 5, 8, 13, 21, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
|
||||
|
||||
result, err := ssz.PackByChunk(byteSlice2D)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(expected), len(result))
|
||||
for i, v := range expected {
|
||||
assert.DeepEqual(t, v, result[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestMixInLength(t *testing.T) {
|
||||
byteSlice := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
|
||||
length := []byte{1, 2, 3}
|
||||
|
||||
197
encoding/ssz/tree/state.go
Normal file
@@ -0,0 +1,197 @@
|
||||
package tree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/protolambda/ztyp/codec"
|
||||
"github.com/protolambda/ztyp/tree"
|
||||
"github.com/protolambda/ztyp/view"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
)
|
||||
|
||||
var (
|
||||
BLSPubkeyType = view.BasicVectorType(view.ByteType, 48)
|
||||
ValidatorType = view.ContainerType("Validator", []view.FieldDef{
|
||||
{"pubkey", BLSPubkeyType},
|
||||
{"withdrawal_credentials", view.RootType},
|
||||
{"effective_balance", view.Uint64Type},
|
||||
{"slashed", view.BoolType},
|
||||
{"activation_eligibility_epoch", view.Uint64Type},
|
||||
{"activation_epoch", view.Uint64Type},
|
||||
{"exit_epoch", view.Uint64Type},
|
||||
{"withdrawable_epoch", view.Uint64Type},
|
||||
})
|
||||
ForkType = view.ContainerType("Fork", []view.FieldDef{
|
||||
{"previous_version", view.Bytes4Type},
|
||||
{"current_version", view.Bytes4Type},
|
||||
{"epoch", view.Uint64Type},
|
||||
})
|
||||
BeaconBlockHeaderType = view.ContainerType("BeaconBlockHeader", []view.FieldDef{
|
||||
{"slot", view.Uint64Type},
|
||||
{"proposer_index", view.Uint64Type},
|
||||
{"parent_root", view.RootType},
|
||||
{"state_root", view.RootType},
|
||||
{"body_root", view.RootType},
|
||||
})
|
||||
BlockRootsType = view.VectorType(view.RootType, 8192)
|
||||
StateRootsType = view.VectorType(view.RootType, 8192)
|
||||
HistoricalRootsType = view.ListType(view.RootType, 16777216)
|
||||
Eth1DataType = view.ContainerType("Eth1Data", []view.FieldDef{
|
||||
{"deposit_root", view.RootType},
|
||||
{"deposit_count", view.Uint64Type},
|
||||
{"block_hash", view.RootType},
|
||||
})
|
||||
Eth1DataVotesType = view.ComplexListType(Eth1DataType, 2048)
|
||||
ValidatorsType = view.ComplexListType(ValidatorType, 1099511627776)
|
||||
BalancesType = view.BasicListType(view.Uint64Type, 1099511627776)
|
||||
RandaoMixesType = view.VectorType(view.RootType, 65536)
|
||||
SlashingsType = view.BasicVectorType(view.Uint64Type, 8192)
|
||||
ParticipationType = view.BasicListType(view.ByteType, 1099511627776)
|
||||
JustificationBitsType = view.BitVectorType(4)
|
||||
CheckpointType = view.ContainerType("Checkpoint", []view.FieldDef{
|
||||
{"epoch", view.Uint64Type},
|
||||
{"root", view.RootType},
|
||||
})
|
||||
InactivityScoresType = view.BasicListType(view.Uint64Type, 1099511627776)
|
||||
SyncCommitteeKeysType = view.VectorType(BLSPubkeyType, 512)
|
||||
SyncCommitteeType = view.ContainerType("SyncCommittee", []view.FieldDef{
|
||||
{"pubkeys", SyncCommitteeKeysType},
|
||||
{"aggregate_pubkey", BLSPubkeyType},
|
||||
})
|
||||
BeaconStateAltairType = view.ContainerType("BeaconStateAltair", []view.FieldDef{
|
||||
{"genesis_time", view.Uint64Type},
|
||||
{"genesis_validators_root", view.RootType},
|
||||
{"slot", view.Uint64Type},
|
||||
{"fork", ForkType},
|
||||
{"latest_block_header", BeaconBlockHeaderType},
|
||||
{"block_roots", BlockRootsType},
|
||||
{"state_roots", StateRootsType},
|
||||
{"historical_roots", HistoricalRootsType},
|
||||
{"eth1_data", Eth1DataType},
|
||||
{"eth1_data_votes", Eth1DataVotesType},
|
||||
{"eth1_deposit_index", view.Uint64Type},
|
||||
{"validators", ValidatorsType},
|
||||
{"balances", BalancesType},
|
||||
{"randao_mixes", RandaoMixesType},
|
||||
{"slashings", SlashingsType},
|
||||
{"previous_epoch_participation", ParticipationType},
|
||||
{"current_epoch_participation", ParticipationType},
|
||||
{"justification_bits", JustificationBitsType},
|
||||
{"previous_justified_checkpoint", CheckpointType},
|
||||
{"current_justified_checkpoint", CheckpointType},
|
||||
{"finalized_checkpoint", CheckpointType},
|
||||
{"inactivity_scores", InactivityScoresType},
|
||||
{"current_sync_committee", SyncCommitteeType},
|
||||
{"next_sync_committee", SyncCommitteeType},
|
||||
})
|
||||
)
|
||||
|
||||
type TreeBackedState struct {
	beaconState view.View
}

func NewTreeBackedState(beaconState state.BeaconState) (*TreeBackedState, error) {
	enc, err := beaconState.MarshalSSZ()
	if err != nil {
		return nil, err
	}
	dec := codec.NewDecodingReader(bytes.NewReader(enc), uint64(len(enc)))
	treeBacked, err := BeaconStateAltairType.Deserialize(dec)
	if err != nil {
		return nil, err
	}
	return &TreeBackedState{beaconState: treeBacked}, nil
}

func VerifyProof(root [32]byte, proof [][]byte, leaf tree.Root, generalizedIndex tree.Gindex64) bool {
	h := leaf
	hFn := tree.GetHashFn()
	idx := generalizedIndex
	for _, elem := range proof {
		if idx%2 == 0 {
			h = hFn(h, bytesutil.ToBytes32(elem))
		} else {
			h = hFn(bytesutil.ToBytes32(elem), h)
		}
		idx = idx / 2
	}
	return h == root
}

func (tb *TreeBackedState) View() view.View {
	return tb.beaconState
}

func (tb *TreeBackedState) Proof(
	fieldIndex uint64,
) (proof [][]byte, generalizedIdx tree.Gindex64, err error) {
	cont, ok := tb.beaconState.(*view.ContainerView)
	if !ok {
		err = errors.New("not a container")
		return
	}
	depth := tree.CoverDepth(cont.FieldCount())
	generalizedIdx, err = tree.ToGindex64(fieldIndex, depth)
	if err != nil {
		return
	}
	leaves := make(map[tree.Gindex64]struct{})
	leaves[generalizedIdx] = struct{}{}
	leavesSorted := make([]tree.Gindex64, 0, len(leaves))
	for g := range leaves {
		leavesSorted = append(leavesSorted, g)
	}
	sort.Slice(leavesSorted, func(i, j int) bool {
		return leavesSorted[i] < leavesSorted[j]
	})

	// Mark every gindex that is between the root and the leaves.
	interest := make(map[tree.Gindex64]struct{})
	for _, g := range leavesSorted {
		iter, _ := g.BitIter()
		n := tree.Gindex64(1)
		for {
			right, ok := iter.Next()
			if !ok {
				break
			}
			n *= 2
			if right {
				n += 1
			}
			interest[n] = struct{}{}
		}
	}
	witness := make(map[tree.Gindex64]struct{})
	// For every gindex that is covered, check if the sibling is covered, and if not, it's a witness
	for g := range interest {
		if _, ok := interest[g^1]; !ok {
			witness[g^1] = struct{}{}
		}
	}
	witnessSorted := make([]tree.Gindex64, 0, len(witness))
	for g := range witness {
		witnessSorted = append(witnessSorted, g)
	}
	sort.Slice(witnessSorted, func(i, j int) bool {
		return witnessSorted[i] < witnessSorted[j]
	})

	node := tb.beaconState.Backing()
	hFn := tree.GetHashFn()
	proof = make([][]byte, 0, len(witnessSorted))
	for i := len(witnessSorted) - 1; i >= 0; i-- {
		g := witnessSorted[i]
		n, err2 := node.Getter(g)
		if err2 != nil {
			err = err2
			return
		}
		root := n.MerkleRoot(hFn)
		proof = append(proof, root[:])
	}
	return
}
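
A note on how these two pieces compose (editorial addition, not part of the diff): Proof marks every node on the path from the state root to the requested field's leaf and collects the siblings of those path nodes as witnesses, while VerifyProof hashes the leaf back up through those witnesses and compares the result with the root. A minimal usage sketch under that reading; the helper name, the hard-coded field index 20 (finalized_checkpoint), and the error text are illustrative assumptions:

// Hypothetical usage sketch, not part of the change above.
func proveFinalizedCheckpoint(prysmState state.BeaconState) error {
	tb, err := NewTreeBackedState(prysmState)
	if err != nil {
		return err
	}
	// finalized_checkpoint is field 20 of BeaconStateAltairType.
	proof, gIndex, err := tb.Proof(20)
	if err != nil {
		return err
	}
	hFn := tree.GetHashFn()
	root := tb.View().HashTreeRoot(hFn)
	leaf, err := tb.View().Backing().Getter(gIndex)
	if err != nil {
		return err
	}
	if !VerifyProof(root, proof, leaf.MerkleRoot(hFn), gIndex) {
		return errors.New("proof did not verify against the state root")
	}
	return nil
}
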
71 encoding/ssz/tree/state_test.go Normal file
@@ -0,0 +1,71 @@
package tree

import (
	"bytes"
	"context"
	"fmt"
	"testing"

	"github.com/protolambda/ztyp/codec"
	"github.com/protolambda/ztyp/tree"
	"github.com/protolambda/ztyp/view"
	stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
	"github.com/prysmaticlabs/prysm/io/file"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
)

func TestProof_SimpleField(t *testing.T) {
	runProofTest(t, 0 /* genesis time */)
}

func TestProof_FinalizedCheckpoint(t *testing.T) {
	runProofTest(t, 20 /* finalized checkpoint */)
}

func runProofTest(t testing.TB, fieldIndex uint64) {
	data, err := file.ReadFileAsBytes("/tmp/state.ssz")
	require.NoError(t, err)

	dec := codec.NewDecodingReader(bytes.NewReader(data), uint64(len(data)))
	treeBacked, err := BeaconStateAltairType.Deserialize(dec)
	require.NoError(t, err)
	tb := &TreeBackedState{beaconState: treeBacked}

	// Get a proof of the field.
	proof, gIndex, err := tb.Proof(fieldIndex)
	require.NoError(t, err)

	root := tb.beaconState.HashTreeRoot(tree.GetHashFn())
	leaf, err := tb.View().Backing().Getter(gIndex)
	require.NoError(t, err)

	// Verify the Merkle proof using the state root, the leaf of the requested field,
	// and the generalized index of the field in the state.
	valid := VerifyProof(root, proof, leaf.MerkleRoot(tree.GetHashFn()), gIndex)
	require.Equal(t, true, valid)
}

func TestPrysmSSZComparison(t *testing.T) {
	data, err := file.ReadFileAsBytes("/tmp/state.ssz")
	require.NoError(t, err)

	protoState := &ethpb.BeaconStateAltair{}
	require.NoError(t, protoState.UnmarshalSSZ(data))
	prysmBeaconState, err := stateAltair.InitializeFromProto(protoState)
	require.NoError(t, err)

	dec := codec.NewDecodingReader(bytes.NewReader(data), uint64(len(data)))
	ztypBeaconState, err := BeaconStateAltairType.Deserialize(dec)
	require.NoError(t, err)
	hFn := tree.GetHashFn()
	ztypItem := ztypBeaconState.(*view.ContainerView)
	ztypRoot := ztypItem.HashTreeRoot(hFn)
	prysmRoot, err := prysmBeaconState.HashTreeRoot(context.Background())
	require.NoError(t, err)
	require.Equal(
		t,
		fmt.Sprintf("%#x", prysmRoot),
		ztypRoot.String(),
	)
}
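
For the TestProof_FinalizedCheckpoint case, the generalized-index arithmetic behind tb.Proof(20) works out as follows: BeaconStateAltair has 24 top-level fields, so the container is covered at depth 5 and field index 20 lands at generalized index (1 << 5) | 20 = 52. A small worked sketch of that calculation (editorial addition; the concrete numbers are computed here, not asserted anywhere in the diff):

// Hypothetical worked example, not part of the change above.
depth := tree.CoverDepth(24)              // smallest depth with 2^depth >= 24 -> 5
gIndex, err := tree.ToGindex64(20, depth) // (1 << 5) | 20 -> 52
if err != nil {
	panic(err)
}
fmt.Printf("depth=%d gindex=%d\n", depth, gIndex) // prints: depth=5 gindex=52

Under this construction the proof for that field should contain five sibling roots, one per tree level between the field leaf and the state root.
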
1 go.mod
@@ -79,6 +79,7 @@ require (
	github.com/prometheus/procfs v0.7.0 // indirect
	github.com/prometheus/prom2json v1.3.0
	github.com/prometheus/tsdb v0.10.0 // indirect
	github.com/protolambda/ztyp v0.1.9 // indirect
	github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d
	github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7
	github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c

2 go.sum
@@ -1191,6 +1191,8 @@ github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
github.com/protolambda/ztyp v0.1.9 h1:TEQvOTihEf89grFJMOZA1DrJ4B6M1Cg35U3WRSTdgew=
github.com/protolambda/ztyp v0.1.9/go.mod h1:NAGmX7+zlkxxv7F5ATHrdXwZFtkQjAUinDgg7RAV29k=
github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d h1:1dN7YAqMN3oAJ0LceWcyv/U4jHLh+5urnSnr4br6zg4=
github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d/go.mod h1:kOmQ/zdobQf7HUohDTifDNFEZfNaSCIY5fkONPL+dWU=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=

24 proto/eth/service/key_management.pb.go generated
@@ -87,9 +87,10 @@ func (ImportedKeystoreStatus_Status) EnumDescriptor() ([]byte, []int) {
type DeletedKeystoreStatus_Status int32

const (
	DeletedKeystoreStatus_DELETED DeletedKeystoreStatus_Status = 0
	DeletedKeystoreStatus_NOT_FOUND DeletedKeystoreStatus_Status = 1
	DeletedKeystoreStatus_ERROR DeletedKeystoreStatus_Status = 2
	DeletedKeystoreStatus_DELETED DeletedKeystoreStatus_Status = 0
	DeletedKeystoreStatus_NOT_FOUND DeletedKeystoreStatus_Status = 1
	DeletedKeystoreStatus_NOT_ACTIVE DeletedKeystoreStatus_Status = 2
	DeletedKeystoreStatus_ERROR DeletedKeystoreStatus_Status = 3
)

// Enum value maps for DeletedKeystoreStatus_Status.
@@ -97,12 +98,14 @@ var (
	DeletedKeystoreStatus_Status_name = map[int32]string{
		0: "DELETED",
		1: "NOT_FOUND",
		2: "ERROR",
		2: "NOT_ACTIVE",
		3: "ERROR",
	}
	DeletedKeystoreStatus_Status_value = map[string]int32{
		"DELETED": 0,
		"NOT_FOUND": 1,
		"ERROR": 2,
		"DELETED": 0,
		"NOT_FOUND": 1,
		"NOT_ACTIVE": 2,
		"ERROR": 3,
	}
)

@@ -625,7 +628,7 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{
	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x30, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75,
	0x73, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12,
	0x0d, 0x0a, 0x09, 0x44, 0x55, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x09,
	0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0xae, 0x01, 0x0a, 0x15, 0x44, 0x65,
	0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0xbe, 0x01, 0x0a, 0x15, 0x44, 0x65,
	0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61,
	0x74, 0x75, 0x73, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20,
	0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
@@ -633,10 +636,11 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{
	0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
	0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
	0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61,
	0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x3f, 0x0a, 0x06, 0x53, 0x74, 0x61,
	0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x00,
	0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12,
	0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x32, 0xb9, 0x03, 0x0a, 0x0d, 0x4b,
	0x0e, 0x0a, 0x0a, 0x4e, 0x4f, 0x54, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12,
	0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x32, 0xb9, 0x03, 0x0a, 0x0d, 0x4b,
	0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x78, 0x0a, 0x0d,
	0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x16, 0x2e,
	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,

@@ -130,7 +130,8 @@ message DeletedKeystoreStatus {
  enum Status {
    DELETED = 0;
    NOT_FOUND = 1;
    ERROR = 2;
    NOT_ACTIVE = 2;
    ERROR = 3;
  }
  Status status = 1;
  string message = 2;

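With this change, callers of the keymanager deletion API can distinguish keys that were simply not active from keys that were not found at all: NOT_ACTIVE now occupies value 2 and ERROR moves to 3. A hedged consumer-side sketch (the "service" import alias for the generated proto/eth/service package and the surrounding function are assumptions, not part of this diff):

// Hypothetical handling of the extended DeletedKeystoreStatus enum.
func describeDeletion(st *service.DeletedKeystoreStatus) string {
	switch st.Status {
	case service.DeletedKeystoreStatus_DELETED:
		return "key deleted"
	case service.DeletedKeystoreStatus_NOT_ACTIVE:
		return "key not active"
	case service.DeletedKeystoreStatus_NOT_FOUND:
		return "key not found"
	default:
		return "error: " + st.Message
	}
}
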
912 proto/prysm/v1alpha1/light_client.pb.go generated Executable file
@@ -0,0 +1,912 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.15.8
|
||||
// source: proto/prysm/v1alpha1/light_client.proto
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
empty "github.com/golang/protobuf/ptypes/empty"
|
||||
github_com_prysmaticlabs_go_bitfield "github.com/prysmaticlabs/go-bitfield"
|
||||
_ "github.com/prysmaticlabs/prysm/proto/eth/ext"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
type BestUpdatesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
SyncCommitteePeriods []uint64 `protobuf:"varint,1,rep,packed,name=sync_committee_periods,json=syncCommitteePeriods,proto3" json:"sync_committee_periods,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BestUpdatesRequest) Reset() {
|
||||
*x = BestUpdatesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BestUpdatesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BestUpdatesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *BestUpdatesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BestUpdatesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*BestUpdatesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_light_client_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *BestUpdatesRequest) GetSyncCommitteePeriods() []uint64 {
|
||||
if x != nil {
|
||||
return x.SyncCommitteePeriods
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type BestUpdatesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Updates []*LightClientUpdate `protobuf:"bytes,1,rep,name=updates,proto3" json:"updates,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BestUpdatesResponse) Reset() {
|
||||
*x = BestUpdatesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BestUpdatesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BestUpdatesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *BestUpdatesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BestUpdatesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*BestUpdatesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_light_client_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *BestUpdatesResponse) GetUpdates() []*LightClientUpdate {
|
||||
if x != nil {
|
||||
return x.Updates
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LightClientUpdate struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Header *BeaconBlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||
NextSyncCommittee *SyncCommittee `protobuf:"bytes,2,opt,name=next_sync_committee,json=nextSyncCommittee,proto3" json:"next_sync_committee,omitempty"`
|
||||
NextSyncCommitteeBranch [][]byte `protobuf:"bytes,3,rep,name=next_sync_committee_branch,json=nextSyncCommitteeBranch,proto3" json:"next_sync_committee_branch,omitempty" ssz-size:"5,32"`
|
||||
FinalityHeader *BeaconBlockHeader `protobuf:"bytes,4,opt,name=finality_header,json=finalityHeader,proto3" json:"finality_header,omitempty"`
|
||||
FinalityBranch [][]byte `protobuf:"bytes,5,rep,name=finality_branch,json=finalityBranch,proto3" json:"finality_branch,omitempty" ssz-size:"6,32"`
|
||||
SyncCommitteeBits github_com_prysmaticlabs_go_bitfield.Bitvector512 `protobuf:"bytes,6,opt,name=sync_committee_bits,json=syncCommitteeBits,proto3" json:"sync_committee_bits,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector512" ssz-size:"64"`
|
||||
SyncCommitteeSignature []byte `protobuf:"bytes,7,opt,name=sync_committee_signature,json=syncCommitteeSignature,proto3" json:"sync_committee_signature,omitempty" ssz-size:"96"`
|
||||
ForkVersion []byte `protobuf:"bytes,8,opt,name=fork_version,json=forkVersion,proto3" json:"fork_version,omitempty" ssz-size:"4"`
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) Reset() {
|
||||
*x = LightClientUpdate{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LightClientUpdate) ProtoMessage() {}
|
||||
|
||||
func (x *LightClientUpdate) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LightClientUpdate.ProtoReflect.Descriptor instead.
|
||||
func (*LightClientUpdate) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_light_client_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetHeader() *BeaconBlockHeader {
|
||||
if x != nil {
|
||||
return x.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetNextSyncCommittee() *SyncCommittee {
|
||||
if x != nil {
|
||||
return x.NextSyncCommittee
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetNextSyncCommitteeBranch() [][]byte {
|
||||
if x != nil {
|
||||
return x.NextSyncCommitteeBranch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetFinalityHeader() *BeaconBlockHeader {
|
||||
if x != nil {
|
||||
return x.FinalityHeader
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetFinalityBranch() [][]byte {
|
||||
if x != nil {
|
||||
return x.FinalityBranch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetSyncCommitteeBits() github_com_prysmaticlabs_go_bitfield.Bitvector512 {
|
||||
if x != nil {
|
||||
return x.SyncCommitteeBits
|
||||
}
|
||||
return github_com_prysmaticlabs_go_bitfield.Bitvector512(nil)
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetSyncCommitteeSignature() []byte {
|
||||
if x != nil {
|
||||
return x.SyncCommitteeSignature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientUpdate) GetForkVersion() []byte {
|
||||
if x != nil {
|
||||
return x.ForkVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ClientSnapshot struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Header *BeaconBlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||
CurrentSyncCommittee *SyncCommittee `protobuf:"bytes,2,opt,name=current_sync_committee,json=currentSyncCommittee,proto3" json:"current_sync_committee,omitempty"`
|
||||
NextSyncCommittee *SyncCommittee `protobuf:"bytes,3,opt,name=next_sync_committee,json=nextSyncCommittee,proto3" json:"next_sync_committee,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ClientSnapshot) Reset() {
|
||||
*x = ClientSnapshot{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ClientSnapshot) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ClientSnapshot) ProtoMessage() {}
|
||||
|
||||
func (x *ClientSnapshot) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ClientSnapshot.ProtoReflect.Descriptor instead.
|
||||
func (*ClientSnapshot) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_light_client_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ClientSnapshot) GetHeader() *BeaconBlockHeader {
|
||||
if x != nil {
|
||||
return x.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ClientSnapshot) GetCurrentSyncCommittee() *SyncCommittee {
|
||||
if x != nil {
|
||||
return x.CurrentSyncCommittee
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ClientSnapshot) GetNextSyncCommittee() *SyncCommittee {
|
||||
if x != nil {
|
||||
return x.NextSyncCommittee
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SyncAttestedData struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Header *BeaconBlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||
FinalityCheckpoint *Checkpoint `protobuf:"bytes,2,opt,name=finality_checkpoint,json=finalityCheckpoint,proto3" json:"finality_checkpoint,omitempty"`
|
||||
FinalityBranch [][]byte `protobuf:"bytes,3,rep,name=finality_branch,json=finalityBranch,proto3" json:"finality_branch,omitempty" ssz-size:"6,32"`
|
||||
NextSyncCommittee *SyncCommittee `protobuf:"bytes,4,opt,name=next_sync_committee,json=nextSyncCommittee,proto3" json:"next_sync_committee,omitempty"`
|
||||
NextSyncCommitteeBranch [][]byte `protobuf:"bytes,5,rep,name=next_sync_committee_branch,json=nextSyncCommitteeBranch,proto3" json:"next_sync_committee_branch,omitempty" ssz-size:"5,32"`
|
||||
}
|
||||
|
||||
func (x *SyncAttestedData) Reset() {
|
||||
*x = SyncAttestedData{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncAttestedData) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncAttestedData) ProtoMessage() {}
|
||||
|
||||
func (x *SyncAttestedData) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncAttestedData.ProtoReflect.Descriptor instead.
|
||||
func (*SyncAttestedData) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_light_client_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *SyncAttestedData) GetHeader() *BeaconBlockHeader {
|
||||
if x != nil {
|
||||
return x.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SyncAttestedData) GetFinalityCheckpoint() *Checkpoint {
|
||||
if x != nil {
|
||||
return x.FinalityCheckpoint
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SyncAttestedData) GetFinalityBranch() [][]byte {
|
||||
if x != nil {
|
||||
return x.FinalityBranch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SyncAttestedData) GetNextSyncCommittee() *SyncCommittee {
|
||||
if x != nil {
|
||||
return x.NextSyncCommittee
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SyncAttestedData) GetNextSyncCommitteeBranch() [][]byte {
|
||||
if x != nil {
|
||||
return x.NextSyncCommitteeBranch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LightClientFinalizedCheckpoint struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Header *BeaconBlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||
NextSyncCommittee *SyncCommittee `protobuf:"bytes,2,opt,name=next_sync_committee,json=nextSyncCommittee,proto3" json:"next_sync_committee,omitempty"`
|
||||
NextSyncCommitteeBranch [][]byte `protobuf:"bytes,3,rep,name=next_sync_committee_branch,json=nextSyncCommitteeBranch,proto3" json:"next_sync_committee_branch,omitempty" ssz-size:"5,32"`
|
||||
}
|
||||
|
||||
func (x *LightClientFinalizedCheckpoint) Reset() {
|
||||
*x = LightClientFinalizedCheckpoint{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LightClientFinalizedCheckpoint) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LightClientFinalizedCheckpoint) ProtoMessage() {}
|
||||
|
||||
func (x *LightClientFinalizedCheckpoint) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_light_client_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LightClientFinalizedCheckpoint.ProtoReflect.Descriptor instead.
|
||||
func (*LightClientFinalizedCheckpoint) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_light_client_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *LightClientFinalizedCheckpoint) GetHeader() *BeaconBlockHeader {
|
||||
if x != nil {
|
||||
return x.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientFinalizedCheckpoint) GetNextSyncCommittee() *SyncCommittee {
|
||||
if x != nil {
|
||||
return x.NextSyncCommittee
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LightClientFinalizedCheckpoint) GetNextSyncCommitteeBranch() [][]byte {
|
||||
if x != nil {
|
||||
return x.NextSyncCommitteeBranch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_proto_prysm_v1alpha1_light_client_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_proto_prysm_v1alpha1_light_client_proto_rawDesc = []byte{
|
||||
0x0a, 0x27, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x63, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74, 0x68, 0x65, 0x72,
|
||||
0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
|
||||
0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f,
|
||||
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70,
|
||||
0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61,
|
||||
0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x1a, 0x27, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x73, 0x74,
|
||||
0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f,
|
||||
0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x29, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f,
|
||||
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f,
|
||||
0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a,
|
||||
0x12, 0x42, 0x65, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d,
|
||||
0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20,
|
||||
0x03, 0x28, 0x04, 0x52, 0x14, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74,
|
||||
0x65, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x22, 0x59, 0x0a, 0x13, 0x42, 0x65, 0x73,
|
||||
0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x42, 0x0a, 0x07, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x28, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68,
|
||||
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43,
|
||||
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x07, 0x75, 0x70, 0x64,
|
||||
0x61, 0x74, 0x65, 0x73, 0x22, 0xd1, 0x04, 0x0a, 0x11, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c,
|
||||
0x69, 0x65, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x68, 0x65,
|
||||
0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x74, 0x68,
|
||||
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65,
|
||||
0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x13,
|
||||
0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
|
||||
0x74, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x74, 0x68, 0x65,
|
||||
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
|
||||
0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x52,
|
||||
0x11, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74,
|
||||
0x65, 0x65, 0x12, 0x45, 0x0a, 0x1a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f,
|
||||
0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68,
|
||||
0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x35, 0x2c, 0x33, 0x32,
|
||||
0x52, 0x17, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
|
||||
0x74, 0x65, 0x65, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x69, 0x6e,
|
||||
0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74,
|
||||
0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f,
|
||||
0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0e, 0x66, 0x69,
|
||||
0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x0f,
|
||||
0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18,
|
||||
0x05, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x36, 0x2c, 0x33, 0x32, 0x52,
|
||||
0x0e, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12,
|
||||
0x6b, 0x0a, 0x13, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65,
|
||||
0x65, 0x5f, 0x62, 0x69, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x3b, 0x8a, 0xb5,
|
||||
0x18, 0x02, 0x31, 0x36, 0x82, 0xb5, 0x18, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73,
|
||||
0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74,
|
||||
0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x31, 0x32, 0x38, 0x52, 0x11, 0x73, 0x79, 0x6e, 0x63, 0x43,
|
||||
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x42, 0x69, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x18,
|
||||
0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x73,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x16, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d,
|
||||
0x69, 0x74, 0x74, 0x65, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x28,
|
||||
0x0a, 0x0c, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0x8a, 0xb5, 0x18, 0x01, 0x34, 0x52, 0x0b, 0x66, 0x6f, 0x72,
|
||||
0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x02, 0x0a, 0x0e, 0x43, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x68,
|
||||
0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
|
||||
0x68, 0x61, 0x31, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48,
|
||||
0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x5a, 0x0a,
|
||||
0x16, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f,
|
||||
0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
|
||||
0x74, 0x65, 0x65, 0x52, 0x14, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x79, 0x6e, 0x63,
|
||||
0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x12, 0x54, 0x0a, 0x13, 0x6e, 0x65, 0x78,
|
||||
0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
|
||||
0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x52, 0x11, 0x6e, 0x65,
|
||||
0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x22,
|
||||
0xf8, 0x02, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64,
|
||||
0x44, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x65, 0x61,
|
||||
0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06,
|
||||
0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x13, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69,
|
||||
0x74, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63,
|
||||
0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x12, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79,
|
||||
0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x0f, 0x66, 0x69,
|
||||
0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, 0x03, 0x20,
|
||||
0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x36, 0x2c, 0x33, 0x32, 0x52, 0x0e, 0x66,
|
||||
0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x54, 0x0a,
|
||||
0x13, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69,
|
||||
0x74, 0x74, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x74, 0x68,
|
||||
0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65,
|
||||
0x52, 0x11, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
|
||||
0x74, 0x65, 0x65, 0x12, 0x45, 0x0a, 0x1a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63,
|
||||
0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63,
|
||||
0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x35, 0x2c, 0x33,
|
||||
0x32, 0x52, 0x17, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69,
|
||||
0x74, 0x74, 0x65, 0x65, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x22, 0xff, 0x01, 0x0a, 0x1e, 0x4c,
|
||||
0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69,
|
||||
0x7a, 0x65, 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a,
|
||||
0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63,
|
||||
0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12,
|
||||
0x54, 0x0a, 0x13, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d,
|
||||
0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c,
|
||||
0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74,
|
||||
0x65, 0x65, 0x52, 0x11, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d,
|
||||
0x69, 0x74, 0x74, 0x65, 0x65, 0x12, 0x45, 0x0a, 0x1a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79,
|
||||
0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x62, 0x72, 0x61,
|
||||
0x6e, 0x63, 0x68, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x08, 0x8a, 0xb5, 0x18, 0x04, 0x35,
|
||||
0x2c, 0x33, 0x32, 0x52, 0x17, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d,
|
||||
0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x32, 0xda, 0x03, 0x0a,
|
||||
0x0b, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x96, 0x01, 0x0a,
|
||||
0x0b, 0x42, 0x65, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c,
|
||||
0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x65, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
|
||||
0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
|
||||
0x42, 0x65, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x25, 0x2f, 0x65, 0x74,
|
||||
0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74,
|
||||
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x65, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61,
|
||||
0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x94, 0x01, 0x0a, 0x15, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74,
|
||||
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x12,
|
||||
0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x28, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
|
||||
0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
|
||||
0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74,
|
||||
0x65, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x12, 0x31, 0x2f, 0x65, 0x74, 0x68, 0x2f,
|
||||
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x63, 0x6c,
|
||||
0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61,
|
||||
0x74, 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x9a, 0x01, 0x0a,
|
||||
0x18, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x6e,
|
||||
0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
|
||||
0x79, 0x1a, 0x28, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68,
|
||||
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43,
|
||||
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6c,
|
||||
0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x6f, 0x6e,
|
||||
0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x93, 0x01, 0x0a, 0x19, 0x6f, 0x72,
|
||||
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
|
||||
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c,
|
||||
0x69, 0x65, 0x6e, 0x74, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73,
|
||||
0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79,
|
||||
0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa,
|
||||
0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56,
|
||||
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65,
|
||||
0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_proto_prysm_v1alpha1_light_client_proto_rawDescOnce sync.Once
|
||||
file_proto_prysm_v1alpha1_light_client_proto_rawDescData = file_proto_prysm_v1alpha1_light_client_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_proto_prysm_v1alpha1_light_client_proto_rawDescGZIP() []byte {
|
||||
file_proto_prysm_v1alpha1_light_client_proto_rawDescOnce.Do(func() {
|
||||
file_proto_prysm_v1alpha1_light_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_light_client_proto_rawDescData)
|
||||
})
|
||||
return file_proto_prysm_v1alpha1_light_client_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_prysm_v1alpha1_light_client_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_proto_prysm_v1alpha1_light_client_proto_goTypes = []interface{}{
|
||||
(*BestUpdatesRequest)(nil), // 0: ethereum.eth.v1alpha1.BestUpdatesRequest
|
||||
(*BestUpdatesResponse)(nil), // 1: ethereum.eth.v1alpha1.BestUpdatesResponse
|
||||
(*LightClientUpdate)(nil), // 2: ethereum.eth.v1alpha1.LightClientUpdate
|
||||
(*ClientSnapshot)(nil), // 3: ethereum.eth.v1alpha1.ClientSnapshot
|
||||
(*SyncAttestedData)(nil), // 4: ethereum.eth.v1alpha1.SyncAttestedData
|
||||
(*LightClientFinalizedCheckpoint)(nil), // 5: ethereum.eth.v1alpha1.LightClientFinalizedCheckpoint
|
||||
(*BeaconBlockHeader)(nil), // 6: ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
(*SyncCommittee)(nil), // 7: ethereum.eth.v1alpha1.SyncCommittee
|
||||
(*Checkpoint)(nil), // 8: ethereum.eth.v1alpha1.Checkpoint
|
||||
(*empty.Empty)(nil), // 9: google.protobuf.Empty
|
||||
}
|
||||
var file_proto_prysm_v1alpha1_light_client_proto_depIdxs = []int32{
|
||||
2, // 0: ethereum.eth.v1alpha1.BestUpdatesResponse.updates:type_name -> ethereum.eth.v1alpha1.LightClientUpdate
|
||||
6, // 1: ethereum.eth.v1alpha1.LightClientUpdate.header:type_name -> ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
7, // 2: ethereum.eth.v1alpha1.LightClientUpdate.next_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
6, // 3: ethereum.eth.v1alpha1.LightClientUpdate.finality_header:type_name -> ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
6, // 4: ethereum.eth.v1alpha1.ClientSnapshot.header:type_name -> ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
7, // 5: ethereum.eth.v1alpha1.ClientSnapshot.current_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
7, // 6: ethereum.eth.v1alpha1.ClientSnapshot.next_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
6, // 7: ethereum.eth.v1alpha1.SyncAttestedData.header:type_name -> ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
8, // 8: ethereum.eth.v1alpha1.SyncAttestedData.finality_checkpoint:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
7, // 9: ethereum.eth.v1alpha1.SyncAttestedData.next_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
6, // 10: ethereum.eth.v1alpha1.LightClientFinalizedCheckpoint.header:type_name -> ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
7, // 11: ethereum.eth.v1alpha1.LightClientFinalizedCheckpoint.next_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
0, // 12: ethereum.eth.v1alpha1.LightClient.BestUpdates:input_type -> ethereum.eth.v1alpha1.BestUpdatesRequest
|
||||
9, // 13: ethereum.eth.v1alpha1.LightClient.LatestUpdateFinalized:input_type -> google.protobuf.Empty
|
||||
9, // 14: ethereum.eth.v1alpha1.LightClient.LatestUpdateNonFinalized:input_type -> google.protobuf.Empty
|
||||
1, // 15: ethereum.eth.v1alpha1.LightClient.BestUpdates:output_type -> ethereum.eth.v1alpha1.BestUpdatesResponse
|
||||
2, // 16: ethereum.eth.v1alpha1.LightClient.LatestUpdateFinalized:output_type -> ethereum.eth.v1alpha1.LightClientUpdate
|
||||
2, // 17: ethereum.eth.v1alpha1.LightClient.LatestUpdateNonFinalized:output_type -> ethereum.eth.v1alpha1.LightClientUpdate
|
||||
15, // [15:18] is the sub-list for method output_type
|
||||
12, // [12:15] is the sub-list for method input_type
|
||||
12, // [12:12] is the sub-list for extension type_name
|
||||
12, // [12:12] is the sub-list for extension extendee
|
||||
0, // [0:12] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_proto_prysm_v1alpha1_light_client_proto_init() }
|
||||
func file_proto_prysm_v1alpha1_light_client_proto_init() {
|
||||
if File_proto_prysm_v1alpha1_light_client_proto != nil {
|
||||
return
|
||||
}
|
||||
file_proto_prysm_v1alpha1_attestation_proto_init()
|
||||
file_proto_prysm_v1alpha1_beacon_state_proto_init()
|
||||
file_proto_prysm_v1alpha1_beacon_block_proto_init()
|
||||
file_proto_prysm_v1alpha1_sync_committee_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_proto_prysm_v1alpha1_light_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*BestUpdatesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_light_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*BestUpdatesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_light_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*LightClientUpdate); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_light_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ClientSnapshot); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_light_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncAttestedData); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_prysm_v1alpha1_light_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*LightClientFinalizedCheckpoint); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_prysm_v1alpha1_light_client_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_proto_prysm_v1alpha1_light_client_proto_goTypes,
|
||||
DependencyIndexes: file_proto_prysm_v1alpha1_light_client_proto_depIdxs,
|
||||
MessageInfos: file_proto_prysm_v1alpha1_light_client_proto_msgTypes,
|
||||
}.Build()
|
||||
File_proto_prysm_v1alpha1_light_client_proto = out.File
|
||||
file_proto_prysm_v1alpha1_light_client_proto_rawDesc = nil
|
||||
file_proto_prysm_v1alpha1_light_client_proto_goTypes = nil
|
||||
file_proto_prysm_v1alpha1_light_client_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// LightClientClient is the client API for LightClient service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type LightClientClient interface {
|
||||
BestUpdates(ctx context.Context, in *BestUpdatesRequest, opts ...grpc.CallOption) (*BestUpdatesResponse, error)
|
||||
LatestUpdateFinalized(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LightClientUpdate, error)
|
||||
LatestUpdateNonFinalized(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LightClientUpdate, error)
|
||||
}
|
||||
|
||||
type lightClientClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewLightClientClient(cc grpc.ClientConnInterface) LightClientClient {
|
||||
return &lightClientClient{cc}
|
||||
}
|
||||
|
||||
func (c *lightClientClient) BestUpdates(ctx context.Context, in *BestUpdatesRequest, opts ...grpc.CallOption) (*BestUpdatesResponse, error) {
|
||||
out := new(BestUpdatesResponse)
|
||||
err := c.cc.Invoke(ctx, "/ethereum.eth.v1alpha1.LightClient/BestUpdates", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *lightClientClient) LatestUpdateFinalized(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LightClientUpdate, error) {
    out := new(LightClientUpdate)
    err := c.cc.Invoke(ctx, "/ethereum.eth.v1alpha1.LightClient/LatestUpdateFinalized", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *lightClientClient) LatestUpdateNonFinalized(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LightClientUpdate, error) {
    out := new(LightClientUpdate)
    err := c.cc.Invoke(ctx, "/ethereum.eth.v1alpha1.LightClient/LatestUpdateNonFinalized", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

// LightClientServer is the server API for LightClient service.
type LightClientServer interface {
    BestUpdates(context.Context, *BestUpdatesRequest) (*BestUpdatesResponse, error)
    LatestUpdateFinalized(context.Context, *empty.Empty) (*LightClientUpdate, error)
    LatestUpdateNonFinalized(context.Context, *empty.Empty) (*LightClientUpdate, error)
}

// UnimplementedLightClientServer can be embedded to have forward compatible implementations.
type UnimplementedLightClientServer struct {
}

func (*UnimplementedLightClientServer) BestUpdates(context.Context, *BestUpdatesRequest) (*BestUpdatesResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method BestUpdates not implemented")
}
func (*UnimplementedLightClientServer) LatestUpdateFinalized(context.Context, *empty.Empty) (*LightClientUpdate, error) {
    return nil, status.Errorf(codes.Unimplemented, "method LatestUpdateFinalized not implemented")
}
func (*UnimplementedLightClientServer) LatestUpdateNonFinalized(context.Context, *empty.Empty) (*LightClientUpdate, error) {
    return nil, status.Errorf(codes.Unimplemented, "method LatestUpdateNonFinalized not implemented")
}

func RegisterLightClientServer(s *grpc.Server, srv LightClientServer) {
    s.RegisterService(&_LightClient_serviceDesc, srv)
}

func _LightClient_BestUpdates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(BestUpdatesRequest)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(LightClientServer).BestUpdates(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/ethereum.eth.v1alpha1.LightClient/BestUpdates",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(LightClientServer).BestUpdates(ctx, req.(*BestUpdatesRequest))
    }
    return interceptor(ctx, in, info, handler)
}

func _LightClient_LatestUpdateFinalized_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(empty.Empty)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(LightClientServer).LatestUpdateFinalized(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/ethereum.eth.v1alpha1.LightClient/LatestUpdateFinalized",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(LightClientServer).LatestUpdateFinalized(ctx, req.(*empty.Empty))
    }
    return interceptor(ctx, in, info, handler)
}

func _LightClient_LatestUpdateNonFinalized_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(empty.Empty)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(LightClientServer).LatestUpdateNonFinalized(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/ethereum.eth.v1alpha1.LightClient/LatestUpdateNonFinalized",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(LightClientServer).LatestUpdateNonFinalized(ctx, req.(*empty.Empty))
    }
    return interceptor(ctx, in, info, handler)
}

var _LightClient_serviceDesc = grpc.ServiceDesc{
    ServiceName: "ethereum.eth.v1alpha1.LightClient",
    HandlerType: (*LightClientServer)(nil),
    Methods: []grpc.MethodDesc{
        {
            MethodName: "BestUpdates",
            Handler:    _LightClient_BestUpdates_Handler,
        },
        {
            MethodName: "LatestUpdateFinalized",
            Handler:    _LightClient_LatestUpdateFinalized_Handler,
        },
        {
            MethodName: "LatestUpdateNonFinalized",
            Handler:    _LightClient_LatestUpdateNonFinalized_Handler,
        },
    },
    Streams:  []grpc.StreamDesc{},
    Metadata: "proto/prysm/v1alpha1/light_client.proto",
}
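For orientation, the generated client above can be exercised directly over gRPC. The following is a minimal sketch, not part of the generated code: the endpoint address, the insecure dial option, and the printed field are illustrative assumptions about a node exposing this service.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/golang/protobuf/ptypes/empty"
    eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "google.golang.org/grpc"
)

func main() {
    // Assumed gRPC endpoint; a real deployment may use a different address and TLS.
    conn, err := grpc.Dial("localhost:4000", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("could not dial: %v", err)
    }
    defer conn.Close()

    client := eth.NewLightClientClient(conn)
    // LatestUpdateFinalized takes an empty request and returns a LightClientUpdate.
    update, err := client.LatestUpdateFinalized(context.Background(), &empty.Empty{})
    if err != nil {
        log.Fatalf("rpc failed: %v", err)
    }
    // Header may be nil if the node has no update yet; this sketch assumes it is set.
    fmt.Printf("finalized light client update at slot %d\n", update.Header.Slot)
}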
proto/prysm/v1alpha1/light_client.pb.gw.go (new executable file, 301 lines)
@@ -0,0 +1,301 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: proto/prysm/v1alpha1/light_client.proto

/*
Package eth is a reverse proxy.

It translates gRPC into RESTful JSON APIs.
*/
package eth

import (
    "context"
    "io"
    "net/http"

    emptypb "github.com/golang/protobuf/ptypes/empty"
    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
    github_com_prysmaticlabs_eth2_types "github.com/prysmaticlabs/eth2-types"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/status"
    "google.golang.org/protobuf/proto"
)

// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
var _ = github_com_prysmaticlabs_eth2_types.Epoch(0)
var _ = emptypb.Empty{}
func request_LightClient_BestUpdates_0(ctx context.Context, marshaler runtime.Marshaler, client LightClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq BestUpdatesRequest
    var metadata runtime.ServerMetadata

    newReader, berr := utilities.IOReaderFactory(req.Body)
    if berr != nil {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
    }
    if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
    }

    msg, err := client.BestUpdates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

func local_request_LightClient_BestUpdates_0(ctx context.Context, marshaler runtime.Marshaler, server LightClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq BestUpdatesRequest
    var metadata runtime.ServerMetadata

    newReader, berr := utilities.IOReaderFactory(req.Body)
    if berr != nil {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
    }
    if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
        return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
    }

    msg, err := server.BestUpdates(ctx, &protoReq)
    return msg, metadata, err

}

func request_LightClient_LatestUpdateFinalized_0(ctx context.Context, marshaler runtime.Marshaler, client LightClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq emptypb.Empty
    var metadata runtime.ServerMetadata

    msg, err := client.LatestUpdateFinalized(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

func local_request_LightClient_LatestUpdateFinalized_0(ctx context.Context, marshaler runtime.Marshaler, server LightClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq emptypb.Empty
    var metadata runtime.ServerMetadata

    msg, err := server.LatestUpdateFinalized(ctx, &protoReq)
    return msg, metadata, err

}

func request_LightClient_LatestUpdateNonFinalized_0(ctx context.Context, marshaler runtime.Marshaler, client LightClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq emptypb.Empty
    var metadata runtime.ServerMetadata

    msg, err := client.LatestUpdateNonFinalized(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
    return msg, metadata, err

}

func local_request_LightClient_LatestUpdateNonFinalized_0(ctx context.Context, marshaler runtime.Marshaler, server LightClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
    var protoReq emptypb.Empty
    var metadata runtime.ServerMetadata

    msg, err := server.LatestUpdateNonFinalized(ctx, &protoReq)
    return msg, metadata, err

}
// RegisterLightClientHandlerServer registers the http handlers for service LightClient to "mux".
// UnaryRPC     :call LightClientServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLightClientHandlerFromEndpoint instead.
func RegisterLightClientHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LightClientServer) error {

    mux.Handle("POST", pattern_LightClient_BestUpdates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        var stream runtime.ServerTransportStream
        ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.v1alpha1.LightClient/BestUpdates")
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := local_request_LightClient_BestUpdates_0(rctx, inboundMarshaler, server, req, pathParams)
        md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_LightClient_BestUpdates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("GET", pattern_LightClient_LatestUpdateFinalized_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        var stream runtime.ServerTransportStream
        ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.v1alpha1.LightClient/LatestUpdateFinalized")
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := local_request_LightClient_LatestUpdateFinalized_0(rctx, inboundMarshaler, server, req, pathParams)
        md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_LightClient_LatestUpdateFinalized_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("GET", pattern_LightClient_LatestUpdateNonFinalized_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        var stream runtime.ServerTransportStream
        ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.v1alpha1.LightClient/LatestUpdateNonFinalized")
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := local_request_LightClient_LatestUpdateNonFinalized_0(rctx, inboundMarshaler, server, req, pathParams)
        md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_LightClient_LatestUpdateNonFinalized_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    return nil
}
// RegisterLightClientHandlerFromEndpoint is same as RegisterLightClientHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterLightClientHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
    conn, err := grpc.Dial(endpoint, opts...)
    if err != nil {
        return err
    }
    defer func() {
        if err != nil {
            if cerr := conn.Close(); cerr != nil {
                grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
            }
            return
        }
        go func() {
            <-ctx.Done()
            if cerr := conn.Close(); cerr != nil {
                grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
            }
        }()
    }()

    return RegisterLightClientHandler(ctx, mux, conn)
}

// RegisterLightClientHandler registers the http handlers for service LightClient to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterLightClientHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
    return RegisterLightClientHandlerClient(ctx, mux, NewLightClientClient(conn))
}
// RegisterLightClientHandlerClient registers the http handlers for service LightClient
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LightClientClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LightClientClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "LightClientClient" to call the correct interceptors.
func RegisterLightClientHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LightClientClient) error {

    mux.Handle("POST", pattern_LightClient_BestUpdates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.v1alpha1.LightClient/BestUpdates")
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_LightClient_BestUpdates_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_LightClient_BestUpdates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("GET", pattern_LightClient_LatestUpdateFinalized_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.v1alpha1.LightClient/LatestUpdateFinalized")
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_LightClient_LatestUpdateFinalized_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_LightClient_LatestUpdateFinalized_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    mux.Handle("GET", pattern_LightClient_LatestUpdateNonFinalized_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
        ctx, cancel := context.WithCancel(req.Context())
        defer cancel()
        inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
        rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.v1alpha1.LightClient/LatestUpdateNonFinalized")
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }
        resp, md, err := request_LightClient_LatestUpdateNonFinalized_0(rctx, inboundMarshaler, client, req, pathParams)
        ctx = runtime.NewServerMetadataContext(ctx, md)
        if err != nil {
            runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
            return
        }

        forward_LightClient_LatestUpdateNonFinalized_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

    })

    return nil
}
var (
    pattern_LightClient_BestUpdates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"eth", "v1alpha1", "lightclient", "best_update"}, ""))

    pattern_LightClient_LatestUpdateFinalized_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"eth", "v1alpha1", "lightclient", "latest_update_finalized"}, ""))

    pattern_LightClient_LatestUpdateNonFinalized_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"eth", "v1alpha1", "lightclient", "latest_update_nonfinalized"}, ""))
)

var (
    forward_LightClient_BestUpdates_0 = runtime.ForwardResponseMessage

    forward_LightClient_LatestUpdateFinalized_0 = runtime.ForwardResponseMessage

    forward_LightClient_LatestUpdateNonFinalized_0 = runtime.ForwardResponseMessage
)
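The gateway file above only generates the handlers; a process still has to mount them on an HTTP server. The sketch below shows one way this is commonly wired up, assuming the node's gRPC server listens on localhost:4000 and the JSON gateway should serve on localhost:8080 (both addresses are placeholders, not values taken from this diff).

package main

import (
    "context"
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "google.golang.org/grpc"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Dials the assumed gRPC endpoint and forwards REST calls such as
    // GET /eth/v1alpha1/lightclient/latest_update_finalized to it.
    mux := runtime.NewServeMux()
    opts := []grpc.DialOption{grpc.WithInsecure()}
    if err := eth.RegisterLightClientHandlerFromEndpoint(ctx, mux, "localhost:4000", opts); err != nil {
        log.Fatalf("could not register gateway handlers: %v", err)
    }

    log.Fatal(http.ListenAndServe("localhost:8080", mux))
}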
proto/prysm/v1alpha1/light_client.proto (new file, 91 lines)
@@ -0,0 +1,91 @@
// Copyright 2021 Prysmatic Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";

package ethereum.eth.v1alpha1;

import "proto/eth/ext/options.proto";
import "google/api/annotations.proto";
import "google/protobuf/empty.proto";

import "proto/prysm/v1alpha1/attestation.proto";
import "proto/prysm/v1alpha1/beacon_state.proto";
import "proto/prysm/v1alpha1/beacon_block.proto";
import "proto/prysm/v1alpha1/sync_committee.proto";

option csharp_namespace = "Ethereum.Eth.V1alpha1";
option go_package = "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1;eth";
option java_multiple_files = true;
option java_outer_classname = "LightClient";
option java_package = "org.ethereum.eth.v1alpha1";
option php_namespace = "Ethereum\\Eth\\v1alpha1";

service LightClient {
  rpc BestUpdates(BestUpdatesRequest) returns (BestUpdatesResponse) {
    option (google.api.http) = {
      post: "/eth/v1alpha1/lightclient/best_update",
      body: "*"
    };
  }
  rpc LatestUpdateFinalized(google.protobuf.Empty) returns (LightClientUpdate) {
    option (google.api.http) = {
      get: "/eth/v1alpha1/lightclient/latest_update_finalized",
    };
  }
  rpc LatestUpdateNonFinalized(google.protobuf.Empty) returns (LightClientUpdate) {
    option (google.api.http) = {
      get: "/eth/v1alpha1/lightclient/latest_update_nonfinalized",
    };
  }
}

message BestUpdatesRequest {
  repeated uint64 sync_committee_periods = 1;
}

message BestUpdatesResponse {
  repeated LightClientUpdate updates = 1;
}

message LightClientUpdate {
  ethereum.eth.v1alpha1.BeaconBlockHeader header = 1;
  ethereum.eth.v1alpha1.SyncCommittee next_sync_committee = 2;
  repeated bytes next_sync_committee_branch = 3 [(ethereum.eth.ext.ssz_size) = "5,32"];
  ethereum.eth.v1alpha1.BeaconBlockHeader finality_header = 4;
  repeated bytes finality_branch = 5 [(ethereum.eth.ext.ssz_size) = "6,32"];
  bytes sync_committee_bits = 6 [(ethereum.eth.ext.ssz_size) = "sync_committee_aggregate_bytes.size", (ethereum.eth.ext.cast_type) = "sync_committee_aggregate_bits.type"];
  bytes sync_committee_signature = 7 [(ethereum.eth.ext.ssz_size) = "96"];
  bytes fork_version = 8 [(ethereum.eth.ext.ssz_size) = "4"];
}

message ClientSnapshot {
  ethereum.eth.v1alpha1.BeaconBlockHeader header = 1;
  ethereum.eth.v1alpha1.SyncCommittee current_sync_committee = 2;
  ethereum.eth.v1alpha1.SyncCommittee next_sync_committee = 3;
}

message SyncAttestedData {
  ethereum.eth.v1alpha1.BeaconBlockHeader header = 1;
  ethereum.eth.v1alpha1.Checkpoint finality_checkpoint = 2;
  repeated bytes finality_branch = 3 [(ethereum.eth.ext.ssz_size) = "6,32"];
  ethereum.eth.v1alpha1.SyncCommittee next_sync_committee = 4;
  repeated bytes next_sync_committee_branch = 5 [(ethereum.eth.ext.ssz_size) = "5,32"];
}

message LightClientFinalizedCheckpoint {
  ethereum.eth.v1alpha1.BeaconBlockHeader header = 1;
  ethereum.eth.v1alpha1.SyncCommittee next_sync_committee = 2;
  repeated bytes next_sync_committee_branch = 3 [(ethereum.eth.ext.ssz_size) = "5,32"];
}
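The google.api.http annotations above mean the gateway also exposes these RPCs as plain REST endpoints. As a rough illustration, the sketch below POSTs a BestUpdatesRequest as JSON; the gateway address is an assumption, and the sync committee periods are arbitrary example values (uint64 fields are typically written as JSON strings by the protobuf JSON encoding).

package main

import (
    "bytes"
    "fmt"
    "io"
    "log"
    "net/http"
)

func main() {
    // Assumes a grpc-gateway instance for this service on localhost:8080 (hypothetical).
    // BestUpdates is mapped to POST /eth/v1alpha1/lightclient/best_update with the
    // BestUpdatesRequest encoded as JSON in the request body.
    reqBody := bytes.NewBufferString(`{"sync_committee_periods": ["100", "101"]}`)
    resp, err := http.Post("http://localhost:8080/eth/v1alpha1/lightclient/best_update", "application/json", reqBody)
    if err != nil {
        log.Fatalf("request failed: %v", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        log.Fatalf("could not read body: %v", err)
    }
    // The response is a BestUpdatesResponse with one LightClientUpdate per requested period.
    fmt.Println(string(body))
}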
@@ -1,6 +1,7 @@
package assertions

import (
    "errors"
    "fmt"
    "path/filepath"
    "reflect"
@@ -86,6 +87,16 @@ func NoError(loggerFn assertionLoggerFn, err error, msg ...interface{}) {
    }
}

// ErrorIs uses errors.Is to recursively unwrap err looking for target in the chain.
// If any error in the chain matches target, the assertion will pass.
func ErrorIs(loggerFn assertionLoggerFn, err, target error, msg ...interface{}) {
    if !errors.Is(err, target) {
        errMsg := parseMsg(fmt.Sprintf("error %s not in chain", target), msg...)
        _, file, line, _ := runtime.Caller(2)
        loggerFn("%s:%d %s: %v", filepath.Base(file), line, errMsg, err)
    }
}

// ErrorContains asserts that actual error contains wanted message.
func ErrorContains(loggerFn assertionLoggerFn, want string, err error, msg ...interface{}) {
    if err == nil || !strings.Contains(err.Error(), want) {
@@ -62,12 +62,7 @@ func e2eMinimal(t *testing.T, usePrysmSh bool) {
        ev.APIGatewayV1Alpha1VerifyIntegrity,
        ev.FinishedSyncing,
        ev.AllNodesHaveSameHead,
    }
    // TODO(#9166): remove this block once v2 changes are live.
    if !usePrysmSh {
        evals = append(evals, ev.ValidatorSyncParticipation)
    } else {
        t.Log("Warning: Skipping v2 specific evaluators for prior release")
        ev.ValidatorSyncParticipation,
    }
    testConfig := &types.E2EConfig{
        BeaconFlags: []string{
@@ -71,3 +71,9 @@ func LogsDoNotContain(tb assertions.AssertionTestingTB, hook *test.Hook, want st
func NotEmpty(tb assertions.AssertionTestingTB, obj interface{}, msg ...interface{}) {
    assertions.NotEmpty(tb.Fatalf, obj, msg...)
}

// ErrorIs uses errors.Is to recursively unwrap err looking for target in the chain.
// If any error in the chain matches target, the assertion will pass.
func ErrorIs(tb assertions.AssertionTestingTB, err, target error, msg ...interface{}) {
    assertions.ErrorIs(tb.Fatalf, err, target, msg...)
}
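The new ErrorIs helper is called like the other require-style wrappers: pass the test's *testing.T, the error under inspection, and the sentinel error expected somewhere in its wrap chain. A hedged sketch follows; the package path github.com/prysmaticlabs/prysm/testing/require is assumed from the surrounding tree, and the sentinel error and lookup function are made up for illustration.

package mypkg_test

import (
    "errors"
    "fmt"
    "testing"

    "github.com/prysmaticlabs/prysm/testing/require"
)

// errNotFound and lookup are placeholders standing in for real package code.
var errNotFound = errors.New("not found")

func lookup(key string) error {
    return fmt.Errorf("looking up %q: %w", key, errNotFound)
}

func TestLookup_ReturnsNotFound(t *testing.T) {
    err := lookup("missing")
    // Passes because errors.Is unwraps the %w chain and finds errNotFound.
    require.ErrorIs(t, err, errNotFound)
}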
@@ -5,6 +5,7 @@ go_test(
    size = "small",
    srcs = [
        "effective_balance_updates_test.go",
        "epoch_processing_test.go",
        "eth1_data_reset_test.go",
        "historical_roots_update_test.go",
        "inactivity_updates_test.go",
@@ -21,5 +22,8 @@ go_test(
    ],
    shard_count = 4,
    tags = ["spectest"],
    deps = ["//testing/spectest/shared/altair/epoch_processing:go_default_library"],
    deps = [
        "//config/features:go_default_library",
        "//testing/spectest/shared/altair/epoch_processing:go_default_library",
    ],
)
Some files were not shown because too many files have changed in this diff.