Compare commits


55 Commits

Author SHA1 Message Date
Raul Jordan
969675d172 Merge branch 'develop' into e2e-configs 2021-12-02 10:00:20 -05:00
Raul Jordan
d3c97da4e1 Ensure Slashing Protection Exports and Keymanager API Work According to Spec (#9938)
* password compliance

* delete keys tests

* changes to slashing protection exports

* export tests pass

* fix up failures

* gaz

* table driven tests for delete keystores

* comment

* rem deletion logic

* look ma, no db

* fix up tests

* ineff

* gaz

* broken test fix

* Update validator/keymanager/imported/delete.go

* rem
2021-12-02 09:58:49 -05:00
Raul Jordan
1d216a8737 Filter Errored Keys from Returned Slashing Protection History in Standard API (#9968)
* add err condition

* naming
2021-12-02 03:32:34 +00:00
Raul Jordan
790bf03123 Replace a Few IntFlags with Uint64Flags (#9959)
* use uints instead of ints

* fix method

* fix

* fix

* builds

* deepsource

* deep source
2021-12-01 23:34:53 +00:00
Preston Van Loon
ab60b1c7b2 Update go-ethereum to v1.10.13 (#9967)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 19:31:21 +00:00
Nishant Das
236a5c4167 Cleanup From Deepsource (#9961)
* ds cleanup

* fix

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 18:56:07 +00:00
Nishant Das
5e2229ce9d Update Libp2p to v0.15.1 (#9960)
* fix deps

* tidy it all

* fix build

* remove tls patch

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 18:09:34 +00:00
Potuz
6ffba5c769 Add v1alpha1_to_v2.go (#9966)
* Add v1alpha1_to_v2.go

* add tests

* gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 17:39:43 +00:00
Potuz
3e61763bd7 fix operation precedence (#9965) 2021-12-01 17:14:08 +00:00
terence tsao
23bdce2354 Fix grpc client connected... logging (#9956)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-12-01 14:45:39 +00:00
Nishant Das
d94bf32dcf Faster Doppelganger Check (#9964)
* faster check

* potuz's review

* potuz's review
2021-12-01 12:37:10 +00:00
Nishant Das
7cbef104b0 Remove Balances Timeout (#9957)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-12-01 04:03:26 +00:00
Potuz
cd6d0d9cf1 Monitor aggregated logs (#9943) 2021-12-01 03:35:55 +00:00
Potuz
afbe02697d Monitor service (#9933)
* Add a service for the monitor

* Do not block service start

* gaz

* move channel subscription outside go routine

* add service start test

* fix panic on node tests

* Radek's first pass

* Radek's take 2

* uncap error messages

* revert reversal

* Terence take 1

* gaz

* Missing locks found by Terence

* Track via bool not empty interface

* Add tests for every function

* fix allocation of slice

* Minor cleanups

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-30 19:27:03 -03:00
terence tsao
2c921ec628 Update spec tests to v1.1.6 (#9955)
* Update spec test to v1.1.6

* Update spec test to v1.1.6
2021-11-30 21:21:59 +00:00
terence tsao
0e72938914 Uncap error messages (#9952) 2021-11-30 07:41:07 -08:00
Potuz
71d55d1cff Check for syncstatus before performing a voluntary exit (#9951)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-11-30 10:40:59 +00:00
terence tsao
d8aa0f8827 Alter config field name to devnet if it's not populated in file (#9949) 2021-11-29 19:27:26 -08:00
Nishant Das
37bc407b56 Refactor States To Allow for Single Cached Hasher (#9922)
* initial changes

* gaz

* unexport and add in godoc

* nocache

* fix edge case

* fix bad implementation

* fix build file

* add it in

* terence's review

* gaz

* fix build

* Apply suggestions from code review

remove assigned ctx

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-29 16:30:17 +00:00
Potuz
5983d0a397 Allow requests for next sync committee (#9945)
* Allow requests for next sync committee

* fix deepsource and variable rename

* Minor cleanup

* Potuz's comments

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-28 17:34:24 +00:00
terence tsao
85faecf2ca Add test utility merge state (#9944)
* Add test utility merge state

* gaz

* gaz
2021-11-26 15:53:25 +00:00
terence tsao
f42227aa04 Rest of the merge state implementation (#9939)
* Add rest of the state implementations

* Update BUILD.bazel

* Update state_trie_test.go

* fix test

* fix test

* Update beacon-chain/state/v3/state_trie.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* Update beacon-chain/state/v3/state_trie.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* add ctx

* go fmt

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2021-11-25 17:41:05 -03:00
terence tsao
c9d5b4ba0e Add merge beacon block wrappers (#9906) 2021-11-24 14:26:17 -08:00
Potuz
fed004686b Add verbosity to aggregation logs (#9937) 2021-11-24 11:35:45 -08:00
Raul Jordan
4ae7513835 Import Keystores Standard API Implementation (#9924)
* begin

* rem deleted code

* delete keystores all tests

* surface errors to user

* add in changes

* del

* tests

* slice

* begin import process

* add import keystores logic

* unit tests for import

* tests for all import keystores keymanager issues

* change proto

* pbs

* renaming works

* use proper request

* pb

* comment

* gaz

* fix up cli cmd

* test

* add gw

* precond

* tests

* radek comments
2021-11-24 10:40:49 -05:00
Nishant Das
1d53fd2fd3 revert change (#9931) 2021-11-24 07:09:15 -08:00
Potuz
a2c1185032 Monitor sync committee (#9923)
* Add sync committeee contributions to monitor

* gaz

* Raul's review

* Added lock around TrackedValidators

* add comment to trackedIndex

* add missing locks because of trackedIndex

* Terence fixes 2

* moved TrackedValidator to service from config

* Terence comment fix

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-11-24 09:56:34 +08:00
terence tsao
448d62d6e3 Add merge beacon chain objects and generate ssz.go (#9929) 2021-11-23 23:34:31 +00:00
terence tsao
4858de7875 Use prysmaticlabs/fastssz (#9928)
* Use prysmaticlabs/fastssz

* Generated code
2021-11-23 21:28:24 +00:00
terence tsao
cd1e3f2b3e Rename coinbase to fee recipient (#9918)
* Rename coinbase to fee recipient

* Fix imports

* Update field name

* Fee recipient

* Fix goimports

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-11-23 17:49:06 +00:00
Nishant Das
6f20d17d15 Rename To Signature Batch (#9926)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-11-23 17:57:06 +01:00
terence tsao
94fd99f5cd Add getters and setters for beacon state v3 (part 2) (#9916) 2021-11-22 15:56:23 -08:00
Potuz
d4a420ddfd Monitor blocks (#9910)
* Add proposer logging to validator monitor

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-22 18:20:31 -03:00
terence tsao
838b19e985 Add getters and setters for beacon state v3 (part 1) (#9915) 2021-11-22 09:37:55 -08:00
Potuz
788338a004 Stop packing deposits early if we reach max allowed (#9806)
* Stop packing deposits early if we reach max allowed

* Add logs to proposals without deposits

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go

* reinsert debug log

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2021-11-20 14:14:07 +00:00
kasey
39c33b82ad Switch to lazy state balance cache (#9822)
* quick lazy balance cache proof of concept

* WIP refactoring to use lazy cache

* updating tests to use functional opts

* updating the rest of the tests, all passing

* use mock stategen where possible

reduces the number of test cases that require db setup

* rename test opt method for clear link

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* test assumption that zerohash is in db

* remove unused MockDB (mocking stategen instead)

* fix cache bug, switch to sync.Mutex

* improve test coverage for the state cache

* uncomment failing genesis test for discussion

* gofmt

* remove unused Service struct member

* cleanup unused func input

* combining type declaration in signature

* don't export the state cache constructor

* work around blockchain deps w/ new file

service_test brings in a ton of dependencies that make bazel rules
for blockchain complex, so just sticking these mocks in their own
file simplifies things.

* gofmt

* remove intentionally failing test

this test established that the zero root can't be used to look up the
state, resulting in a change in another PR to update stategen to use the
GenesisState db method instead when the zero root is detected.

* fixed error introduced by develop refresh

* fix import ordering

* appease deepsource

* remove unused function

* godoc comments on new requires/assert

* defensive constructor per terence's PR comment

* more differentiated balance cache metric names

Co-authored-by: kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-11-19 15:59:26 +00:00
Potuz
905e0f4c1c Monitor metrics (#9921)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-11-19 15:34:28 +00:00
Nishant Das
0ade1f121d Add Balance Field Trie (#9793)
* save stuff

* fix in v1

* clean up more

* fix bugs

* add comments and clean up

* add flag + test

* add tests

* fmt

* radek's review

* gaz

* kasey's review

* gaz and new conditional

* improve naming
2021-11-19 20:01:15 +08:00
Raul Jordan
ee52f8dff3 Implement Validator Standard Key Manager API Delete Keystores (#9886)
* begin

* implement delete and filter export history

* rem deleted code

* delete keystores all tests

* gaz

* test

* double import fix

* test

* surface errors to user

* add in changes

* edit proto

* edit

* del

* tests

* gaz

* slice

* duplicate key found in request
2021-11-19 04:11:54 +00:00
Potuz
50159c2e48 Monitor attestations (#9901)
Log attestation performance on the validator monitor
2021-11-18 22:14:56 -03:00
Preston Van Loon
cae58bbbd8 Unskip v2 end to end check for prior release (#9920) 2021-11-18 23:17:19 +00:00
Raul Jordan
9b37418761 Warn Users In Case Slashing Protection Exports are Empty (#9919)
* export text

* Update cmd/validator/slashing-protection/export.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2021-11-18 20:49:19 +00:00
Raul Jordan
a78cdf86cc Warn Users if Slashing Protection History is Empty (#9909)
* warning in case not found

* better comment

* fatal
2021-11-17 15:39:43 +00:00
Nishant Das
1c4ea75a18 Prevent Reprocessing of a Block From Our Pending Queue (#9904)
* fix bugs

* test

* raul's review
2021-11-17 10:05:50 -05:00
terence tsao
6f4c80531c Add field roots for beacon state v3 (#9914)
* Add field roots for beacon state

* Update BUILD.bazel

* Adding an exception for state v3

* fix deadcode

Co-authored-by: nisdas <nishdas93@gmail.com>
2021-11-17 09:04:49 +00:00
terence tsao
e3246922eb Add merge state type definitions (#9908) 2021-11-16 08:36:13 -08:00
Nishant Das
5962363847 Add in Deposit Map To Cache (#9885)
* add in map + tests

* test

* raul's review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-16 02:07:42 +00:00
Potuz
5e8cf9cd28 Monitor exits (#9899)
* Validator monitor process slashings

* Preston's comment

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* add test for tracked index

* Prestons requested changes

* Process voluntary exits in validator monitor

* Address Reviewers' comments

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-15 19:45:18 +00:00
Raul Jordan
b5ca09bce6 Add Network Flags to Slashing Protection Export Command (#9907) 2021-11-15 19:21:54 +00:00
terence tsao
720ee3f2a4 Add merge state protobuf (#9888)
* Add beacon state protobuf

* Update proto/prysm/v1alpha1/beacon_state.proto

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update proto/prysm/v1alpha1/beacon_state.proto

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Regenerate pbs

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-11-15 18:53:43 +00:00
Potuz
3d139d35f6 Validator monitor process slashings (#9898)
* Validator monitor process slashings

* Preston's comment

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* add test for tracked index

* Prestons requested changes

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-11-15 17:39:55 +00:00
terence tsao
ea38969af2 Register start-up state when interop mode (#9900)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-11-15 16:32:54 +00:00
terence tsao
f753ce81cc Add merge block protobuf (#9887)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-11-15 15:42:18 +00:00
Nishant Das
9d678b0c47 Clean Up Methods In Prysm (#9903)
* clean up

* go simple
2021-11-15 10:13:52 -05:00
terence tsao
1410ee7e7d Update testnet_e2e_config.go 2021-09-30 11:32:54 -07:00
304 changed files with 15423 additions and 3765 deletions

View File

@@ -225,7 +225,7 @@ filegroup(
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.1.5"
consensus_spec_version = "v1.1.6"
bls_test_version = "v0.1.1"
@@ -241,7 +241,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "a7d7173d953494c0dfde432c9fc064c25d46d666b024749b3474ae0cdfc50050",
sha256 = "58dbf798e86017b5561af38f2217b99e9fa5b6be0e928b4c73dad6040bb94d65",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -257,7 +257,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "f86872061588c0197516b23025d39e9365b4716c112218a618739dc0d6f4666a",
sha256 = "5be19f7fca9733686ca25dad5ae306327e98830ef6354549d1ddfc56c10e0e9a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -273,7 +273,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "7a06975360fd37fbb4694d0e06abb78d2a0835146c1d9b26d33569edff8b98f0",
sha256 = "cc110528fcf7ede049e6a05788c77f4a865c3110b49508149d61bb2a992bb896",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -288,7 +288,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "87d8089200163340484d61212fbdffbb5d9d03e1244622761dcb91e641a65761",
sha256 = "c318d7b909ab39db9cc861f645ddd364e7475a4a3425bb702ab407fad3807acd",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
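
The spec-test bump above is mechanical: raise consensus_spec_version, then replace each archive's sha256 with the digest of the matching new tarball. A minimal standalone Go sketch for computing those digests before editing the WORKSPACE rules (a hypothetical helper, not part of this change):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
	"os"
)

// digest downloads one release artifact and prints its sha256 in the
// hex form the filegroup rules above expect.
func digest(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	h := sha256.New()
	if _, err := io.Copy(h, resp.Body); err != nil {
		return err
	}
	fmt.Printf("%x  %s\n", h.Sum(nil), url)
	return nil
}

func main() {
	version := "v1.1.6"
	for _, name := range []string{"general", "minimal", "mainnet"} {
		url := fmt.Sprintf("https://github.com/ethereum/consensus-spec-tests/releases/download/%s/%s.tar.gz", version, name)
		if err := digest(url); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}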

View File

@@ -18,6 +18,7 @@ go_library(
"receive_attestation.go",
"receive_block.go",
"service.go",
"state_balance_cache.go",
"weak_subjectivity_checks.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
@@ -95,6 +96,7 @@ go_test(
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",
@@ -141,6 +143,7 @@ go_test(
"chain_info_norace_test.go",
"checktags_test.go",
"init_test.go",
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
],

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
@@ -42,9 +41,6 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
// ensure head gets its best justified info.
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = s.bestJustifiedCheckpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
// Get head from the fork choice service.
@@ -273,57 +269,6 @@ func (s *Service) hasHeadState() bool {
return s.head != nil && s.head.state != nil
}
// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
var justifiedState state.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
if err != nil {
return err
}
} else {
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return err
}
}
if justifiedState == nil || justifiedState.IsNil() {
return errors.New("justified state can't be nil")
}
epoch := time.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}); err != nil {
return err
}
s.justifiedBalancesLock.Lock()
defer s.justifiedBalancesLock.Unlock()
s.justifiedBalances = justifiedBalances
return nil
}
func (s *Service) getJustifiedBalances() []uint64 {
s.justifiedBalancesLock.RLock()
defer s.justifiedBalancesLock.RUnlock()
return s.justifiedBalances
}
// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(

View File

@@ -123,13 +123,15 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
ctx := context.Background()
state, _ := util.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
balances, err := service.justifiedBalances.get(ctx, r)
require.NoError(t, err)
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
}
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {

View File

@@ -25,18 +25,18 @@ func TestService_TreeHandler(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
),
StateGen: stategen.New(beaconDB),
fcs := protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
s, err := NewService(ctx)
s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)

View File

@@ -130,6 +130,14 @@ var (
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_hit",
Help: "Count the number of state balance cache hits.",
})
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
)
// reportSlotMetrics reports slot related metrics.

View File

@@ -0,0 +1,50 @@
package blockchain
import (
"context"
"errors"
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
}
// warning: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
return []Option{
withStateBalanceCache(satisfactoryStateBalanceCache()),
}
}
type mockStateByRooter struct {
state state.BeaconState
err error
}
var _ stateByRooter = &mockStateByRooter{}
func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
// returns an instance of the state balance cache that can be used
// to satisfy the requirement for one in NewService, but which will
// always return an error if used.
func satisfactoryStateBalanceCache() *stateBalanceCache {
err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
}
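
Together with the hit/miss counters added earlier in this diff, these mocks make the new balance cache easy to exercise without a database. A sketch of such a test, assuming util.DeterministicGenesisState from the repo's testing utilities and the prometheus testutil helper (illustrative only, not one of the tests in this change):

package blockchain

import (
	"context"
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prysmaticlabs/prysm/testing/util"
)

// Sketch: the first read of a root should miss and fill the cache; a
// second read of the same root should hit without touching stategen.
func TestStateBalanceCache_MissThenHit(t *testing.T) {
	ctx := context.Background()
	st, _ := util.DeterministicGenesisState(t, 4)
	cache := &stateBalanceCache{stateGen: mockStateByRooter{state: st}}

	misses := testutil.ToFloat64(stateBalanceCacheMiss)
	if _, err := cache.get(ctx, [32]byte{'a'}); err != nil {
		t.Fatal(err)
	}
	if got := testutil.ToFloat64(stateBalanceCacheMiss); got != misses+1 {
		t.Errorf("wanted one cache miss, got %v", got-misses)
	}

	hits := testutil.ToFloat64(stateBalanceCacheHit)
	if _, err := cache.get(ctx, [32]byte{'a'}); err != nil {
		t.Fatal(err)
	}
	if got := testutil.ToFloat64(stateBalanceCacheHit); got != hits+1 {
		t.Errorf("wanted one cache hit, got %v", got-hits)
	}
}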

View File

@@ -130,6 +130,13 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
}
}
func withStateBalanceCache(c *stateBalanceCache) Option {
return func(s *Service) error {
s.justifiedBalances = c
return nil
}
}
// WithFinalizedStateAtStartUp to store finalized state at start up.
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return func(s *Service) error {
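
Most of the test churn in this change is the same mechanical move: instead of constructing a service and then overwriting service.cfg by hand, tests pass functional options into NewService. A condensed sketch of how the Option type, an option constructor, and the constructor's defaulting step fit together (simplified from the option and NewService diffs in this change):

package blockchain

import "context"

// Option mutates the service during construction. NewService applies
// each option in order and fails fast on the first error.
type Option func(s *Service) error

// withStateBalanceCache stays unexported: tests inject a cache, while
// production callers let NewService build one from stategen.
func withStateBalanceCache(c *stateBalanceCache) Option {
	return func(s *Service) error {
		s.justifiedBalances = c
		return nil
	}
}

func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	srv := &Service{cfg: &config{}}
	for _, o := range opts {
		if err := o(srv); err != nil {
			return nil, err
		}
	}
	// Default anything no option supplied, as the service constructor
	// below does for the justified balance cache.
	if srv.justifiedBalances == nil {
		var err error
		srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
		if err != nil {
			return nil, err
		}
	}
	return srv, nil
}

Tests that must avoid all database access use testServiceOptsNoDB() from the mock file above, which satisfies the cache requirement with an instance that errors if it is ever read.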

View File

@@ -24,14 +24,13 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
@@ -131,14 +130,14 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -157,13 +156,12 @@ func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -230,13 +228,12 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
epoch := types.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
@@ -268,12 +265,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)}))
@@ -281,12 +276,10 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
@@ -294,12 +287,10 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -308,12 +299,9 @@ func TestAttEpoch_NotMatch(t *testing.T) {
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
d := util.HydrateAttestationData(&ethpb.AttestationData{})
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
@@ -321,12 +309,10 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b := util.NewBeaconBlock()
b.Block.Slot = 2
@@ -340,12 +326,10 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b := util.NewBeaconBlock()
b.Block.Slot = 2
@@ -361,10 +345,14 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -387,12 +375,10 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -415,12 +401,10 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32

View File

@@ -151,7 +151,12 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
s.finalizedCheckpt = postState.FinalizedCheckpoint()
}
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", s.justifiedCheckpt.Root)
return errors.Wrap(err, msg)
}
if err := s.updateHead(ctx, balances); err != nil {
log.WithError(err).Warn("Could not update head")
}
@@ -255,12 +260,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
sigSet := &bls.SignatureSet{
sigSet := &bls.SignatureBatch{
Signatures: [][]byte{},
PublicKeys: []bls.PublicKey{},
Messages: [][32]byte{},
}
var set *bls.SignatureSet
var set *bls.SignatureBatch
boundaries := make(map[[32]byte]state.BeaconState)
for i, b := range blks {
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
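
The SignatureSet-to-SignatureBatch rename (#9926) does not change the underlying idea: each block's transition returns its (signature, message, public key) triples, onBlockBatch appends them into one batch, and a single multi-signature check covers the whole range of blocks. A hedged sketch of that accumulation, assuming bls.VerifyMultipleSignatures is the batch-verify entry point in Prysm's bls package:

package blockchain

import (
	"errors"

	"github.com/prysmaticlabs/prysm/crypto/bls"
)

// verifySignatureBatches joins per-block batches and verifies them in
// one shot rather than once per block.
func verifySignatureBatches(perBlock []*bls.SignatureBatch) error {
	joined := &bls.SignatureBatch{
		Signatures: [][]byte{},
		PublicKeys: []bls.PublicKey{},
		Messages:   [][32]byte{},
	}
	for _, set := range perBlock {
		joined.Signatures = append(joined.Signatures, set.Signatures...)
		joined.PublicKeys = append(joined.PublicKeys, set.PublicKeys...)
		joined.Messages = append(joined.Messages, set.Messages...)
	}
	verified, err := bls.VerifyMultipleSignatures(joined.Signatures, joined.Messages, joined.PublicKeys)
	if err != nil {
		return err
	}
	if !verified {
		return errors.New("batch signature verification failed")
	}
	return nil
}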

View File

@@ -193,9 +193,6 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
if canUpdate {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
return nil
@@ -206,12 +203,13 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
// This method does not have defense against fork choice bouncing attack, which is why it's only recommended for use during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
s.prevJustifiedCheckpt = s.justifiedCheckpt
s.justifiedCheckpt = cp
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
s.justifiedCheckpt = cp
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
return nil
}
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -330,7 +328,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
// we don't need to check if the previous justified checkpoint was an ancestor since the new
// finalized checkpoint is overriding it.
return nil
}
// Update justified if store justified is not in chain with finalized check point.
@@ -345,9 +345,6 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.
}
if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
}
return nil

View File

@@ -35,16 +35,17 @@ import (
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
@@ -134,13 +135,12 @@ func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -186,14 +186,12 @@ func TestStore_OnBlockBatch(t *testing.T) {
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.genesisTime = time.Now()
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: make([]byte, 32)})
@@ -221,16 +219,13 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
params.UseMinimalConfig()
defer params.UseMainnetConfig()
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -255,13 +250,12 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
@@ -289,13 +283,12 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -327,10 +320,13 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
signedBlock := util.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
@@ -361,10 +357,12 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
@@ -400,10 +398,12 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}
@@ -442,10 +442,12 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}
@@ -586,7 +588,8 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
cancel()
@@ -598,10 +601,14 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -642,10 +649,9 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
func TestAncestor_CanUseForkchoice(t *testing.T) {
ctx := context.Background()
cfg := &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -682,10 +688,14 @@ func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -720,10 +730,9 @@ func TestAncestor_CanUseDB(t *testing.T) {
func TestEnsureRootNotZeroHashes(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
service.genesisRoot = [32]byte{'a'}
r := service.ensureRootNotZeros(params.BeaconConfig().ZeroHash)
@@ -735,6 +744,12 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
ctx := context.Background()
type args struct {
cachedCheckPoint *ethpb.Checkpoint
@@ -776,9 +791,8 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.justifiedCheckpt = test.args.cachedCheckPoint
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := util.NewBeaconState()
@@ -815,6 +829,12 @@ func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
b := util.NewBeaconBlock()
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
@@ -869,9 +889,8 @@ func TestVerifyBlkDescendant(t *testing.T) {
},
}
for _, tt := range tests {
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: tt.args.finalizedRoot[:],
}
@@ -885,12 +904,10 @@ func TestVerifyBlkDescendant(t *testing.T) {
}
func TestUpdateJustifiedInitSync(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
cfg := &config{BeaconDB: beaconDB}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
gBlk := util.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
@@ -916,10 +933,9 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -931,10 +947,9 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
cfg := &config{}
service, err := NewService(ctx)
opts := testServiceOptsNoDB()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
@@ -946,18 +961,18 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
StateNotifier: &mock.MockStateNotifier{},
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -991,18 +1006,12 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
}
service, err := NewService(ctx)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

View File

@@ -131,7 +131,13 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru
continue
}
s.processAttestations(s.ctx)
if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {
balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if err != nil {
log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err)
continue
}
if err := s.updateHead(s.ctx, balances); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
}
}

View File

@@ -9,9 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -42,12 +40,10 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -71,12 +67,10 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx)
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg = cfg
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -101,18 +95,12 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
func TestProcessAttestations_Ok(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()))
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
AttPool: attestations.NewPool(),
}
service, err := NewService(ctx)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.cfg = cfg
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))

View File

@@ -124,21 +124,16 @@ func TestService_ReceiveBlock(t *testing.T) {
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx)
s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -166,21 +161,17 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
AttPool: attestations.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx)
s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
@@ -250,19 +241,14 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
cfg := &config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}
s, err := NewService(ctx)
s, err := NewService(ctx, opts...)
require.NoError(t, err)
s.cfg = cfg
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
@@ -287,9 +273,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
func TestService_HasInitSyncBlock(t *testing.T) {
s, err := NewService(context.Background())
opts := testServiceOptsNoDB()
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}}
r := [32]byte{'a'}
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
@@ -301,11 +288,10 @@ func TestService_HasInitSyncBlock(t *testing.T) {
}
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
beaconDB := testDB.SetupDB(t)
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.finalizedCheckpt = &ethpb.Checkpoint{}
@@ -315,11 +301,10 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
}
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -329,11 +314,10 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
}
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
beaconDB := testDB.SetupDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background())
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.cfg = &config{StateGen: stategen.New(beaconDB)}
s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
s.genesisTime = time.Now()

View File

@@ -62,9 +62,9 @@ type Service struct {
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]block.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
//justifiedBalances []uint64
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
}
// config options for the service.
@@ -97,7 +97,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
cfg: &config{},
}
for _, opt := range opts {
@@ -106,6 +105,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}
var err error
if srv.justifiedBalances == nil {
srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
if err != nil {
return nil, err
}
}
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err
@@ -151,9 +156,6 @@ func (s *Service) Start() {
// Resume fork choice.
s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
log.Fatalf("Could not cache justified state balances: %v", err)
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
@@ -340,9 +342,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
return err
}
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)

View File

@@ -108,25 +108,24 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
DepositCache: depositCache,
ChainStartFetcher: web3Service,
P2p: &mockBroadcaster{},
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
AttService: attService,
stateGen := stategen.New(beaconDB)
// Save a state in stategen for purposes of testing a service stop / shutdown.
require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
opts := []Option{
WithDatabase(beaconDB),
WithDepositCache(depositCache),
WithChainStartFetcher(web3Service),
WithAttestationPool(attestations.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
WithAttestationService(attService),
WithStateGen(stateGen),
}
// Save a state in stategen for purposes of testing a service stop / shutdown.
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
chainService, err := NewService(ctx)
chainService, err := NewService(ctx, opts...)
require.NoError(t, err, "Unable to setup chain service")
chainService.cfg = cfg
chainService.genesisTime = time.Unix(1, 0) // non-zero time
return chainService

View File

@@ -0,0 +1,82 @@
package blockchain
import (
"context"
"errors"
"sync"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
)
var errNilStateFromStategen = errors.New("justified state can't be nil")
type stateBalanceCache struct {
sync.Mutex
balances []uint64
root [32]byte
stateGen stateByRooter
}
type stateByRooter interface {
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}
// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
if sg == nil {
return nil, errors.New("Can't initialize state balance cache without stategen")
}
return &stateBalanceCache{stateGen: sg}, nil
}
// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return nil, err
}
if justifiedState == nil || justifiedState.IsNil() {
return nil, errNilStateFromStategen
}
epoch := time.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}
if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
return nil, err
}
c.balances = justifiedBalances
c.root = justifiedRoot
return c.balances, nil
}
// getBalances takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
c.Lock()
defer c.Unlock()
if justifiedRoot == c.root {
stateBalanceCacheHit.Inc()
return c.balances, nil
}
return c.update(ctx, justifiedRoot)
}
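A minimal usage sketch, assuming a *stategen.State named sg and a justified root in scope:
cache, err := newStateBalanceCache(sg)
if err != nil {
return err
}
// The first call misses and loads the justified state through stategen;
// repeated calls with the same root return the memoized balances.
balances, err := cache.get(ctx, justifiedRoot)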

View File

@@ -0,0 +1,225 @@
package blockchain
import (
"context"
"encoding/binary"
"errors"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/time/slots"
)
type mockStateByRooter struct {
state state.BeaconState
err error
}
func (m *mockStateByRooter) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
type testStateOpt func(*ethpb.BeaconStateAltair)
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Validators = v
}
}
func testStateWithSlot(slot types.Slot) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Slot = slot
}
}
func testStateFixture(opts ...testStateOpt) state.BeaconState {
a := &ethpb.BeaconStateAltair{}
for _, o := range opts {
o(a)
}
s, _ := v2.InitializeFromProtoUnsafe(a)
return s
}
func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
vs := make([]*ethpb.Validator, count)
var i uint32 = 0
for ; i < uint32(count); i++ {
pk := make([]byte, 48)
binary.LittleEndian.PutUint32(pk, i)
v := &ethpb.Validator{PublicKey: pk}
for _, o := range opts {
o(v)
}
vs[i] = v
}
return vs
}
func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
} else {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
}
}
}
func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
} else {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}
}
func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}
func balanceIsKeyTimes2(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
v.EffectiveBalance = uint64(pki) * 2
}
func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsExpired(types.Slot(99)),
balanceIsKeyTimes2), balances
}
func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsQueued(types.Slot(99)),
balanceIsKeyTimes2), balances
}
func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
return generateTestValidators(10,
allValidatorsValid(types.Slot(99)),
balanceIsKeyTimes2), balances
}
func TestStateBalanceCache(t *testing.T) {
type sbcTestCase struct {
err error
root [32]byte
sbc *stateBalanceCache
balances []uint64
name string
}
sentinelCacheMiss := errors.New("Cache missed, as expected!")
sentinelBalances := []uint64{1, 2, 3, 4, 5}
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
allValidValidators, allValidBalances := testAllValidValidators()
cases := []sbcTestCase{
{
root: bytesutil.ToBytes32([]byte{'A'}),
balances: sentinelBalances,
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
err: sentinelCacheMiss,
},
root: bytesutil.ToBytes32([]byte{'A'}),
balances: sentinelBalances,
},
name: "cache hit",
},
// this works by using a stateByRooter that returns a known error,
// so really we're testing the miss by making sure stategen got called.
// this also tells us stategen errors are propagated.
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
//state: generateTestValidators(1, testWithBadEpoch),
err: sentinelCacheMiss,
},
root: bytesutil.ToBytes32([]byte{'B'}),
},
err: sentinelCacheMiss,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "cache miss",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{},
root: bytesutil.ToBytes32([]byte{'B'}),
},
err: errNilStateFromStategen,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "error for nil state upon cache miss",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(halfExpiredValidators)),
},
},
balances: halfExpiredBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "test filtering by exit epoch",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(halfQueuedValidators)),
},
},
balances: halfQueuedBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "test filtering by activation epoch",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(allValidValidators)),
},
},
balances: allValidBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "happy path",
},
}
ctx := context.Background()
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cache := c.sbc
cacheRootStart := cache.root
b, err := cache.get(ctx, c.root)
require.ErrorIs(t, err, c.err)
require.DeepEqual(t, c.balances, b)
if c.err != nil {
// if there was an error somewhere, the root should not have changed (unless it already matched)
require.Equal(t, cacheRootStart, cache.root)
} else {
// when successful, the cache should always end with a root matching the request
require.Equal(t, c.root, cache.root)
}
})
}
}

View File

@@ -5,7 +5,6 @@
package depositcache
import (
"bytes"
"context"
"encoding/hex"
"math/big"
@@ -54,6 +53,7 @@ type DepositCache struct {
pendingDeposits []*dbpb.DepositContainer
deposits []*dbpb.DepositContainer
finalizedDeposits *FinalizedDeposits
depositsByKey map[[48]byte][]*dbpb.DepositContainer
depositsLock sync.RWMutex
}
@@ -69,6 +69,7 @@ func New() (*DepositCache, error) {
return &DepositCache{
pendingDeposits: []*dbpb.DepositContainer{},
deposits: []*dbpb.DepositContainer{},
depositsByKey: map[[48]byte][]*ethpb.DepositContainer{},
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
}, nil
}
@@ -95,10 +96,15 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
}
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
depCtr := &dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
newDeposits := append(
[]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}},
[]*dbpb.DepositContainer{depCtr},
dc.deposits[heightIdx:]...)
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
// Append the deposit to our map. In the event no deposits
// exist for the pubkey, it is simply added to the map.
pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
historicalDepositsCount.Inc()
return nil
}
@@ -112,6 +118,13 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbp
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
dc.deposits = ctrs
for _, c := range ctrs {
// Use a new variable, as the loop variable
// c is reused in the next iteration.
newPtr := c
pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
}
historicalDepositsCount.Add(float64(len(ctrs)))
}
@@ -202,13 +215,15 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
var deposit *ethpb.Deposit
var blockNum *big.Int
for _, ctnr := range dc.deposits {
if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
deposit = ctnr.Deposit
blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
break
}
deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
if !ok || len(deps) == 0 {
return deposit, blockNum
}
// We always return the first deposit if a particular
// validator key has multiple deposits assigned to
// it.
deposit = deps[0].Deposit
blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
return deposit, blockNum
}
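A short usage sketch of the map-backed lookup (pk is a hypothetical 48-byte validator key):
var pk [48]byte
dep, blkNum := dc.DepositByPubkey(ctx, pk[:])
if dep == nil {
// No deposit is known for this key; blkNum is nil as well.
}
// The lookup is now O(1) through depositsByKey rather than a linear scan over dc.deposits.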

View File

@@ -44,31 +44,31 @@ func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{},
deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'A'}}},
index: 0,
expectedErr: "",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'B'}}},
index: 3,
expectedErr: "wanted deposit with index 1 to be inserted but received 3",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'C'}}},
index: 1,
expectedErr: "",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'D'}}},
index: 4,
expectedErr: "wanted deposit with index 2 to be inserted but received 4",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
deposit: &ethpb.Deposit{Data: &dbpb.Deposit_Data{PublicKey: []byte{'E'}}},
index: 2,
expectedErr: "",
},
@@ -316,8 +316,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
dc, err := New()
require.NoError(t, err)
dc.deposits = []*dbpb.DepositContainer{
ctrs := []*dbpb.DepositContainer{
{
Eth1BlockHeight: 9,
Deposit: &ethpb.Deposit{
@@ -359,6 +358,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
},
},
}
dc.InsertDepositContainers(context.Background(), ctrs)
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
dep, blkNum := dc.DepositByPubkey(context.Background(), pk1)
@@ -626,24 +626,28 @@ func TestPruneProofs_Ok(t *testing.T) {
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 0,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 1,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
index: 3,
},
}
@@ -669,24 +673,26 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: nil},
index: 0,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: nil},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
deposit: &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
index: 3,
},
}
@@ -709,24 +715,28 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 0,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 1,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
index: 3,
},
}
@@ -752,24 +762,28 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 0,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 1,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof(),
Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
index: 3,
},
}
@@ -785,6 +799,36 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}
func TestDepositMap_WorksCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)
pk0 := bytesutil.PadTo([]byte("pk0"), 48)
dep, _ := dc.DepositByPubkey(context.Background(), pk0)
var nilDep *ethpb.Deposit
assert.DeepEqual(t, nilDep, dep)
dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 1000}}
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{}))
dep, _ = dc.DepositByPubkey(context.Background(), pk0)
assert.NotEqual(t, nilDep, dep)
assert.Equal(t, uint64(1000), dep.Data.Amount)
dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 10000}}
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{}))
// Make sure we have the same deposit returned over here.
dep, _ = dc.DepositByPubkey(context.Background(), pk0)
assert.NotEqual(t, nilDep, dep)
assert.Equal(t, uint64(1000), dep.Data.Amount)
// Make sure another key doesn't work.
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
dep, _ = dc.DepositByPubkey(context.Background(), pk1)
assert.DeepEqual(t, nilDep, dep)
}
func makeDepositProof() [][]byte {
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
for i := range proof {

View File

@@ -120,26 +120,22 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
// MarkNotInProgress will release the lock on a given request. This should be
// called after Put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
if c.disabled {
return nil
return
}
c.lock.Lock()
defer c.lock.Unlock()
delete(c.inProgress, r)
return nil
}
// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) {
if c.disabled {
return nil
return
}
// Copy state so cached value is not mutated.
c.cache.Add(r, state.Copy())
return nil
}
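A hedged sketch of the intended call pattern after this change, assuming a cache c, a key r and a freshly computed state st (MarkInProgress keeps its error return):
if err := c.MarkInProgress(r); err != nil {
return nil, err
}
defer c.MarkNotInProgress(r) // no error left to handle
// ... advance the state ...
c.Put(ctx, r, st) // fire and forget: Put can no longer fail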

View File

@@ -28,8 +28,8 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
})
require.NoError(t, err)
require.NoError(t, c.Put(ctx, r, s))
require.NoError(t, c.MarkNotInProgress(r))
c.Put(ctx, r, s)
c.MarkNotInProgress(r)
res, err := c.Get(ctx, r)
require.NoError(t, err)

View File

@@ -5,8 +5,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
@@ -28,10 +26,10 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
}{
{
name: "only current epoch",
currentSyncCommittee: convertToCommittee([][]byte{
currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{}),
nextSyncCommittee: util.ConvertToCommittee([][]byte{}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
@@ -45,8 +43,8 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "only next epoch",
currentSyncCommittee: convertToCommittee([][]byte{}),
nextSyncCommittee: convertToCommittee([][]byte{
currentSyncCommittee: util.ConvertToCommittee([][]byte{}),
nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
@@ -62,14 +60,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "some current epoch and some next epoch",
currentSyncCommittee: convertToCommittee([][]byte{
currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{
nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[7],
pubKeys[6],
pubKeys[5],
@@ -90,14 +88,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "some current epoch and some next epoch duplicated across",
currentSyncCommittee: convertToCommittee([][]byte{
currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{
nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[2],
pubKeys[1],
pubKeys[3],
@@ -117,13 +115,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "all duplicated",
currentSyncCommittee: convertToCommittee([][]byte{
currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
nextSyncCommittee: convertToCommittee([][]byte{
nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
@@ -138,13 +136,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "unknown keys",
currentSyncCommittee: convertToCommittee([][]byte{
currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
nextSyncCommittee: convertToCommittee([][]byte{
nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
@@ -189,13 +187,13 @@ func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
c := cache.NewSyncCommittee()
s, _ := util.DeterministicGenesisStateAltair(t, 64)
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{1}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{2}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{3}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{4}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))
_, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)
@@ -204,19 +202,3 @@ func TestSyncCommitteeCache_CanRotate(t *testing.T) {
_, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
require.NoError(t, err)
}
func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
var pubKeys [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
if i < uint64(len(inputKeys)) {
pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
} else {
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
}
}
return &ethpb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
}
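The deleted local helper now lives in testing/util as ConvertToCommittee; a brief usage sketch based on the deleted body:
committee := util.ConvertToCommittee([][]byte{{1}, {2}})
// committee.Pubkeys always has SyncCommitteeSize entries: the two inputs
// padded to BLSPubkeyLength, followed by zero-padded placeholder keys.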

View File

@@ -358,7 +358,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
}
want := "nil or missing indexed attestation data"
_, err := blocks.AttestationSignatureSet(context.Background(), beaconState, atts)
_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
assert.ErrorContains(t, want, err)
atts = []*ethpb.Attestation{}
@@ -378,7 +378,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
}
want = "expected non-empty attesting indices"
_, err = blocks.AttestationSignatureSet(context.Background(), beaconState, atts)
_, err = blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
assert.ErrorContains(t, want, err)
}
@@ -502,7 +502,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2})
set, err := blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
@@ -566,6 +566,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
_, err = blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2})
_, err = blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2})
require.NoError(t, err)
}

View File

@@ -62,7 +62,7 @@ func ProcessAttesterSlashing(
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
}
slashableIndices := slashableAttesterIndices(slashing)
slashableIndices := SlashableAttesterIndices(slashing)
sort.SliceStable(slashableIndices, func(i, j int) bool {
return slashableIndices[i] < slashableIndices[j]
})
@@ -152,7 +152,8 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
return isDoubleVote || isSurroundVote
}
func slashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
func SlashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 {
if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil {
return nil
}
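An illustrative sketch of the intersection the new comment describes; this hypothetical helper is not the library's implementation, just the idea that only validators attesting in both conflicting attestations are slashable:
func intersect(a, b []uint64) []uint64 {
seen := make(map[uint64]bool, len(a))
for _, v := range a {
seen[v] = true
}
var out []uint64
for _, v := range b {
if seen[v] {
out = append(out, v) // index appears in both attestations
}
}
return out
}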

View File

@@ -47,7 +47,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
}
}
func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
var ba []byte
pubkey := [48]byte{}
@@ -85,7 +85,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
}
}
func TestFuzzareEth1DataEqual_10000(t *testing.T) {
func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
eth1data := &eth.Eth1Data{}
eth1data2 := &eth.Eth1Data{}
@@ -227,7 +227,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
}
}
func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
attestationData := &eth.AttestationData{}
attestationData2 := &eth.AttestationData{}
@@ -239,13 +239,13 @@ func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
}
}
func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
attesterSlashing := &eth.AttesterSlashing{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(attesterSlashing)
slashableAttesterIndices(attesterSlashing)
SlashableAttesterIndices(attesterSlashing)
}
}
@@ -397,7 +397,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
}
}
func TestFuzzVerifyExit_10000(t *testing.T) {
func TestFuzzVerifyExit_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ve := &eth.SignedVoluntaryExit{}
rawVal := &ethpb.Validator{}

View File

@@ -321,7 +321,7 @@ func TestBlockSignatureSet_OK(t *testing.T) {
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureSet(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)
verified, err := set.Verify()

View File

@@ -82,7 +82,7 @@ func TestRandaoSignatureSet_OK(t *testing.T) {
},
}
set, err := blocks.RandaoSignatureSet(context.Background(), beaconState, block.Body.RandaoReveal)
set, err := blocks.RandaoSignatureBatch(context.Background(), beaconState, block.Body.RandaoReveal)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)

View File

@@ -18,8 +18,8 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
// retrieves the signature set from the raw data, public key, signature and domain provided.
func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) {
// retrieves the signature batch from the raw data, public key, signature and domain provided.
func signatureBatch(signedData, pub, signature, domain []byte) (*bls.SignatureBatch, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -32,7 +32,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,
if err != nil {
return nil, errors.Wrap(err, "could not hash container")
}
return &bls.SignatureSet{
return &bls.SignatureBatch{
Signatures: [][]byte{signature},
PublicKeys: []bls.PublicKey{publicKey},
Messages: [][32]byte{root},
@@ -41,7 +41,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,
// verifies the signature from the raw data, public key and domain provided.
func verifySignature(signedData, pub, signature, domain []byte) error {
set, err := signatureSet(signedData, pub, signature, domain)
set, err := signatureBatch(signedData, pub, signature, domain)
if err != nil {
return err
}
@@ -116,11 +116,11 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
return signing.VerifyBlockSigningRoot(proposerPubKey, blk.Signature(), domain, blk.Block().HashTreeRoot)
}
// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state.
func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
proposerIndex types.ValidatorIndex,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) {
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
@@ -131,21 +131,21 @@ func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureSet(proposerPubKey, sig, domain, rootFunc)
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}
// RandaoSignatureSet retrieves the relevant randao specific signature set object
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureSet(
func RandaoSignatureBatch(
ctx context.Context,
beaconState state.ReadOnlyBeaconState,
reveal []byte,
) (*bls.SignatureSet, error) {
) (*bls.SignatureBatch, error) {
buf, proposerPub, domain, err := randaoSigningData(ctx, beaconState)
if err != nil {
return nil, err
}
set, err := signatureSet(buf, proposerPub, reveal, domain)
set, err := signatureBatch(buf, proposerPub, reveal, domain)
if err != nil {
return nil, err
}
@@ -171,13 +171,13 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
return buf, proposerPub[:], domain, nil
}
// Method to break down attestations of the same domain and collect them into a single signature set.
func createAttestationSignatureSet(
// Method to break down attestations of the same domain and collect them into a single signature batch.
func createAttestationSignatureBatch(
ctx context.Context,
beaconState state.ReadOnlyBeaconState,
atts []*ethpb.Attestation,
domain []byte,
) (*bls.SignatureSet, error) {
) (*bls.SignatureBatch, error) {
if len(atts) == 0 {
return nil, nil
}
@@ -216,16 +216,16 @@ func createAttestationSignatureSet(
}
msgs[i] = root
}
return &bls.SignatureSet{
return &bls.SignatureBatch{
Signatures: sigs,
PublicKeys: pks,
Messages: msgs,
}, nil
}
// AttestationSignatureSet retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collates it into a signature set object.
func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureSet, error) {
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collates it into a signature batch object.
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureBatch, error) {
if len(atts) == 0 {
return bls.NewSet(), nil
}
@@ -247,12 +247,12 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac
set := bls.NewSet()
// Check attestations from before the fork.
if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and that there are valid attestations to create a sig set.
if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and that there are valid attestations to create a sig batch.
prevDomain, err := signing.Domain(fork, fork.Epoch-1, dt, gvr)
if err != nil {
return nil, err
}
aSet, err := createAttestationSignatureSet(ctx, beaconState, preForkAtts, prevDomain)
aSet, err := createAttestationSignatureBatch(ctx, beaconState, preForkAtts, prevDomain)
if err != nil {
return nil, err
}
@@ -272,7 +272,7 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac
return nil, err
}
aSet, err := createAttestationSignatureSet(ctx, beaconState, postForkAtts, currDomain)
aSet, err := createAttestationSignatureBatch(ctx, beaconState, postForkAtts, currDomain)
if err != nil {
return nil, err
}
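A hedged sketch of why batches exist: callers collect many (public key, message, signature) triples and verify them in a single pass instead of one pairing check per signature. Verify appears in the tests above; Join is assumed here:
set := bls.NewSet()
aSet, err := blocks.AttestationSignatureBatch(ctx, st, atts)
if err != nil {
return err
}
set.Join(aSet) // merge the attestation batch into the running batch
verified, err := set.Verify()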

View File

@@ -59,7 +59,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
case err == nil:
return bal, nil
case errors.Is(err, cache.ErrNotFound):
break
// Do nothing if we receive a not found error.
default:
// In the event we encounter another error, we return it.
return 0, err

View File

@@ -2,9 +2,9 @@ package signing
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/eth2-types"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// Domain returns the domain version for BLS private key to sign and verify.

View File

@@ -3,9 +3,9 @@ package signing
import (
"testing"
"github.com/prysmaticlabs/eth2-types"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)

View File

@@ -118,11 +118,11 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
// VerifyBlockSigningRoot verifies the signing root of a block given its public key, signature and domain.
func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) error {
set, err := BlockSignatureSet(pub, signature, domain, rootFunc)
set, err := BlockSignatureBatch(pub, signature, domain, rootFunc)
if err != nil {
return err
}
// We assume only one signature set is returned here.
// We assume only one signature batch is returned here.
sig := set.Signatures[0]
publicKey := set.PublicKeys[0]
root := set.Messages[0]
@@ -137,9 +137,9 @@ func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32]
return nil
}
// BlockSignatureSet retrieves the relevant signature, message and pubkey data from a block and collates it
// into a signature set object.
func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) {
// BlockSignatureBatch retrieves the relevant signature, message and pubkey data from a block and collates it
// into a signature batch object.
func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -149,7 +149,7 @@ func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte,
if err != nil {
return nil, errors.Wrap(err, "could not compute signing root")
}
return &bls.SignatureSet{
return &bls.SignatureBatch{
Signatures: [][]byte{signature},
PublicKeys: []bls.PublicKey{publicKey},
Messages: [][32]byte{root},

View File

@@ -35,6 +35,7 @@ go_library(
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -105,7 +106,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
genesisValidatorsRoot, err := v1.ValidatorRegistryRoot(preState.Validators())
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}

View File

@@ -214,10 +214,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
return nil, err
}
defer func() {
if err := SkipSlotCache.MarkNotInProgress(key); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Failed to mark skip slot no longer in progress")
}
SkipSlotCache.MarkNotInProgress(key)
}()
for state.Slot() < slot {
@@ -225,7 +222,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
tracing.AnnotateError(span, ctx.Err())
// Cache last best value.
if highestSlot < state.Slot() {
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
log.WithError(err).Error("Failed to put skip slot cache value")
}
SkipSlotCache.Put(ctx, key, state)
}
@@ -269,10 +266,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
}
if highestSlot < state.Slot() {
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
log.WithError(err).Error("Failed to put skip slot cache value")
tracing.AnnotateError(span, err)
}
SkipSlotCache.Put(ctx, key, state)
}
return state, nil

View File

@@ -45,7 +45,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
ctx context.Context,
state state.BeaconState,
signed block.SignedBeaconBlock,
) (*bls.SignatureSet, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
@@ -182,7 +182,7 @@ func ProcessBlockNoVerifyAnySig(
ctx context.Context,
state state.BeaconState,
signed block.SignedBeaconBlock,
) (*bls.SignatureSet, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
defer span.End()
if err := helpers.BeaconBlockIsNil(signed); err != nil {
@@ -209,17 +209,17 @@ func ProcessBlockNoVerifyAnySig(
}
}
bSet, err := b.BlockSignatureSet(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
bSet, err := b.BlockSignatureBatch(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
rSet, err := b.RandaoSignatureSet(ctx, state, signed.Block().Body().RandaoReveal())
rSet, err := b.RandaoSignatureBatch(ctx, state, signed.Block().Body().RandaoReveal())
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
}
aSet, err := b.AttestationSignatureSet(ctx, state, signed.Block().Body().Attestations())
aSet, err := b.AttestationSignatureBatch(ctx, state, signed.Block().Body().Attestations())
if err != nil {
return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
}
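A sketch of the deferred-verification flow this function enables (the caller shape is assumed; Verify is shown in the tests above):
batch, postState, err := ExecuteStateTransitionNoVerifyAnySig(ctx, preState, signed)
if err != nil {
return err
}
verified, err := batch.Verify() // one batched BLS check for block, randao and attestations
if err != nil || !verified {
return errors.New("signature batch failed to verify")
}
// postState is only trusted once the batch verifies.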

View File

@@ -39,14 +39,6 @@ type ReadOnlyDatabase interface {
StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
HasStateSummary(ctx context.Context, blockRoot [32]byte) bool
HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]state.ReadOnlyBeaconState, error)
// Slashing operations.
ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.ProposerSlashing, error)
AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*eth.AttesterSlashing, error)
HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool
HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool
// Block operations.
VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*eth.VoluntaryExit, error)
HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool
// Checkpoint operations.
JustifiedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
FinalizedCheckpoint(ctx context.Context) (*eth.Checkpoint, error)
@@ -75,11 +67,6 @@ type NoHeadAccessDatabase interface {
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
// Slashing operations.
SaveProposerSlashing(ctx context.Context, slashing *eth.ProposerSlashing) error
SaveAttesterSlashing(ctx context.Context, slashing *eth.AttesterSlashing) error
// Block operations.
SaveVoluntaryExit(ctx context.Context, exit *eth.VoluntaryExit) error
// Checkpoint operations.
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *eth.Checkpoint) error

View File

@@ -18,10 +18,8 @@ go_library(
"migration_archived_index.go",
"migration_block_slot_index.go",
"migration_state_validators.go",
"operations.go",
"powchain.go",
"schema.go",
"slashings.go",
"state.go",
"state_summary.go",
"state_summary_cache.go",
@@ -88,9 +86,7 @@ go_test(
"migration_archived_index_test.go",
"migration_block_slot_index_test.go",
"migration_state_validators_test.go",
"operations_test.go",
"powchain_test.go",
"slashings_test.go",
"state_summary_test.go",
"state_test.go",
"utils_test.go",

View File

@@ -150,11 +150,7 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []bloc
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
keys, err := blockRootsBySlot(ctx, tx, slot)
if err != nil {
return err
}
keys := blockRootsBySlot(ctx, tx, slot)
for i := 0; i < len(keys); i++ {
encoded := bkt.Get(keys[i])
blk, err := unmarshalBlock(ctx, encoded)
@@ -174,11 +170,7 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
defer span.End()
blockRoots := make([][32]byte, 0)
err := s.db.View(func(tx *bolt.Tx) error {
keys, err := blockRootsBySlot(ctx, tx, slot)
if err != nil {
return err
}
keys := blockRootsBySlot(ctx, tx, slot)
for i := 0; i < len(keys); i++ {
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
}
@@ -519,7 +511,7 @@ func blockRootsBySlotRange(
}
// blockRootsBySlot retrieves the block roots by slot
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]byte, error) {
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) [][]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
defer span.End()
@@ -533,7 +525,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][]by
roots = append(roots, v[i:i+32])
}
}
return roots, nil
return roots
}
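A minimal sketch of the index layout the simplified helper relies on: the slot index bucket stores all block roots for a slot as one concatenated value, so decoding is a fixed-width split that cannot fail (splitRoots is a hypothetical name):
func splitRoots(v []byte) [][]byte {
roots := make([][]byte, 0, len(v)/32)
for i := 0; i+32 <= len(v); i += 32 {
roots = append(roots, v[i:i+32]) // each root is exactly 32 bytes
}
return roots
}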
// createBlockIndicesFromBlock takes in a beacon block and returns

View File

@@ -1,78 +0,0 @@
package kv
import (
"context"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// VoluntaryExit retrieval by signing root.
func (s *Store) VoluntaryExit(ctx context.Context, exitRoot [32]byte) (*ethpb.VoluntaryExit, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.VoluntaryExit")
defer span.End()
enc, err := s.voluntaryExitBytes(ctx, exitRoot)
if err != nil {
return nil, err
}
if len(enc) == 0 {
return nil, nil
}
exit := &ethpb.VoluntaryExit{}
if err := decode(ctx, enc, exit); err != nil {
return nil, err
}
return exit, nil
}
// HasVoluntaryExit verifies if a voluntary exit is stored in the db by its signing root.
func (s *Store) HasVoluntaryExit(ctx context.Context, exitRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasVoluntaryExit")
defer span.End()
enc, err := s.voluntaryExitBytes(ctx, exitRoot)
if err != nil {
panic(err)
}
return len(enc) > 0
}
// SaveVoluntaryExit to the db by its signing root.
func (s *Store) SaveVoluntaryExit(ctx context.Context, exit *ethpb.VoluntaryExit) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveVoluntaryExit")
defer span.End()
exitRoot, err := exit.HashTreeRoot()
if err != nil {
return err
}
enc, err := encode(ctx, exit)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(voluntaryExitsBucket)
return bucket.Put(exitRoot[:], enc)
})
}
func (s *Store) voluntaryExitBytes(ctx context.Context, exitRoot [32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.voluntaryExitBytes")
defer span.End()
var dst []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(voluntaryExitsBucket)
dst = bkt.Get(exitRoot[:])
return nil
})
return dst, err
}
// deleteVoluntaryExit clears a voluntary exit from the db by its signing root.
func (s *Store) deleteVoluntaryExit(ctx context.Context, exitRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteVoluntaryExit")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(voluntaryExitsBucket)
return bucket.Delete(exitRoot[:])
})
}

View File

@@ -1,31 +0,0 @@
package kv
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"google.golang.org/protobuf/proto"
)
func TestStore_VoluntaryExits_CRUD(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
exit := &ethpb.VoluntaryExit{
Epoch: 5,
}
exitRoot, err := exit.HashTreeRoot()
require.NoError(t, err)
retrieved, err := db.VoluntaryExit(ctx, exitRoot)
require.NoError(t, err)
assert.Equal(t, (*ethpb.VoluntaryExit)(nil), retrieved, "Expected nil voluntary exit")
require.NoError(t, db.SaveVoluntaryExit(ctx, exit))
assert.Equal(t, true, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to exist in the db")
retrieved, err = db.VoluntaryExit(ctx, exitRoot)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(exit, retrieved), "Wanted %v, received %v", exit, retrieved)
require.NoError(t, db.deleteVoluntaryExit(ctx, exitRoot))
assert.Equal(t, false, db.HasVoluntaryExit(ctx, exitRoot), "Expected voluntary exit to have been deleted from the db")
}

View File

@@ -1,147 +0,0 @@
package kv
import (
"context"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// ProposerSlashing retrieval by slashing root.
func (s *Store) ProposerSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.ProposerSlashing, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.ProposerSlashing")
defer span.End()
enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
if err != nil {
return nil, err
}
if len(enc) == 0 {
return nil, nil
}
proposerSlashing := &ethpb.ProposerSlashing{}
if err := decode(ctx, enc, proposerSlashing); err != nil {
return nil, err
}
return proposerSlashing, nil
}
// HasProposerSlashing verifies if a slashing is stored in the db.
func (s *Store) HasProposerSlashing(ctx context.Context, slashingRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasProposerSlashing")
defer span.End()
enc, err := s.proposerSlashingBytes(ctx, slashingRoot)
if err != nil {
panic(err)
}
return len(enc) > 0
}
// SaveProposerSlashing to the db by its hash tree root.
func (s *Store) SaveProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveProposerSlashing")
defer span.End()
slashingRoot, err := slashing.HashTreeRoot()
if err != nil {
return err
}
enc, err := encode(ctx, slashing)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(proposerSlashingsBucket)
return bucket.Put(slashingRoot[:], enc)
})
}
func (s *Store) proposerSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.proposerSlashingBytes")
defer span.End()
var dst []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(proposerSlashingsBucket)
dst = bkt.Get(slashingRoot[:])
return nil
})
return dst, err
}
// deleteProposerSlashing clears a proposer slashing from the db by its hash tree root.
func (s *Store) deleteProposerSlashing(ctx context.Context, slashingRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteProposerSlashing")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(proposerSlashingsBucket)
return bucket.Delete(slashingRoot[:])
})
}
// AttesterSlashing retrieval by hash tree root.
func (s *Store) AttesterSlashing(ctx context.Context, slashingRoot [32]byte) (*ethpb.AttesterSlashing, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.AttesterSlashing")
defer span.End()
enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
if err != nil {
return nil, err
}
if len(enc) == 0 {
return nil, nil
}
attSlashing := &ethpb.AttesterSlashing{}
if err := decode(ctx, enc, attSlashing); err != nil {
return nil, err
}
return attSlashing, nil
}
// HasAttesterSlashing verifies if a slashing is stored in the db.
func (s *Store) HasAttesterSlashing(ctx context.Context, slashingRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasAttesterSlashing")
defer span.End()
enc, err := s.attesterSlashingBytes(ctx, slashingRoot)
if err != nil {
panic(err)
}
return len(enc) > 0
}
// SaveAttesterSlashing to the db by its hash tree root.
func (s *Store) SaveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttesterSlashing")
defer span.End()
slashingRoot, err := slashing.HashTreeRoot()
if err != nil {
return err
}
enc, err := encode(ctx, slashing)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(attesterSlashingsBucket)
return bucket.Put(slashingRoot[:], enc)
})
}
func (s *Store) attesterSlashingBytes(ctx context.Context, slashingRoot [32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.attesterSlashingBytes")
defer span.End()
var dst []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(attesterSlashingsBucket)
dst = bkt.Get(slashingRoot[:])
return nil
})
return dst, err
}
// deleteAttesterSlashing clears an attester slashing from the db by its hash tree root.
func (s *Store) deleteAttesterSlashing(ctx context.Context, slashingRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteAttesterSlashing")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(attesterSlashingsBucket)
return bucket.Delete(slashingRoot[:])
})
}

View File

@@ -1,67 +0,0 @@
package kv
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/protobuf/proto"
)
func TestStore_ProposerSlashing_CRUD(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
prop := &ethpb.ProposerSlashing{
Header_1: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 5,
},
}),
Header_2: util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 5,
},
}),
}
slashingRoot, err := prop.HashTreeRoot()
require.NoError(t, err)
retrieved, err := db.ProposerSlashing(ctx, slashingRoot)
require.NoError(t, err)
assert.Equal(t, (*ethpb.ProposerSlashing)(nil), retrieved, "Expected nil proposer slashing")
require.NoError(t, db.SaveProposerSlashing(ctx, prop))
assert.Equal(t, true, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to exist in the db")
retrieved, err = db.ProposerSlashing(ctx, slashingRoot)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(prop, retrieved), "Wanted %v, received %v", prop, retrieved)
require.NoError(t, db.deleteProposerSlashing(ctx, slashingRoot))
assert.Equal(t, false, db.HasProposerSlashing(ctx, slashingRoot), "Expected proposer slashing to have been deleted from the db")
}
func TestStore_AttesterSlashing_CRUD(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
att := &ethpb.AttesterSlashing{
Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Slot: 5,
}}),
Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Slot: 7,
}})}
slashingRoot, err := att.HashTreeRoot()
require.NoError(t, err)
retrieved, err := db.AttesterSlashing(ctx, slashingRoot)
require.NoError(t, err)
assert.Equal(t, (*ethpb.AttesterSlashing)(nil), retrieved, "Expected nil attester slashing")
require.NoError(t, db.SaveAttesterSlashing(ctx, att))
assert.Equal(t, true, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to exist in the db")
retrieved, err = db.AttesterSlashing(ctx, slashingRoot)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(att, retrieved), "Wanted %v, received %v", att, retrieved)
require.NoError(t, db.deleteAttesterSlashing(ctx, slashingRoot))
assert.Equal(t, false, db.HasAttesterSlashing(ctx, slashingRoot), "Expected attester slashing to have been deleted from the db")
}

View File

@@ -14,7 +14,7 @@ import (
// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
// less than or equal to the specified epoch.
func (s *Store) PruneAttestationsAtEpoch(
ctx context.Context, maxEpoch types.Epoch,
_ context.Context, maxEpoch types.Epoch,
) (numPruned uint, err error) {
// We can prune everything less than the current epoch - history length.
encodedEndPruneEpoch := fssz.MarshalUint64([]byte{}, uint64(maxEpoch))
@@ -85,7 +85,7 @@ func (s *Store) PruneAttestationsAtEpoch(
// PruneProposalsAtEpoch deletes all proposals from the slasher DB with epoch
// less than or equal to the specified epoch.
func (s *Store) PruneProposalsAtEpoch(
ctx context.Context, maxEpoch types.Epoch,
_ context.Context, maxEpoch types.Epoch,
) (numPruned uint, err error) {
var endPruneSlot types.Slot
endPruneSlot, err = slots.EpochEnd(maxEpoch)

View File

@@ -394,7 +394,7 @@ func (s *Store) SaveBlockProposals(
// HighestAttestations retrieves the last attestation data from the database for all indices.
func (s *Store) HighestAttestations(
ctx context.Context,
_ context.Context,
indices []types.ValidatorIndex,
) ([]*slashpb.HighestAttestation, error) {
if len(indices) == 0 {

View File

@@ -0,0 +1,70 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"metrics.go",
"process_attestation.go",
"process_block.go",
"process_exit.go",
"process_sync_committee.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"process_attestation_test.go",
"process_block_test.go",
"process_exit_test.go",
"process_sync_committee_test.go",
"service_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -0,0 +1,7 @@
/*
Package monitor defines a runtime service which receives
notifications triggered by events related to the performance of tracked
validating keys. It logs these events and emits metrics so that users can
keep finely detailed performance measures.
*/
package monitor

View File

@@ -0,0 +1,93 @@
package monitor
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
var (
log = logrus.WithField("prefix", "monitor")
// TODO: The Prometheus gauge vectors and counters in this package deprecate the
// corresponding gauge vectors and counters in the validator client.
// inclusionSlotGauge used to track attestation inclusion slots
inclusionSlotGauge = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "monitor",
Name: "inclusion_slot",
Help: "Attestations inclusion slot",
},
[]string{
"validator_index",
},
)
// timelyHeadCounter used to track attestation timely head flags
timelyHeadCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "timely_head",
Help: "Attestation timely Head flag",
},
[]string{
"validator_index",
},
)
// timelyTargetCounter used to track attestation timely target flags
timelyTargetCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "timely_target",
Help: "Attestation timely Target flag",
},
[]string{
"validator_index",
},
)
// timelySourceCounter used to track attestation timely source flags
timelySourceCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "timely_source",
Help: "Attestation timely Source flag",
},
[]string{
"validator_index",
},
)
// proposedSlotsCounter used to track proposed blocks
proposedSlotsCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "proposed_slots_total",
Help: "Number of proposed blocks included",
},
[]string{
"validator_index",
},
)
// aggregationCounter used to track aggregations
aggregationCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "aggregations",
Help: "Number of aggregation duties performed",
},
[]string{
"validator_index",
},
)
// syncCommitteeContributionCounter used to track sync committee
// contributions
syncCommitteeContributionCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "sync_committee_contributions_total",
Help: "Number of Sync committee contributions performed",
},
[]string{
"validator_index",
},
)
)
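Each collector above is keyed by a validator_index label, so one metric family fans out into a series per tracked validator. A minimal usage sketch (the index 12 is arbitrary, and the scrape output shown is what Prometheus naming conventions would produce from the Namespace/Name pairs above):

// Hypothetical usage: every collector is keyed by the validator index
// rendered as a string label.
timelySourceCounter.WithLabelValues("12").Inc()
inclusionSlotGauge.WithLabelValues("12").Set(34)
// A Prometheus scrape would then expose series such as:
//   monitor_timely_source{validator_index="12"} 1
//   monitor_inclusion_slot{validator_index="12"} 34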

View File

@@ -0,0 +1,234 @@
package monitor
import (
"context"
"fmt"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the
// given slot is different from the last attested slot for this validator.
// It assumes that a read lock is held on the monitor service.
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool {
if !s.trackedIndex(idx) {
return false
}
if lp, ok := s.latestPerformance[idx]; ok {
return lp.attestedSlot != slot
}
return false
}
// attestingIndices returns the indices of validators that appear in the
// given aggregated attestation.
func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) {
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
return attestation.AttestingIndices(att.AggregationBits, committee)
}
// logMessageTimelyFlagsForIndex returns the log message with the basic
// performance indicators for the attestation (head, source, target)
func logMessageTimelyFlagsForIndex(idx types.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields {
return logrus.Fields{
"ValidatorIndex": idx,
"Slot": data.Slot,
"Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)),
"Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)),
"Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)),
}
}
// processAttestations logs the event that one of our tracked validators'
// attestations was included in a block
func (s *Service) processAttestations(ctx context.Context, state state.BeaconState, blk block.BeaconBlock) {
if blk == nil || blk.Body() == nil {
return
}
for _, attestation := range blk.Body().Attestations() {
s.processIncludedAttestation(ctx, state, attestation)
}
}
// processIncludedAttestation logs the event that one of our tracked validators
// appears in the attesting indices of an included attestation that we have not
// processed before.
func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) {
attestingIndices, err := attestingIndices(ctx, state, att)
if err != nil {
log.WithError(err).Error("Could not get attesting indices")
return
}
s.Lock()
defer s.Unlock()
for _, idx := range attestingIndices {
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
balance, err := state.BalanceAtIndex(types.ValidatorIndex(idx))
if err != nil {
log.WithError(err).Error("Could not get balance")
return
}
aggregatedPerf := s.aggregatedPerformance[types.ValidatorIndex(idx)]
aggregatedPerf.totalAttestedCount++
aggregatedPerf.totalRequestedCount++
latestPerf := s.latestPerformance[types.ValidatorIndex(idx)]
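// Note: balances are uint64, so a decrease wraps around on subtraction;
// the int64 conversion below recovers the signed (possibly negative) delta.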
balanceChg := int64(balance - latestPerf.balance)
latestPerf.balanceChange = balanceChg
latestPerf.balance = balance
latestPerf.attestedSlot = att.Data.Slot
latestPerf.inclusionSlot = state.Slot()
inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot))
aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot)
if state.Version() == version.Altair {
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
sourceIdx := params.BeaconConfig().TimelySourceFlagIndex
headIdx := params.BeaconConfig().TimelyHeadFlagIndex
var participation []byte
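// Participation flags are accumulated in the epoch the attestation was made:
// read current-epoch participation if the attestation was included in the same
// epoch it attested for, previous-epoch participation otherwise.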
if slots.ToEpoch(latestPerf.inclusionSlot) ==
slots.ToEpoch(latestPerf.attestedSlot) {
participation, err = state.CurrentEpochParticipation()
if err != nil {
log.WithError(err).Error("Could not get current epoch participation")
return
}
} else {
participation, err = state.PreviousEpochParticipation()
if err != nil {
log.WithError(err).Error("Could not get previous epoch participation")
return
}
}
flags := participation[idx]
hasFlag, err := altair.HasValidatorFlag(flags, sourceIdx)
if err != nil {
log.WithError(err).Error("Could not get timely Source flag")
return
}
latestPerf.timelySource = hasFlag
hasFlag, err = altair.HasValidatorFlag(flags, headIdx)
if err != nil {
log.WithError(err).Error("Could not get timely Head flag")
return
}
latestPerf.timelyHead = hasFlag
hasFlag, err = altair.HasValidatorFlag(flags, targetIdx)
if err != nil {
log.WithError(err).Error("Could not get timely Target flag")
return
}
latestPerf.timelyTarget = hasFlag
if latestPerf.timelySource {
timelySourceCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
aggregatedPerf.totalCorrectSource++
}
if latestPerf.timelyHead {
timelyHeadCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
aggregatedPerf.totalCorrectHead++
}
if latestPerf.timelyTarget {
timelyTargetCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc()
aggregatedPerf.totalCorrectTarget++
}
}
logFields["CorrectHead"] = latestPerf.timelyHead
logFields["CorrectSource"] = latestPerf.timelySource
logFields["CorrectTarget"] = latestPerf.timelyTarget
logFields["InclusionSlot"] = latestPerf.inclusionSlot
logFields["NewBalance"] = balance
logFields["BalanceChange"] = balanceChg
s.latestPerformance[types.ValidatorIndex(idx)] = latestPerf
s.aggregatedPerformance[types.ValidatorIndex(idx)] = aggregatedPerf
log.WithFields(logFields).Info("Attestation included")
}
}
}
// processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our
// tracked validators
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
s.RLock()
defer s.RUnlock()
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if state == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping unaggregated attestation due to state not found in cache")
return
}
attestingIndices, err := attestingIndices(ctx, state, att)
if err != nil {
log.WithError(err).Error("Could not get attesting indices")
return
}
for _, idx := range attestingIndices {
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
log.WithFields(logFields).Info("Processed unaggregated attestation")
}
}
}
// processAggregatedAttestation logs when we see an aggregation performed by one of our
// tracked validators, or an aggregated attestation that includes one of our tracked validators.
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
s.Lock()
defer s.Unlock()
if s.trackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
"AggregatorIndex": att.AggregatorIndex,
"Slot": att.Aggregate.Data.Slot,
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
"SourceRoot:": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
"TargetRoot:": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
aggregatedPerf.totalAggregations++
s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf
aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc()
}
var root [32]byte
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if state == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping agregated attestation due to state not found in cache")
return
}
attestingIndices, err := attestingIndices(ctx, state, att.Aggregate)
if err != nil {
log.WithError(err).Error("Could not get attesting indices")
return
}
for _, idx := range attestingIndices {
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Aggregate.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Aggregate.Data)
log.WithFields(logFields).Info("Processed aggregated attestation")
}
}
}
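The timely source/target/head checks above come down to testing one bit of a validator's participation byte. A standalone sketch of that bit test, assuming the Altair spec flag indices (source=0, target=1, head=2) and mirroring what altair.HasValidatorFlag is expected to do:

package main

import "fmt"

// hasFlag reports whether the participation bit at flagIndex is set,
// mirroring the altair.HasValidatorFlag check (assumed semantics).
func hasFlag(participation byte, flagIndex uint8) bool {
	return participation&(1<<flagIndex) != 0
}

func main() {
	const (
		timelySourceFlagIndex = 0 // Altair spec values, assumed here
		timelyTargetFlagIndex = 1
		timelyHeadFlagIndex   = 2
	)
	flags := byte(0b011) // timely source and target, missed head
	fmt.Println(hasFlag(flags, timelySourceFlagIndex)) // true
	fmt.Println(hasFlag(flags, timelyTargetFlagIndex)) // true
	fmt.Println(hasFlag(flags, timelyHeadFlagIndex))   // false
}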

View File

@@ -0,0 +1,248 @@
package monitor
import (
"bytes"
"context"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestGetAttestingIndices(t *testing.T) {
ctx := context.Background()
beaconState, _ := util.DeterministicGenesisState(t, 256)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
attestingIndices, err := attestingIndices(ctx, beaconState, att)
require.NoError(t, err)
require.DeepEqual(t, attestingIndices, []uint64{0xc, 0x2})
}
func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processIncludedAttestation(context.Background(), state, att)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
header := state.LatestBlockHeader()
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: header.GetStateRoot(),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
s.processUnaggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "Skipping unaggregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
root := [32]byte{}
copy(root[:], "hello-world")
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: root[:],
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processUnaggregatedAttestation(context.Background(), att)
wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}
func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
header := state.LatestBlockHeader()
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
att := &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: header.GetStateRoot(),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
},
}
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot:=0x68656c6c6f2d TargetRoot:=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "Skipping agregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
func TestProcessAggregatedAttestationStateCached(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
require.NoError(t, state.SetCurrentParticipationBits(participation))
root := [32]byte{}
copy(root[:], "hello-world")
att := &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: root[:],
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
},
},
AggregationBits: bitfield.Bitlist{0b10, 0b1},
},
}
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot:=0x68656c6c6f2d TargetRoot:=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
}
func TestProcessAttestations(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
ctx := context.Background()
state, _ := util.DeterministicGenesisStateAltair(t, 256)
require.NoError(t, state.SetSlot(2))
require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("hello-world"), 32),
},
},
AggregationBits: bitfield.Bitlist{0b11, 0b1},
}
block := &ethpb.BeaconBlockAltair{
Slot: 2,
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: []*ethpb.Attestation{att},
},
}
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
require.NoError(t, err)
s.processAttestations(ctx, state, wrappedBlock)
wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
}

View File

@@ -0,0 +1,177 @@
package monitor
import (
"context"
"fmt"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
// AggregateReportingPeriod is the number of epochs between aggregated performance reports.
const AggregateReportingPeriod = 5
// processBlock handles the cases when
// 1) A block was proposed by one of our tracked validators
// 2) An attestation by one of our tracked validators was included
// 3) An exit by one of our tracked validators was included
// 4) A slashing involving one of our tracked validators was included
// 5) A sync committee contribution by one of our tracked validators was included
func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) {
if b == nil || b.Block() == nil {
return
}
blk := b.Block()
s.processSlashings(blk)
s.processExitsFromBlock(blk)
root, err := blk.HashTreeRoot()
if err != nil {
log.WithError(err).Error("Could not compute block's hash tree root")
return
}
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if state == nil {
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping block collection due to state not found in cache")
return
}
currEpoch := slots.ToEpoch(blk.Slot())
s.RLock()
lastSyncedEpoch := s.lastSyncedEpoch
s.RUnlock()
if currEpoch != lastSyncedEpoch &&
slots.SyncCommitteePeriod(currEpoch) == slots.SyncCommitteePeriod(lastSyncedEpoch) {
s.updateSyncCommitteeTrackedVals(state)
}
s.processSyncAggregate(state, blk)
s.processProposedBlock(state, root, blk)
s.processAttestations(ctx, state, blk)
if blk.Slot()%(AggregateReportingPeriod*params.BeaconConfig().SlotsPerEpoch) == 0 {
s.logAggregatedPerformance()
}
}
// processProposedBlock logs the event that one of our tracked validators proposed a block that was included
func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, blk block.BeaconBlock) {
s.Lock()
defer s.Unlock()
if s.trackedIndex(blk.ProposerIndex()) {
// update metrics
proposedSlotsCounter.WithLabelValues(fmt.Sprintf("%d", blk.ProposerIndex())).Inc()
// update the performance map
balance, err := state.BalanceAtIndex(blk.ProposerIndex())
if err != nil {
log.WithError(err).Error("Could not get balance")
return
}
latestPerf := s.latestPerformance[blk.ProposerIndex()]
balanceChg := int64(balance - latestPerf.balance)
latestPerf.balanceChange = balanceChg
latestPerf.balance = balance
s.latestPerformance[blk.ProposerIndex()] = latestPerf
aggPerf := s.aggregatedPerformance[blk.ProposerIndex()]
aggPerf.totalProposedCount++
s.aggregatedPerformance[blk.ProposerIndex()] = aggPerf
log.WithFields(logrus.Fields{
"ProposerIndex": blk.ProposerIndex(),
"Slot": blk.Slot(),
"Version": blk.Version(),
"ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(blk.ParentRoot())),
"BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"NewBalance": balance,
"BalanceChange": balanceChg,
}).Info("Proposed block was included")
}
}
// processSlashings logs the event that one of our tracked validators was slashed.
func (s *Service) processSlashings(blk block.BeaconBlock) {
s.RLock()
defer s.RUnlock()
for _, slashing := range blk.Body().ProposerSlashings() {
idx := slashing.Header_1.Header.ProposerIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ProposerIndex": idx,
"Slot:": blk.Slot(),
"SlashingSlot": slashing.Header_1.Header.Slot,
"Root1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_1.Header.BodyRoot)),
"Root2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Header_2.Header.BodyRoot)),
}).Info("Proposer slashing was included")
}
}
for _, slashing := range blk.Body().AttesterSlashings() {
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
if s.trackedIndex(types.ValidatorIndex(idx)) {
log.WithFields(logrus.Fields{
"AttesterIndex": idx,
"Slot:": blk.Slot(),
"Slot1": slashing.Attestation_1.Data.Slot,
"Root1": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_1.Data.BeaconBlockRoot)),
"SourceEpoch1": slashing.Attestation_1.Data.Source.Epoch,
"TargetEpoch1": slashing.Attestation_1.Data.Target.Epoch,
"Slot2": slashing.Attestation_2.Data.Slot,
"Root2": fmt.Sprintf("%#x", bytesutil.Trunc(slashing.Attestation_2.Data.BeaconBlockRoot)),
"SourceEpoch2": slashing.Attestation_2.Data.Source.Epoch,
"TargetEpoch2": slashing.Attestation_2.Data.Target.Epoch,
}).Info("Attester slashing was included")
}
}
}
}
// logAggregatedPerformance logs the performance statistics collected since the run started
func (s *Service) logAggregatedPerformance() {
s.RLock()
defer s.RUnlock()
for idx, p := range s.aggregatedPerformance {
// Skip validators we have no data for yet; map iteration order is random,
// so breaking out here would arbitrarily drop the remaining validators.
if p.totalAttestedCount == 0 || p.totalRequestedCount == 0 || p.startBalance == 0 {
continue
}
l, ok := s.latestPerformance[idx]
if !ok {
continue
}
percentAtt := float64(p.totalAttestedCount) / float64(p.totalRequestedCount)
percentBal := float64(l.balance-p.startBalance) / float64(p.startBalance)
avgInclusionDistance := float64(p.totalDistance) / float64(p.totalAttestedCount)
percentCorrectSource := float64(p.totalCorrectSource) / float64(p.totalAttestedCount)
percentCorrectHead := float64(p.totalCorrectHead) / float64(p.totalAttestedCount)
percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"StartEpoch": p.startEpoch,
"StartBalance": p.startBalance,
"TotalRequested": p.totalRequestedCount,
"AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"TotalProposedBlocks": p.totalProposedCount,
"TotalAggregations": p.totalAggregations,
"TotalSyncContributions": p.totalSyncComitteeContributions,
}).Info("Aggregated performance since launch")
}
}
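A quick check of the reporting cadence implied by processBlock: with AggregateReportingPeriod = 5 and, as an assumption for illustration only, mainnet parameters (32 slots per epoch, 12-second slots), the aggregated report fires every 160 slots, roughly every 32 minutes:

package main

import "fmt"

func main() {
	const (
		slotsPerEpoch            = 32 // mainnet value, assumed for illustration
		aggregateReportingPeriod = 5  // epochs, as defined in this package
	)
	interval := aggregateReportingPeriod * slotsPerEpoch
	// processBlock logs aggregated performance when slot%interval == 0,
	// i.e. at slots 0, 160, 320, ...
	fmt.Printf("reports fire every %d slots (~%d minutes)\n", interval, interval*12/60)
}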

View File

@@ -0,0 +1,246 @@
package monitor
import (
"context"
"fmt"
"testing"
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestProcessSlashings(t *testing.T) {
tests := []struct {
name string
block *ethpb.BeaconBlock
wantedErr string
}{
{
name: "Proposer slashing a tracked index",
block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 2,
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
},
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 2,
Slot: 0,
},
},
},
},
},
},
wantedErr: "\"Proposer slashing was included\" ProposerIndex=2",
},
{
name: "Proposer slashing an untracked index",
block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 3,
Slot: params.BeaconConfig().SlotsPerEpoch + 4,
},
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 3,
Slot: 0,
},
},
},
},
},
},
wantedErr: "",
},
{
name: "Attester slashing a tracked index",
block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{
{
Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{1, 3, 4},
}),
Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{1, 5, 6},
}),
},
},
},
},
wantedErr: "\"Attester slashing was included\" AttesterIndex=1",
},
{
name: "Attester slashing untracked index",
block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{
{
Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{1, 3, 4},
}),
Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{3, 5, 6},
}),
},
},
},
},
wantedErr: "",
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
s.processSlashings(wrapper.WrappedPhase0BeaconBlock(tt.block))
if tt.wantedErr != "" {
require.LogsContain(t, hook, tt.wantedErr)
} else {
require.LogsDoNotContain(t, hook, "slashing")
}
})
}
}
func TestProcessProposedBlock(t *testing.T) {
tests := []struct {
name string
block *ethpb.BeaconBlock
wantedErr string
}{
{
name: "Block proposed by tracked validator",
block: &ethpb.BeaconBlock{
Slot: 6,
ProposerIndex: 12,
ParentRoot: bytesutil.PadTo([]byte("hello-world"), 32),
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
},
wantedErr: "\"Proposed block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor",
},
{
name: "Block proposed by untracked validator",
block: &ethpb.BeaconBlock{
Slot: 6,
ProposerIndex: 13,
ParentRoot: bytesutil.PadTo([]byte("hello-world"), 32),
StateRoot: bytesutil.PadTo([]byte("state-world"), 32),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
beaconState, _ := util.DeterministicGenesisState(t, 256)
root := [32]byte{}
copy(root[:], "hello-world")
s.processProposedBlock(beaconState, root, wrapper.WrappedPhase0BeaconBlock(tt.block))
if tt.wantedErr != "" {
require.LogsContain(t, hook, tt.wantedErr)
} else {
require.LogsDoNotContain(t, hook, "included")
}
})
}
}
func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
genesis, keys := util.DeterministicGenesisStateAltair(t, 64)
c, err := altair.NextSyncCommittee(ctx, genesis)
require.NoError(t, err)
require.NoError(t, genesis.SetCurrentSyncCommittee(c))
genConfig := util.DefaultBlockGenConfig()
genConfig.NumProposerSlashings = 1
b, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
require.NoError(t, err)
s := setupService(t)
pubKeys := make([][]byte, 3)
pubKeys[0] = genesis.Validators()[0].PublicKey
pubKeys[1] = genesis.Validators()[1].PublicKey
pubKeys[2] = genesis.Validators()[2].PublicKey
currentSyncCommittee := util.ConvertToCommittee([][]byte{
pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
})
require.NoError(t, genesis.SetCurrentSyncCommittee(currentSyncCommittee))
idx := b.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex
// Mutating TrackedValidators and the performance maps requires the write lock.
s.Lock()
if !s.trackedIndex(idx) {
s.TrackedValidators[idx] = true
s.latestPerformance[idx] = ValidatorLatestPerformance{
balance: 31900000000,
}
s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{}
}
s.Unlock()
s.updateSyncCommitteeTrackedVals(genesis)
root, err := b.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" ProposerIndex=%d Root1=0x000100000000 Root2=0x000200000000 SlashingSlot=0 Slot=1 prefix=monitor", idx)
wanted3 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=3 ExpectedContrib=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
wanted4 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
s.processBlock(ctx, wrapped)
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
require.LogsContain(t, hook, wanted3)
require.LogsContain(t, hook, wanted4)
}
func TestLogAggregatedPerformance(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
s.logAggregatedPerformance()
time.Sleep(3000 * time.Millisecond)
wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
"ValidatorIndex=1 prefix=monitor"
require.LogsContain(t, hook, wanted)
}

View File

@@ -0,0 +1,35 @@
package monitor
import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/sirupsen/logrus"
)
// processExitsFromBlock logs the event that a voluntary exit by one of our
// tracked validators was included in a block.
func (s *Service) processExitsFromBlock(blk block.BeaconBlock) {
s.RLock()
defer s.RUnlock()
for _, exit := range blk.Body().VoluntaryExits() {
idx := exit.Exit.ValidatorIndex
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"Slot": blk.Slot(),
}).Info("Voluntary exit was included")
}
}
}
// processExit logs the event that a voluntary exit by one of our tracked validators was processed.
func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
idx := exit.Exit.ValidatorIndex
s.RLock()
defer s.RUnlock()
if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
}).Info("Voluntary exit was processed")
}
}

View File

@@ -0,0 +1,118 @@
package monitor
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 3,
Epoch: 1,
},
},
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 2,
Epoch: 0,
},
},
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}
s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
require.LogsContain(t, hook, "\"Voluntary exit was included\" Slot=0 ValidatorIndex=2")
}
func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 3,
Epoch: 1,
},
},
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 4,
Epoch: 0,
},
},
}
block := &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}
s.processExitsFromBlock(wrapper.WrappedPhase0BeaconBlock(block))
require.LogsDoNotContain(t, hook, "\"Voluntary exit was included\"")
}
func TestProcessExitP2PTrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 1,
Epoch: 1,
},
Signature: make([]byte, 96),
}
s.processExit(exit)
require.LogsContain(t, hook, "\"Voluntary exit was processed\" ValidatorIndex=1")
}
func TestProcessExitP2PUntrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
exit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 3,
Epoch: 1,
},
}
s.processExit(exit)
require.LogsDoNotContain(t, hook, "\"Voluntary exit was processed\"")
}

View File

@@ -0,0 +1,79 @@
package monitor
import (
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/sirupsen/logrus"
)
// processSyncCommitteeContribution logs the event that one of our tracked
// validators' aggregated sync contribution has been processed.
// TODO: We do not log if a sync contribution was included in an aggregate (we
// log them when they are included in blocks)
func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedContributionAndProof) {
idx := contribution.Message.AggregatorIndex
s.Lock()
defer s.Unlock()
if s.trackedIndex(idx) {
aggPerf := s.aggregatedPerformance[idx]
aggPerf.totalSyncCommitteeAggregations++
s.aggregatedPerformance[idx] = aggPerf
log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
}
}
// processSyncAggregate logs the event that one of our tracked validators is a sync-committee member and its
// contribution was included
func (s *Service) processSyncAggregate(state state.BeaconState, blk block.BeaconBlock) {
if blk == nil || blk.Body() == nil {
return
}
bits, err := blk.Body().SyncAggregate()
if err != nil {
log.WithError(err).Error("Cannot get SyncAggregate")
return
}
s.Lock()
defer s.Unlock()
for validatorIdx, committeeIndices := range s.trackedSyncCommitteeIndices {
if len(committeeIndices) > 0 {
contrib := 0
for _, idx := range committeeIndices {
if bits.SyncCommitteeBits.BitAt(uint64(idx)) {
contrib++
}
}
balance, err := state.BalanceAtIndex(validatorIdx)
if err != nil {
log.Error("Could not get balance")
return
}
latestPerf := s.latestPerformance[validatorIdx]
balanceChg := int64(balance - latestPerf.balance)
latestPerf.balanceChange = balanceChg
latestPerf.balance = balance
s.latestPerformance[validatorIdx] = latestPerf
aggPerf := s.aggregatedPerformance[validatorIdx]
aggPerf.totalSyncCommitteeContributions += uint64(contrib)
s.aggregatedPerformance[validatorIdx] = aggPerf
syncCommitteeContributionCounter.WithLabelValues(
fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))
log.WithFields(logrus.Fields{
"ValidatorIndex": validatorIdx,
"ExpectedContrib": len(committeeIndices),
"Contributions": contrib,
"NewBalance": balance,
"BalanceChange": balanceChg,
}).Info("Sync committee contribution included")
}
}
}
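The contribution count above is a per-position bit test against the block's sync aggregate. A standalone sketch, under the assumption that the go-bitfield layout applies (bit i lives in byte i/8 at mask 1<<(i%8)):

package main

import "fmt"

// countContribs counts how many of a validator's assigned sync-committee
// positions have their bit set in the aggregate, as processSyncAggregate does
// via Bitvector512.BitAt.
func countContribs(bits []byte, positions []uint64) int {
	contrib := 0
	for _, p := range positions {
		if bits[p/8]&(1<<(p%8)) != 0 {
			contrib++
		}
	}
	return contrib
}

func main() {
	bits := []byte{0x31}                                   // bits 0, 4 and 5 set
	fmt.Println(countContribs(bits, []uint64{0, 1, 4, 5})) // prints 3
}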

View File

@@ -0,0 +1,59 @@
package monitor
import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestProcessSyncCommitteeContribution(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
contrib := &ethpb.SignedContributionAndProof{
Message: &ethpb.ContributionAndProof{
AggregatorIndex: 1,
},
}
s.processSyncCommitteeContribution(contrib)
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}
func TestProcessSyncAggregate(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
beaconState, _ := util.DeterministicGenesisStateAltair(t, 256)
block := &ethpb.BeaconBlockAltair{
Slot: 2,
Body: &ethpb.BeaconBlockBodyAltair{
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: bitfield.Bitvector512{
0x31, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
},
},
},
}
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
require.NoError(t, err)
s.processSyncAggregate(beaconState, wrappedBlock)
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 Contributions=2 ExpectedContrib=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}

View File

@@ -0,0 +1,311 @@
package monitor
import (
"context"
"errors"
"sort"
"sync"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
var (
// Error when event feed data is not statefeed.SyncedData.
errNotSyncedData = errors.New("event feed data is not of type *statefeed.SyncedData")
// Error when the context is closed while waiting for sync.
errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head")
)
// ValidatorLatestPerformance keeps track of the latest participation of the validator
type ValidatorLatestPerformance struct {
attestedSlot types.Slot
inclusionSlot types.Slot
timelySource bool
timelyTarget bool
timelyHead bool
balance uint64
balanceChange int64
}
// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the validator since launch
type ValidatorAggregatedPerformance struct {
startEpoch types.Epoch
startBalance uint64
totalAttestedCount uint64
totalRequestedCount uint64
totalDistance uint64
totalCorrectSource uint64
totalCorrectTarget uint64
totalCorrectHead uint64
totalProposedCount uint64
totalAggregations uint64
totalSyncCommitteeContributions uint64
totalSyncCommitteeAggregations uint64
}
// ValidatorMonitorConfig contains the list of validator indices that the
// monitor service tracks, as well as the event feed notifiers that the
// monitor needs to subscribe to.
type ValidatorMonitorConfig struct {
StateNotifier statefeed.Notifier
AttestationNotifier operation.Notifier
HeadFetcher blockchain.HeadFetcher
StateGen stategen.StateManager
}
// Service is the main structure that tracks validators and reports logs and
// metrics of their performances throughout their lifetime.
type Service struct {
config *ValidatorMonitorConfig
ctx context.Context
cancel context.CancelFunc
isLogging bool
// Locks access to TrackedValidators, latestPerformance, aggregatedPerformance,
// trackedSyncCommitteeIndices and lastSyncedEpoch.
sync.RWMutex
TrackedValidators map[types.ValidatorIndex]bool
latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance
aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance
trackedSyncCommitteeIndices map[types.ValidatorIndex][]types.CommitteeIndex
lastSyncedEpoch types.Epoch
}
// NewService sets up a new validator monitor instance when given a list of validator indices to track.
func NewService(ctx context.Context, config *ValidatorMonitorConfig, tracked []types.ValidatorIndex) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
r := &Service{
config: config,
ctx: ctx,
cancel: cancel,
TrackedValidators: make(map[types.ValidatorIndex]bool, len(tracked)),
latestPerformance: make(map[types.ValidatorIndex]ValidatorLatestPerformance),
aggregatedPerformance: make(map[types.ValidatorIndex]ValidatorAggregatedPerformance),
trackedSyncCommitteeIndices: make(map[types.ValidatorIndex][]types.CommitteeIndex),
}
for _, idx := range tracked {
r.TrackedValidators[idx] = true
}
return r, nil
}
// Start logs the tracked validator indices and spawns the routine that waits for the beacon node to sync before monitoring begins.
func (s *Service) Start() {
s.Lock()
defer s.Unlock()
tracked := make([]types.ValidatorIndex, 0, len(s.TrackedValidators))
for idx := range s.TrackedValidators {
tracked = append(tracked, idx)
}
sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })
log.WithFields(logrus.Fields{
"ValidatorIndices": tracked,
}).Info("Starting service")
s.isLogging = false
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
go s.run(stateChannel, stateSub)
}
// run waits until the beacon is synced and starts the monitoring system.
func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) {
if stateChannel == nil {
log.Error("State state is nil")
return
}
if err := s.waitForSync(stateChannel, stateSub); err != nil {
log.WithError(err).Error("Could not wait for beacon node to sync")
return
}
state, err := s.config.HeadFetcher.HeadState(s.ctx)
if err != nil {
log.WithError(err).Error("Could not get head state")
return
}
if state == nil {
log.Error("Head state is nil")
return
}
epoch := slots.ToEpoch(state.Slot())
log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
s.Lock()
s.initializePerformanceStructures(state, epoch)
s.Unlock()
s.updateSyncCommitteeTrackedVals(state)
s.Lock()
s.isLogging = true
s.Unlock()
s.monitorRoutine(stateChannel, stateSub)
}
// initializePerformanceStructures initializes the ValidatorLatestPerformance
// and ValidatorAggregatedPerformance structures for each tracked validator.
func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch types.Epoch) {
for idx := range s.TrackedValidators {
balance, err := state.BalanceAtIndex(idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
"Could not fetch starting balance, skipping aggregated logs.")
balance = 0
}
s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{
startEpoch: epoch,
startBalance: balance,
}
s.latestPerformance[idx] = ValidatorLatestPerformance{
balance: balance,
}
}
}
// Status retrieves the status of the service.
func (s *Service) Status() error {
if s.isLogging {
return nil
}
return errors.New("not running")
}
// Stop stops the service.
func (s *Service) Stop() error {
defer s.cancel()
s.isLogging = false
return nil
}
// waitForSync waits until the beacon node is synced to the latest head.
func (s *Service) waitForSync(stateChannel chan *feed.Event, stateSub event.Subscription) error {
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.Synced {
_, ok := event.Data.(*statefeed.SyncedData)
if !ok {
return errNotSyncedData
}
return nil
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return errContextClosedWhileWaiting
case err := <-stateSub.Err():
log.WithError(err).Error("Could not subscribe to state notifier")
return err
}
}
}
// monitorRoutine is the main dispatcher, it registers event channels for the
// state feed and the operation feed. It then calls the appropriate function
// when we get messages after syncing a block or processing attestations/sync
// committee contributions.
func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.Subscription) {
defer stateSub.Unsubscribe()
opChannel := make(chan *feed.Event, 1)
opSub := s.config.AttestationNotifier.OperationFeed().Subscribe(opChannel)
defer opSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.BlockProcessed {
data, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
log.Error("Event feed data is not of type *statefeed.BlockProcessedData")
} else if data.Verified {
// We only process blocks that have been verified
s.processBlock(s.ctx, data.SignedBlock)
}
}
case event := <-opChannel:
switch event.Type {
case operation.UnaggregatedAttReceived:
data, ok := event.Data.(*operation.UnAggregatedAttReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.UnAggregatedAttReceivedData")
} else {
s.processUnaggregatedAttestation(s.ctx, data.Attestation)
}
case operation.AggregatedAttReceived:
data, ok := event.Data.(*operation.AggregatedAttReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.AggregatedAttReceivedData")
} else {
s.processAggregatedAttestation(s.ctx, data.Attestation)
}
case operation.ExitReceived:
data, ok := event.Data.(*operation.ExitReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.ExitReceivedData")
} else {
s.processExit(data.Exit)
}
case operation.SyncCommitteeContributionReceived:
data, ok := event.Data.(*operation.SyncCommitteeContributionReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.SyncCommitteeContributionReceivedData")
} else {
s.processSyncCommitteeContribution(data.Contribution)
}
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Could not subscribe to state notifier")
return
}
}
}
// trackedIndex returns true if the given validator index corresponds to one of
// the validators we follow.
// It assumes the caller holds the service lock.
func (s *Service) trackedIndex(idx types.ValidatorIndex) bool {
_, ok := s.TrackedValidators[idx]
return ok
}
// updateSyncCommitteeTrackedVals updates the sync committee assignments of our
// tracked validators. It gets called when we sync a block after the Sync Period changes.
func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
s.Lock()
defer s.Unlock()
for idx := range s.TrackedValidators {
syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
"Sync committee assignments will not be reported")
delete(s.trackedSyncCommitteeIndices, idx)
} else if len(syncIdx) == 0 {
delete(s.trackedSyncCommitteeIndices, idx)
} else {
s.trackedSyncCommitteeIndices[idx] = syncIdx
}
}
s.lastSyncedEpoch = slots.ToEpoch(state.Slot())
}
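Putting the pieces together, wiring the service might look roughly like the following sketch; chainService and stateGen are assumed stand-ins for the node's blockchain service and state manager, not actual registration code:

tracked := []types.ValidatorIndex{1, 2, 12, 15}
svc, err := NewService(ctx, &ValidatorMonitorConfig{
	StateNotifier:       chainService.StateNotifier(),     // assumed notifier source
	AttestationNotifier: chainService.OperationNotifier(), // assumed
	HeadFetcher:         chainService,
	StateGen:            stateGen,
}, tracked)
if err != nil {
	log.WithError(err).Fatal("Could not create monitor service")
}
svc.Start() // non-blocking: run() waits for sync in a goroutine, then reports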

View File

@@ -0,0 +1,312 @@
package monitor
import (
"context"
"fmt"
"sync"
"testing"
"time"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func setupService(t *testing.T) *Service {
beaconDB := testDB.SetupDB(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
pubKeys := make([][]byte, 3)
pubKeys[0] = state.Validators()[0].PublicKey
pubKeys[1] = state.Validators()[1].PublicKey
pubKeys[2] = state.Validators()[2].PublicKey
currentSyncCommittee := util.ConvertToCommittee([][]byte{
pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
})
require.NoError(t, state.SetCurrentSyncCommittee(currentSyncCommittee))
chainService := &mock.ChainService{
Genesis: time.Now(),
DB: beaconDB,
State: state,
Root: []byte("hello-world"),
ValidatorsRoot: [32]byte{},
}
trackedVals := map[types.ValidatorIndex]bool{
1: true,
2: true,
12: true,
15: true,
}
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
1: {
balance: 32000000000,
},
2: {
balance: 32000000000,
},
12: {
balance: 31900000000,
},
15: {
balance: 31900000000,
},
}
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
1: {
startEpoch: 0,
startBalance: 31700000000,
totalAttestedCount: 12,
totalRequestedCount: 15,
totalDistance: 14,
totalCorrectHead: 8,
totalCorrectSource: 11,
totalCorrectTarget: 12,
totalProposedCount: 1,
totalSyncCommitteeContributions: 0,
totalSyncCommitteeAggregations: 0,
},
2: {},
12: {},
15: {},
}
trackedSyncCommitteeIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0, 1, 2, 3},
12: {4, 5},
}
return &Service{
config: &ValidatorMonitorConfig{
StateGen: stategen.New(beaconDB),
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
AttestationNotifier: chainService.OperationNotifier(),
},
ctx: context.Background(),
TrackedValidators: trackedVals,
latestPerformance: latestPerformance,
aggregatedPerformance: aggregatedPerformance,
trackedSyncCommitteeIndices: trackedSyncCommitteeIndices,
lastSyncedEpoch: 0,
}
}
func TestTrackedIndex(t *testing.T) {
s := &Service{
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
require.Equal(t, s.trackedIndex(types.ValidatorIndex(1)), true)
require.Equal(t, s.trackedIndex(types.ValidatorIndex(3)), false)
}
func TestUpdateSyncCommitteeTrackedVals(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 1024)
s.updateSyncCommitteeTrackedVals(state)
require.LogsDoNotContain(t, hook, "Sync committee assignments will not be reported")
newTrackedSyncIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
1: {1, 3, 4},
2: {2},
}
require.DeepEqual(t, s.trackedSyncCommitteeIndices, newTrackedSyncIndices)
}
func TestNewService(t *testing.T) {
config := &ValidatorMonitorConfig{}
tracked := []types.ValidatorIndex{}
ctx := context.Background()
_, err := NewService(ctx, config, tracked)
require.NoError(t, err)
}
func TestStart(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
wg := &sync.WaitGroup{}
wg.Add(1)
s.Start()
go func() {
select {
case stateEvent := <-stateChannel:
if stateEvent.Type == statefeed.Synced {
_, ok := stateEvent.Data.(*statefeed.SyncedData)
require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData")
}
case <-s.ctx.Done():
}
wg.Done()
}()
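// Feed.Send returns the number of subscribers the event was delivered to, so keep
// resending until the monitor's subscription is attached and actually receives the synced event.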
for sent := 0; sent == 0; {
sent = s.config.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
})
}
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
require.Equal(t, s.isLogging, true, "monitor is not running")
}
func TestInitializePerformanceStructures(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, err := s.config.HeadFetcher.HeadState(ctx)
require.NoError(t, err)
epoch := slots.ToEpoch(state.Slot())
s.initializePerformanceStructures(state, epoch)
require.LogsDoNotContain(t, hook, "Could not fetch starting balance")
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
1: {
balance: 32000000000,
},
2: {
balance: 32000000000,
},
12: {
balance: 32000000000,
},
15: {
balance: 32000000000,
},
}
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
1: {
startBalance: 32000000000,
},
2: {
startBalance: 32000000000,
},
12: {
startBalance: 32000000000,
},
15: {
startBalance: 32000000000,
},
}
require.DeepEqual(t, s.latestPerformance, latestPerformance)
require.DeepEqual(t, s.aggregatedPerformance, aggregatedPerformance)
}
func TestMonitorRoutine(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
s.monitorRoutine(stateChannel, stateSub)
wg.Done()
}()
genesis, keys := util.DeterministicGenesisStateAltair(t, 64)
c, err := altair.NextSyncCommittee(ctx, genesis)
require.NoError(t, err)
require.NoError(t, genesis.SetCurrentSyncCommittee(c))
genConfig := util.DefaultBlockGenConfig()
block, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
require.NoError(t, err)
root, err := block.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
stateChannel <- &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: 1,
Verified: true,
SignedBlock: wrapped,
},
}
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
require.LogsContain(t, hook, wanted1)
}
func TestWaitForSync(t *testing.T) {
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
err := s.waitForSync(stateChannel, stateSub)
require.NoError(t, err)
wg.Done()
}()
stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
}
func TestRun(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
s.run(stateChannel, stateSub)
wg.Done()
}()
stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
}

View File

@@ -26,6 +26,7 @@ go_library(
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",
"//beacon-chain/monitor:go_default_library",
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",

View File

@@ -113,9 +113,9 @@ func configureExecutionSetting(cliCtx *cli.Context) {
c.TerminalBlockHashActivationEpoch = types.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.Coinbase.Name) {
if cliCtx.IsSet(flags.FeeRecipient.Name) {
c := params.BeaconConfig()
c.Coinbase = common.HexToAddress(cliCtx.String(flags.Coinbase.Name))
c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name))
params.OverrideBeaconConfig(c)
}
}

View File

@@ -79,11 +79,11 @@ func TestConfigureExecutionSetting(t *testing.T) {
set.Uint64(flags.TerminalTotalDifficultyOverride.Name, 0, "")
set.String(flags.TerminalBlockHashOverride.Name, "", "")
set.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name, 0, "")
set.String(flags.Coinbase.Name, "", "")
set.String(flags.FeeRecipient.Name, "", "")
require.NoError(t, set.Set(flags.TerminalTotalDifficultyOverride.Name, strconv.Itoa(100)))
require.NoError(t, set.Set(flags.TerminalBlockHashOverride.Name, "0xA"))
require.NoError(t, set.Set(flags.TerminalBlockHashActivationEpochOverride.Name, strconv.Itoa(200)))
require.NoError(t, set.Set(flags.Coinbase.Name, "0xB"))
require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xB"))
cliCtx := cli.NewContext(&app, set, nil)
configureExecutionSetting(cliCtx)
@@ -91,7 +91,7 @@ func TestConfigureExecutionSetting(t *testing.T) {
assert.Equal(t, uint64(100), params.BeaconConfig().TerminalTotalDifficulty)
assert.Equal(t, common.HexToHash("0xA"), params.BeaconConfig().TerminalBlockHash)
assert.Equal(t, types.Epoch(200), params.BeaconConfig().TerminalBlockHashActivationEpoch)
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().Coinbase)
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().FeeRecipient)
}
func TestConfigureNetwork(t *testing.T) {

View File

@@ -17,6 +17,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
@@ -28,6 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
"github.com/prysmaticlabs/prysm/beacon-chain/monitor"
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
@@ -205,6 +207,10 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
if err := beacon.registerValidatorMonitorService(); err != nil {
return nil, err
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
@@ -483,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
MaxPeers: cliCtx.Uint64(cmd.P2PMaxPeers.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
@@ -854,7 +860,45 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
GenesisPath: genesisStatePath,
})
// Register the genesis state as the start-up state when running in interop mode.
// The start-up state gets reused across services.
st, err := b.db.GenesisState(b.ctx)
if err != nil {
return err
}
b.finalizedStateAtStartUp = st
return b.services.RegisterService(svc)
}
return nil
}
func (b *BeaconNode) registerValidatorMonitorService() error {
if cmd.ValidatorMonitorIndicesFlag.Value == nil {
return nil
}
cliSlice := cmd.ValidatorMonitorIndicesFlag.Value.Value()
if cliSlice == nil {
return nil
}
tracked := make([]types.ValidatorIndex, len(cliSlice))
for i := range tracked {
tracked[i] = types.ValidatorIndex(cliSlice[i])
}
var chainService *blockchain.Service
if err := b.services.FetchService(&chainService); err != nil {
return err
}
monitorConfig := &monitor.ValidatorMonitorConfig{
StateNotifier: b,
AttestationNotifier: b,
StateGen: b.stateGen,
HeadFetcher: chainService,
}
svc, err := monitor.NewService(b.ctx, monitorConfig, tracked)
if err != nil {
return err
}
return b.services.RegisterService(svc)
}
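For context, registerValidatorMonitorService is a no-op unless validator indices are supplied on the command line. A hedged sketch of the expected invocation; the literal flag name is inferred from cmd.ValidatorMonitorIndicesFlag and is an assumption, not something this diff confirms:
// beacon-chain --monitor-indices=1 --monitor-indices=2 --monitor-indices=12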

View File

@@ -36,11 +36,11 @@ func (m *PoolMock) InsertProposerSlashing(_ context.Context, _ state.ReadOnlyBea
}
// MarkIncludedAttesterSlashing --
func (_ *PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) {
func (*PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) {
panic("implement me")
}
// MarkIncludedProposerSlashing --
func (_ *PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) {
func (*PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) {
panic("implement me")
}

View File

@@ -74,7 +74,6 @@ go_library(
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_ipfs_go_ipfs_addr//:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_kr_pretty//:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",

View File

@@ -23,7 +23,7 @@ type Config struct {
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
MaxPeers uint64
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier

View File

@@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
ScorerParams: &scorers.Config{},
}),
host: mockp2p.NewTestP2P(t).BHost,
cfg: &Config{MaxPeers: uint(limit)},
cfg: &Config{MaxPeers: uint64(limit)},
}
var err error
s.addrFilter, err = configureFilter(&Config{})

View File

@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
iaddr "github.com/ipfs/go-ipfs-addr"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
@@ -454,11 +453,7 @@ func peersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
}
func multiAddrFromString(address string) (ma.Multiaddr, error) {
addr, err := iaddr.ParseString(address)
if err != nil {
return nil, err
}
return addr.Multiaddr(), nil
return ma.NewMultiaddr(address)
}
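With the ipfs-addr dependency dropped, the helper is now a thin wrapper over go-multiaddr. A minimal sketch of the new parse path, assuming ma is github.com/multiformats/go-multiaddr and using a placeholder address:
func exampleMultiAddr() (ma.Multiaddr, error) {
	// Malformed strings are now rejected by go-multiaddr itself rather than by ipfs-addr.
	return ma.NewMultiaddr("/ip4/192.0.2.1/tcp/13000")
}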
func udpVersionFromIP(ipAddr net.IP) string {

View File

@@ -63,7 +63,7 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) {
assert.Equal(t, int(params.BeaconConfig().MinGenesisActiveValidatorCount)+100, int(vals), "mainnet genesis active count isn't accurate")
}
func TestLoggingParameters(t *testing.T) {
func TestLoggingParameters(_ *testing.T) {
logGossipParameters("testing", nil)
logGossipParameters("testing", &pubsub.TopicScoreParams{})
// Test out actual gossip parameters.

View File

@@ -77,7 +77,7 @@ type PeerManager interface {
ENR() *enr.Record
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshENR()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold uint64) (bool, error)
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

View File

@@ -85,7 +85,7 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
if scorer.config.StalePeerRefreshInterval == 0 {
scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval
}
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
scorer.maxScore = 1.0
if batchSize > 0 {
totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize)
@@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 {
if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval {
return s.maxScore
}
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
if batchSize > 0 {
processedBatches := float64(peerData.ProcessedBlocks / batchSize)
score += processedBatches * s.config.ProcessedBatchWeight

View File

@@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
tests := []struct {
name string
update func(scorer *scorers.BlockProviderScorer)
@@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
},
})
scorer := peerStatuses.Scorers().BlockProviderScorer()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
r := rand.NewDeterministicGenerator()
reverse := func(pids []peer.ID) []peer.ID {
@@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
}
func TestScorers_BlockProvider_Sorted(t *testing.T) {
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
tests := []struct {
name string
update func(s *scorers.BlockProviderScorer)
@@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
func TestScorers_BlockProvider_MaxScore(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
tests := []struct {
name string
@@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) {
func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]"
tests := []struct {

View File

@@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
t.Run("default config", func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
@@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 {
scores := make(map[string]float64, len(pids))

View File

@@ -696,7 +696,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (typ
// BestNonFinalized returns the highest known epoch that is higher than ours
// and shared by at least minPeers peers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
func (p *Status) BestNonFinalized(minPeers uint64, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
connected := p.Connected()
epochVotes := make(map[types.Epoch]uint64)
pidEpoch := make(map[peer.ID]types.Epoch, len(connected))

View File

@@ -33,12 +33,11 @@ const syncLockerVal = 100
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method blocks until the required number of
// peers is found; it exits early only if the context expires.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
index uint64, threshold int) (bool, error) {
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error) {
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()
span.AddAttributes(trace.Int64Attribute("index", int64(index)))
span.AddAttributes(trace.Int64Attribute("index", int64(subIndex)))
if s.dv5Listener == nil {
// return if discovery isn't set
@@ -49,14 +48,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
iterator := s.dv5Listener.RandomNodes()
switch {
case strings.Contains(topic, GossipAttestationMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(subIndex))
case strings.Contains(topic, GossipSyncCommitteeMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(subIndex))
default:
return false, errors.New("no subnet exists for provided topic")
}
currNum := len(s.pubsub.ListPeers(topic))
currNum := uint64(len(s.pubsub.ListPeers(topic)))
wg := new(sync.WaitGroup)
for {
if err := ctx.Err(); err != nil {
@@ -81,7 +80,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
}
// Wait for all dials to be completed.
wg.Wait()
currNum = len(s.pubsub.ListPeers(topic))
currNum = uint64(len(s.pubsub.ListPeers(topic)))
}
return true, nil
}
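Since the loop above only returns early on context expiry, callers are expected to bound the search with a deadline. A hedged usage sketch, assuming the usual context and time imports; the topic, subnet index, and threshold are placeholders:
func waitForSubnetPeers(ctx context.Context, s *Service, topic string) (bool, error) {
	// Give the blocking peer search a hard deadline; without one it can spin until enough peers appear.
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	return s.FindPeersWithSubnet(ctx, topic, 1 /* subnet index */, 5 /* peer threshold */)
}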

View File

@@ -61,7 +61,7 @@ func (p *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
}
// FindPeersWithSubnet mocks the p2p func.
func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
return false, nil
}

View File

@@ -51,7 +51,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
func (m MockPeerManager) RefreshENR() {}
// FindPeersWithSubnet .
func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
return true, nil
}

View File

@@ -349,7 +349,7 @@ func (p *TestP2P) Peers() *peers.Status {
}
// FindPeersWithSubnet mocks the p2p func.
func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
return false, nil
}

View File

@@ -437,9 +437,9 @@ func TestLogTillGenesis_OK(t *testing.T) {
func TestInitDepositCache_OK(t *testing.T) {
ctrs := []*protodb.DepositContainer{
{Index: 0, Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
{Index: 1, Eth1BlockHeight: 4, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}},
{Index: 2, Eth1BlockHeight: 6, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}}},
{Index: 0, Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}, Data: &ethpb.Deposit_Data{PublicKey: []byte{}}}},
{Index: 1, Eth1BlockHeight: 4, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("B")}, Data: &ethpb.Deposit_Data{PublicKey: []byte{}}}},
{Index: 2, Eth1BlockHeight: 6, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}, Data: &ethpb.Deposit_Data{PublicKey: []byte{}}}},
}
gs, _ := util.DeterministicGenesisState(t, 1)
beaconDB := dbutil.SetupDB(t)

View File

@@ -100,7 +100,7 @@ func TestGetSpec(t *testing.T) {
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
config.TerminalTotalDifficulty = 73
config.Coinbase = common.HexToAddress("Coinbase")
config.FeeRecipient = common.HexToAddress("FeeRecipient")
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -329,8 +329,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v))
case "TERMINAL_TOTAL_DIFFICULTY":
assert.Equal(t, "73", v)
case "COINBASE":
assert.Equal(t, common.HexToAddress("Coinbase"), v)
case "FeeRecipient":
assert.Equal(t, common.HexToAddress("FeeRecipient"), v)
default:
t.Errorf("Incorrect key: %s", k)
}

View File

@@ -16,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/proto/eth/v2"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
ethpbalpha "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -27,6 +28,40 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
ctx, span := trace.StartSpan(ctx, "beacon.ListSyncCommittees")
defer span.End()
currentSlot := bs.GenesisTimeFetcher.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
currentPeriodStartEpoch, err := slots.SyncCommitteePeriodStartEpoch(currentEpoch)
if err != nil {
return nil, status.Errorf(
codes.Internal,
"Could not calculate start period for slot %d: %v",
currentSlot,
err,
)
}
var reqPeriodStartEpoch types.Epoch
if req.Epoch == nil {
reqPeriodStartEpoch = currentPeriodStartEpoch
} else {
reqPeriodStartEpoch, err = slots.SyncCommitteePeriodStartEpoch(*req.Epoch)
if err != nil {
return nil, status.Errorf(
codes.Internal,
"Could not calculate start period for epoch %d: %v",
*req.Epoch,
err,
)
}
if reqPeriodStartEpoch > currentPeriodStartEpoch+params.BeaconConfig().EpochsPerSyncCommitteePeriod {
return nil, status.Errorf(
codes.Internal,
"Could not fetch sync committee too far in the future. Requested epoch: %d, current epoch: %d",
*req.Epoch, currentEpoch,
)
}
}
st, err := bs.stateFromRequest(ctx, &stateRequest{
epoch: req.Epoch,
stateId: req.StateId,
@@ -35,10 +70,20 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
return nil, status.Errorf(codes.Internal, "Could not fetch beacon state using request: %v", err)
}
// Get the current sync committee and sync committee indices from the state.
committeeIndices, committee, err := currentCommitteeIndicesFromState(st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get sync committee indices from state: %v", err)
var committeeIndices []types.ValidatorIndex
var committee *ethpbalpha.SyncCommittee
if reqPeriodStartEpoch > currentPeriodStartEpoch {
// Get the next sync committee and sync committee indices from the state.
committeeIndices, committee, err = nextCommitteeIndicesFromState(st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get next sync committee indices: %v", err)
}
} else {
// Get the current sync committee and sync committee indices from the state.
committeeIndices, committee, err = currentCommitteeIndicesFromState(st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get current sync committee indices: %v", err)
}
}
subcommittees, err := extractSyncSubcommittees(st, committee)
if err != nil {
@@ -75,6 +120,28 @@ func currentCommitteeIndicesFromState(st state.BeaconState) ([]types.ValidatorIn
return committeeIndices, committee, nil
}
func nextCommitteeIndicesFromState(st state.BeaconState) ([]types.ValidatorIndex, *ethpbalpha.SyncCommittee, error) {
committee, err := st.NextSyncCommittee()
if err != nil {
return nil, nil, fmt.Errorf(
"could not get sync committee: %v", err,
)
}
committeeIndices := make([]types.ValidatorIndex, len(committee.Pubkeys))
for i, key := range committee.Pubkeys {
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
if !ok {
return nil, nil, fmt.Errorf(
"validator index not found for pubkey %#x",
bytesutil.Trunc(key),
)
}
committeeIndices[i] = index
}
return committeeIndices, committee, nil
}
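For intuition on the period bounds enforced above, here is a self-contained sketch of the same arithmetic; the 256-epoch period length is mainnet's EPOCHS_PER_SYNC_COMMITTEE_PERIOD, hard-coded purely for illustration:
package main
import "fmt"
// periodStartEpoch mirrors slots.SyncCommitteePeriodStartEpoch under the assumed constant.
const epochsPerPeriod = 256
func periodStartEpoch(epoch uint64) uint64 {
	return epoch - epoch%epochsPerPeriod
}
func main() {
	current := periodStartEpoch(300)                              // 256: the current period
	fmt.Println(periodStartEpoch(600) <= current+epochsPerPeriod) // true: 512 is the next period, served from NextSyncCommittee
	fmt.Println(periodStartEpoch(800) <= current+epochsPerPeriod) // false: 768 is two periods ahead and is rejected
}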
func extractSyncSubcommittees(st state.BeaconState, committee *ethpbalpha.SyncCommittee) ([]*eth.SyncSubcommitteeValidators, error) {
subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount
subcommittees := make([]*ethpbv2.SyncSubcommitteeValidators, subcommitteeCount)

View File

@@ -4,6 +4,7 @@ import (
"context"
"strings"
"testing"
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
types "github.com/prysmaticlabs/eth2-types"
@@ -55,6 +56,37 @@ func Test_currentCommitteeIndicesFromState(t *testing.T) {
})
}
func Test_nextCommitteeIndicesFromState(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
vals := st.Validators()
wantedCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
wantedIndices := make([]types.ValidatorIndex, len(wantedCommittee))
for i := 0; i < len(wantedCommittee); i++ {
wantedIndices[i] = types.ValidatorIndex(i)
wantedCommittee[i] = vals[i].PublicKey
}
require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
Pubkeys: wantedCommittee,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}))
t.Run("OK", func(t *testing.T) {
indices, committee, err := nextCommitteeIndicesFromState(st)
require.NoError(t, err)
require.DeepEqual(t, wantedIndices, indices)
require.DeepEqual(t, wantedCommittee, committee.Pubkeys)
})
t.Run("validator in committee not found in state", func(t *testing.T) {
wantedCommittee[0] = bytesutil.PadTo([]byte("fakepubkey"), 48)
require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
Pubkeys: wantedCommittee,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}))
_, _, err := nextCommitteeIndicesFromState(st)
require.ErrorContains(t, "index not found for pubkey", err)
})
}
func Test_extractSyncSubcommittees(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
vals := st.Validators()
@@ -123,6 +155,9 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, err)
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
@@ -150,6 +185,57 @@ func TestListSyncCommittees(t *testing.T) {
}
}
func TestListSyncCommitteesFuture(t *testing.T) {
ctx := context.Background()
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
vals := st.Validators()
for i := 0; i < len(syncCommittee); i++ {
syncCommittee[i] = vals[i].PublicKey
}
require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
Pubkeys: syncCommittee,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}))
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
}
req := &ethpbv2.StateSyncCommitteesRequest{}
epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod
req.Epoch = &epoch
_, err := s.ListSyncCommittees(ctx, req)
require.ErrorContains(t, "Could not fetch sync committee too far in the future", err)
epoch = 2*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
resp, err := s.ListSyncCommittees(ctx, req)
require.NoError(t, err)
require.NotNil(t, resp.Data)
committeeVals := resp.Data.Validators
require.NotNil(t, committeeVals)
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(committeeVals)), "incorrect committee size")
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
assert.Equal(t, types.ValidatorIndex(i), committeeVals[i])
}
require.NotNil(t, resp.Data.ValidatorAggregates)
assert.Equal(t, params.BeaconConfig().SyncCommitteeSubnetCount, uint64(len(resp.Data.ValidatorAggregates)))
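// Each aggregate is one subcommittee holding SyncCommitteeSize/SyncCommitteeSubnetCount consecutive validator indices.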
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
vStartIndex := types.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount * i)
vEndIndex := types.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount*(i+1) - 1)
j := 0
for vIndex := vStartIndex; vIndex <= vEndIndex; vIndex++ {
assert.Equal(t, vIndex, resp.Data.ValidatorAggregates[i].Validators[j])
j++
}
}
}
func TestSubmitPoolSyncCommitteeSignatures(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
st, _ := util.DeterministicGenesisStateAltair(t, 10)

View File

@@ -48,7 +48,7 @@ func (bs *Server) ListBlocks(
switch q := req.QueryFilter.(type) {
case *ethpb.ListBlocksRequest_Epoch:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForEpoch(ctx, req, q)
if err != nil {
return nil, err
}
@@ -63,7 +63,7 @@ func (bs *Server) ListBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Root:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForRoot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -79,7 +79,7 @@ func (bs *Server) ListBlocks(
}, nil
case *ethpb.ListBlocksRequest_Slot:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForSlot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -94,7 +94,7 @@ func (bs *Server) ListBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Genesis:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForGenesis(ctx, req, q)
if err != nil {
return nil, err
}
@@ -128,7 +128,7 @@ func (bs *Server) ListBeaconBlocks(
switch q := req.QueryFilter.(type) {
case *ethpb.ListBlocksRequest_Epoch:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForEpoch(ctx, req, q)
if err != nil {
return nil, err
}
@@ -142,7 +142,7 @@ func (bs *Server) ListBeaconBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Root:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForRoot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -157,7 +157,7 @@ func (bs *Server) ListBeaconBlocks(
}, nil
case *ethpb.ListBlocksRequest_Slot:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForSlot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -171,7 +171,7 @@ func (bs *Server) ListBeaconBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Genesis:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForGenesis(ctx, req, q)
if err != nil {
return nil, err
}
@@ -224,8 +224,8 @@ func convertToBlockContainer(blk block.SignedBeaconBlock, root [32]byte, isCanon
return ctr, nil
}
// ListBlocksForEpoch retrieves all blocks for the provided epoch.
func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]blockContainer, int, string, error) {
// listBlocksForEpoch retrieves all blocks for the provided epoch.
func (bs *Server) listBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]blockContainer, int, string, error) {
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not get blocks: %v", err)
@@ -262,8 +262,8 @@ func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksR
return containers, numBlks, nextPageToken, nil
}
// ListBlocksForRoot retrieves the block for the provided root.
func (bs *Server) ListBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]blockContainer, int, string, error) {
// listBlocksForRoot retrieves the block for the provided root.
func (bs *Server) listBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]blockContainer, int, string, error) {
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(q.Root))
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve block: %v", err)
@@ -286,8 +286,8 @@ func (bs *Server) ListBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequ
}}, 1, strconv.Itoa(0), nil
}
// ListBlocksForSlot retrieves all blocks for the provided slot.
func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]blockContainer, int, string, error) {
// listBlocksForSlot retrieves all blocks for the provided slot.
func (bs *Server) listBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]blockContainer, int, string, error) {
hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err)
@@ -323,8 +323,8 @@ func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRe
return containers, numBlks, nextPageToken, nil
}
// ListBlocksForGenesis retrieves the genesis block.
func (bs *Server) ListBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksRequest, _ *ethpb.ListBlocksRequest_Genesis) ([]blockContainer, int, string, error) {
// listBlocksForGenesis retrieves the genesis block.
func (bs *Server) listBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksRequest, _ *ethpb.ListBlocksRequest_Genesis) ([]blockContainer, int, string, error) {
genBlk, err := bs.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)

View File

@@ -4,7 +4,6 @@ import (
"context"
"sort"
"strconv"
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/api/pagination"
@@ -27,9 +26,6 @@ import (
"google.golang.org/protobuf/types/known/emptypb"
)
// BalancesTimeout for gRPC requests to ListValidatorBalances.
const BalancesTimeout = time.Second * 30
// ListValidatorBalances retrieves the validator balances for a given set of public keys.
// An optional Epoch parameter is provided to request historical validator balances from
// archived, persistent data.
@@ -37,8 +33,6 @@ func (bs *Server) ListValidatorBalances(
ctx context.Context,
req *ethpb.ListValidatorBalancesRequest,
) (*ethpb.ValidatorBalances, error) {
ctx, cancel := context.WithTimeout(ctx, BalancesTimeout)
defer cancel()
if int(req.PageSize) > cmd.Get().MaxRPCPageSize {
return nil, status.Errorf(codes.InvalidArgument, "Requested page size %d can not be greater than max size %d",
req.PageSize, cmd.Get().MaxRPCPageSize)

View File

@@ -94,7 +94,12 @@ func (vs *Server) deposits(
ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
defer span.End()
if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
if vs.MockEth1Votes {
return []*ethpb.Deposit{}, nil
}
if !vs.Eth1InfoFetcher.IsConnectedToETH1() {
log.Warn("not connected to eth1 node, skip pending deposit insertion")
return []*ethpb.Deposit{}, nil
}
// Need to fetch if the deposits up to the state's latest eth1 data matches
@@ -112,6 +117,7 @@ func (vs *Server) deposits(
// If there are no pending deposits, exit early.
allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
if len(allPendingContainers) == 0 {
log.Debug("no pending deposits for inclusion in block")
return []*ethpb.Deposit{}, nil
}
@@ -127,21 +133,21 @@ func (vs *Server) deposits(
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
pendingDeps = append(pendingDeps, dep)
}
// Don't try to pack more than the max allowed in a block
if uint64(len(pendingDeps)) == params.BeaconConfig().MaxDeposits {
break
}
}
for i := range pendingDeps {
// Don't construct a Merkle proof if the number of deposits exceeds the max allowed in a block.
if uint64(i) == params.BeaconConfig().MaxDeposits {
break
}
pendingDeps[i].Deposit, err = constructMerkleProof(depositTrie, int(pendingDeps[i].Index), pendingDeps[i].Deposit)
if err != nil {
return nil, err
}
}
// Collect the deposits for the block; pendingDeps was already capped above at the max deposits allowed in a block.
var pendingDeposits []*ethpb.Deposit
for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
for i := uint64(0); i < uint64(len(pendingDeps)); i++ {
pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
}
return pendingDeposits, nil

View File

@@ -24,6 +24,8 @@ import (
var errPubkeyDoesNotExist = errors.New("pubkey does not exist")
var nonExistentIndex = types.ValidatorIndex(^uint64(0))
const numStatesToCheck = 2
// ValidatorStatus returns the validator status of the current epoch.
// The status response can be one of the following:
// DEPOSITED - validator's deposit has been recognized by the Ethereum 1 chain but not yet by the beacon chain.
@@ -106,9 +108,17 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
if err != nil {
return nil, status.Error(codes.Internal, "Could not get head state")
}
currEpoch := slots.ToEpoch(headState.Slot())
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
// If all provided keys are recent we skip this check
// as we are unable to effectively determine if a doppelganger
// is active.
if isRecent {
return resp, nil
}
// We walk back from the current head state to the state at the beginning of the previous 2 epochs.
// Where S_i , i := 0,1,2. i = 0 would signify the current head state in this epoch.
currEpoch := slots.ToEpoch(headState.Slot())
previousEpoch, err := currEpoch.SafeSub(1)
if err != nil {
previousEpoch = currEpoch
@@ -125,18 +135,18 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
if err != nil {
return nil, status.Error(codes.Internal, "Could not get older state")
}
resp := &ethpb.DoppelGangerResponse{
resp = &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
// If the validator's last recorded epoch was
// less than or equal to 2 epochs ago, this method will not
// less than or equal to `numStatesToCheck` epochs ago, this method will not
// be able to catch duplicates. This is due to how attestation
// inclusion works, where an attestation for the current epoch
// can be included in either the current or the next epoch. Depending
// on the epoch of inclusion, the balance change will be
// reflected in the following epoch.
if v.Epoch+2 >= currEpoch {
if v.Epoch+numStatesToCheck >= currEpoch {
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
@@ -322,6 +332,44 @@ func (vs *Server) validatorStatus(
}
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
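// Fetch the state as of the epoch's final slot, then advance it one slot so the
// end-of-epoch transition (including balance updates) is applied.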
endSlot, err := slots.EpochEnd(epoch)
if err != nil {
return nil, err
}
retState, err := vs.StateGen.StateBySlot(ctx, endSlot)
if err != nil {
return nil, err
}
return transition.ProcessSlots(ctx, retState, retState.Slot()+1)
}
func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequest) (bool, *ethpb.DoppelGangerResponse) {
validatorsAreRecent := true
resp := &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
// Due to how balances are reflected for individual
// validators, we can only effectively determine if a
// validator voted or not if we are able to look
// back more than `numStatesToCheck` epochs into the past.
if v.Epoch+numStatesToCheck < headEpoch {
validatorsAreRecent = false
// Zero out response if we encounter non-recent validators to
// guard against potential misuse.
resp.Responses = []*ethpb.DoppelGangerResponse_ValidatorResponse{}
break
}
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
DuplicateExists: false,
})
}
return validatorsAreRecent, resp
}
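To illustrate the cutoff, a short sketch assuming numStatesToCheck = 2 as defined above:
// With headEpoch = 10:
//   a validator last active at epoch 8 is recent    (8+2 >= 10) and is skipped,
//   a validator last active at epoch 7 is checkable (7+2 <  10) via the balance comparisons.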
func statusForPubKey(headState state.ReadOnlyBeaconState, pubKey []byte) (ethpb.ValidatorStatus, types.ValidatorIndex, error) {
if headState == nil || headState.IsNil() {
return ethpb.ValidatorStatus_UNKNOWN_STATUS, 0, errors.New("head state does not exist")
@@ -371,15 +419,3 @@ func depositStatus(depositOrBalance uint64) ethpb.ValidatorStatus {
}
return ethpb.ValidatorStatus_DEPOSITED
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
endSlot, err := slots.EpochEnd(epoch)
if err != nil {
return nil, err
}
retState, err := vs.StateGen.StateBySlot(ctx, endSlot)
if err != nil {
return nil, err
}
return transition.ProcessSlots(ctx, retState, retState.Slot()+1)
}

View File

@@ -1170,25 +1170,10 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
mockGen := stategen.NewMockService()
hs, ps, os, keys := createStateSetup(t, 4, mockGen)
// Previous Epoch State
for i := 10; i < 15; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 100 gwei, to mock an active validator
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-1000000000))
}
// Older Epoch State
for i := 10; i < 15; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 200 gwei, to mock an active validator
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-2000000000))
}
hs, _, _, keys := createStateSetup(t, 4, mockGen)
vs := &Server{
StateGen: mockGen,
StateGen: nil,
HeadFetcher: &mockChain.ChainService{
State: hs,
},

View File

@@ -382,7 +382,7 @@ func (s *Service) logNewClientConnection(ctx context.Context) {
if !s.connectedRPCClients[clientInfo.Addr] {
log.WithFields(logrus.Fields{
"addr": clientInfo.Addr.String(),
}).Infof("NewService gRPC client connected to beacon node")
}).Infof("gRPC client connected to beacon node")
s.connectedRPCClients[clientInfo.Addr] = true
}
}

View File

@@ -3,8 +3,15 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
testonly = True,
srcs = ["mock_state_fetcher.go"],
srcs = [
"mock_genesis_timefetcher.go",
"mock_state_fetcher.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/testutil",
visibility = ["//beacon-chain:__subpackages__"],
deps = ["//beacon-chain/state:go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],
)

View File

@@ -0,0 +1,21 @@
package testutil
import (
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
)
// MockGenesisTimeFetcher is a fake implementation of the blockchain.TimeFetcher
type MockGenesisTimeFetcher struct {
Genesis time.Time
}
func (m *MockGenesisTimeFetcher) GenesisTime() time.Time {
return m.Genesis
}
func (m *MockGenesisTimeFetcher) CurrentSlot() types.Slot {
return types.Slot(uint64(time.Now().Unix()-m.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
}
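A quick sanity check of the mock's arithmetic, assuming mainnet's 12-second SecondsPerSlot:
func exampleCurrentSlot() types.Slot {
	// 384 elapsed seconds at 12 seconds per slot yields slot 32.
	m := &MockGenesisTimeFetcher{Genesis: time.Now().Add(-384 * time.Second)}
	return m.CurrentSlot()
}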

View File

@@ -28,7 +28,7 @@ func (s *MockSlashingChecker) HighestAttestations(
return atts, nil
}
func (s *MockSlashingChecker) IsSlashableBlock(ctx context.Context, proposal *ethpb.SignedBeaconBlockHeader) (*ethpb.ProposerSlashing, error) {
func (s *MockSlashingChecker) IsSlashableBlock(_ context.Context, _ *ethpb.SignedBeaconBlockHeader) (*ethpb.ProposerSlashing, error) {
if s.ProposerSlashingFound {
return &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
@@ -56,7 +56,7 @@ func (s *MockSlashingChecker) IsSlashableBlock(ctx context.Context, proposal *et
return nil, nil
}
func (s *MockSlashingChecker) IsSlashableAttestation(ctx context.Context, attestation *ethpb.IndexedAttestation) ([]*ethpb.AttesterSlashing, error) {
func (s *MockSlashingChecker) IsSlashableAttestation(_ context.Context, _ *ethpb.IndexedAttestation) ([]*ethpb.AttesterSlashing, error) {
if s.AttesterSlashingFound {
return []*ethpb.AttesterSlashing{
{

View File

@@ -12,6 +12,8 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -26,8 +28,8 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -18,6 +18,7 @@ type FieldTrie struct {
field types.FieldIndex
dataType types.DataType
length uint64
numOfElems int
}
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
@@ -26,18 +27,19 @@ type FieldTrie struct {
func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) (*FieldTrie, error) {
if elements == nil {
return &FieldTrie{
field: field,
dataType: dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
field: field,
dataType: dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: 0,
}, nil
}
fieldRoots, err := fieldConverters(field, []uint64{}, elements, true)
if err != nil {
return nil, err
}
if err := validateElements(field, elements, length); err != nil {
if err := validateElements(field, dataType, elements, length); err != nil {
return nil, err
}
switch dataType {
@@ -53,8 +55,9 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: reflect.ValueOf(elements).Len(),
}, nil
case types.CompositeArray:
case types.CompositeArray, types.CompressedArray:
return &FieldTrie{
fieldLayers: stateutil.ReturnTrieLayerVariable(fieldRoots, length),
field: field,
@@ -62,6 +65,7 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: reflect.ValueOf(elements).Len(),
}, nil
default:
return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(dataType).Name())
@@ -92,13 +96,40 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.ValueOf(elements).Len()
return fieldRoot, nil
case types.CompositeArray:
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.ValueOf(elements).Len()
return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
case types.CompressedArray:
numOfElems, err := f.field.ElemsInChunk()
if err != nil {
return [32]byte{}, err
}
// We deduplicate the chunk indices here in order to prevent
// duplicate insertions into the trie.
newIndices := []uint64{}
indexExists := make(map[uint64]bool)
newRoots := make([][32]byte, 0, len(fieldRoots)/int(numOfElems))
for i, idx := range indices {
startIdx := idx / numOfElems
if indexExists[startIdx] {
continue
}
newIndices = append(newIndices, startIdx)
indexExists[startIdx] = true
newRoots = append(newRoots, fieldRoots[i])
}
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(newRoots, newIndices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.ValueOf(elements).Len()
return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
}
@@ -109,11 +140,12 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
func (f *FieldTrie) CopyTrie() *FieldTrie {
if f.fieldLayers == nil {
return &FieldTrie{
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
}
}
dstFieldTrie := make([][]*[32]byte, len(f.fieldLayers))
@@ -128,6 +160,7 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
}
}
@@ -139,6 +172,9 @@ func (f *FieldTrie) TrieRoot() ([32]byte, error) {
case types.CompositeArray:
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
return stateutil.AddInMixin(trieRoot, uint64(len(f.fieldLayers[0])))
case types.CompressedArray:
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
return stateutil.AddInMixin(trieRoot, uint64(f.numOfElems))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())
}
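A hedged sketch of the chunk-index deduplication performed in the CompressedArray branch above, assuming 4 balances per chunk (the value types.Balances.ElemsInChunk() yields for 8-byte balances in 32-byte chunks):
func dedupChunkIndices(indices []uint64) []uint64 {
	const elemsPerChunk = 4
	seen := make(map[uint64]bool)
	var chunks []uint64
	for _, idx := range indices {
		c := idx / elemsPerChunk
		if !seen[c] {
			seen[c] = true
			chunks = append(chunks, c)
		}
	}
	// dedupChunkIndices([]uint64{4, 5, 6, 13}) == [1, 3]: balances 4-6 share chunk 1, balance 13 lives in chunk 3.
	return chunks
}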

View File

@@ -1,6 +1,7 @@
package fieldtrie
import (
"encoding/binary"
"fmt"
"reflect"
@@ -8,20 +9,37 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
func (f *FieldTrie) validateIndices(idxs []uint64) error {
length := f.length
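// Compressed fields pack several elements into each 32-byte chunk, so the valid
// index space is the trie length scaled up by ElemsInChunk.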
if f.dataType == types.CompressedArray {
comLength, err := f.field.ElemsInChunk()
if err != nil {
return err
}
length *= comLength
}
for _, idx := range idxs {
if idx >= f.length {
return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, f.length)
if idx >= length {
return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, length)
}
}
return nil
}
func validateElements(field types.FieldIndex, elements interface{}, length uint64) error {
func validateElements(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) error {
if dataType == types.CompressedArray {
comLength, err := field.ElemsInChunk()
if err != nil {
return err
}
length *= comLength
}
val := reflect.ValueOf(elements)
if val.Len() > int(length) {
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length)
@@ -38,21 +56,21 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([][]byte{}).Name(), reflect.TypeOf(elements).Name())
}
return stateutil.HandleByteArrays(val, indices, convertAll)
return handleByteArrays(val, indices, convertAll)
case types.Eth1DataVotes:
val, ok := elements.([]*ethpb.Eth1Data)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
}
return HandleEth1DataSlice(val, indices, convertAll)
return handleEth1DataSlice(val, indices, convertAll)
case types.Validators:
val, ok := elements.([]*ethpb.Validator)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
}
return stateutil.HandleValidatorSlice(val, indices, convertAll)
return handleValidatorSlice(val, indices, convertAll)
case types.PreviousEpochAttestations, types.CurrentEpochAttestations:
val, ok := elements.([]*ethpb.PendingAttestation)
if !ok {
@@ -60,13 +78,87 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
}
return handlePendingAttestation(val, indices, convertAll)
case types.Balances:
val, ok := elements.([]uint64)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name())
}
return handleBalanceSlice(val, indices, convertAll)
default:
return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name())
}
}
// HandleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
func HandleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
// handleByteArrays converts the given byte arrays into a slice of 32-byte roots.
func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
}
roots := make([][32]byte, 0, length)
rootCreator := func(input []byte) {
newRoot := bytesutil.ToBytes32(input)
roots = append(roots, newRoot)
}
if convertAll {
for i := range val {
rootCreator(val[i])
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {
return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val))
}
rootCreator(val[idx])
}
}
return roots, nil
}
// handleValidatorSlice hashes the given validators into a slice of 32-byte roots.
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
}
roots := make([][32]byte, 0, length)
hasher := hash.CustomSHA256Hasher()
rootCreator := func(input *ethpb.Validator) error {
newRoot, err := stateutil.ValidatorRootWithHasher(hasher, input)
if err != nil {
return err
}
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreator(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
if len(val) > 0 {
for _, idx := range indices {
if idx > uint64(len(val))-1 {
return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val))
}
err := rootCreator(val[idx])
if err != nil {
return nil, err
}
}
}
return roots, nil
}
// handleEth1DataSlice processes a list of eth1data and indices into the appropriate roots.
func handleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
if convertAll {
length = len(val)
@@ -141,3 +233,48 @@ func handlePendingAttestation(val []*ethpb.PendingAttestation, indices []uint64,
}
return roots, nil
}
func handleBalanceSlice(val []uint64, indices []uint64, convertAll bool) ([][32]byte, error) {
if convertAll {
balancesMarshaling := make([][]byte, 0)
for _, b := range val {
balanceBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(balanceBuf, b)
balancesMarshaling = append(balancesMarshaling, balanceBuf)
}
balancesChunks, err := ssz.PackByChunk(balancesMarshaling)
if err != nil {
return [][32]byte{}, errors.Wrap(err, "could not pack balances into chunks")
}
return balancesChunks, nil
}
if len(val) > 0 {
numOfElems, err := types.Balances.ElemsInChunk()
if err != nil {
return nil, err
}
roots := [][32]byte{}
for _, idx := range indices {
// We split the indices into their relevant chunk groups: balances
// are packed four values to one 32-byte chunk.
startIdx := idx / numOfElems
startGroup := startIdx * numOfElems
chunk := [32]byte{}
sizeOfElem := len(chunk) / int(numOfElems)
for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
wantedVal := uint64(0)
// Balances are added to chunks in sets of 4. If a set extends past the end
// of the array, the remainder of the chunk must be zeroed out. For example,
// with 41 balances, 41 % 4 = 1, so the final chunk has 3 missing values that
// are filled in with zeroes.
if int(j) < len(val) {
wantedVal = val[j]
}
binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal)
}
roots = append(roots, chunk)
}
return roots, nil
}
return [][32]byte{}, nil
}
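To make the four-balances-per-chunk layout concrete, this standalone sketch packs five balances into two 32-byte chunks the same way the convertAll path does; the values are arbitrary and mirror the test in a later file:
package main
import (
	"encoding/binary"
	"fmt"
)
func main() {
	balances := []uint64{5, 2929, 34, 1291, 354305}
	// Four little-endian uint64s fill one 32-byte chunk; the final chunk is zero-padded.
	chunks := make([][32]byte, (len(balances)+3)/4)
	for i, b := range balances {
		binary.LittleEndian.PutUint64(chunks[i/4][(i%4)*8:], b)
	}
	fmt.Printf("%x\n%x\n", chunks[0], chunks[1])
}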

View File

@@ -5,8 +5,8 @@ import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	"github.com/prysmaticlabs/prysm/config/params"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
@@ -20,7 +20,7 @@ func TestFieldTrie_NewTrie(t *testing.T) {
	// 5 represents the enum value of state roots
	trie, err := fieldtrie.NewFieldTrie(5, stateTypes.BasicArray, newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
	require.NoError(t, err)
-	root, err := v1.RootsArrayHashTreeRoot(newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
+	root, err := stateutil.RootsArrayHashTreeRoot(newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
	require.NoError(t, err)
	newRoot, err := trie.TrieRoot()
	require.NoError(t, err)
@@ -48,7 +48,7 @@ func TestFieldTrie_RecomputeTrie(t *testing.T) {
	require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[0]), changedVals[0]))
	require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[1]), changedVals[1]))
-	expectedRoot, err := v1.ValidatorRegistryRoot(newState.Validators())
+	expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators())
	require.NoError(t, err)
	root, err := trie.RecomputeTrie(changedIdx, newState.Validators())
	require.NoError(t, err)


@@ -1,8 +1,13 @@
package fieldtrie

import (
	"encoding/binary"
	"sync"
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
	"github.com/prysmaticlabs/prysm/config/params"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
)
@@ -17,7 +22,64 @@ func Test_handlePendingAttestation_OutOfRange(t *testing.T) {
func Test_handleEth1DataSlice_OutOfRange(t *testing.T) {
	items := make([]*ethpb.Eth1Data, 1)
	indices := []uint64{3}
-	_, err := HandleEth1DataSlice(items, indices, false)
+	_, err := handleEth1DataSlice(items, indices, false)
	assert.ErrorContains(t, "index 3 greater than number of items in eth1 data slice 1", err)
}

func Test_handleValidatorSlice_OutOfRange(t *testing.T) {
	vals := make([]*ethpb.Validator, 1)
	indices := []uint64{3}
	_, err := handleValidatorSlice(vals, indices, false)
	assert.ErrorContains(t, "index 3 greater than number of validators 1", err)
}

func TestBalancesSlice_CorrectRoots_All(t *testing.T) {
	balances := []uint64{5, 2929, 34, 1291, 354305}
	roots, err := handleBalanceSlice(balances, []uint64{}, true)
	assert.NoError(t, err)

	root1 := [32]byte{}
	binary.LittleEndian.PutUint64(root1[:8], balances[0])
	binary.LittleEndian.PutUint64(root1[8:16], balances[1])
	binary.LittleEndian.PutUint64(root1[16:24], balances[2])
	binary.LittleEndian.PutUint64(root1[24:32], balances[3])

	root2 := [32]byte{}
	binary.LittleEndian.PutUint64(root2[:8], balances[4])

	assert.DeepEqual(t, roots, [][32]byte{root1, root2})
}

func TestBalancesSlice_CorrectRoots_Some(t *testing.T) {
	balances := []uint64{5, 2929, 34, 1291, 354305}
	roots, err := handleBalanceSlice(balances, []uint64{2, 3}, false)
	assert.NoError(t, err)

	root1 := [32]byte{}
	binary.LittleEndian.PutUint64(root1[:8], balances[0])
	binary.LittleEndian.PutUint64(root1[8:16], balances[1])
	binary.LittleEndian.PutUint64(root1[16:24], balances[2])
	binary.LittleEndian.PutUint64(root1[24:32], balances[3])

	// Indices 2 and 3 live in the same chunk, so the same root is
	// returned for each requested index, even when duplicated.
	assert.DeepEqual(t, roots, [][32]byte{root1, root1})
}

func TestValidateIndices_CompressedField(t *testing.T) {
	fakeTrie := &FieldTrie{
		RWMutex:     new(sync.RWMutex),
		reference:   stateutil.NewRef(0),
		fieldLayers: nil,
		field:       types.Balances,
		dataType:    types.CompressedArray,
		length:      params.BeaconConfig().ValidatorRegistryLimit / 4,
		numOfElems:  0,
	}
	goodIdx := params.BeaconConfig().ValidatorRegistryLimit - 1
	assert.NoError(t, fakeTrie.validateIndices([]uint64{goodIdx}))
	badIdx := goodIdx + 1
	assert.ErrorContains(t, "invalid index for field balances", fakeTrie.validateIndices([]uint64{badIdx}))
}
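
The good/bad index pair above follows from the compressed-array arithmetic: a trie of length ValidatorRegistryLimit/4 chunks, at 4 balances per chunk, covers exactly ValidatorRegistryLimit element indices. A back-of-the-envelope sketch of that bound (the formula is inferred from the test, not lifted from validateIndices itself):

	// Hypothetical illustration of the bound implied by the test above.
	elemsPerChunk := uint64(4)
	length := params.BeaconConfig().ValidatorRegistryLimit / 4 // chunks in the trie
	maxValid := length*elemsPerChunk - 1                       // == ValidatorRegistryLimit - 1

	idx := params.BeaconConfig().ValidatorRegistryLimit - 1
	fmt.Println(idx <= maxValid) // true; idx+1 is the first rejected index
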


@@ -148,7 +148,6 @@ type WriteOnlyBlockRoots interface {
// WriteOnlyStateRoots defines an interface which only has write access to state root methods.
type WriteOnlyStateRoots interface {
	SetStateRoots(val [][]byte) error
	UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error
}
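
A minimal sketch of a caller holding only the write-only view; the index-wrapping convention here is an assumption (historical roots are typically keyed by slot modulo SlotsPerHistoricalRoot), not something this diff shows:

	// Hypothetical caller restricted to the write-only interface.
	func recordStateRoot(st WriteOnlyStateRoots, slot uint64, root [32]byte) error {
		// Assumed ring-buffer convention: slot % SlotsPerHistoricalRoot.
		idx := slot % uint64(params.BeaconConfig().SlotsPerHistoricalRoot)
		return st.UpdateStateRootAtIndex(idx, root)
	}
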


@@ -37,6 +37,15 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool,
	return has, nil
}

// StateByRootIfCachedNoCopy retrieves a state using the input block root, but only
// if the state is already in the hot state cache. The returned state is not copied,
// so callers should treat it as read-only. Returns nil on a cache miss.
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
	if !s.hotStateCache.has(blockRoot) {
		return nil
	}
	return s.hotStateCache.getWithoutCopy(blockRoot)
}
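
One way a caller might combine the two accessors, given the read-only contract above (a sketch, not code from this diff):

	// Hypothetical helper: prefer the zero-copy cached state, fall back to a full fetch.
	func stateForRoot(ctx context.Context, s *State, root [32]byte) (state.BeaconState, error) {
		if st := s.StateByRootIfCachedNoCopy(root); st != nil {
			return st, nil // cached; must not be mutated
		}
		return s.StateByRoot(ctx, root) // may hit the DB or replay blocks
	}
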
// StateByRoot retrieves the state using input block root.
func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot")


@@ -56,6 +56,44 @@ func TestStateByRoot_ColdState(t *testing.T) {
	require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
}

func TestStateByRootIfCachedNoCopy_HotState(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	service := New(beaconDB)

	beaconState, _ := util.DeterministicGenesisState(t, 32)
	r := [32]byte{'A'}
	require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
	service.hotStateCache.put(r, beaconState)

	loadedState := service.StateByRootIfCachedNoCopy(r)
	require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
}

func TestStateByRootIfCachedNoCopy_ColdState(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
	service := New(beaconDB)
	service.finalizedInfo.slot = 2
	service.slotsPerArchivedPoint = 1

	b := util.NewBeaconBlock()
	b.Block.Slot = 1
	require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
	bRoot, err := b.Block.HashTreeRoot()
	require.NoError(t, err)

	beaconState, _ := util.DeterministicGenesisState(t, 32)
	require.NoError(t, beaconState.SetSlot(1))
	require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, service.beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
	require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, bRoot))

	// The state was only written to the DB, never cached, so the
	// cache-only accessor returns nil.
	loadedState := service.StateByRootIfCachedNoCopy(bRoot)
	require.Equal(t, loadedState, nil)
}

func TestStateByRoot_HotStateUsingEpochBoundaryCacheNoReplay(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)

Some files were not shown because too many files have changed in this diff.