Compare commits

...

30 Commits

Author SHA1 Message Date
Raul Jordan
969675d172 Merge branch 'develop' into e2e-configs 2021-12-02 10:00:20 -05:00
Raul Jordan
d3c97da4e1 Ensure Slashing Protection Exports and Keymanager API Work According to Spec (#9938)
* password compliance

* delete keys tests

* changes to slashing protection exports

* export tests pass

* fix up failures

* gaz

* table driven tests for delete keystores

* comment

* rem deletion logic

* look ma, no db

* fix up tests

* ineff

* gaz

* broken test fix

* Update validator/keymanager/imported/delete.go

* rem
2021-12-02 09:58:49 -05:00
Raul Jordan
1d216a8737 Filter Errored Keys from Returned Slashing Protection History in Standard API (#9968)
* add err condition

* naming
2021-12-02 03:32:34 +00:00
Raul Jordan
790bf03123 Replace a Few IntFlags with Uint64Flags (#9959)
* use uints instead of ints

* fix method

* fix

* fix

* builds

* deepsource

* deep source
2021-12-01 23:34:53 +00:00
Preston Van Loon
ab60b1c7b2 Update go-ethereum to v1.10.13 (#9967)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 19:31:21 +00:00
Nishant Das
236a5c4167 Cleanup From Deepsource (#9961)
* ds cleanup

* fix

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 18:56:07 +00:00
Nishant Das
5e2229ce9d Update Libp2p to v0.15.1 (#9960)
* fix deps

* tidy it all

* fix build

* remove tls patch

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 18:09:34 +00:00
Potuz
6ffba5c769 Add v1alpha1_to_v2.go (#9966)
* Add v1alpha1_to_v2.go

* add tests

* gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2021-12-01 17:39:43 +00:00
Potuz
3e61763bd7 fix operation precedence (#9965) 2021-12-01 17:14:08 +00:00
terence tsao
23bdce2354 Fix grpc client connected... logging (#9956)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-12-01 14:45:39 +00:00
Nishant Das
d94bf32dcf Faster Doppelganger Check (#9964)
* faster check

* potuz's review

* potuz's review
2021-12-01 12:37:10 +00:00
Nishant Das
7cbef104b0 Remove Balances Timeout (#9957)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-12-01 04:03:26 +00:00
Potuz
cd6d0d9cf1 Monitor aggregated logs (#9943) 2021-12-01 03:35:55 +00:00
Potuz
afbe02697d Monitor service (#9933)
* Add a service for the monitor

* Do not block service start

* gaz

* move channel subscription outide go routine

* add service start test

* fix panic on node tests

* Radek's first pass

* Radek's take 2

* uncap error messages

* revert reversal

* Terence take 1

* gaz

* Missing locks found by Terence

* Track via bool not empty interface

* Add tests for every function

* fix allocation of slice

* Minor cleanups

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-30 19:27:03 -03:00
terence tsao
2c921ec628 Update spec tests to v1.1.6 (#9955)
* Update spec test to v1.1.6

* Update spec test to v1.1.6
2021-11-30 21:21:59 +00:00
terence tsao
0e72938914 Uncap error messages (#9952) 2021-11-30 07:41:07 -08:00
Potuz
71d55d1cff Check for syncstatus before performing a voluntary exit (#9951)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-11-30 10:40:59 +00:00
terence tsao
d8aa0f8827 Alter config filed name to devnet if it's not populated in file (#9949) 2021-11-29 19:27:26 -08:00
Nishant Das
37bc407b56 Refactor States To Allow for Single Cached Hasher (#9922)
* initial changes

* gaz

* unexport and add in godoc

* nocache

* fix edge case

* fix bad implementation

* fix build file

* add it in

* terence's review

* gaz

* fix build

* Apply suggestions from code review

remove assigned ctx

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-29 16:30:17 +00:00
Potuz
5983d0a397 Allow requests for next sync committee (#9945)
* Allow requests for next sync committee

* fix deepsource and variable rename

* Minor cleanup

* Potuz's comments

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-11-28 17:34:24 +00:00
terence tsao
85faecf2ca Add test utility merge state (#9944)
* Add test utility merge state

* gaz

* gaz
2021-11-26 15:53:25 +00:00
terence tsao
f42227aa04 Rest of the merge state implementation (#9939)
* Add rest of the state implementations

* Update BUILD.bazel

* Update state_trie_test.go

* fix test

* fix test

* Update beacon-chain/state/v3/state_trie.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* Update beacon-chain/state/v3/state_trie.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* add ctx

* go fmt

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2021-11-25 17:41:05 -03:00
terence tsao
c9d5b4ba0e Add merge beacon block wrappers (#9906) 2021-11-24 14:26:17 -08:00
Potuz
fed004686b Add verbosity to aggregation logs (#9937) 2021-11-24 11:35:45 -08:00
Raul Jordan
4ae7513835 Import Keystores Standard API Implementation (#9924)
* begin

* rem deleted code

* delete keystores all tests

* surface errors to user

* add in changes

* del

* tests

* slice

* begin import process

* add import keystores logic

* unit tests for import

* tests for all import keystores keymanager issues

* change proto

* pbs

* renaming works

* use proper request

* pb

* comment

* gaz

* fix up cli cmd

* test

* add gw

* precond

* tests

* radek comments
2021-11-24 10:40:49 -05:00
Nishant Das
1d53fd2fd3 revert change (#9931) 2021-11-24 07:09:15 -08:00
Potuz
a2c1185032 Monitor sync committee (#9923)
* Add sync committeee contributions to monitor

* gaz

* Raul's review

* Added lock around TrackedValidators

* add comment to trackedIndex

* add missing locks because of trackedIndex

* Terence fixes 2

* moved TrackedValidator to service from config

* Terence comment fix

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-11-24 09:56:34 +08:00
terence tsao
448d62d6e3 Add merge beacon chain objects and generate ssz.go (#9929) 2021-11-23 23:34:31 +00:00
terence tsao
4858de7875 Use prysmaticlabs/fastssz (#9928)
* Use prysmaticlabs/fastssz

* Generated code
2021-11-23 21:28:24 +00:00
terence tsao
1410ee7e7d Update testnet_e2e_config.go 2021-09-30 11:32:54 -07:00
161 changed files with 7656 additions and 2853 deletions

View File

@@ -225,7 +225,7 @@ filegroup(
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
-consensus_spec_version = "v1.1.5"
+consensus_spec_version = "v1.1.6"
bls_test_version = "v0.1.1"
@@ -241,7 +241,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "a7d7173d953494c0dfde432c9fc064c25d46d666b024749b3474ae0cdfc50050",
sha256 = "58dbf798e86017b5561af38f2217b99e9fa5b6be0e928b4c73dad6040bb94d65",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -257,7 +257,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "f86872061588c0197516b23025d39e9365b4716c112218a618739dc0d6f4666a",
sha256 = "5be19f7fca9733686ca25dad5ae306327e98830ef6354549d1ddfc56c10e0e9a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -273,7 +273,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "7a06975360fd37fbb4694d0e06abb78d2a0835146c1d9b26d33569edff8b98f0",
sha256 = "cc110528fcf7ede049e6a05788c77f4a865c3110b49508149d61bb2a992bb896",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -288,7 +288,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "87d8089200163340484d61212fbdffbb5d9d03e1244622761dcb91e641a65761",
sha256 = "c318d7b909ab39db9cc861f645ddd364e7475a4a3425bb702ab407fad3807acd",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
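Each archive above is pinned by its checksum, so the spec-test bump from v1.1.5 to v1.1.6 must replace every sha256 alongside the version string: Bazel hashes the downloaded tarball and refuses to build on a mismatch. A minimal Go sketch of the digest Bazel compares against the sha256 attribute (the file name is illustrative):

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "os"
)

// fileSHA256 returns the hex digest that an http_archive sha256
// attribute is checked against.
func fileSHA256(path string) (string, error) {
    f, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer f.Close()
    h := sha256.New()
    if _, err := io.Copy(h, f); err != nil {
        return "", err
    }
    return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
    sum, err := fileSHA256("general.tar.gz") // illustrative file name
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println(sum)
}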

View File

@@ -5,8 +5,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
@@ -28,10 +26,10 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
}{
{
name: "only current epoch",
-currentSyncCommittee: convertToCommittee([][]byte{
+currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
-nextSyncCommittee: convertToCommittee([][]byte{}),
+nextSyncCommittee: util.ConvertToCommittee([][]byte{}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
@@ -45,8 +43,8 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "only next epoch",
-currentSyncCommittee: convertToCommittee([][]byte{}),
-nextSyncCommittee: convertToCommittee([][]byte{
+currentSyncCommittee: util.ConvertToCommittee([][]byte{}),
+nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
@@ -62,14 +60,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "some current epoch and some next epoch",
-currentSyncCommittee: convertToCommittee([][]byte{
+currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
-nextSyncCommittee: convertToCommittee([][]byte{
+nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[7],
pubKeys[6],
pubKeys[5],
@@ -90,14 +88,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "some current epoch and some next epoch duplicated across",
-currentSyncCommittee: convertToCommittee([][]byte{
+currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
-nextSyncCommittee: convertToCommittee([][]byte{
+nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[2],
pubKeys[1],
pubKeys[3],
@@ -117,13 +115,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "all duplicated",
-currentSyncCommittee: convertToCommittee([][]byte{
+currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
-nextSyncCommittee: convertToCommittee([][]byte{
+nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
@@ -138,13 +136,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
},
{
name: "unknown keys",
-currentSyncCommittee: convertToCommittee([][]byte{
+currentSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
-nextSyncCommittee: convertToCommittee([][]byte{
+nextSyncCommittee: util.ConvertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
@@ -189,13 +187,13 @@ func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
c := cache.NewSyncCommittee()
s, _ := util.DeterministicGenesisStateAltair(t, 64)
-require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
+require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{1}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
-require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
+require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{2}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
-require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
+require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{3}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
-require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
+require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{4}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))
_, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)
@@ -204,19 +202,3 @@ func TestSyncCommitteeCache_CanRotate(t *testing.T) {
_, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
require.NoError(t, err)
}
-func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
-var pubKeys [][]byte
-for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
-if i < uint64(len(inputKeys)) {
-pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
-} else {
-pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
-}
-}
-return &ethpb.SyncCommittee{
-Pubkeys: pubKeys,
-AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
-}
-}

View File

@@ -47,7 +47,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
}
}
-func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
+func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
var ba []byte
pubkey := [48]byte{}
@@ -85,7 +85,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
}
}
-func TestFuzzareEth1DataEqual_10000(t *testing.T) {
+func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
eth1data := &eth.Eth1Data{}
eth1data2 := &eth.Eth1Data{}
@@ -227,7 +227,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
}
}
-func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
+func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
attestationData := &eth.AttestationData{}
attestationData2 := &eth.AttestationData{}
@@ -239,7 +239,7 @@ func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
}
}
-func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
+func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
attesterSlashing := &eth.AttesterSlashing{}
@@ -397,7 +397,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
}
}
-func TestFuzzVerifyExit_10000(t *testing.T) {
+func TestFuzzVerifyExit_10000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ve := &eth.SignedVoluntaryExit{}
rawVal := &ethpb.Validator{}

View File

@@ -59,7 +59,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
case err == nil:
return bal, nil
case errors.Is(err, cache.ErrNotFound):
-break
+// Do nothing if we receive a not found error.
default:
// In the event, we encounter another error we return it.
return 0, err
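This hunk replaces a bare break with a comment; Go switch cases do not fall through, so the break was a no-op and the change is behavior-neutral. A self-contained sketch of the cache-or-recompute shape (all names here are illustrative, not Prysm's actual API):

package main

import (
    "errors"
    "fmt"
)

// errNotFound stands in for cache.ErrNotFound.
var errNotFound = errors.New("not found")

func fromCache(hit bool) (uint64, error) {
    if hit {
        return 42, nil
    }
    return 0, errNotFound
}

func totalActiveBalance(hit bool) (uint64, error) {
    bal, err := fromCache(hit)
    switch {
    case err == nil:
        return bal, nil // cache hit
    case errors.Is(err, errNotFound):
        // Do nothing: each case breaks implicitly, so the deleted
        // "break" changed no behavior.
    default:
        return 0, err // any other error is returned
    }
    return 7, nil // recompute on a cache miss
}

func main() {
    fmt.Println(totalActiveBalance(false))
}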

View File

@@ -35,6 +35,7 @@ go_library(
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -105,7 +106,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
-genesisValidatorsRoot, err := v1.ValidatorRegistryRoot(preState.Validators())
+genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}

View File

@@ -14,7 +14,7 @@ import (
// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
// less than or equal to the specified epoch.
func (s *Store) PruneAttestationsAtEpoch(
-ctx context.Context, maxEpoch types.Epoch,
+_ context.Context, maxEpoch types.Epoch,
) (numPruned uint, err error) {
// We can prune everything less than the current epoch - history length.
encodedEndPruneEpoch := fssz.MarshalUint64([]byte{}, uint64(maxEpoch))
@@ -85,7 +85,7 @@ func (s *Store) PruneAttestationsAtEpoch(
// PruneProposalsAtEpoch deletes all proposals from the slasher DB with epoch
// less than or equal to the specified epoch.
func (s *Store) PruneProposalsAtEpoch(
-ctx context.Context, maxEpoch types.Epoch,
+_ context.Context, maxEpoch types.Epoch,
) (numPruned uint, err error) {
var endPruneSlot types.Slot
endPruneSlot, err = slots.EpochEnd(maxEpoch)

View File

@@ -394,7 +394,7 @@ func (s *Store) SaveBlockProposals(
// HighestAttestations retrieves the last attestation data from the database for all indices.
func (s *Store) HighestAttestations(
-ctx context.Context,
+_ context.Context,
indices []types.ValidatorIndex,
) ([]*slashpb.HighestAttestation, error) {
if len(indices) == 0 {

View File

@@ -8,13 +8,19 @@ go_library(
"process_attestation.go",
"process_block.go",
"process_exit.go",
"process_sync_committee.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -38,10 +44,15 @@ go_test(
"process_attestation_test.go",
"process_block_test.go",
"process_exit_test.go",
"process_sync_committee_test.go",
"service_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/params:go_default_library",
@@ -50,6 +61,7 @@ go_test(
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -55,6 +55,7 @@ var (
"validator_index",
},
)
// proposedSlotsCounter used to track proposed blocks
proposedSlotsCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
@@ -77,4 +78,16 @@ var (
"validator_index",
},
)
// syncCommitteeContributionCounter used to track sync committee
// contributions
syncCommitteeContributionCounter = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "monitor",
Name: "sync_committee_contributions_total",
Help: "Number of Sync committee contributions performed",
},
[]string{
"validator_index",
},
)
)
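The new counter follows the same promauto pattern as the existing ones: NewCounterVec registers the metric with the default registry when the package-level var is initialized, and each distinct label value lazily creates its own time series. A self-contained usage sketch with an illustrative metric name:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleCounter mirrors the monitor's counters; the name is illustrative.
var exampleCounter = promauto.NewCounterVec(
    prometheus.CounterOpts{
        Namespace: "monitor",
        Name:      "example_events_total",
        Help:      "Number of example events observed",
    },
    []string{"validator_index"},
)

func main() {
    exampleCounter.WithLabelValues("12").Inc() // one series per label value
    exampleCounter.WithLabelValues("12").Add(3) // Add suits batched contributions
    fmt.Println("counter updated")
}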

View File

@@ -20,12 +20,13 @@ import (
// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the
// given slot is different than the last attested slot from this validator.
// It assumes that a read lock is held on the monitor service.
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool {
-if !s.TrackedIndex(types.ValidatorIndex(idx)) {
+if !s.trackedIndex(idx) {
return false
}
-if lp, ok := s.latestPerformance[types.ValidatorIndex(idx)]; ok {
+if lp, ok := s.latestPerformance[idx]; ok {
return lp.attestedSlot != slot
}
return false
@@ -73,6 +74,8 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
log.WithError(err).Error("Could not get attesting indices")
return
}
s.Lock()
defer s.Unlock()
for _, idx := range attestingIndices {
if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data)
@@ -87,7 +90,7 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
aggregatedPerf.totalRequestedCount++
latestPerf := s.latestPerformance[types.ValidatorIndex(idx)]
-balanceChg := balance - latestPerf.balance
+balanceChg := int64(balance - latestPerf.balance)
latestPerf.balanceChange = balanceChg
latestPerf.balance = balance
latestPerf.attestedSlot = att.Data.Slot
@@ -165,10 +168,13 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
// processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our
// tracked validators
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) {
s.RLock()
defer s.RUnlock()
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if state == nil {
log.Debug("Skipping unaggregated attestation due to state not found in cache")
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping unaggregated attestation due to state not found in cache")
return
}
attestingIndices, err := attestingIndices(ctx, state, att)
@@ -187,9 +193,18 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
// processAggregatedAttestation logs when we see an aggregation from one of our tracked validators or an aggregated
// attestation from one of our tracked validators
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
-if s.TrackedIndex(att.AggregatorIndex) {
+s.Lock()
+defer s.Unlock()
+if s.trackedIndex(att.AggregatorIndex) {
log.WithFields(logrus.Fields{
"ValidatorIndex": att.AggregatorIndex,
"AggregatorIndex": att.AggregatorIndex,
"Slot": att.Aggregate.Data.Slot,
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
"SourceRoot:": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
"TargetRoot:": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
aggregatedPerf.totalAggregations++
@@ -201,7 +216,8 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
state := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if state == nil {
log.Debug("Skipping agregated attestation due to state not found in cache")
log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping agregated attestation due to state not found in cache")
return
}
attestingIndices, err := attestingIndices(ctx, state, att.Aggregate)
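The locks added throughout this file rely on the sync.RWMutex embedded in the Service struct (visible in the service.go hunk later in this diff): the mutex methods are promoted, so s.RLock and s.Lock are called on the service itself, with read locks for lookups and write locks where the performance maps are mutated. A minimal sketch of the embedding pattern, with illustrative names:

package main

import (
    "fmt"
    "sync"
)

type service struct {
    sync.RWMutex // promoted: s.Lock, s.RLock, etc. work directly
    tracked      map[uint64]bool
}

// trackedIndex assumes the caller already holds the lock, matching
// the comment on Prysm's unexported trackedIndex.
func (s *service) trackedIndex(idx uint64) bool {
    return s.tracked[idx]
}

func (s *service) track(idx uint64) {
    s.Lock() // write lock: the map is mutated
    defer s.Unlock()
    s.tracked[idx] = true
}

func (s *service) isTracked(idx uint64) bool {
    s.RLock() // read lock is enough for a lookup
    defer s.RUnlock()
    return s.trackedIndex(idx)
}

func main() {
    s := &service{tracked: map[uint64]bool{}}
    s.track(15)
    fmt.Println(s.isTracked(15), s.isTracked(2))
}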

View File

@@ -5,10 +5,7 @@ import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
@@ -18,47 +15,6 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)
-func setupService(t *testing.T) *Service {
-beaconDB := testDB.SetupDB(t)
-trackedVals := map[types.ValidatorIndex]interface{}{
-1: nil,
-2: nil,
-12: nil,
-15: nil,
-}
-latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
-1: {
-balance: 32000000000,
-},
-2: {
-balance: 32000000000,
-},
-12: {
-balance: 31900000000,
-},
-15: {
-balance: 31900000000,
-},
-}
-aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
-1: {},
-2: {},
-12: {},
-15: {},
-}
-return &Service{
-config: &ValidatorMonitorConfig{
-StateGen: stategen.New(beaconDB),
-TrackedValidators: trackedVals,
-},
-latestPerformance: latestPerformance,
-aggregatedPerformance: aggregatedPerformance,
-}
-}
func TestGetAttestingIndices(t *testing.T) {
ctx := context.Background()
beaconState, _ := util.DeterministicGenesisState(t, 256)
@@ -206,7 +162,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
},
}
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot:=0x68656c6c6f2d TargetRoot:=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "Skipping agregated attestation due to state not found in cache")
logrus.SetLevel(logrus.InfoLevel)
}
@@ -244,7 +200,7 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {
require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
s.processAggregatedAttestation(ctx, att)
require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor")
require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot:=0x68656c6c6f2d TargetRoot:=0x68656c6c6f2d prefix=monitor")
require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
}

View File

@@ -7,11 +7,16 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
// Number of epochs between aggregate reports
const AggregateReportingPeriod = 5
// processBlock handles the cases when
// 1) A block was proposed by one of our tracked validators
// 2) An attestation by one of our tracked validators was included
@@ -39,13 +44,30 @@ func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) {
return
}
currEpoch := slots.ToEpoch(blk.Slot())
s.RLock()
lastSyncedEpoch := s.lastSyncedEpoch
s.RUnlock()
if currEpoch != lastSyncedEpoch &&
slots.SyncCommitteePeriod(currEpoch) == slots.SyncCommitteePeriod(lastSyncedEpoch) {
s.updateSyncCommitteeTrackedVals(state)
}
s.processSyncAggregate(state, blk)
s.processProposedBlock(state, root, blk)
s.processAttestations(ctx, state, blk)
if blk.Slot()%(AggregateReportingPeriod*params.BeaconConfig().SlotsPerEpoch) == 0 {
s.logAggregatedPerformance()
}
}
// processProposedBlock logs the event that one of our tracked validators proposed a block that was included
func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, blk block.BeaconBlock) {
-if s.TrackedIndex(blk.ProposerIndex()) {
+s.Lock()
+defer s.Unlock()
+if s.trackedIndex(blk.ProposerIndex()) {
// update metrics
proposedSlotsCounter.WithLabelValues(fmt.Sprintf("%d", blk.ProposerIndex())).Inc()
@@ -57,7 +79,7 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b
}
latestPerf := s.latestPerformance[blk.ProposerIndex()]
-balanceChg := balance - latestPerf.balance
+balanceChg := int64(balance - latestPerf.balance)
latestPerf.balanceChange = balanceChg
latestPerf.balance = balance
s.latestPerformance[blk.ProposerIndex()] = latestPerf
@@ -80,9 +102,11 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b
// processSlashings logs the event of one of our tracked validators was slashed
func (s *Service) processSlashings(blk block.BeaconBlock) {
s.RLock()
defer s.RUnlock()
for _, slashing := range blk.Body().ProposerSlashings() {
idx := slashing.Header_1.Header.ProposerIndex
-if s.TrackedIndex(idx) {
+if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ProposerIndex": idx,
"Slot:": blk.Slot(),
@@ -95,7 +119,7 @@ func (s *Service) processSlashings(blk block.BeaconBlock) {
for _, slashing := range blk.Body().AttesterSlashings() {
for _, idx := range blocks.SlashableAttesterIndices(slashing) {
-if s.TrackedIndex(types.ValidatorIndex(idx)) {
+if s.trackedIndex(types.ValidatorIndex(idx)) {
log.WithFields(logrus.Fields{
"AttesterIndex": idx,
"Slot:": blk.Slot(),
@@ -113,3 +137,41 @@ func (s *Service) processSlashings(blk block.BeaconBlock) {
}
}
}
// logAggregatedPerformance logs the performance statistics collected since the run started
func (s *Service) logAggregatedPerformance() {
s.RLock()
defer s.RUnlock()
for idx, p := range s.aggregatedPerformance {
if p.totalAttestedCount == 0 || p.totalRequestedCount == 0 || p.startBalance == 0 {
break
}
l, ok := s.latestPerformance[idx]
if !ok {
break
}
percentAtt := float64(p.totalAttestedCount) / float64(p.totalRequestedCount)
percentBal := float64(l.balance-p.startBalance) / float64(p.startBalance)
percentDistance := float64(p.totalDistance) / float64(p.totalAttestedCount)
percentCorrectSource := float64(p.totalCorrectSource) / float64(p.totalAttestedCount)
percentCorrectHead := float64(p.totalCorrectHead) / float64(p.totalAttestedCount)
percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount)
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"StartEpoch": p.startEpoch,
"StartBalance": p.startBalance,
"TotalRequested": p.totalRequestedCount,
"AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100),
"BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100),
"CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100),
"CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100),
"CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100),
"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"TotalProposedBlocks": p.totalProposedCount,
"TotalAggregations": p.totalAggregations,
"TotalSyncContributions": p.totalSyncComitteeContributions,
}).Info("Aggregated performance since launch")
}
}
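The int64 conversions in this file fix a sign bug: both balances are uint64, so when a balance drops the subtraction wraps around to a huge positive number, and reinterpreting that wrapped value as int64 recovers the intended negative delta via two's complement. A worked example:

package main

import "fmt"

func main() {
    var old, cur uint64 = 32000000000, 31999000000 // balance dropped

    // uint64 subtraction wraps modulo 2^64 instead of going negative.
    wrapped := cur - old
    fmt.Println(wrapped) // 18446744072709551616, i.e. 2^64 - 1000000000

    // Converting the wrapped value to int64 reinterprets the bits as
    // two's complement and recovers the true signed change.
    fmt.Println(int64(wrapped)) // -1000000000
}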

View File

@@ -4,8 +4,10 @@ import (
"context"
"fmt"
"testing"
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -116,11 +118,9 @@ func TestProcessSlashings(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
config: &ValidatorMonitorConfig{
TrackedValidators: map[types.ValidatorIndex]interface{}{
1: nil,
2: nil,
},
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
s.processSlashings(wrapper.WrappedPhase0BeaconBlock(tt.block))
@@ -178,31 +178,69 @@ func TestProcessProposedBlock(t *testing.T) {
}
-func TestProcessBlock_ProposerAndSlashedTrackedVals(t *testing.T) {
+func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
-s := setupService(t)
-genesis, keys := util.DeterministicGenesisState(t, 64)
+genesis, keys := util.DeterministicGenesisStateAltair(t, 64)
+c, err := altair.NextSyncCommittee(ctx, genesis)
+require.NoError(t, err)
+require.NoError(t, genesis.SetCurrentSyncCommittee(c))
genConfig := util.DefaultBlockGenConfig()
genConfig.NumProposerSlashings = 1
-b, err := util.GenerateFullBlock(genesis, keys, genConfig, 1)
+b, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
require.NoError(t, err)
s := setupService(t)
pubKeys := make([][]byte, 3)
pubKeys[0] = genesis.Validators()[0].PublicKey
pubKeys[1] = genesis.Validators()[1].PublicKey
pubKeys[2] = genesis.Validators()[2].PublicKey
currentSyncCommittee := util.ConvertToCommittee([][]byte{
pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
})
require.NoError(t, genesis.SetCurrentSyncCommittee(currentSyncCommittee))
idx := b.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex
-if !s.TrackedIndex(idx) {
-s.config.TrackedValidators[idx] = nil
+s.RLock()
+if !s.trackedIndex(idx) {
+s.TrackedValidators[idx] = true
s.latestPerformance[idx] = ValidatorLatestPerformance{
balance: 31900000000,
}
s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{}
}
s.RUnlock()
s.updateSyncCommitteeTrackedVals(genesis)
require.NoError(t, err)
root, err := b.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0x67a9fe4d0d8d ProposerIndex=15 Slot=1 Version=0 prefix=monitor", bytesutil.Trunc(root[:]))
wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" ProposerIndex=%d Root1=0x000100000000 Root2=0x000200000000 SlashingSlot=0 Slot:=1 prefix=monitor", idx)
-wrapped := wrapper.WrappedPhase0SignedBeaconBlock(b)
+wanted3 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=3 ExpectedContrib=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor"
+wanted4 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor"
+wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(b)
+require.NoError(t, err)
s.processBlock(ctx, wrapped)
require.LogsContain(t, hook, wanted1)
require.LogsContain(t, hook, wanted2)
require.LogsContain(t, hook, wanted3)
require.LogsContain(t, hook, wanted4)
}
func TestLogAggregatedPerformance(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
s.logAggregatedPerformance()
time.Sleep(3000 * time.Millisecond)
wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +
"StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " +
"ValidatorIndex=1 prefix=monitor"
require.LogsContain(t, hook, wanted)
}

View File

@@ -9,9 +9,11 @@ import (
// processExitsFromBlock logs the event of one of our tracked validators' exit was
// included in a block
func (s *Service) processExitsFromBlock(blk block.BeaconBlock) {
s.RLock()
defer s.RUnlock()
for _, exit := range blk.Body().VoluntaryExits() {
idx := exit.Exit.ValidatorIndex
-if s.TrackedIndex(idx) {
+if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
"Slot": blk.Slot(),
@@ -23,7 +25,9 @@ func (s *Service) processExitsFromBlock(blk block.BeaconBlock) {
// processExit logs the event of one of our tracked validators' exit was processed
func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) {
idx := exit.Exit.ValidatorIndex
-if s.TrackedIndex(idx) {
+s.RLock()
+defer s.RUnlock()
+if s.trackedIndex(idx) {
log.WithFields(logrus.Fields{
"ValidatorIndex": idx,
}).Info("Voluntary exit was processed")

View File

@@ -13,11 +13,9 @@ import (
func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
config: &ValidatorMonitorConfig{
TrackedValidators: map[types.ValidatorIndex]interface{}{
1: nil,
2: nil,
},
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
@@ -49,11 +47,9 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) {
func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
config: &ValidatorMonitorConfig{
TrackedValidators: map[types.ValidatorIndex]interface{}{
1: nil,
2: nil,
},
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
@@ -85,11 +81,9 @@ func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) {
func TestProcessExitP2PTrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
config: &ValidatorMonitorConfig{
TrackedValidators: map[types.ValidatorIndex]interface{}{
1: nil,
2: nil,
},
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
@@ -107,11 +101,9 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) {
func TestProcessExitP2PUntrackedIndices(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{
config: &ValidatorMonitorConfig{
TrackedValidators: map[types.ValidatorIndex]interface{}{
1: nil,
2: nil,
},
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}

View File

@@ -0,0 +1,79 @@
package monitor
import (
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/sirupsen/logrus"
)
// processSyncCommitteeContribution logs the event that one of our tracked
// validators' aggregated sync contribution has been processed.
// TODO: We do not log if a sync contribution was included in an aggregate (we
// log them when they are included in blocks)
func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedContributionAndProof) {
idx := contribution.Message.AggregatorIndex
s.Lock()
defer s.Unlock()
if s.trackedIndex(idx) {
aggPerf := s.aggregatedPerformance[idx]
aggPerf.totalSyncComitteeAggregations++
s.aggregatedPerformance[idx] = aggPerf
log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")
}
}
// processSyncAggregate logs the event that one of our tracked validators is a sync-committee member and its
// contribution was included
func (s *Service) processSyncAggregate(state state.BeaconState, blk block.BeaconBlock) {
if blk == nil || blk.Body() == nil {
return
}
bits, err := blk.Body().SyncAggregate()
if err != nil {
log.WithError(err).Error("Cannot get SyncAggregate")
return
}
s.Lock()
defer s.Unlock()
for validatorIdx, committeeIndices := range s.trackedSyncCommitteeIndices {
if len(committeeIndices) > 0 {
contrib := 0
for _, idx := range committeeIndices {
if bits.SyncCommitteeBits.BitAt(uint64(idx)) {
contrib++
}
}
balance, err := state.BalanceAtIndex(validatorIdx)
if err != nil {
log.Error("Could not get balance")
return
}
latestPerf := s.latestPerformance[validatorIdx]
balanceChg := int64(balance - latestPerf.balance)
latestPerf.balanceChange = balanceChg
latestPerf.balance = balance
s.latestPerformance[validatorIdx] = latestPerf
aggPerf := s.aggregatedPerformance[validatorIdx]
aggPerf.totalSyncComitteeContributions += uint64(contrib)
s.aggregatedPerformance[validatorIdx] = aggPerf
syncCommitteeContributionCounter.WithLabelValues(
fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib))
log.WithFields(logrus.Fields{
"ValidatorIndex": validatorIdx,
"ExpectedContrib": len(committeeIndices),
"Contributions": contrib,
"NewBalance": balance,
"BalanceChange": balanceChg,
}).Info("Sync committee contribution included")
}
}
}

View File

@@ -0,0 +1,59 @@
package monitor
import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestProcessSyncCommitteeContribution(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
contrib := &ethpb.SignedContributionAndProof{
Message: &ethpb.ContributionAndProof{
AggregatorIndex: 1,
},
}
s.processSyncCommitteeContribution(contrib)
require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}
func TestProcessSyncAggregate(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
beaconState, _ := util.DeterministicGenesisStateAltair(t, 256)
block := &ethpb.BeaconBlockAltair{
Slot: 2,
Body: &ethpb.BeaconBlockBodyAltair{
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: bitfield.Bitvector512{
0x31, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
},
},
},
}
wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block)
require.NoError(t, err)
s.processSyncAggregate(beaconState, wrappedBlock)
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor")
require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 Contributions=2 ExpectedContrib=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor")
require.LogsDoNotContain(t, hook, "ValidatorIndex=2")
}
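The expectations in TestProcessSyncAggregate follow from the first byte of the bitvector: 0x31 is 0b00110001, so bits 0, 4 and 5 are set. In setupService, tracked validator 1 holds subcommittee slots {0, 1, 2, 3}, of which only bit 0 is set (Contributions=1), while validator 12 holds {4, 5}, both set (Contributions=2). A standalone check of that bit math, using the same little-endian bit indexing as go-bitfield's BitAt:

package main

import "fmt"

// bitAt reports whether bit idx is set, indexing bits the way
// go-bitfield's Bitvector512.BitAt does.
func bitAt(b []byte, idx uint64) bool {
    return b[idx/8]&(1<<(idx%8)) != 0
}

func main() {
    bits := []byte{0x31, 0xff} // first bytes of the test's vector

    count := func(indices []uint64) int {
        n := 0
        for _, i := range indices {
            if bitAt(bits, i) {
                n++
            }
        }
        return n
    }
    fmt.Println(count([]uint64{0, 1, 2, 3})) // 1: validator 1's slots
    fmt.Println(count([]uint64{4, 5}))       // 2: validator 12's slots
}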

View File

@@ -1,8 +1,30 @@
package monitor
import (
"context"
"errors"
"sort"
"sync"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
var (
// Error when event feed data is not statefeed.SyncedData.
errNotSyncedData = errors.New("event feed data is not of type *statefeed.SyncedData")
// Error when the context is closed while waiting for sync.
errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head")
)
// ValidatorLatestPerformance keeps track of the latest participation of the validator
@@ -13,41 +35,277 @@ type ValidatorLatestPerformance struct {
timelyTarget bool
timelyHead bool
balance uint64
-balanceChange uint64
+balanceChange int64
}
// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the validator since launch
type ValidatorAggregatedPerformance struct {
-totalAttestedCount uint64
-totalRequestedCount uint64
-totalDistance uint64
-totalCorrectSource uint64
-totalCorrectTarget uint64
-totalCorrectHead uint64
-totalProposedCount uint64
-totalAggregations uint64
+startEpoch types.Epoch
+startBalance uint64
+totalAttestedCount uint64
+totalRequestedCount uint64
+totalDistance uint64
+totalCorrectSource uint64
+totalCorrectTarget uint64
+totalCorrectHead uint64
+totalProposedCount uint64
+totalAggregations uint64
+totalSyncComitteeContributions uint64
+totalSyncComitteeAggregations uint64
}
// ValidatorMonitorConfig contains the list of validator indices that the
// monitor service tracks, as well as the event feed notifier that the
// monitor needs to subscribe.
type ValidatorMonitorConfig struct {
-StateGen stategen.StateManager
-TrackedValidators map[types.ValidatorIndex]interface{}
+StateNotifier statefeed.Notifier
+AttestationNotifier operation.Notifier
+HeadFetcher blockchain.HeadFetcher
+StateGen stategen.StateManager
}
// Service is the main structure that tracks validators and reports logs and
// metrics of their performances throughout their lifetime.
type Service struct {
-config *ValidatorMonitorConfig
-latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance
-aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance
+config *ValidatorMonitorConfig
+ctx context.Context
+cancel context.CancelFunc
+isLogging bool
+// Locks access to TrackedValidators, latestPerformance, aggregatedPerformance,
+// trackedSyncedCommitteeIndices and lastSyncedEpoch
+sync.RWMutex
+TrackedValidators map[types.ValidatorIndex]bool
+latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance
+aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance
+trackedSyncCommitteeIndices map[types.ValidatorIndex][]types.CommitteeIndex
+lastSyncedEpoch types.Epoch
}
// NewService sets up a new validator monitor instance when given a list of validator indices to track.
func NewService(ctx context.Context, config *ValidatorMonitorConfig, tracked []types.ValidatorIndex) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
r := &Service{
config: config,
ctx: ctx,
cancel: cancel,
TrackedValidators: make(map[types.ValidatorIndex]bool, len(tracked)),
latestPerformance: make(map[types.ValidatorIndex]ValidatorLatestPerformance),
aggregatedPerformance: make(map[types.ValidatorIndex]ValidatorAggregatedPerformance),
trackedSyncCommitteeIndices: make(map[types.ValidatorIndex][]types.CommitteeIndex),
}
for _, idx := range tracked {
r.TrackedValidators[idx] = true
}
return r, nil
}
// Start sets up the TrackedValidators map and then calls to wait until the beacon is synced.
func (s *Service) Start() {
s.Lock()
defer s.Unlock()
tracked := make([]types.ValidatorIndex, 0, len(s.TrackedValidators))
for idx := range s.TrackedValidators {
tracked = append(tracked, idx)
}
sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] })
log.WithFields(logrus.Fields{
"ValidatorIndices": tracked,
}).Info("Starting service")
s.isLogging = false
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
go s.run(stateChannel, stateSub)
}
// run waits until the beacon is synced and starts the monitoring system.
func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) {
if stateChannel == nil {
log.Error("State state is nil")
return
}
if err := s.waitForSync(stateChannel, stateSub); err != nil {
log.WithError(err)
return
}
state, err := s.config.HeadFetcher.HeadState(s.ctx)
if err != nil {
log.WithError(err).Error("Could not get head state")
return
}
if state == nil {
log.Error("Head state is nil")
return
}
epoch := slots.ToEpoch(state.Slot())
log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance")
s.Lock()
s.initializePerformanceStructures(state, epoch)
s.Unlock()
s.updateSyncCommitteeTrackedVals(state)
s.Lock()
s.isLogging = true
s.Unlock()
s.monitorRoutine(stateChannel, stateSub)
}
// initializePerformanceStructures initializes the validatorLatestPerformance
// and validatorAggregatedPerformance for each tracked validator.
func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch types.Epoch) {
for idx := range s.TrackedValidators {
balance, err := state.BalanceAtIndex(idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
"Could not fetch starting balance, skipping aggregated logs.")
balance = 0
}
s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{
startEpoch: epoch,
startBalance: balance,
}
s.latestPerformance[idx] = ValidatorLatestPerformance{
balance: balance,
}
}
}
// Status retrieves the status of the service.
func (s *Service) Status() error {
if s.isLogging {
return nil
}
return errors.New("not running")
}
// Stop stops the service.
func (s *Service) Stop() error {
defer s.cancel()
s.isLogging = false
return nil
}
// waitForSync waits until the beacon node is synced to the latest head.
func (s *Service) waitForSync(stateChannel chan *feed.Event, stateSub event.Subscription) error {
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.Synced {
_, ok := event.Data.(*statefeed.SyncedData)
if !ok {
return errNotSyncedData
}
return nil
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return errContextClosedWhileWaiting
case err := <-stateSub.Err():
log.WithError(err).Error("Could not subscribe to state notifier")
return err
}
}
}
// monitorRoutine is the main dispatcher, it registers event channels for the
// state feed and the operation feed. It then calls the appropriate function
// when we get messages after syncing a block or processing attestations/sync
// committee contributions.
func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.Subscription) {
defer stateSub.Unsubscribe()
opChannel := make(chan *feed.Event, 1)
opSub := s.config.AttestationNotifier.OperationFeed().Subscribe(opChannel)
defer opSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.BlockProcessed {
data, ok := event.Data.(*statefeed.BlockProcessedData)
if !ok {
log.Error("Event feed data is not of type *statefeed.BlockProcessedData")
} else if data.Verified {
// We only process blocks that have been verified
s.processBlock(s.ctx, data.SignedBlock)
}
}
case event := <-opChannel:
switch event.Type {
case operation.UnaggregatedAttReceived:
data, ok := event.Data.(*operation.UnAggregatedAttReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.UnAggregatedAttReceivedData")
} else {
s.processUnaggregatedAttestation(s.ctx, data.Attestation)
}
case operation.AggregatedAttReceived:
data, ok := event.Data.(*operation.AggregatedAttReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.AggregatedAttReceivedData")
} else {
s.processAggregatedAttestation(s.ctx, data.Attestation)
}
case operation.ExitReceived:
data, ok := event.Data.(*operation.ExitReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.ExitReceivedData")
} else {
s.processExit(data.Exit)
}
case operation.SyncCommitteeContributionReceived:
data, ok := event.Data.(*operation.SyncCommitteeContributionReceivedData)
if !ok {
log.Error("Event feed data is not of type *operation.SyncCommitteeContributionReceivedData")
} else {
s.processSyncCommitteeContribution(data.Contribution)
}
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Could not subscribe to state notifier")
return
}
}
}
// TrackedIndex returns if the given validator index corresponds to one of the
-// validators we follow
-func (s *Service) TrackedIndex(idx types.ValidatorIndex) bool {
-_, ok := s.config.TrackedValidators[idx]
+// validators we follow.
+// It assumes the caller holds the service Lock
+func (s *Service) trackedIndex(idx types.ValidatorIndex) bool {
+_, ok := s.TrackedValidators[idx]
return ok
}
// updateSyncCommitteeTrackedVals updates the sync committee assignments of our
// tracked validators. It gets called when we sync a block after the Sync Period changes.
func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) {
s.Lock()
defer s.Unlock()
for idx := range s.TrackedValidators {
syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx)
if err != nil {
log.WithError(err).WithField("ValidatorIndex", idx).Error(
"Sync committee assignments will not be reported")
delete(s.trackedSyncCommitteeIndices, idx)
} else if len(syncIdx) == 0 {
delete(s.trackedSyncCommitteeIndices, idx)
} else {
s.trackedSyncCommitteeIndices[idx] = syncIdx
}
}
s.lastSyncedEpoch = slots.ToEpoch(state.Slot())
}

View File

@@ -1,21 +1,312 @@
package monitor
import (
"context"
"fmt"
"sync"
"testing"
"time"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func setupService(t *testing.T) *Service {
beaconDB := testDB.SetupDB(t)
state, _ := util.DeterministicGenesisStateAltair(t, 256)
pubKeys := make([][]byte, 3)
pubKeys[0] = state.Validators()[0].PublicKey
pubKeys[1] = state.Validators()[1].PublicKey
pubKeys[2] = state.Validators()[2].PublicKey
currentSyncCommittee := util.ConvertToCommittee([][]byte{
pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1],
})
require.NoError(t, state.SetCurrentSyncCommittee(currentSyncCommittee))
chainService := &mock.ChainService{
Genesis: time.Now(),
DB: beaconDB,
State: state,
Root: []byte("hello-world"),
ValidatorsRoot: [32]byte{},
}
trackedVals := map[types.ValidatorIndex]bool{
1: true,
2: true,
12: true,
15: true,
}
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
1: {
balance: 32000000000,
},
2: {
balance: 32000000000,
},
12: {
balance: 31900000000,
},
15: {
balance: 31900000000,
},
}
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
1: {
startEpoch: 0,
startBalance: 31700000000,
totalAttestedCount: 12,
totalRequestedCount: 15,
totalDistance: 14,
totalCorrectHead: 8,
totalCorrectSource: 11,
totalCorrectTarget: 12,
totalProposedCount: 1,
totalSyncComitteeContributions: 0,
totalSyncComitteeAggregations: 0,
},
2: {},
12: {},
15: {},
}
trackedSyncCommitteeIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0, 1, 2, 3},
12: {4, 5},
}
return &Service{
config: &ValidatorMonitorConfig{
StateGen: stategen.New(beaconDB),
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
AttestationNotifier: chainService.OperationNotifier(),
},
ctx: context.Background(),
TrackedValidators: trackedVals,
latestPerformance: latestPerformance,
aggregatedPerformance: aggregatedPerformance,
trackedSyncCommitteeIndices: trackedSyncCommitteeIndices,
lastSyncedEpoch: 0,
}
}
func TestTrackedIndex(t *testing.T) {
s := &Service{
config: &ValidatorMonitorConfig{
TrackedValidators: map[types.ValidatorIndex]interface{}{
1: nil,
2: nil,
},
TrackedValidators: map[types.ValidatorIndex]bool{
1: true,
2: true,
},
}
-require.Equal(t, s.TrackedIndex(types.ValidatorIndex(1)), true)
-require.Equal(t, s.TrackedIndex(types.ValidatorIndex(3)), false)
+require.Equal(t, s.trackedIndex(types.ValidatorIndex(1)), true)
+require.Equal(t, s.trackedIndex(types.ValidatorIndex(3)), false)
}
func TestUpdateSyncCommitteeTrackedVals(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
state, _ := util.DeterministicGenesisStateAltair(t, 1024)
s.updateSyncCommitteeTrackedVals(state)
require.LogsDoNotContain(t, hook, "Sync committee assignments will not be reported")
newTrackedSyncIndices := map[types.ValidatorIndex][]types.CommitteeIndex{
1: {1, 3, 4},
2: {2},
}
require.DeepEqual(t, s.trackedSyncCommitteeIndices, newTrackedSyncIndices)
}
func TestNewService(t *testing.T) {
config := &ValidatorMonitorConfig{}
tracked := []types.ValidatorIndex{}
ctx := context.Background()
_, err := NewService(ctx, config, tracked)
require.NoError(t, err)
}
func TestStart(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
wg := &sync.WaitGroup{}
wg.Add(1)
s.Start()
go func() {
select {
case stateEvent := <-stateChannel:
if stateEvent.Type == statefeed.Synced {
_, ok := stateEvent.Data.(*statefeed.SyncedData)
require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData")
}
case <-s.ctx.Done():
}
wg.Done()
}()
for sent := 0; sent == 0; {
sent = s.config.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
})
}
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
require.Equal(t, s.isLogging, true, "monitor is not running")
}
func TestInitializePerformanceStructures(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
s := setupService(t)
state, err := s.config.HeadFetcher.HeadState(ctx)
require.NoError(t, err)
epoch := slots.ToEpoch(state.Slot())
s.initializePerformanceStructures(state, epoch)
require.LogsDoNotContain(t, hook, "Could not fetch starting balance")
latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{
1: {
balance: 32000000000,
},
2: {
balance: 32000000000,
},
12: {
balance: 32000000000,
},
15: {
balance: 32000000000,
},
}
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
1: {
startBalance: 32000000000,
},
2: {
startBalance: 32000000000,
},
12: {
startBalance: 32000000000,
},
15: {
startBalance: 32000000000,
},
}
require.DeepEqual(t, s.latestPerformance, latestPerformance)
require.DeepEqual(t, s.aggregatedPerformance, aggregatedPerformance)
}
func TestMonitorRoutine(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
s.monitorRoutine(stateChannel, stateSub)
wg.Done()
}()
genesis, keys := util.DeterministicGenesisStateAltair(t, 64)
c, err := altair.NextSyncCommittee(ctx, genesis)
require.NoError(t, err)
require.NoError(t, genesis.SetCurrentSyncCommittee(c))
genConfig := util.DefaultBlockGenConfig()
block, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
require.NoError(t, err)
root, err := block.GetBlock().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
stateChannel <- &feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: 1,
Verified: true,
SignedBlock: wrapped,
},
}
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
require.LogsContain(t, hook, wanted1)
}
func TestWaitForSync(t *testing.T) {
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
err := s.waitForSync(stateChannel, stateSub)
require.NoError(t, err)
wg.Done()
}()
stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
}
func TestRun(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
s.run(stateChannel, stateSub)
wg.Done()
}()
stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
// Wait for Logrus
time.Sleep(1000 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
}
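Together, these tests pin down the shape of the service's event loop: wait for the Synced event, emit the start log, then consume verified-block events. A hedged sketch of run under those assumptions (only the names appearing in the tests are taken from the source):

func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) {
// Block until the node reports it has synced to the head epoch.
if err := s.waitForSync(stateChannel, stateSub); err != nil {
log.WithError(err).Error("Could not wait for initial sync")
return
}
log.Info("Synced to head epoch, starting reporting performance")
// Hand off to the routine that reacts to BlockProcessed events.
s.monitorRoutine(stateChannel, stateSub)
}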

View File

@@ -26,6 +26,7 @@ go_library(
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",
"//beacon-chain/monitor:go_default_library",
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",

View File

@@ -17,6 +17,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
@@ -28,6 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
"github.com/prysmaticlabs/prysm/beacon-chain/monitor"
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
@@ -205,6 +207,10 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
if err := beacon.registerValidatorMonitorService(); err != nil {
return nil, err
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
@@ -483,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
MaxPeers: cliCtx.Uint64(cmd.P2PMaxPeers.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
@@ -866,3 +872,33 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
}
return nil
}
func (b *BeaconNode) registerValidatorMonitorService() error {
if cmd.ValidatorMonitorIndicesFlag.Value == nil {
return nil
}
cliSlice := cmd.ValidatorMonitorIndicesFlag.Value.Value()
if cliSlice == nil {
return nil
}
tracked := make([]types.ValidatorIndex, len(cliSlice))
for i := range tracked {
tracked[i] = types.ValidatorIndex(cliSlice[i])
}
var chainService *blockchain.Service
if err := b.services.FetchService(&chainService); err != nil {
return err
}
monitorConfig := &monitor.ValidatorMonitorConfig{
StateNotifier: b,
AttestationNotifier: b,
StateGen: b.stateGen,
HeadFetcher: chainService,
}
svc, err := monitor.NewService(b.ctx, monitorConfig, tracked)
if err != nil {
return err
}
return b.services.RegisterService(svc)
}

View File

@@ -36,11 +36,11 @@ func (m *PoolMock) InsertProposerSlashing(_ context.Context, _ state.ReadOnlyBea
}
// MarkIncludedAttesterSlashing --
func (_ *PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) {
func (*PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) {
panic("implement me")
}
// MarkIncludedProposerSlashing --
func (_ *PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) {
func (*PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) {
panic("implement me")
}

View File

@@ -74,7 +74,6 @@ go_library(
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_ipfs_go_ipfs_addr//:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_kr_pretty//:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",

View File

@@ -23,7 +23,7 @@ type Config struct {
MetaDataDir string
TCPPort uint
UDPPort uint
MaxPeers uint
MaxPeers uint64
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier

View File

@@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
ScorerParams: &scorers.Config{},
}),
host: mockp2p.NewTestP2P(t).BHost,
cfg: &Config{MaxPeers: uint(limit)},
cfg: &Config{MaxPeers: uint64(limit)},
}
var err error
s.addrFilter, err = configureFilter(&Config{})

View File

@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
iaddr "github.com/ipfs/go-ipfs-addr"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
@@ -454,11 +453,7 @@ func peersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
}
func multiAddrFromString(address string) (ma.Multiaddr, error) {
addr, err := iaddr.ParseString(address)
if err != nil {
return nil, err
}
return addr.Multiaddr(), nil
return ma.NewMultiaddr(address)
}
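The simplification above works because go-multiaddr parses the full address string directly, making the go-ipfs-addr round trip redundant. A hypothetical in-package usage of the slimmed-down helper (the address is a placeholder, not a real bootnode):

maddr, err := multiAddrFromString("/ip4/192.0.2.1/tcp/13000")
if err != nil {
panic(err) // illustrative only
}
_ = maddr // a normalized ma.Multiaddr ready for dialing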
func udpVersionFromIP(ipAddr net.IP) string {

View File

@@ -63,7 +63,7 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) {
assert.Equal(t, int(params.BeaconConfig().MinGenesisActiveValidatorCount)+100, int(vals), "mainnet genesis active count isn't accurate")
}
func TestLoggingParameters(t *testing.T) {
func TestLoggingParameters(_ *testing.T) {
logGossipParameters("testing", nil)
logGossipParameters("testing", &pubsub.TopicScoreParams{})
// Test out actual gossip parameters.

View File

@@ -77,7 +77,7 @@ type PeerManager interface {
ENR() *enr.Record
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshENR()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold uint64) (bool, error)
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

View File

@@ -85,7 +85,7 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
if scorer.config.StalePeerRefreshInterval == 0 {
scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval
}
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
scorer.maxScore = 1.0
if batchSize > 0 {
totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize)
@@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 {
if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval {
return s.maxScore
}
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
if batchSize > 0 {
processedBatches := float64(peerData.ProcessedBlocks / batchSize)
score += processedBatches * s.config.ProcessedBatchWeight
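As a worked example of the batch arithmetic above, using the default block batch limit of 64 (the weight value is illustrative, not the configured default):

batchSize := uint64(64) // default flags.Get().BlockBatchLimit
processedBlocks := uint64(640) // hypothetical peer history
processedBatches := float64(processedBlocks / batchSize) // 640/64 = 10
score := processedBatches * 0.05 // 0.5, assuming a ProcessedBatchWeight of 0.05
_ = score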

View File

@@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
tests := []struct {
name string
update func(scorer *scorers.BlockProviderScorer)
@@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
},
})
scorer := peerStatuses.Scorers().BlockProviderScorer()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
r := rand.NewDeterministicGenerator()
reverse := func(pids []peer.ID) []peer.ID {
@@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
}
func TestScorers_BlockProvider_Sorted(t *testing.T) {
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
tests := []struct {
name string
update func(s *scorers.BlockProviderScorer)
@@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
func TestScorers_BlockProvider_MaxScore(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
tests := []struct {
name string
@@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) {
func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]"
tests := []struct {

View File

@@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
t.Run("default config", func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
@@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 {
scores := make(map[string]float64, len(pids))

View File

@@ -696,7 +696,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (typ
// BestNonFinalized returns the highest known epoch, higher than ours,
// that is shared by at least minPeers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
func (p *Status) BestNonFinalized(minPeers uint64, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
connected := p.Connected()
epochVotes := make(map[types.Epoch]uint64)
pidEpoch := make(map[peer.ID]types.Epoch, len(connected))

View File

@@ -33,12 +33,11 @@ const syncLockerVal = 100
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method will block until the required number of
// peers is found; it only exits early in the event of a context timeout.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
index uint64, threshold int) (bool, error) {
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error) {
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()
span.AddAttributes(trace.Int64Attribute("index", int64(index)))
span.AddAttributes(trace.Int64Attribute("index", int64(subIndex)))
if s.dv5Listener == nil {
// return if discovery isn't set
@@ -49,14 +48,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
iterator := s.dv5Listener.RandomNodes()
switch {
case strings.Contains(topic, GossipAttestationMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(subIndex))
case strings.Contains(topic, GossipSyncCommitteeMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(subIndex))
default:
return false, errors.New("no subnet exists for provided topic")
}
currNum := len(s.pubsub.ListPeers(topic))
currNum := uint64(len(s.pubsub.ListPeers(topic)))
wg := new(sync.WaitGroup)
for {
if err := ctx.Err(); err != nil {
@@ -81,7 +80,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
}
// Wait for all dials to be completed.
wg.Wait()
currNum = len(s.pubsub.ListPeers(topic))
currNum = uint64(len(s.pubsub.ListPeers(topic)))
}
return true, nil
}
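Because the loop above only returns early on a context error, callers are expected to bound the search themselves. A hedged usage sketch, assuming s, topic, and subIndex are in scope:

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Blocks until at least 5 peers are connected on the topic, or
// returns (false, err) once the 30-second deadline fires.
found, err := s.FindPeersWithSubnet(ctx, topic, subIndex, 5)
_, _ = found, err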

View File

@@ -61,7 +61,7 @@ func (p *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
}
// FindPeersWithSubnet mocks the p2p func.
func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
return false, nil
}

View File

@@ -51,7 +51,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
func (m MockPeerManager) RefreshENR() {}
// FindPeersWithSubnet .
func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
return true, nil
}

View File

@@ -349,7 +349,7 @@ func (p *TestP2P) Peers() *peers.Status {
}
// FindPeersWithSubnet mocks the p2p func.
func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
return false, nil
}

View File

@@ -16,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/proto/eth/v2"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
ethpbalpha "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -27,6 +28,40 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
ctx, span := trace.StartSpan(ctx, "beacon.ListSyncCommittees")
defer span.End()
currentSlot := bs.GenesisTimeFetcher.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
currentPeriodStartEpoch, err := slots.SyncCommitteePeriodStartEpoch(currentEpoch)
if err != nil {
return nil, status.Errorf(
codes.Internal,
"Could not calculate start period for slot %d: %v",
currentSlot,
err,
)
}
var reqPeriodStartEpoch types.Epoch
if req.Epoch == nil {
reqPeriodStartEpoch = currentPeriodStartEpoch
} else {
reqPeriodStartEpoch, err = slots.SyncCommitteePeriodStartEpoch(*req.Epoch)
if err != nil {
return nil, status.Errorf(
codes.Internal,
"Could not calculate start period for epoch %d: %v",
*req.Epoch,
err,
)
}
if reqPeriodStartEpoch > currentPeriodStartEpoch+params.BeaconConfig().EpochsPerSyncCommitteePeriod {
return nil, status.Errorf(
codes.Internal,
"Could not fetch sync committee too far in the future. Requested epoch: %d, current epoch: %d",
*req.Epoch, currentEpoch,
)
}
}
st, err := bs.stateFromRequest(ctx, &stateRequest{
epoch: req.Epoch,
stateId: req.StateId,
@@ -35,10 +70,20 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
return nil, status.Errorf(codes.Internal, "Could not fetch beacon state using request: %v", err)
}
// Get the current sync committee and sync committee indices from the state.
committeeIndices, committee, err := currentCommitteeIndicesFromState(st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get sync committee indices from state: %v", err)
var committeeIndices []types.ValidatorIndex
var committee *ethpbalpha.SyncCommittee
if reqPeriodStartEpoch > currentPeriodStartEpoch {
// Get the next sync committee and sync committee indices from the state.
committeeIndices, committee, err = nextCommitteeIndicesFromState(st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get next sync committee indices: %v", err)
}
} else {
// Get the current sync committee and sync committee indices from the state.
committeeIndices, committee, err = currentCommitteeIndicesFromState(st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get current sync committee indices: %v", err)
}
}
subcommittees, err := extractSyncSubcommittees(st, committee)
if err != nil {
@@ -75,6 +120,28 @@ func currentCommitteeIndicesFromState(st state.BeaconState) ([]types.ValidatorIn
return committeeIndices, committee, nil
}
func nextCommitteeIndicesFromState(st state.BeaconState) ([]types.ValidatorIndex, *ethpbalpha.SyncCommittee, error) {
committee, err := st.NextSyncCommittee()
if err != nil {
return nil, nil, fmt.Errorf(
"could not get sync committee: %v", err,
)
}
committeeIndices := make([]types.ValidatorIndex, len(committee.Pubkeys))
for i, key := range committee.Pubkeys {
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
if !ok {
return nil, nil, fmt.Errorf(
"validator index not found for pubkey %#x",
bytesutil.Trunc(key),
)
}
committeeIndices[i] = index
}
return committeeIndices, committee, nil
}
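Concretely, the epoch bound in ListSyncCommittees admits requests up to one full period ahead of the current one. A worked illustration with mainnet values, consistent with TestListSyncCommitteesFuture below:

// currentPeriodStartEpoch = 0, EpochsPerSyncCommitteePeriod = 256
// reqPeriodStartEpoch = 256: 256 <= 0+256, served from NextSyncCommittee()
// reqPeriodStartEpoch = 512: 512 > 0+256, rejected as too far in the future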
func extractSyncSubcommittees(st state.BeaconState, committee *ethpbalpha.SyncCommittee) ([]*eth.SyncSubcommitteeValidators, error) {
subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount
subcommittees := make([]*ethpbv2.SyncSubcommitteeValidators, subcommitteeCount)

View File

@@ -4,6 +4,7 @@ import (
"context"
"strings"
"testing"
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
types "github.com/prysmaticlabs/eth2-types"
@@ -55,6 +56,37 @@ func Test_currentCommitteeIndicesFromState(t *testing.T) {
})
}
func Test_nextCommitteeIndicesFromState(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
vals := st.Validators()
wantedCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
wantedIndices := make([]types.ValidatorIndex, len(wantedCommittee))
for i := 0; i < len(wantedCommittee); i++ {
wantedIndices[i] = types.ValidatorIndex(i)
wantedCommittee[i] = vals[i].PublicKey
}
require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
Pubkeys: wantedCommittee,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}))
t.Run("OK", func(t *testing.T) {
indices, committee, err := nextCommitteeIndicesFromState(st)
require.NoError(t, err)
require.DeepEqual(t, wantedIndices, indices)
require.DeepEqual(t, wantedCommittee, committee.Pubkeys)
})
t.Run("validator in committee not found in state", func(t *testing.T) {
wantedCommittee[0] = bytesutil.PadTo([]byte("fakepubkey"), 48)
require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
Pubkeys: wantedCommittee,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}))
_, _, err := nextCommitteeIndicesFromState(st)
require.ErrorContains(t, "index not found for pubkey", err)
})
}
func Test_extractSyncSubcommittees(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
vals := st.Validators()
@@ -123,6 +155,9 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, err)
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
@@ -150,6 +185,57 @@ func TestListSyncCommittees(t *testing.T) {
}
}
func TestListSyncCommitteesFuture(t *testing.T) {
ctx := context.Background()
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
vals := st.Validators()
for i := 0; i < len(syncCommittee); i++ {
syncCommittee[i] = vals[i].PublicKey
}
require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{
Pubkeys: syncCommittee,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}))
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
}
req := &ethpbv2.StateSyncCommitteesRequest{}
epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod
req.Epoch = &epoch
_, err := s.ListSyncCommittees(ctx, req)
require.ErrorContains(t, "Could not fetch sync committee too far in the future", err)
epoch = 2*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
resp, err := s.ListSyncCommittees(ctx, req)
require.NoError(t, err)
require.NotNil(t, resp.Data)
committeeVals := resp.Data.Validators
require.NotNil(t, committeeVals)
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(committeeVals)), "incorrect committee size")
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
assert.Equal(t, types.ValidatorIndex(i), committeeVals[i])
}
require.NotNil(t, resp.Data.ValidatorAggregates)
assert.Equal(t, params.BeaconConfig().SyncCommitteeSubnetCount, uint64(len(resp.Data.ValidatorAggregates)))
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
vStartIndex := types.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount * i)
vEndIndex := types.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount*(i+1) - 1)
j := 0
for vIndex := vStartIndex; vIndex <= vEndIndex; vIndex++ {
assert.Equal(t, vIndex, resp.Data.ValidatorAggregates[i].Validators[j])
j++
}
}
}
func TestSubmitPoolSyncCommitteeSignatures(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
st, _ := util.DeterministicGenesisStateAltair(t, 10)

View File

@@ -48,7 +48,7 @@ func (bs *Server) ListBlocks(
switch q := req.QueryFilter.(type) {
case *ethpb.ListBlocksRequest_Epoch:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForEpoch(ctx, req, q)
if err != nil {
return nil, err
}
@@ -63,7 +63,7 @@ func (bs *Server) ListBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Root:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForRoot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -79,7 +79,7 @@ func (bs *Server) ListBlocks(
}, nil
case *ethpb.ListBlocksRequest_Slot:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForSlot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -94,7 +94,7 @@ func (bs *Server) ListBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Genesis:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForGenesis(ctx, req, q)
if err != nil {
return nil, err
}
@@ -128,7 +128,7 @@ func (bs *Server) ListBeaconBlocks(
switch q := req.QueryFilter.(type) {
case *ethpb.ListBlocksRequest_Epoch:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForEpoch(ctx, req, q)
if err != nil {
return nil, err
}
@@ -142,7 +142,7 @@ func (bs *Server) ListBeaconBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Root:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForRoot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -157,7 +157,7 @@ func (bs *Server) ListBeaconBlocks(
}, nil
case *ethpb.ListBlocksRequest_Slot:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForSlot(ctx, req, q)
if err != nil {
return nil, err
}
@@ -171,7 +171,7 @@ func (bs *Server) ListBeaconBlocks(
NextPageToken: nextPageToken,
}, nil
case *ethpb.ListBlocksRequest_Genesis:
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
ctrs, numBlks, nextPageToken, err := bs.listBlocksForGenesis(ctx, req, q)
if err != nil {
return nil, err
}
@@ -224,8 +224,8 @@ func convertToBlockContainer(blk block.SignedBeaconBlock, root [32]byte, isCanon
return ctr, nil
}
// ListBlocksForEpoch retrieves all blocks for the provided epoch.
func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]blockContainer, int, string, error) {
// listBlocksForEpoch retrieves all blocks for the provided epoch.
func (bs *Server) listBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]blockContainer, int, string, error) {
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not get blocks: %v", err)
@@ -262,8 +262,8 @@ func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksR
return containers, numBlks, nextPageToken, nil
}
// ListBlocksForRoot retrieves the block for the provided root.
func (bs *Server) ListBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]blockContainer, int, string, error) {
// listBlocksForRoot retrieves the block for the provided root.
func (bs *Server) listBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]blockContainer, int, string, error) {
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(q.Root))
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve block: %v", err)
@@ -286,8 +286,8 @@ func (bs *Server) ListBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequ
}}, 1, strconv.Itoa(0), nil
}
// ListBlocksForSlot retrieves all blocks for the provided slot.
func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]blockContainer, int, string, error) {
// listBlocksForSlot retrieves all blocks for the provided slot.
func (bs *Server) listBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]blockContainer, int, string, error) {
hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err)
@@ -323,8 +323,8 @@ func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRe
return containers, numBlks, nextPageToken, nil
}
// ListBlocksForGenesis retrieves the genesis block.
func (bs *Server) ListBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksRequest, _ *ethpb.ListBlocksRequest_Genesis) ([]blockContainer, int, string, error) {
// listBlocksForGenesis retrieves the genesis block.
func (bs *Server) listBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksRequest, _ *ethpb.ListBlocksRequest_Genesis) ([]blockContainer, int, string, error) {
genBlk, err := bs.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)

View File

@@ -4,7 +4,6 @@ import (
"context"
"sort"
"strconv"
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/api/pagination"
@@ -27,9 +26,6 @@ import (
"google.golang.org/protobuf/types/known/emptypb"
)
// BalancesTimeout for gRPC requests to ListValidatorBalances.
const BalancesTimeout = time.Second * 30
// ListValidatorBalances retrieves the validator balances for a given set of public keys.
// An optional Epoch parameter is provided to request historical validator balances from
// archived, persistent data.
@@ -37,8 +33,6 @@ func (bs *Server) ListValidatorBalances(
ctx context.Context,
req *ethpb.ListValidatorBalancesRequest,
) (*ethpb.ValidatorBalances, error) {
ctx, cancel := context.WithTimeout(ctx, BalancesTimeout)
defer cancel()
if int(req.PageSize) > cmd.Get().MaxRPCPageSize {
return nil, status.Errorf(codes.InvalidArgument, "Requested page size %d can not be greater than max size %d",
req.PageSize, cmd.Get().MaxRPCPageSize)

View File

@@ -24,6 +24,8 @@ import (
var errPubkeyDoesNotExist = errors.New("pubkey does not exist")
var nonExistentIndex = types.ValidatorIndex(^uint64(0))
const numStatesToCheck = 2
// ValidatorStatus returns the validator status of the current epoch.
// The status response can be one of the following:
// DEPOSITED - validator's deposit has been recognized on the eth1 chain but is not yet recognized by the beacon chain.
@@ -106,9 +108,17 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
if err != nil {
return nil, status.Error(codes.Internal, "Could not get head state")
}
currEpoch := slots.ToEpoch(headState.Slot())
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
// If all provided keys are recent we skip this check
// as we are unable to effectively determine if a doppelganger
// is active.
if isRecent {
return resp, nil
}
// We walk back from the current head state to the state at the beginning of the previous 2 epochs.
// Where S_i , i := 0,1,2. i = 0 would signify the current head state in this epoch.
currEpoch := slots.ToEpoch(headState.Slot())
previousEpoch, err := currEpoch.SafeSub(1)
if err != nil {
previousEpoch = currEpoch
@@ -125,18 +135,18 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
if err != nil {
return nil, status.Error(codes.Internal, "Could not get older state")
}
resp := &ethpb.DoppelGangerResponse{
resp = &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
// If the validator's last recorded epoch was
// less than or equal to 2 epochs ago, this method will not
// less than or equal to `numStatesToCheck` epochs ago, this method will not
// be able to catch duplicates. This is due to how attestation
// inclusion works, where an attestation for the current epoch
// can be included in the current or the next epoch. Depending
// on which epoch it is included in, the balance change will be
// reflected in the following epoch.
if v.Epoch+2 >= currEpoch {
if v.Epoch+numStatesToCheck >= currEpoch {
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
@@ -322,6 +332,44 @@ func (vs *Server) validatorStatus(
}
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
endSlot, err := slots.EpochEnd(epoch)
if err != nil {
return nil, err
}
retState, err := vs.StateGen.StateBySlot(ctx, endSlot)
if err != nil {
return nil, err
}
return transition.ProcessSlots(ctx, retState, retState.Slot()+1)
}
func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequest) (bool, *ethpb.DoppelGangerResponse) {
validatorsAreRecent := true
resp := &ethpb.DoppelGangerResponse{
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
}
for _, v := range req.ValidatorRequests {
// Due to how balances are reflected for individual
// validators, we can only effectively determine if a
// validator voted or not if we are able to look
// back more than `numStatesToCheck` epochs into the past.
if v.Epoch+numStatesToCheck < headEpoch {
validatorsAreRecent = false
// Zero out response if we encounter non-recent validators to
// guard against potential misuse.
resp.Responses = []*ethpb.DoppelGangerResponse_ValidatorResponse{}
break
}
resp.Responses = append(resp.Responses,
&ethpb.DoppelGangerResponse_ValidatorResponse{
PublicKey: v.PublicKey,
DuplicateExists: false,
})
}
return validatorsAreRecent, resp
}
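A concrete reading of the recency rule above: with numStatesToCheck = 2 and a head epoch of 10, a validator last recorded at epoch 8 or later satisfies v.Epoch+2 >= 10 and is reported as recent (no doppelganger signal is possible yet), while epoch 7 or earlier can be vetted against the older states. A toy evaluation:

headEpoch := types.Epoch(10)
lastRecorded := types.Epoch(8)
// Mirrors the predicate above: recent validators are skipped.
isRecent := lastRecorded+numStatesToCheck >= headEpoch // true
_ = isRecent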
func statusForPubKey(headState state.ReadOnlyBeaconState, pubKey []byte) (ethpb.ValidatorStatus, types.ValidatorIndex, error) {
if headState == nil || headState.IsNil() {
return ethpb.ValidatorStatus_UNKNOWN_STATUS, 0, errors.New("head state does not exist")
@@ -371,15 +419,3 @@ func depositStatus(depositOrBalance uint64) ethpb.ValidatorStatus {
}
return ethpb.ValidatorStatus_DEPOSITED
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
endSlot, err := slots.EpochEnd(epoch)
if err != nil {
return nil, err
}
retState, err := vs.StateGen.StateBySlot(ctx, endSlot)
if err != nil {
return nil, err
}
return transition.ProcessSlots(ctx, retState, retState.Slot()+1)
}
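The relocated helper above leans on a small trick: slots.EpochEnd(e) is the last slot of epoch e, and processing one slot past it forces the epoch transition to run, so the returned state already reflects epoch e's rewards and penalties. Illustrative slot math with mainnet's 32 slots per epoch:

// EpochEnd(2) = 3*32 - 1 = 95
// ProcessSlots(state@95, 96) crosses the epoch 2 -> 3 boundary,
// applying epoch-2 balance changes before the state is returned.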

View File

@@ -1170,25 +1170,10 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
mockGen := stategen.NewMockService()
hs, ps, os, keys := createStateSetup(t, 4, mockGen)
// Previous Epoch State
for i := 10; i < 15; i++ {
bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 100 gwei, to mock an active validator
assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-1000000000))
}
// Older Epoch State
for i := 10; i < 15; i++ {
bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
assert.NoError(t, err)
// Add 200 gwei, to mock an active validator
assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-2000000000))
}
hs, _, _, keys := createStateSetup(t, 4, mockGen)
vs := &Server{
StateGen: mockGen,
StateGen: nil,
HeadFetcher: &mockChain.ChainService{
State: hs,
},

View File

@@ -382,7 +382,7 @@ func (s *Service) logNewClientConnection(ctx context.Context) {
if !s.connectedRPCClients[clientInfo.Addr] {
log.WithFields(logrus.Fields{
"addr": clientInfo.Addr.String(),
}).Infof("NewService gRPC client connected to beacon node")
}).Infof("gRPC client connected to beacon node")
s.connectedRPCClients[clientInfo.Addr] = true
}
}

View File

@@ -3,8 +3,15 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
testonly = True,
srcs = ["mock_state_fetcher.go"],
srcs = [
"mock_genesis_timefetcher.go",
"mock_state_fetcher.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/testutil",
visibility = ["//beacon-chain:__subpackages__"],
deps = ["//beacon-chain/state:go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],
)

View File

@@ -0,0 +1,21 @@
package testutil
import (
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
)
// MockGenesisTimeFetcher is a fake implementation of the blockchain.TimeFetcher
type MockGenesisTimeFetcher struct {
Genesis time.Time
}
func (m *MockGenesisTimeFetcher) GenesisTime() time.Time {
return m.Genesis
}
func (m *MockGenesisTimeFetcher) CurrentSlot() types.Slot {
return types.Slot(uint64(time.Now().Unix()-m.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
}

View File

@@ -28,7 +28,7 @@ func (s *MockSlashingChecker) HighestAttestations(
return atts, nil
}
func (s *MockSlashingChecker) IsSlashableBlock(ctx context.Context, proposal *ethpb.SignedBeaconBlockHeader) (*ethpb.ProposerSlashing, error) {
func (s *MockSlashingChecker) IsSlashableBlock(_ context.Context, _ *ethpb.SignedBeaconBlockHeader) (*ethpb.ProposerSlashing, error) {
if s.ProposerSlashingFound {
return &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
@@ -56,7 +56,7 @@ func (s *MockSlashingChecker) IsSlashableBlock(ctx context.Context, proposal *et
return nil, nil
}
func (s *MockSlashingChecker) IsSlashableAttestation(ctx context.Context, attestation *ethpb.IndexedAttestation) ([]*ethpb.AttesterSlashing, error) {
func (s *MockSlashingChecker) IsSlashableAttestation(_ context.Context, _ *ethpb.IndexedAttestation) ([]*ethpb.AttesterSlashing, error) {
if s.AttesterSlashingFound {
return []*ethpb.AttesterSlashing{
{

View File

@@ -30,7 +30,6 @@ go_test(
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -5,8 +5,8 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -20,7 +20,7 @@ func TestFieldTrie_NewTrie(t *testing.T) {
// 5 represents the enum value of state roots
trie, err := fieldtrie.NewFieldTrie(5, stateTypes.BasicArray, newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
require.NoError(t, err)
root, err := v1.RootsArrayHashTreeRoot(newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
root, err := stateutil.RootsArrayHashTreeRoot(newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
require.NoError(t, err)
newRoot, err := trie.TrieRoot()
require.NoError(t, err)
@@ -48,7 +48,7 @@ func TestFieldTrie_RecomputeTrie(t *testing.T) {
require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[0]), changedVals[0]))
require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[1]), changedVals[1]))
expectedRoot, err := v1.ValidatorRegistryRoot(newState.Validators())
expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators())
require.NoError(t, err)
root, err := trie.RecomputeTrie(changedIdx, newState.Validators())
require.NoError(t, err)

View File

@@ -5,9 +5,14 @@ go_library(
srcs = [
"block_header_root.go",
"eth1_root.go",
"field_root_attestation.go",
"field_root_eth1.go",
"field_root_validator.go",
"field_root_vector.go",
"participation_bit_root.go",
"pending_attestation_root.go",
"reference.go",
"state_hasher.go",
"sync_committee.root.go",
"trie_helpers.go",
"validator_map_handler.go",
@@ -28,6 +33,7 @@ go_library(
],
deps = [
"//beacon-chain/core/transition/stateutils:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
@@ -35,8 +41,10 @@ go_library(
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -44,6 +52,7 @@ go_test(
name = "go_default_test",
srcs = [
"benchmark_test.go",
"field_root_test.go",
"reference_bench_test.go",
"state_root_test.go",
"stateutil_test.go",
@@ -51,7 +60,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//crypto/hash:go_default_library",

View File

@@ -12,9 +12,9 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// Eth1DataEncKey returns the encoded key in bytes of input `eth1Data`,
// eth1DataEncKey returns the encoded key in bytes of input `eth1Data`,
// the returned key bytes can be used for caching purposes.
func Eth1DataEncKey(eth1Data *ethpb.Eth1Data) []byte {
func eth1DataEncKey(eth1Data *ethpb.Eth1Data) []byte {
enc := make([]byte, 0, 96)
if eth1Data != nil {
if len(eth1Data.DepositRoot) > 0 {

View File

@@ -0,0 +1,83 @@
package stateutil
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// RootsArrayHashTreeRoot computes the Merkle root of arrays of 32-byte hashes, such as [64][32]byte,
// according to the Ethereum Simple Serialize (SSZ) specification.
func RootsArrayHashTreeRoot(vals [][]byte, length uint64, fieldName string) ([32]byte, error) {
if features.Get().EnableSSZCache {
return CachedHasher.arraysRoot(vals, length, fieldName)
}
return NocachedHasher.arraysRoot(vals, length, fieldName)
}
func (h *stateRootHasher) epochAttestationsRoot(atts []*ethpb.PendingAttestation) ([32]byte, error) {
max := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().MaxAttestations
if uint64(len(atts)) > max {
return [32]byte{}, fmt.Errorf("epoch attestation exceeds max length %d", max)
}
hasher := hash.CustomSHA256Hasher()
roots := make([][]byte, len(atts))
for i := 0; i < len(atts); i++ {
pendingRoot, err := h.pendingAttestationRoot(hasher, atts[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute pending attestation merkleization")
}
roots[i] = pendingRoot[:]
}
attsRootsRoot, err := ssz.BitwiseMerkleize(
hasher,
roots,
uint64(len(roots)),
uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)),
)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute epoch attestations merkleization")
}
attsLenBuf := new(bytes.Buffer)
if err := binary.Write(attsLenBuf, binary.LittleEndian, uint64(len(atts))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal epoch attestations length")
}
// We need to mix in the length of the slice.
attsLenRoot := make([]byte, 32)
copy(attsLenRoot, attsLenBuf.Bytes())
res := ssz.MixInLength(attsRootsRoot, attsLenRoot)
return res, nil
}
func (h *stateRootHasher) pendingAttestationRoot(hasher ssz.HashFn, att *ethpb.PendingAttestation) ([32]byte, error) {
if att == nil {
return [32]byte{}, errors.New("nil pending attestation")
}
// Marshal attestation to determine if it exists in the cache.
enc := pendingAttEncKey(att)
// Check if it exists in cache:
if h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
return found.([32]byte), nil
}
}
res, err := PendingAttRootWithHasher(hasher, att)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
h.rootsCache.Set(string(enc), res, 32)
}
return res, nil
}

View File

@@ -1,35 +1,34 @@
package v2
package stateutil
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// eth1Root computes the HashTreeRoot Merkleization of
// Eth1Root computes the HashTreeRoot Merkleization of
// an Eth1Data struct according to the eth2
// Simple Serialize specification.
func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
func Eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
if eth1Data == nil {
return [32]byte{}, errors.New("nil eth1 data")
}
enc := stateutil.Eth1DataEncKey(eth1Data)
enc := eth1DataEncKey(eth1Data)
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
if found, ok := CachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data)
root, err := Eth1DataRootWithHasher(hasher, eth1Data)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(enc), root, 32)
CachedHasher.rootsCache.Set(string(enc), root, 32)
}
return root, nil
}
@@ -38,22 +37,22 @@ func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
// a list of Eth1Data structs according to the eth2
// Simple Serialize specification.
func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes)
hashKey, err := Eth1DatasEncKey(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
if found, ok := CachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DatasRoot(eth1DataVotes)
root, err := Eth1DatasRoot(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
CachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
}
return root, nil
}

View File

@@ -1,4 +1,4 @@
package v3
package stateutil
import (
"testing"
@@ -7,17 +7,17 @@ import (
)
func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) {
_, err := nocachedHasher.arraysRoot([][]byte{}, 1, "testing")
_, err := NocachedHasher.arraysRoot([][]byte{}, 1, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 4, "testing")
_, err = NocachedHasher.arraysRoot([][]byte{}, 4, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 8, "testing")
_, err = NocachedHasher.arraysRoot([][]byte{}, 8, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 10, "testing")
_, err = NocachedHasher.arraysRoot([][]byte{}, 10, "testing")
assert.ErrorContains(t, "hash layer is a non power of 2", err)
}
func TestArraysTreeRoot_ZeroLength(t *testing.T) {
_, err := nocachedHasher.arraysRoot([][]byte{}, 0, "testing")
_, err := NocachedHasher.arraysRoot([][]byte{}, 0, "testing")
assert.ErrorContains(t, "zero leaves provided", err)
}

View File

@@ -1,11 +1,10 @@
package v3
package stateutil
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
@@ -13,6 +12,16 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of
// a list of validator structs according to the Ethereum
// Simple Serialize specification.
func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
if features.Get().EnableSSZCache {
return CachedHasher.validatorRegistryRoot(vals)
}
return NocachedHasher.validatorRegistryRoot(vals)
}
func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
hashKeyElements := make([]byte, len(validators)*32)
roots := make([][32]byte, len(validators))
@@ -59,7 +68,7 @@ func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Vali
return [32]byte{}, errors.New("nil validator")
}
enc := stateutil.ValidatorEncKey(validator)
enc := validatorEncKey(validator)
// Check if it exists in cache:
if h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
@@ -67,7 +76,7 @@ func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Vali
}
}
valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator)
valRoot, err := ValidatorRootWithHasher(hasher, validator)
if err != nil {
return [32]byte{}, err
}
@@ -77,13 +86,3 @@ func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Vali
}
return valRoot, nil
}
// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of
// a list of validator structs according to the eth2
// Simple Serialize specification.
func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.validatorRegistryRoot(vals)
}
return nocachedHasher.validatorRegistryRoot(vals)
}

View File

@@ -1,8 +1,7 @@
package v3
package stateutil
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/ssz"
)
@@ -26,7 +25,18 @@ func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName st
if len(prevLeaves) == 0 || h.rootsCache == nil {
prevLeaves = leaves
}
// Exit early if the previous leaves' length does not match the current set.
// This should never happen, but it is better to be defensive here.
if len(prevLeaves) != len(leaves) {
res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
leavesCache[fieldName] = leaves
}
return res, nil
}
for i := 0; i < len(leaves); i++ {
// We check if any items changed since the roots were last recomputed.
notEqual := leaves[i] != prevLeaves[i]
@@ -134,7 +144,7 @@ func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
}
layers[0] = hashLayer
var err error
layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher)
layers, hashLayer, err = MerkleizeTrieLeaves(layers, hashLayer, hasher)
if err != nil {
return [32]byte{}, err
}

View File

@@ -40,9 +40,9 @@ func PendingAttRootWithHasher(hasher ssz.HashFn, att *ethpb.PendingAttestation)
return ssz.BitwiseMerkleizeArrays(hasher, fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
}
// PendingAttEncKey returns the encoded key in bytes of input `pendingAttestation`,
// pendingAttEncKey returns the encoded key in bytes of input `pendingAttestation`,
// the returned key bytes can be used for caching purposes.
func PendingAttEncKey(att *ethpb.PendingAttestation) []byte {
func pendingAttEncKey(att *ethpb.PendingAttestation) []byte {
enc := make([]byte, 2192)
if att != nil {

View File

@@ -0,0 +1,547 @@
package stateutil
import (
"context"
"encoding/binary"
"sync"
"github.com/dgraph-io/ristretto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
var (
// Size the maps to match the latest state's field count.
leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
lock sync.RWMutex
)
const cacheSize = 100000
// NocachedHasher references a hasher that will not utilize a cache.
var NocachedHasher *stateRootHasher
// CachedHasher references a hasher that will utilize a roots cache.
var CachedHasher *stateRootHasher
func init() {
rootsCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cacheSize, // number of keys to track frequency of (100,000).
MaxCost: 1 << 22, // maximum cost of cache (~4 MB budget).
// 100,000 roots at a cost of 32 each take up approximately 3.2 MB.
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
panic(err)
}
// Both hashers are initialized here; callers choose between them via the EnableSSZCache feature flag.
CachedHasher = &stateRootHasher{rootsCache: rootsCache}
NocachedHasher = &stateRootHasher{}
}
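The sizing above is internally consistent once per-entry cost is considered: each root is inserted with rootsCache.Set(key, root, 32), so a full cache of cacheSize roots costs 100,000 * 32 = 3,200,000 units against the 1<<22 = 4,194,304 budget. A quick sanity check:

const rootCost = 32 // cost passed to rootsCache.Set for each root
totalCost := uint64(cacheSize) * rootCost // 100,000 * 32 = 3,200,000
budget := uint64(1 << 22) // 4,194,304
_ = totalCost < budget // true: a full cache fits within MaxCost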
// stateRootHasher defines an object through which we can
// hash the different fields in the state with a few cached layers.
type stateRootHasher struct {
rootsCache *ristretto.Cache
}
// ComputeFieldRootsWithHasherPhase0 hashes the provided phase 0 state and returns its respective field roots.
func (h *stateRootHasher) ComputeFieldRootsWithHasherPhase0(ctx context.Context, state *ethpb.BeaconState) ([][]byte, error) {
_, span := trace.StartSpan(ctx, "hasher.ComputeFieldRootsWithHasherPhase0")
defer span.End()
if state == nil {
return nil, errors.New("nil state")
}
hasher := hash.CustomSHA256Hasher()
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateFieldCount)
// Genesis time root.
genesisRoot := ssz.Uint64Root(state.GenesisTime)
fieldRoots[0] = genesisRoot[:]
// Genesis validator root.
r := [32]byte{}
copy(r[:], state.GenesisValidatorsRoot)
fieldRoots[1] = r[:]
// Slot root.
slotRoot := ssz.Uint64Root(uint64(state.Slot))
fieldRoots[2] = slotRoot[:]
// Fork data structure root.
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
if err != nil {
return nil, errors.Wrap(err, "could not compute fork merkleization")
}
fieldRoots[3] = forkHashTreeRoot[:]
// BeaconBlockHeader data structure root.
headerHashTreeRoot, err := BlockHeaderRoot(state.LatestBlockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not compute block header merkleization")
}
fieldRoots[4] = headerHashTreeRoot[:]
// BlockRoots array root.
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute block roots merkleization")
}
fieldRoots[5] = blockRootsRoot[:]
// StateRoots array root.
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute state roots merkleization")
}
fieldRoots[6] = stateRootsRoot[:]
// HistoricalRoots slice root.
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
}
fieldRoots[7] = historicalRootsRt[:]
// Eth1Data data structure root.
eth1HashTreeRoot, err := Eth1Root(hasher, state.Eth1Data)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
}
fieldRoots[8] = eth1HashTreeRoot[:]
// Eth1DataVotes slice root.
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
}
fieldRoots[9] = eth1VotesRoot[:]
// Eth1DepositIndex root.
eth1DepositIndexBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
fieldRoots[10] = eth1DepositBuf[:]
// Validators slice root.
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
}
fieldRoots[11] = validatorsRoot[:]
// Balances slice root.
balancesRoot, err := Uint64ListRootWithRegistryLimit(state.Balances)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
}
fieldRoots[12] = balancesRoot[:]
// RandaoMixes array root.
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
if err != nil {
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
}
fieldRoots[13] = randaoRootsRoot[:]
// Slashings array root.
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
if err != nil {
return nil, errors.Wrap(err, "could not compute slashings merkleization")
}
fieldRoots[14] = slashingsRootsRoot[:]
// PreviousEpochAttestations slice root.
prevAttsRoot, err := h.epochAttestationsRoot(state.PreviousEpochAttestations)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous epoch attestations merkleization")
}
fieldRoots[15] = prevAttsRoot[:]
// CurrentEpochAttestations slice root.
currAttsRoot, err := h.epochAttestationsRoot(state.CurrentEpochAttestations)
if err != nil {
return nil, errors.Wrap(err, "could not compute current epoch attestations merkleization")
}
fieldRoots[16] = currAttsRoot[:]
// JustificationBits root.
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
fieldRoots[17] = justifiedBitsRoot[:]
// PreviousJustifiedCheckpoint data structure root.
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
}
fieldRoots[18] = prevCheckRoot[:]
// CurrentJustifiedCheckpoint data structure root.
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
}
fieldRoots[19] = currJustRoot[:]
// FinalizedCheckpoint data structure root.
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
}
fieldRoots[20] = finalRoot[:]
return fieldRoots, nil
}
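The 21 field roots returned above are not yet the state root: the caller still merkleizes them pairwise into a single 32-byte hash tree root. A minimal sketch of that final fold, assuming the generic SSZ container rule (zero-pad to a power of two, then SHA-256 over sibling pairs); this is an illustration, not the helper Prysm itself calls:

import "crypto/sha256"

// merkleizeFieldRoots pads the field roots to the next power of two with
// zero chunks, then hashes sibling pairs until a single root remains.
func merkleizeFieldRoots(fieldRoots [][]byte) [32]byte {
	n := 1
	for n < len(fieldRoots) {
		n *= 2 // the 21 phase 0 fields pad up to 32 leaves
	}
	layer := make([][32]byte, n)
	for i, r := range fieldRoots {
		copy(layer[i][:], r)
	}
	for len(layer) > 1 {
		next := make([][32]byte, len(layer)/2)
		for i := range next {
			next[i] = sha256.Sum256(append(layer[2*i][:], layer[2*i+1][:]...))
		}
		layer = next
	}
	return layer[0]
}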
// ComputeFieldRootsWithHasherAltair hashes the provided altair state and returns its respective field roots.
func (h *stateRootHasher) ComputeFieldRootsWithHasherAltair(ctx context.Context, state *ethpb.BeaconStateAltair) ([][]byte, error) {
_, span := trace.StartSpan(ctx, "hasher.ComputeFieldRootsWithHasherAltair")
defer span.End()
if state == nil {
return nil, errors.New("nil state")
}
hasher := hash.CustomSHA256Hasher()
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateAltairFieldCount)
// Genesis time root.
genesisRoot := ssz.Uint64Root(state.GenesisTime)
fieldRoots[0] = genesisRoot[:]
// Genesis validators root.
r := [32]byte{}
copy(r[:], state.GenesisValidatorsRoot)
fieldRoots[1] = r[:]
// Slot root.
slotRoot := ssz.Uint64Root(uint64(state.Slot))
fieldRoots[2] = slotRoot[:]
// Fork data structure root.
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
if err != nil {
return nil, errors.Wrap(err, "could not compute fork merkleization")
}
fieldRoots[3] = forkHashTreeRoot[:]
// BeaconBlockHeader data structure root.
headerHashTreeRoot, err := BlockHeaderRoot(state.LatestBlockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not compute block header merkleization")
}
fieldRoots[4] = headerHashTreeRoot[:]
// BlockRoots array root.
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute block roots merkleization")
}
fieldRoots[5] = blockRootsRoot[:]
// StateRoots array root.
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute state roots merkleization")
}
fieldRoots[6] = stateRootsRoot[:]
// HistoricalRoots slice root.
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
}
fieldRoots[7] = historicalRootsRt[:]
// Eth1Data data structure root.
eth1HashTreeRoot, err := Eth1Root(hasher, state.Eth1Data)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
}
fieldRoots[8] = eth1HashTreeRoot[:]
// Eth1DataVotes slice root.
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
}
fieldRoots[9] = eth1VotesRoot[:]
// Eth1DepositIndex root.
eth1DepositIndexBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
fieldRoots[10] = eth1DepositBuf[:]
// Validators slice root.
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
}
fieldRoots[11] = validatorsRoot[:]
// Balances slice root.
balancesRoot, err := Uint64ListRootWithRegistryLimit(state.Balances)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
}
fieldRoots[12] = balancesRoot[:]
// RandaoMixes array root.
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
if err != nil {
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
}
fieldRoots[13] = randaoRootsRoot[:]
// Slashings array root.
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
if err != nil {
return nil, errors.Wrap(err, "could not compute slashings merkleization")
}
fieldRoots[14] = slashingsRootsRoot[:]
// PreviousEpochParticipation slice root.
prevParticipationRoot, err := ParticipationBitsRoot(state.PreviousEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization")
}
fieldRoots[15] = prevParticipationRoot[:]
// CurrentEpochParticipation slice root.
currParticipationRoot, err := ParticipationBitsRoot(state.CurrentEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute current epoch participation merkleization")
}
fieldRoots[16] = currParticipationRoot[:]
// JustificationBits root.
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
fieldRoots[17] = justifiedBitsRoot[:]
// PreviousJustifiedCheckpoint data structure root.
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
}
fieldRoots[18] = prevCheckRoot[:]
// CurrentJustifiedCheckpoint data structure root.
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
}
fieldRoots[19] = currJustRoot[:]
// FinalizedCheckpoint data structure root.
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
}
fieldRoots[20] = finalRoot[:]
// Inactivity scores root.
inactivityScoresRoot, err := Uint64ListRootWithRegistryLimit(state.InactivityScores)
if err != nil {
return nil, errors.Wrap(err, "could not compute inactivityScoreRoot")
}
fieldRoots[21] = inactivityScoresRoot[:]
// Current sync committee root.
currentSyncCommitteeRoot, err := SyncCommitteeRoot(state.CurrentSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[22] = currentSyncCommitteeRoot[:]
// Next sync committee root.
nextSyncCommitteeRoot, err := SyncCommitteeRoot(state.NextSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[23] = nextSyncCommitteeRoot[:]
return fieldRoots, nil
}
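The two participation fields above are SSZ byte lists bounded by the validator registry limit. A sketch of how such a list is rooted under the standard SSZ rules, assuming the usual byte-list treatment: pack the bytes into 32-byte chunks, merkleize against the chunk limit implied by the maximum length, then mix in the byte count. The helper names here are illustrative, not Prysm's:

import (
	"crypto/sha256"
	"encoding/binary"
)

// byteListRoot packs a byte list into 32-byte chunks, merkleizes against
// the chunk limit, and mixes in the actual byte count.
func byteListRoot(bits []byte, maxBytes uint64) [32]byte {
	chunks := make([][32]byte, (len(bits)+31)/32)
	for i, b := range bits {
		chunks[i/32][i%32] = b
	}
	root := merkleizeWithLimit(chunks, (maxBytes+31)/32)
	var lenChunk [32]byte
	binary.LittleEndian.PutUint64(lenChunk[:8], uint64(len(bits)))
	return sha256.Sum256(append(root[:], lenChunk[:]...))
}

// merkleizeWithLimit hashes up a virtual tree whose depth is fixed by the
// limit, substituting precomputed zero-subtree hashes for missing leaves.
func merkleizeWithLimit(chunks [][32]byte, limit uint64) [32]byte {
	depth := 0
	for uint64(1)<<depth < limit {
		depth++
	}
	zero := make([][32]byte, depth+1)
	for i := 1; i <= depth; i++ {
		zero[i] = sha256.Sum256(append(zero[i-1][:], zero[i-1][:]...))
	}
	layer := chunks
	for h := 0; h < depth; h++ {
		if len(layer)%2 == 1 {
			layer = append(layer, zero[h])
		}
		next := make([][32]byte, len(layer)/2)
		for i := range next {
			next[i] = sha256.Sum256(append(layer[2*i][:], layer[2*i+1][:]...))
		}
		layer = next
	}
	if len(layer) == 0 {
		return zero[depth]
	}
	return layer[0]
}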
// ComputeFieldRootsWithHasherMerge hashes the provided merge state and returns its respective field roots.
func (h *stateRootHasher) ComputeFieldRootsWithHasherMerge(ctx context.Context, state *ethpb.BeaconStateMerge) ([][]byte, error) {
_, span := trace.StartSpan(ctx, "hasher.ComputeFieldRootsWithHasherMerge")
defer span.End()
if state == nil {
return nil, errors.New("nil state")
}
hasher := hash.CustomSHA256Hasher()
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
// Genesis time root.
genesisRoot := ssz.Uint64Root(state.GenesisTime)
fieldRoots[0] = genesisRoot[:]
// Genesis validators root.
r := [32]byte{}
copy(r[:], state.GenesisValidatorsRoot)
fieldRoots[1] = r[:]
// Slot root.
slotRoot := ssz.Uint64Root(uint64(state.Slot))
fieldRoots[2] = slotRoot[:]
// Fork data structure root.
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
if err != nil {
return nil, errors.Wrap(err, "could not compute fork merkleization")
}
fieldRoots[3] = forkHashTreeRoot[:]
// BeaconBlockHeader data structure root.
headerHashTreeRoot, err := BlockHeaderRoot(state.LatestBlockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not compute block header merkleization")
}
fieldRoots[4] = headerHashTreeRoot[:]
// BlockRoots array root.
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute block roots merkleization")
}
fieldRoots[5] = blockRootsRoot[:]
// StateRoots array root.
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute state roots merkleization")
}
fieldRoots[6] = stateRootsRoot[:]
// HistoricalRoots slice root.
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
}
fieldRoots[7] = historicalRootsRt[:]
// Eth1Data data structure root.
eth1HashTreeRoot, err := Eth1Root(hasher, state.Eth1Data)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
}
fieldRoots[8] = eth1HashTreeRoot[:]
// Eth1DataVotes slice root.
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
}
fieldRoots[9] = eth1VotesRoot[:]
// Eth1DepositIndex root.
eth1DepositIndexBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
fieldRoots[10] = eth1DepositBuf[:]
// Validators slice root.
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
}
fieldRoots[11] = validatorsRoot[:]
// Balances slice root.
balancesRoot, err := Uint64ListRootWithRegistryLimit(state.Balances)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
}
fieldRoots[12] = balancesRoot[:]
// RandaoMixes array root.
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
if err != nil {
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
}
fieldRoots[13] = randaoRootsRoot[:]
// Slashings array root.
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
if err != nil {
return nil, errors.Wrap(err, "could not compute slashings merkleization")
}
fieldRoots[14] = slashingsRootsRoot[:]
// PreviousEpochParticipation slice root.
prevParticipationRoot, err := ParticipationBitsRoot(state.PreviousEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization")
}
fieldRoots[15] = prevParticipationRoot[:]
// CurrentEpochParticipation slice root.
currParticipationRoot, err := ParticipationBitsRoot(state.CurrentEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute current epoch participation merkleization")
}
fieldRoots[16] = currParticipationRoot[:]
// JustificationBits root.
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
fieldRoots[17] = justifiedBitsRoot[:]
// PreviousJustifiedCheckpoint data structure root.
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
}
fieldRoots[18] = prevCheckRoot[:]
// CurrentJustifiedCheckpoint data structure root.
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
}
fieldRoots[19] = currJustRoot[:]
// FinalizedCheckpoint data structure root.
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
}
fieldRoots[20] = finalRoot[:]
// Inactivity scores root.
inactivityScoresRoot, err := Uint64ListRootWithRegistryLimit(state.InactivityScores)
if err != nil {
return nil, errors.Wrap(err, "could not compute inactivityScoreRoot")
}
fieldRoots[21] = inactivityScoresRoot[:]
// Current sync committee root.
currentSyncCommitteeRoot, err := SyncCommitteeRoot(state.CurrentSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[22] = currentSyncCommitteeRoot[:]
// Next sync committee root.
nextSyncCommitteeRoot, err := SyncCommitteeRoot(state.NextSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[23] = nextSyncCommitteeRoot[:]
// Execution payload root.
executionPayloadRoot, err := state.LatestExecutionPayloadHeader.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not compute execution payload merkleization")
}
fieldRoots[24] = executionPayloadRoot[:]
return fieldRoots, nil
}
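For contrast with the composite fields, the simplest case needs no hashing at all: uint64 fields such as the genesis time and Eth1DepositIndex above are rooted directly. A sketch of the rule that ssz.Uint64Root is assumed to follow, matching the PutUint64/ToBytes32 pattern used inline for the deposit index:

import "encoding/binary"

// uint64Root mirrors the Eth1DepositIndex pattern above: little-endian
// encode the value into the first 8 bytes of a zeroed 32-byte chunk.
func uint64Root(v uint64) [32]byte {
	var root [32]byte
	binary.LittleEndian.PutUint64(root[:8], v)
	return root
}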


@@ -5,7 +5,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -17,7 +16,7 @@ import (
func TestReturnTrieLayer_OK(t *testing.T) {
newState, _ := util.DeterministicGenesisState(t, 32)
root, err := v1.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
root, err := stateutil.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
require.NoError(t, err)
blockRts := newState.BlockRoots()
roots := make([][32]byte, 0, len(blockRts))
@@ -32,7 +31,7 @@ func TestReturnTrieLayer_OK(t *testing.T) {
func TestReturnTrieLayerVariable_OK(t *testing.T) {
newState, _ := util.DeterministicGenesisState(t, 32)
root, err := v1.ValidatorRegistryRoot(newState.Validators())
root, err := stateutil.ValidatorRegistryRoot(newState.Validators())
require.NoError(t, err)
hasher := hash.CustomSHA256Hasher()
validators := newState.Validators()
@@ -64,7 +63,7 @@ func TestRecomputeFromLayer_FixedSizedArray(t *testing.T) {
require.NoError(t, newState.UpdateBlockRootAtIndex(changedIdx[0], changedRoots[0]))
require.NoError(t, newState.UpdateBlockRootAtIndex(changedIdx[1], changedRoots[1]))
expectedRoot, err := v1.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
expectedRoot, err := stateutil.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
require.NoError(t, err)
root, _, err := stateutil.RecomputeFromLayer(changedRoots, changedIdx, layers)
require.NoError(t, err)
@@ -98,7 +97,7 @@ func TestRecomputeFromLayer_VariableSizedArray(t *testing.T) {
require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[0]), changedVals[0]))
require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[1]), changedVals[1]))
expectedRoot, err := v1.ValidatorRegistryRoot(newState.Validators())
expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators())
require.NoError(t, err)
roots = make([][32]byte, 0, len(changedVals))
for _, val := range changedVals {


@@ -88,9 +88,9 @@ func Uint64ListRootWithRegistryLimit(balances []uint64) ([32]byte, error) {
return ssz.MixInLength(balancesRootsRoot, balancesLengthRoot), nil
}
// ValidatorEncKey returns the encoded key in bytes of input `validator`;
// validatorEncKey returns the encoded key in bytes of input `validator`;
// the returned key bytes can be used for caching purposes.
func ValidatorEncKey(validator *ethpb.Validator) []byte {
func validatorEncKey(validator *ethpb.Validator) []byte {
if validator == nil {
return nil
}
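The key only has to be deterministic: equal validators must serialize to equal bytes so cache lookups line up. Illustration only: one plausible shape for such a key, concatenating a few fixed-size validator fields (Prysm's exact field order and layout may differ):

import "encoding/binary"

// exampleValidatorKey concatenates fixed-size fields into a byte key.
// Hypothetical layout, shown only to illustrate the caching-key idea.
func exampleValidatorKey(pubkey [48]byte, withdrawalCreds [32]byte, effectiveBalance uint64) []byte {
	key := make([]byte, 0, 48+32+8)
	key = append(key, pubkey[:]...)
	key = append(key, withdrawalCreds[:]...)
	var bal [8]byte
	binary.LittleEndian.PutUint64(bal[:], effectiveBalance)
	return append(key, bal[:]...)
}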


@@ -56,7 +56,6 @@ go_library(
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",


@@ -2,203 +2,17 @@ package v1
import (
"context"
"encoding/binary"
"sync"
"github.com/dgraph-io/ristretto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
var (
leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateFieldCount)
layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateFieldCount)
lock sync.RWMutex
)
const cacheSize = 100000
var nocachedHasher *stateRootHasher
var cachedHasher *stateRootHasher
func init() {
rootsCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cacheSize, // number of keys to track frequency of (1M).
MaxCost: 1 << 22, // maximum cost of cache (3MB).
// 100,000 roots will take up approximately 3 MB in memory.
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
panic(err)
}
// cachedHasher memoizes roots in the ristretto cache above; nocachedHasher always recomputes.
cachedHasher = &stateRootHasher{rootsCache: rootsCache}
nocachedHasher = &stateRootHasher{}
}
type stateRootHasher struct {
rootsCache *ristretto.Cache
}
// computeFieldRoots returns the hash tree root computations of every field in
// the beacon state as a list of 32 byte roots.
func computeFieldRoots(ctx context.Context, state *ethpb.BeaconState) ([][]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.computeFieldRootsWithHasher(ctx, state)
return stateutil.CachedHasher.ComputeFieldRootsWithHasherPhase0(ctx, state)
}
return nocachedHasher.computeFieldRootsWithHasher(ctx, state)
}
func (h *stateRootHasher) computeFieldRootsWithHasher(ctx context.Context, state *ethpb.BeaconState) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "beaconState.computeFieldRootsWithHasher")
defer span.End()
if state == nil {
return nil, errors.New("nil state")
}
hasher := hash.CustomSHA256Hasher()
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateFieldCount)
// Genesis time root.
genesisRoot := ssz.Uint64Root(state.GenesisTime)
fieldRoots[0] = genesisRoot[:]
// Genesis validators root.
r := [32]byte{}
copy(r[:], state.GenesisValidatorsRoot)
fieldRoots[1] = r[:]
// Slot root.
slotRoot := ssz.Uint64Root(uint64(state.Slot))
fieldRoots[2] = slotRoot[:]
// Fork data structure root.
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
if err != nil {
return nil, errors.Wrap(err, "could not compute fork merkleization")
}
fieldRoots[3] = forkHashTreeRoot[:]
// BeaconBlockHeader data structure root.
headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not compute block header merkleization")
}
fieldRoots[4] = headerHashTreeRoot[:]
// BlockRoots array root.
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute block roots merkleization")
}
fieldRoots[5] = blockRootsRoot[:]
// StateRoots array root.
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute state roots merkleization")
}
fieldRoots[6] = stateRootsRoot[:]
// HistoricalRoots slice root.
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
}
fieldRoots[7] = historicalRootsRt[:]
// Eth1Data data structure root.
eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
}
fieldRoots[8] = eth1HashTreeRoot[:]
// Eth1DataVotes slice root.
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
}
fieldRoots[9] = eth1VotesRoot[:]
// Eth1DepositIndex root.
eth1DepositIndexBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
fieldRoots[10] = eth1DepositBuf[:]
// Validators slice root.
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
}
fieldRoots[11] = validatorsRoot[:]
// Balances slice root.
balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
}
fieldRoots[12] = balancesRoot[:]
// RandaoMixes array root.
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
if err != nil {
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
}
fieldRoots[13] = randaoRootsRoot[:]
// Slashings array root.
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
if err != nil {
return nil, errors.Wrap(err, "could not compute slashings merkleization")
}
fieldRoots[14] = slashingsRootsRoot[:]
// PreviousEpochAttestations slice root.
prevAttsRoot, err := h.epochAttestationsRoot(state.PreviousEpochAttestations)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous epoch attestations merkleization")
}
fieldRoots[15] = prevAttsRoot[:]
// CurrentEpochAttestations slice root.
currAttsRoot, err := h.epochAttestationsRoot(state.CurrentEpochAttestations)
if err != nil {
return nil, errors.Wrap(err, "could not compute current epoch attestations merkleization")
}
fieldRoots[16] = currAttsRoot[:]
// JustificationBits root.
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
fieldRoots[17] = justifiedBitsRoot[:]
// PreviousJustifiedCheckpoint data structure root.
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
}
fieldRoots[18] = prevCheckRoot[:]
// CurrentJustifiedCheckpoint data structure root.
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
}
fieldRoots[19] = currJustRoot[:]
// FinalizedCheckpoint data structure root.
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
}
fieldRoots[20] = finalRoot[:]
return fieldRoots, nil
return stateutil.NocachedHasher.ComputeFieldRootsWithHasherPhase0(ctx, state)
}


@@ -1,15 +1,6 @@
package v1
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -62,63 +53,3 @@ func (b *BeaconState) currentEpochAttestations() []*ethpb.PendingAttestation {
return ethpb.CopyPendingAttestationSlice(b.state.CurrentEpochAttestations)
}
func (h *stateRootHasher) epochAttestationsRoot(atts []*ethpb.PendingAttestation) ([32]byte, error) {
max := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().MaxAttestations
if uint64(len(atts)) > max {
return [32]byte{}, fmt.Errorf("epoch attestation exceeds max length %d", max)
}
hasher := hash.CustomSHA256Hasher()
roots := make([][]byte, len(atts))
for i := 0; i < len(atts); i++ {
pendingRoot, err := h.pendingAttestationRoot(hasher, atts[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not attestation merkleization")
}
roots[i] = pendingRoot[:]
}
attsRootsRoot, err := ssz.BitwiseMerkleize(
hasher,
roots,
uint64(len(roots)),
uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)),
)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute epoch attestations merkleization")
}
attsLenBuf := new(bytes.Buffer)
if err := binary.Write(attsLenBuf, binary.LittleEndian, uint64(len(atts))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal epoch attestations length")
}
// We need to mix in the length of the slice.
attsLenRoot := make([]byte, 32)
copy(attsLenRoot, attsLenBuf.Bytes())
res := ssz.MixInLength(attsRootsRoot, attsLenRoot)
return res, nil
}
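The length mix-in above is what distinguishes a list root from a vector root in SSZ. A sketch of the operation ssz.MixInLength is assumed to implement: hash the data root against a 32-byte chunk carrying the little-endian element count.

import (
	"crypto/sha256"
	"encoding/binary"
)

// mixInLength hashes a Merkle root together with the list's element count,
// so lists of different lengths with identical leading data get distinct roots.
func mixInLength(root [32]byte, length uint64) [32]byte {
	var lenChunk [32]byte
	binary.LittleEndian.PutUint64(lenChunk[:8], length)
	return sha256.Sum256(append(root[:], lenChunk[:]...))
}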
func (h *stateRootHasher) pendingAttestationRoot(hasher ssz.HashFn, att *ethpb.PendingAttestation) ([32]byte, error) {
if att == nil {
return [32]byte{}, errors.New("nil pending attestation")
}
// Marshal attestation to determine if it exists in the cache.
enc := stateutil.PendingAttEncKey(att)
// Check if it exists in cache:
if h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
return found.([32]byte), nil
}
}
res, err := stateutil.PendingAttRootWithHasher(hasher, att)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
h.rootsCache.Set(string(enc), res, 32)
}
return res, nil
}
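The encode-key/lookup/compute/store shape above recurs in eth1Root, validatorRoot, and the other cached helpers. A generic sketch of the pattern with a plain map standing in for ristretto (a toy version: unbounded and not safe for concurrent use, unlike the real cache):

// rootCache memoizes 32-byte roots by their encoding key.
type rootCache struct{ m map[string][32]byte }

// rootFor returns the cached root for encKey, computing and storing it on
// a miss. A nil receiver disables caching, like a nil rootsCache above.
func (c *rootCache) rootFor(encKey []byte, compute func() ([32]byte, error)) ([32]byte, error) {
	if c != nil {
		if r, ok := c.m[string(encKey)]; ok {
			return r, nil
		}
	}
	r, err := compute()
	if err != nil {
		return [32]byte{}, err
	}
	if c != nil {
		c.m[string(encKey)] = r
	}
	return r, nil
}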


@@ -1,10 +1,6 @@
package v1
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -93,53 +89,3 @@ func (b *BeaconState) eth1DepositIndex() uint64 {
return b.state.Eth1DepositIndex
}
// eth1Root computes the HashTreeRoot Merkleization of
// an Eth1Data struct according to the Ethereum
// Simple Serialize specification.
func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
if eth1Data == nil {
return [32]byte{}, errors.New("nil eth1 data")
}
enc := stateutil.Eth1DataEncKey(eth1Data)
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(enc), root, 32)
}
return root, nil
}
// eth1DataVotesRoot computes the HashTreeRoot Merkleization of
// a list of Eth1Data structs according to the Ethereum
// Simple Serialize specification.
func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DatasRoot(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
}
return root, nil
}


@@ -1,14 +1,9 @@
package v1
import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
@@ -166,150 +161,3 @@ func (b *BeaconState) balancesLength() int {
return len(b.state.Balances)
}
// RootsArrayHashTreeRoot computes the Merkle root of arrays of 32-byte hashes, such as [64][32]byte
// according to the Simple Serialize specification of Ethereum.
func RootsArrayHashTreeRoot(vals [][]byte, length uint64, fieldName string) ([32]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.arraysRoot(vals, length, fieldName)
}
return nocachedHasher.arraysRoot(vals, length, fieldName)
}
func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) {
lock.Lock()
defer lock.Unlock()
hashFunc := hash.CustomSHA256Hasher()
if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil {
depth := ssz.Depth(length)
layersCache[fieldName] = make([][][32]byte, depth+1)
}
leaves := make([][32]byte, length)
for i, chunk := range input {
copy(leaves[i][:], chunk)
}
bytesProcessed := 0
changedIndices := make([]int, 0)
prevLeaves, ok := leavesCache[fieldName]
if len(prevLeaves) == 0 || h.rootsCache == nil {
prevLeaves = leaves
}
for i := 0; i < len(leaves); i++ {
// We check if any items changed since the roots were last recomputed.
notEqual := leaves[i] != prevLeaves[i]
if ok && h.rootsCache != nil && notEqual {
changedIndices = append(changedIndices, i)
}
bytesProcessed += 32
}
if len(changedIndices) > 0 && h.rootsCache != nil {
var rt [32]byte
var err error
// If indices did change since last computation, we only recompute
// the modified branches in the cached Merkle tree for this state field.
chunks := leaves
// We need to ensure we recompute indices of the Merkle tree which
// changed in-between calls to this function. If the highest changed
// index is the next-to-last chunk and sits at an odd position, we also
// recompute its right neighbor so the final sibling pair is rehashed together.
maxChangedIndex := changedIndices[len(changedIndices)-1]
if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 {
changedIndices = append(changedIndices, maxChangedIndex+1)
}
for i := 0; i < len(changedIndices); i++ {
rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc)
if err != nil {
return [32]byte{}, err
}
}
leavesCache[fieldName] = chunks
return rt, nil
}
res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
leavesCache[fieldName] = leaves
}
return res, nil
}
func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
items, ok := layersCache[fieldName]
if !ok {
return [32]byte{}, errors.New("could not recompute root as there was no cache found")
}
if items == nil {
return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache")
}
layers := items
root := chunks[idx]
layers[0] = chunks
// The merkle tree structure looks as follows:
// [[r1, r2, r3, r4], [parent1, parent2], [root]]
// Using information about the index which changed, idx, we recompute
// only its branch up the tree.
currentIndex := idx
for i := 0; i < len(layers)-1; i++ {
isLeft := currentIndex%2 == 0
neighborIdx := currentIndex ^ 1
neighbor := [32]byte{}
if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
neighbor = layers[i][neighborIdx]
}
if isLeft {
parentHash := hasher(append(root[:], neighbor[:]...))
root = parentHash
} else {
parentHash := hasher(append(neighbor[:], root[:]...))
root = parentHash
}
parentIdx := currentIndex / 2
// Update the cached layers at the parent index.
if len(layers[i+1]) == 0 {
layers[i+1] = append(layers[i+1], root)
} else {
layers[i+1][parentIdx] = root
}
currentIndex = parentIdx
}
layersCache[fieldName] = layers
// If there is only a single leaf, we return it (the identity element).
if len(layers[0]) == 1 {
return layers[0][0], nil
}
return root, nil
}
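To make the branch update concrete: with four cached leaves l0..l3, changing l2 only requires rehashing its parent H(l2 || l3) and then the root H(H(l0 || l1) || H(l2 || l3)), reusing the cached left parent. A toy illustration under those assumptions (not Prysm code):

import "crypto/sha256"

// recomputeBranchOfFour updates the root of a 4-leaf tree after leaf 2
// changed, reusing the cached hash of the untouched left sibling pair.
func recomputeBranchOfFour(leaves [4][32]byte, cachedLeftParent [32]byte) [32]byte {
	rightParent := sha256.Sum256(append(leaves[2][:], leaves[3][:]...))
	return sha256.Sum256(append(cachedLeftParent[:], rightParent[:]...))
}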
func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
if len(leaves) == 0 {
return [32]byte{}, errors.New("zero leaves provided")
}
if len(leaves) == 1 {
return leaves[0], nil
}
hashLayer := leaves
layers := make([][][32]byte, ssz.Depth(length)+1)
if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil {
if len(items[0]) == len(leaves) {
layers = items
}
}
layers[0] = hashLayer
var err error
layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher)
if err != nil {
return [32]byte{}, err
}
root := hashLayer[0]
if h.rootsCache != nil {
layersCache[fieldName] = layers
}
return root, nil
}

View File

@@ -1,19 +1,12 @@
package v1
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -302,78 +295,3 @@ func (b *BeaconState) slashings() []uint64 {
copy(res, b.state.Slashings)
return res
}
func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
hashKeyElements := make([]byte, len(validators)*32)
roots := make([][32]byte, len(validators))
emptyKey := hash.FastSum256(hashKeyElements)
hasher := hash.CustomSHA256Hasher()
bytesProcessed := 0
for i := 0; i < len(validators); i++ {
val, err := h.validatorRoot(hasher, validators[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
}
copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:])
roots[i] = val
bytesProcessed += 32
}
hashKey := hash.FastSum256(hashKeyElements)
if hashKey != emptyKey && h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok {
return found.([32]byte), nil
}
}
validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute validator registry merkleization")
}
validatorsRootsBuf := new(bytes.Buffer)
if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length")
}
// We need to mix in the length of the slice.
var validatorsRootsBufRoot [32]byte
copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes())
res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:])
if hashKey != emptyKey && h.rootsCache != nil {
h.rootsCache.Set(string(hashKey[:]), res, 32)
}
return res, nil
}
func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) {
if validator == nil {
return [32]byte{}, errors.New("nil validator")
}
enc := stateutil.ValidatorEncKey(validator)
// Check if it exists in cache:
if h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
return found.([32]byte), nil
}
}
valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
h.rootsCache.Set(string(enc), valRoot, 32)
}
return valRoot, nil
}
// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of
// a list of validator structs according to the Ethereum
// Simple Serialize specification.
func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.validatorRegistryRoot(vals)
}
return nocachedHasher.validatorRegistryRoot(vals)
}


@@ -18,19 +18,3 @@ func TestBeaconState_ValidatorAtIndexReadOnly_HandlesNilSlice(t *testing.T) {
_, err = st.ValidatorAtIndexReadOnly(0)
assert.Equal(t, v1.ErrNilValidatorsInState, err)
}
func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) {
_, err := v1.RootsArrayHashTreeRoot([][]byte{}, 1, "testing")
assert.NoError(t, err)
_, err = v1.RootsArrayHashTreeRoot([][]byte{}, 4, "testing")
assert.NoError(t, err)
_, err = v1.RootsArrayHashTreeRoot([][]byte{}, 8, "testing")
assert.NoError(t, err)
_, err = v1.RootsArrayHashTreeRoot([][]byte{}, 10, "testing")
assert.ErrorContains(t, "hash layer is a non power of 2", err)
}
func TestArraysTreeRoot_ZeroLength(t *testing.T) {
_, err := v1.RootsArrayHashTreeRoot([][]byte{}, 0, "testing")
assert.ErrorContains(t, "zero leaves provided", err)
}


@@ -294,7 +294,7 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
case historicalRoots:
return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
case eth1Data:
return eth1Root(hasher, b.state.Eth1Data)
return stateutil.Eth1Root(hasher, b.state.Eth1Data)
case eth1DataVotes:
if b.rebuildTrie[field] {
err := b.resetFieldTrie(


@@ -6,17 +6,17 @@ import (
)
// AppendCurrentParticipationBits is not supported for phase 0 beacon state.
func (b *BeaconState) AppendCurrentParticipationBits(val byte) error {
func (*BeaconState) AppendCurrentParticipationBits(_ byte) error {
return errors.New("AppendCurrentParticipationBits is not supported for phase 0 beacon state")
}
// AppendPreviousParticipationBits is not supported for phase 0 beacon state.
func (b *BeaconState) AppendPreviousParticipationBits(val byte) error {
func (*BeaconState) AppendPreviousParticipationBits(_ byte) error {
return errors.New("AppendPreviousParticipationBits is not supported for phase 0 beacon state")
}
// AppendInactivityScore is not supported for phase 0 beacon state.
func (b *BeaconState) AppendInactivityScore(s uint64) error {
func (*BeaconState) AppendInactivityScore(_ uint64) error {
return errors.New("AppendInactivityScore is not supported for phase 0 beacon state")
}


@@ -5,9 +5,6 @@ go_library(
srcs = [
"deprecated_getters.go",
"deprecated_setters.go",
"field_root_eth1.go",
"field_root_validator.go",
"field_root_vector.go",
"field_roots.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -51,7 +48,6 @@ go_library(
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -67,7 +63,6 @@ go_test(
srcs = [
"deprecated_getters_test.go",
"deprecated_setters_test.go",
"field_root_test.go",
"getters_block_test.go",
"getters_test.go",
"getters_validator_test.go",


@@ -6,17 +6,17 @@ import (
)
// SetPreviousEpochAttestations is not supported for HF1 beacon state.
func (b *BeaconState) SetPreviousEpochAttestations(val []*ethpb.PendingAttestation) error {
func (b *BeaconState) SetPreviousEpochAttestations(_ []*ethpb.PendingAttestation) error {
return errors.New("SetPreviousEpochAttestations is not supported for hard fork 1 beacon state")
}
// SetCurrentEpochAttestations is not supported for HF1 beacon state.
func (b *BeaconState) SetCurrentEpochAttestations(val []*ethpb.PendingAttestation) error {
func (b *BeaconState) SetCurrentEpochAttestations(_ []*ethpb.PendingAttestation) error {
return errors.New("SetCurrentEpochAttestations is not supported for hard fork 1 beacon state")
}
// AppendCurrentEpochAttestations is not supported for HF1 beacon state.
func (b *BeaconState) AppendCurrentEpochAttestations(val *ethpb.PendingAttestation) error {
func (b *BeaconState) AppendCurrentEpochAttestations(_ *ethpb.PendingAttestation) error {
return errors.New("AppendCurrentEpochAttestations is not supported for hard fork 1 beacon state")
}


@@ -1,23 +0,0 @@
package v2
import (
"testing"
"github.com/prysmaticlabs/prysm/testing/assert"
)
func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) {
_, err := nocachedHasher.arraysRoot([][]byte{}, 1, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 4, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 8, "testing")
assert.NoError(t, err)
_, err = nocachedHasher.arraysRoot([][]byte{}, 10, "testing")
assert.ErrorContains(t, "hash layer is a non power of 2", err)
}
func TestArraysTreeRoot_ZeroLength(t *testing.T) {
_, err := nocachedHasher.arraysRoot([][]byte{}, 0, "testing")
assert.ErrorContains(t, "zero leaves provided", err)
}


@@ -1,78 +0,0 @@
package v2
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
hashKeyElements := make([]byte, len(validators)*32)
roots := make([][32]byte, len(validators))
emptyKey := hash.FastSum256(hashKeyElements)
hasher := hash.CustomSHA256Hasher()
bytesProcessed := 0
for i := 0; i < len(validators); i++ {
val, err := h.validatorRoot(hasher, validators[i])
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
}
copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:])
roots[i] = val
bytesProcessed += 32
}
hashKey := hash.FastSum256(hashKeyElements)
if hashKey != emptyKey && h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok {
return found.([32]byte), nil
}
}
validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute validator registry merkleization")
}
validatorsRootsBuf := new(bytes.Buffer)
if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length")
}
// We need to mix in the length of the slice.
var validatorsRootsBufRoot [32]byte
copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes())
res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:])
if hashKey != emptyKey && h.rootsCache != nil {
h.rootsCache.Set(string(hashKey[:]), res, 32)
}
return res, nil
}
func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) {
if validator == nil {
return [32]byte{}, errors.New("nil validator")
}
enc := stateutil.ValidatorEncKey(validator)
// Check if it exists in cache:
if h.rootsCache != nil {
if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
return found.([32]byte), nil
}
}
valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
h.rootsCache.Set(string(enc), valRoot, 32)
}
return valRoot, nil
}


@@ -1,146 +0,0 @@
package v2
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/ssz"
)
func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) {
lock.Lock()
defer lock.Unlock()
hashFunc := hash.CustomSHA256Hasher()
if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil {
depth := ssz.Depth(length)
layersCache[fieldName] = make([][][32]byte, depth+1)
}
leaves := make([][32]byte, length)
for i, chunk := range input {
copy(leaves[i][:], chunk)
}
bytesProcessed := 0
changedIndices := make([]int, 0)
prevLeaves, ok := leavesCache[fieldName]
if len(prevLeaves) == 0 || h.rootsCache == nil {
prevLeaves = leaves
}
for i := 0; i < len(leaves); i++ {
// We check if any items changed since the roots were last recomputed.
notEqual := leaves[i] != prevLeaves[i]
if ok && h.rootsCache != nil && notEqual {
changedIndices = append(changedIndices, i)
}
bytesProcessed += 32
}
if len(changedIndices) > 0 && h.rootsCache != nil {
var rt [32]byte
var err error
// If indices did change since last computation, we only recompute
// the modified branches in the cached Merkle tree for this state field.
chunks := leaves
// We need to ensure we recompute indices of the Merkle tree which
// changed in-between calls to this function. If the highest changed
// index is the next-to-last chunk and sits at an odd position, we also
// recompute its right neighbor so the final sibling pair is rehashed together.
maxChangedIndex := changedIndices[len(changedIndices)-1]
if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 {
changedIndices = append(changedIndices, maxChangedIndex+1)
}
for i := 0; i < len(changedIndices); i++ {
rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc)
if err != nil {
return [32]byte{}, err
}
}
leavesCache[fieldName] = chunks
return rt, nil
}
res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc)
if err != nil {
return [32]byte{}, err
}
if h.rootsCache != nil {
leavesCache[fieldName] = leaves
}
return res, nil
}
func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
items, ok := layersCache[fieldName]
if !ok {
return [32]byte{}, errors.New("could not recompute root as there was no cache found")
}
if items == nil {
return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache")
}
layers := items
root := chunks[idx]
layers[0] = chunks
// The merkle tree structure looks as follows:
// [[r1, r2, r3, r4], [parent1, parent2], [root]]
// Using information about the index which changed, idx, we recompute
// only its branch up the tree.
currentIndex := idx
for i := 0; i < len(layers)-1; i++ {
isLeft := currentIndex%2 == 0
neighborIdx := currentIndex ^ 1
neighbor := [32]byte{}
if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
neighbor = layers[i][neighborIdx]
}
if isLeft {
parentHash := hasher(append(root[:], neighbor[:]...))
root = parentHash
} else {
parentHash := hasher(append(neighbor[:], root[:]...))
root = parentHash
}
parentIdx := currentIndex / 2
// Update the cached layers at the parent index.
if len(layers[i+1]) == 0 {
layers[i+1] = append(layers[i+1], root)
} else {
layers[i+1][parentIdx] = root
}
currentIndex = parentIdx
}
layersCache[fieldName] = layers
// If there is only a single leaf, we return it (the identity element).
if len(layers[0]) == 1 {
return layers[0][0], nil
}
return root, nil
}
func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
if len(leaves) == 0 {
return [32]byte{}, errors.New("zero leaves provided")
}
if len(leaves) == 1 {
return leaves[0], nil
}
hashLayer := leaves
layers := make([][][32]byte, ssz.Depth(length)+1)
if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil {
if len(items[0]) == len(leaves) {
layers = items
}
}
layers[0] = hashLayer
var err error
layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher)
if err != nil {
return [32]byte{}, err
}
root := hashLayer[0]
if h.rootsCache != nil {
layersCache[fieldName] = layers
}
return root, nil
}


@@ -2,225 +2,17 @@ package v2
import (
"context"
"encoding/binary"
"sync"
"github.com/dgraph-io/ristretto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
var (
leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateAltairFieldCount)
layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateAltairFieldCount)
lock sync.RWMutex
)
const cacheSize = 100000
var nocachedHasher *stateRootHasher
var cachedHasher *stateRootHasher
func init() {
rootsCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cacheSize, // number of keys to track frequency of (1M).
MaxCost: 1 << 22, // maximum cost of cache (3MB).
// 100,000 roots will take up approximately 3 MB in memory.
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
panic(err)
}
// cachedHasher memoizes roots in the ristretto cache above; nocachedHasher always recomputes.
cachedHasher = &stateRootHasher{rootsCache: rootsCache}
nocachedHasher = &stateRootHasher{}
}
type stateRootHasher struct {
rootsCache *ristretto.Cache
}
// computeFieldRoots returns the hash tree root computations of every field in
// the beacon state as a list of 32 byte roots.
func computeFieldRoots(ctx context.Context, state *ethpb.BeaconStateAltair) ([][]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.computeFieldRootsWithHasher(ctx, state)
return stateutil.CachedHasher.ComputeFieldRootsWithHasherAltair(ctx, state)
}
return nocachedHasher.computeFieldRootsWithHasher(ctx, state)
}
func (h *stateRootHasher) computeFieldRootsWithHasher(ctx context.Context, state *ethpb.BeaconStateAltair) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "beaconState.computeFieldRootsWithHasher")
defer span.End()
if state == nil {
return nil, errors.New("nil state")
}
hasher := hash.CustomSHA256Hasher()
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateAltairFieldCount)
// Genesis time root.
genesisRoot := ssz.Uint64Root(state.GenesisTime)
fieldRoots[0] = genesisRoot[:]
// Genesis validators root.
r := [32]byte{}
copy(r[:], state.GenesisValidatorsRoot)
fieldRoots[1] = r[:]
// Slot root.
slotRoot := ssz.Uint64Root(uint64(state.Slot))
fieldRoots[2] = slotRoot[:]
// Fork data structure root.
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
if err != nil {
return nil, errors.Wrap(err, "could not compute fork merkleization")
}
fieldRoots[3] = forkHashTreeRoot[:]
// BeaconBlockHeader data structure root.
headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not compute block header merkleization")
}
fieldRoots[4] = headerHashTreeRoot[:]
// BlockRoots array root.
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute block roots merkleization")
}
fieldRoots[5] = blockRootsRoot[:]
// StateRoots array root.
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute state roots merkleization")
}
fieldRoots[6] = stateRootsRoot[:]
// HistoricalRoots slice root.
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
}
fieldRoots[7] = historicalRootsRt[:]
// Eth1Data data structure root.
eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
}
fieldRoots[8] = eth1HashTreeRoot[:]
// Eth1DataVotes slice root.
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
}
fieldRoots[9] = eth1VotesRoot[:]
// Eth1DepositIndex root.
eth1DepositIndexBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
fieldRoots[10] = eth1DepositBuf[:]
// Validators slice root.
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
}
fieldRoots[11] = validatorsRoot[:]
// Balances slice root.
balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
}
fieldRoots[12] = balancesRoot[:]
// RandaoMixes array root.
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
if err != nil {
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
}
fieldRoots[13] = randaoRootsRoot[:]
// Slashings array root.
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
if err != nil {
return nil, errors.Wrap(err, "could not compute slashings merkleization")
}
fieldRoots[14] = slashingsRootsRoot[:]
// PreviousEpochParticipation slice root.
prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization")
}
fieldRoots[15] = prevParticipationRoot[:]
// CurrentEpochParticipation slice root.
currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute current epoch participation merkleization")
}
fieldRoots[16] = currParticipationRoot[:]
// JustificationBits root.
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
fieldRoots[17] = justifiedBitsRoot[:]
// PreviousJustifiedCheckpoint data structure root.
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
}
fieldRoots[18] = prevCheckRoot[:]
// CurrentJustifiedCheckpoint data structure root.
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
}
fieldRoots[19] = currJustRoot[:]
// FinalizedCheckpoint data structure root.
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
}
fieldRoots[20] = finalRoot[:]
// Inactivity scores root.
inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores)
if err != nil {
return nil, errors.Wrap(err, "could not compute inactivityScoreRoot")
}
fieldRoots[21] = inactivityScoresRoot[:]
// Current sync committee root.
currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[22] = currentSyncCommitteeRoot[:]
// Next sync committee root.
nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[23] = nextSyncCommitteeRoot[:]
return fieldRoots, nil
return stateutil.NocachedHasher.ComputeFieldRootsWithHasherAltair(ctx, state)
}


@@ -299,7 +299,7 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
case historicalRoots:
return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
case eth1Data:
return eth1Root(hasher, b.state.Eth1Data)
return stateutil.Eth1Root(hasher, b.state.Eth1Data)
case eth1DataVotes:
if b.rebuildTrie[field] {
err := b.resetFieldTrie(


@@ -5,9 +5,6 @@ go_library(
srcs = [
"deprecated_getters.go",
"deprecated_setters.go",
"field_root_eth1.go",
"field_root_validator.go",
"field_root_vector.go",
"field_roots.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -33,7 +30,10 @@ go_library(
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3",
visibility = ["//beacon-chain:__pkg__"],
visibility = [
"//beacon-chain:__subpackages__",
"//testing/util:__pkg__",
],
deps = [
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
@@ -42,17 +42,18 @@ go_library(
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//container/slice:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -62,17 +63,24 @@ go_test(
srcs = [
"deprecated_getters_test.go",
"deprecated_setters_test.go",
"field_root_test.go",
"getters_block_test.go",
"getters_test.go",
"getters_validator_test.go",
"setters_test.go",
"state_trie_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -6,17 +6,17 @@ import (
)
// SetPreviousEpochAttestations is not supported for HF1 beacon state.
func (b *BeaconState) SetPreviousEpochAttestations(val []*ethpb.PendingAttestation) error {
func (*BeaconState) SetPreviousEpochAttestations(_ []*ethpb.PendingAttestation) error {
return errors.New("SetPreviousEpochAttestations is not supported for version Merge beacon state")
}
// SetCurrentEpochAttestations is not supported for HF1 beacon state.
func (b *BeaconState) SetCurrentEpochAttestations(val []*ethpb.PendingAttestation) error {
func (*BeaconState) SetCurrentEpochAttestations(_ []*ethpb.PendingAttestation) error {
return errors.New("SetCurrentEpochAttestations is not supported for version Merge beacon state")
}
// AppendCurrentEpochAttestations is not supported for HF1 beacon state.
func (b *BeaconState) AppendCurrentEpochAttestations(val *ethpb.PendingAttestation) error {
func (*BeaconState) AppendCurrentEpochAttestations(_ *ethpb.PendingAttestation) error {
return errors.New("AppendCurrentEpochAttestations is not supported for version Merge beacon state")
}

View File

@@ -1,59 +0,0 @@
package v3
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// eth1Root computes the HashTreeRoot Merkleization of
// an Eth1Data struct according to the eth2
// Simple Serialize specification.
func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
if eth1Data == nil {
return [32]byte{}, errors.New("nil eth1 data")
}
enc := stateutil.Eth1DataEncKey(eth1Data)
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(enc), root, 32)
}
return root, nil
}
// eth1DataVotesRoot computes the HashTreeRoot Merkleization of
// a list of Eth1Data structs according to the eth2
// Simple Serialize specification.
func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
return found.([32]byte), nil
}
}
root, err := stateutil.Eth1DatasRoot(eth1DataVotes)
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableSSZCache {
cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
}
return root, nil
}
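Both deleted helpers follow the same memoization pattern: derive a deterministic encoding key for the input, consult the ristretto-backed roots cache, and fall back to the real hasher only on a miss. A generic sketch of that pattern, assuming a ristretto cache like the one initialized below (rootFn and the cost value are illustrative):

package v3sketch

import (
	"github.com/dgraph-io/ristretto"
)

// cachedRoot memoizes an expensive 32-byte root computation behind a
// ristretto cache, keyed by a deterministic encoding of the input.
func cachedRoot(cache *ristretto.Cache, key []byte, rootFn func() ([32]byte, error)) ([32]byte, error) {
	if found, ok := cache.Get(string(key)); ok && found != nil {
		return found.([32]byte), nil // cache hit: reuse the stored root
	}
	root, err := rootFn()
	if err != nil {
		return [32]byte{}, err
	}
	// Cost 32 matches the 32-byte payload, as in the surrounding code.
	cache.Set(string(key), root, 32)
	return root, nil
}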

View File

@@ -1,226 +1,19 @@
package v3
import (
"encoding/binary"
"sync"
"context"
"github.com/dgraph-io/ristretto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
var (
leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
lock sync.RWMutex
)
const cacheSize = 100000
var nocachedHasher *stateRootHasher
var cachedHasher *stateRootHasher
func init() {
rootsCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cacheSize, // number of keys to track frequency of (100,000).
MaxCost: 1 << 22, // maximum cost of cache (~4 MB).
// 100,000 roots will take up approximately 3 MB in memory.
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
panic(err)
}
// The cached hasher consults the roots cache; the nocached hasher always recomputes.
cachedHasher = &stateRootHasher{rootsCache: rootsCache}
nocachedHasher = &stateRootHasher{}
}
type stateRootHasher struct {
rootsCache *ristretto.Cache
}
// computeFieldRoots returns the hash tree root computations of every field in
// the beacon state as a list of 32 byte roots.
//nolint:deadcode
func computeFieldRoots(state *ethpb.BeaconStateMerge) ([][]byte, error) {
func computeFieldRoots(ctx context.Context, state *ethpb.BeaconStateMerge) ([][]byte, error) {
if features.Get().EnableSSZCache {
return cachedHasher.computeFieldRootsWithHasher(state)
return stateutil.CachedHasher.ComputeFieldRootsWithHasherMerge(ctx, state)
}
return nocachedHasher.computeFieldRootsWithHasher(state)
}
func (h *stateRootHasher) computeFieldRootsWithHasher(state *ethpb.BeaconStateMerge) ([][]byte, error) {
if state == nil {
return nil, errors.New("nil state")
}
hasher := hash.CustomSHA256Hasher()
fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
// Genesis time root.
genesisRoot := ssz.Uint64Root(state.GenesisTime)
fieldRoots[0] = genesisRoot[:]
// Genesis validators root.
r := [32]byte{}
copy(r[:], state.GenesisValidatorsRoot)
fieldRoots[1] = r[:]
// Slot root.
slotRoot := ssz.Uint64Root(uint64(state.Slot))
fieldRoots[2] = slotRoot[:]
// Fork data structure root.
forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
if err != nil {
return nil, errors.Wrap(err, "could not compute fork merkleization")
}
fieldRoots[3] = forkHashTreeRoot[:]
// BeaconBlockHeader data structure root.
headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader)
if err != nil {
return nil, errors.Wrap(err, "could not compute block header merkleization")
}
fieldRoots[4] = headerHashTreeRoot[:]
// BlockRoots array root.
blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute block roots merkleization")
}
fieldRoots[5] = blockRootsRoot[:]
// StateRoots array root.
stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
if err != nil {
return nil, errors.Wrap(err, "could not compute state roots merkleization")
}
fieldRoots[6] = stateRootsRoot[:]
// HistoricalRoots slice root.
historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
if err != nil {
return nil, errors.Wrap(err, "could not compute historical roots merkleization")
}
fieldRoots[7] = historicalRootsRt[:]
// Eth1Data data structure root.
eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data merkleization")
}
fieldRoots[8] = eth1HashTreeRoot[:]
// Eth1DataVotes slice root.
eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
if err != nil {
return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
}
fieldRoots[9] = eth1VotesRoot[:]
// Eth1DepositIndex root.
eth1DepositIndexBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
fieldRoots[10] = eth1DepositBuf[:]
// Validators slice root.
validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator registry merkleization")
}
fieldRoots[11] = validatorsRoot[:]
// Balances slice root.
balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances)
if err != nil {
return nil, errors.Wrap(err, "could not compute validator balances merkleization")
}
fieldRoots[12] = balancesRoot[:]
// RandaoMixes array root.
randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
if err != nil {
return nil, errors.Wrap(err, "could not compute randao roots merkleization")
}
fieldRoots[13] = randaoRootsRoot[:]
// Slashings array root.
slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
if err != nil {
return nil, errors.Wrap(err, "could not compute slashings merkleization")
}
fieldRoots[14] = slashingsRootsRoot[:]
// PreviousEpochParticipation slice root.
prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization")
}
fieldRoots[15] = prevParticipationRoot[:]
// CurrentEpochParticipation slice root.
currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation)
if err != nil {
return nil, errors.Wrap(err, "could not compute current epoch participation merkleization")
}
fieldRoots[16] = currParticipationRoot[:]
// JustificationBits root.
justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
fieldRoots[17] = justifiedBitsRoot[:]
// PreviousJustifiedCheckpoint data structure root.
prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
}
fieldRoots[18] = prevCheckRoot[:]
// CurrentJustifiedCheckpoint data structure root.
currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
}
fieldRoots[19] = currJustRoot[:]
// FinalizedCheckpoint data structure root.
finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
if err != nil {
return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
}
fieldRoots[20] = finalRoot[:]
// Inactivity scores root.
inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores)
if err != nil {
return nil, errors.Wrap(err, "could not compute inactivityScoreRoot")
}
fieldRoots[21] = inactivityScoresRoot[:]
// Current sync committee root.
currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[22] = currentSyncCommitteeRoot[:]
// Next sync committee root.
nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee)
if err != nil {
return nil, errors.Wrap(err, "could not compute sync committee merkleization")
}
fieldRoots[23] = nextSyncCommitteeRoot[:]
// Execution payload root.
//TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65
fieldRoots[24] = []byte{}
return fieldRoots, nil
return stateutil.NocachedHasher.ComputeFieldRootsWithHasherMerge(ctx, state)
}
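For primitive uint64 fields such as Eth1DepositIndex above, SSZ merkleization packs the value little-endian into the first 8 bytes of a zeroed 32-byte chunk; ssz.Uint64Root amounts to the following minimal sketch:

package v3sketch

import "encoding/binary"

// uint64Root packs a uint64 little-endian into a 32-byte SSZ chunk;
// the remaining 24 bytes stay zero.
func uint64Root(v uint64) [32]byte {
	var root [32]byte
	binary.LittleEndian.PutUint64(root[:8], v)
	return root
}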

View File

@@ -23,31 +23,32 @@ func (b *BeaconState) CloneInnerState() interface{} {
b.lock.RLock()
defer b.lock.RUnlock()
return &ethpb.BeaconStateAltair{
GenesisTime: b.genesisTime(),
GenesisValidatorsRoot: b.genesisValidatorRoot(),
Slot: b.slot(),
Fork: b.fork(),
LatestBlockHeader: b.latestBlockHeader(),
BlockRoots: b.blockRoots(),
StateRoots: b.stateRoots(),
HistoricalRoots: b.historicalRoots(),
Eth1Data: b.eth1Data(),
Eth1DataVotes: b.eth1DataVotes(),
Eth1DepositIndex: b.eth1DepositIndex(),
Validators: b.validators(),
Balances: b.balances(),
RandaoMixes: b.randaoMixes(),
Slashings: b.slashings(),
CurrentEpochParticipation: b.currentEpochParticipation(),
PreviousEpochParticipation: b.previousEpochParticipation(),
JustificationBits: b.justificationBits(),
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint(),
FinalizedCheckpoint: b.finalizedCheckpoint(),
InactivityScores: b.inactivityScores(),
CurrentSyncCommittee: b.currentSyncCommittee(),
NextSyncCommittee: b.nextSyncCommittee(),
return &ethpb.BeaconStateMerge{
GenesisTime: b.genesisTime(),
GenesisValidatorsRoot: b.genesisValidatorRoot(),
Slot: b.slot(),
Fork: b.fork(),
LatestBlockHeader: b.latestBlockHeader(),
BlockRoots: b.blockRoots(),
StateRoots: b.stateRoots(),
HistoricalRoots: b.historicalRoots(),
Eth1Data: b.eth1Data(),
Eth1DataVotes: b.eth1DataVotes(),
Eth1DepositIndex: b.eth1DepositIndex(),
Validators: b.validators(),
Balances: b.balances(),
RandaoMixes: b.randaoMixes(),
Slashings: b.slashings(),
CurrentEpochParticipation: b.currentEpochParticipation(),
PreviousEpochParticipation: b.previousEpochParticipation(),
JustificationBits: b.justificationBits(),
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint(),
FinalizedCheckpoint: b.finalizedCheckpoint(),
InactivityScores: b.inactivityScores(),
CurrentSyncCommittee: b.currentSyncCommittee(),
NextSyncCommittee: b.nextSyncCommittee(),
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader(),
}
}
@@ -112,16 +113,15 @@ func (b *BeaconState) MarshalSSZ() ([]byte, error) {
if !b.hasInnerState() {
return nil, errors.New("nil beacon state")
}
//TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65
return []byte{}, nil
return b.state.MarshalSSZ()
}
// ProtobufBeaconState transforms an input into beacon state hard fork 1 in the form of protobuf.
// ProtobufBeaconState transforms an input into beacon state Merge in the form of protobuf.
// Error is returned if the input is not type protobuf beacon state.
func ProtobufBeaconState(s interface{}) (*ethpb.BeaconStateAltair, error) {
pbState, ok := s.(*ethpb.BeaconStateAltair)
func ProtobufBeaconState(s interface{}) (*ethpb.BeaconStateMerge, error) {
pbState, ok := s.(*ethpb.BeaconStateMerge)
if !ok {
return nil, errors.New("input is not type pb.BeaconStateAltair")
return nil, errors.New("input is not type pb.BeaconStateMerge")
}
return pbState, nil
}
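A hedged usage sketch of the new helper, assuming it sits alongside this package's BeaconState and uses the CloneInnerState accessor shown above to recover the concrete Merge protobuf:

// mergeSlot is an illustrative caller: convert the opaque inner state
// back to its concrete Merge protobuf and read a field off it.
func mergeSlot(st *BeaconState) (types.Slot, error) {
	pbState, err := ProtobufBeaconState(st.CloneInnerState())
	if err != nil {
		return 0, err // input was not *ethpb.BeaconStateMerge
	}
	return pbState.Slot, nil
}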

View File

@@ -5,7 +5,9 @@ import (
"sync"
"testing"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -86,3 +88,105 @@ func TestNilState_NoPanic(t *testing.T) {
_, err = st.NextSyncCommittee()
_ = err
}
func TestBeaconState_ValidatorByPubkey(t *testing.T) {
keyCreator := func(input []byte) [48]byte {
nKey := [48]byte{}
copy(nKey[:1], input)
return nKey
}
tests := []struct {
name string
modifyFunc func(b *BeaconState, k [48]byte)
exists bool
expectedIdx types.ValidatorIndex
largestIdxInSet types.ValidatorIndex
}{
{
name: "retrieve validator",
modifyFunc: func(b *BeaconState, key [48]byte) {
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key[:]}))
},
exists: true,
expectedIdx: 0,
},
{
name: "retrieve validator with multiple validators from the start",
modifyFunc: func(b *BeaconState, key [48]byte) {
key1 := keyCreator([]byte{'C'})
key2 := keyCreator([]byte{'D'})
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key[:]}))
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key1[:]}))
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key2[:]}))
},
exists: true,
expectedIdx: 0,
},
{
name: "retrieve validator with multiple validators",
modifyFunc: func(b *BeaconState, key [48]byte) {
key1 := keyCreator([]byte{'C'})
key2 := keyCreator([]byte{'D'})
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key1[:]}))
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key2[:]}))
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key[:]}))
},
exists: true,
expectedIdx: 2,
},
{
name: "retrieve validator with multiple validators from the start with shared state",
modifyFunc: func(b *BeaconState, key [48]byte) {
key1 := keyCreator([]byte{'C'})
key2 := keyCreator([]byte{'D'})
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key[:]}))
_ = b.Copy()
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key1[:]}))
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key2[:]}))
},
exists: true,
expectedIdx: 0,
},
{
name: "retrieve validator with multiple validators with shared state",
modifyFunc: func(b *BeaconState, key [48]byte) {
key1 := keyCreator([]byte{'C'})
key2 := keyCreator([]byte{'D'})
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key1[:]}))
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key2[:]}))
n := b.Copy()
// Append to another state
assert.NoError(t, n.AppendValidator(&ethpb.Validator{PublicKey: key[:]}))
},
exists: false,
expectedIdx: 0,
},
{
name: "retrieve validator with multiple validators with shared state at boundary",
modifyFunc: func(b *BeaconState, key [48]byte) {
key1 := keyCreator([]byte{'C'})
assert.NoError(t, b.AppendValidator(&ethpb.Validator{PublicKey: key1[:]}))
n := b.Copy()
// Append to another state
assert.NoError(t, n.AppendValidator(&ethpb.Validator{PublicKey: key[:]}))
},
exists: false,
expectedIdx: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, err := InitializeFromProto(&ethpb.BeaconStateMerge{})
require.NoError(t, err)
nKey := keyCreator([]byte{'A'})
tt.modifyFunc(s, nKey)
idx, ok := s.ValidatorIndexByPubkey(nKey)
assert.Equal(t, tt.exists, ok)
assert.Equal(t, tt.expectedIdx, idx)
})
}
}

View File

@@ -0,0 +1,185 @@
package v3
import (
"context"
"strconv"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestAppendBeyondIndicesLimit(t *testing.T) {
zeroHash := params.BeaconConfig().ZeroHash
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockblockRoots); i++ {
mockblockRoots[i] = zeroHash[:]
}
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockstateRoots); i++ {
mockstateRoots[i] = zeroHash[:]
}
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(mockrandaoMixes); i++ {
mockrandaoMixes[i] = zeroHash[:]
}
payload := &ethpb.ExecutionPayloadHeader{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
Random: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
}
st, err := InitializeFromProto(&ethpb.BeaconStateMerge{
Slot: 1,
CurrentEpochParticipation: []byte{},
PreviousEpochParticipation: []byte{},
Validators: []*eth.Validator{},
Eth1Data: &eth.Eth1Data{},
BlockRoots: mockblockRoots,
StateRoots: mockstateRoots,
RandaoMixes: mockrandaoMixes,
LatestExecutionPayloadHeader: payload,
})
require.NoError(t, err)
_, err = st.HashTreeRoot(context.Background())
require.NoError(t, err)
for i := stateTypes.FieldIndex(0); i < stateTypes.FieldIndex(params.BeaconConfig().BeaconStateMergeFieldCount); i++ {
st.dirtyFields[i] = true
}
_, err = st.HashTreeRoot(context.Background())
require.NoError(t, err)
for i := 0; i < 10; i++ {
assert.NoError(t, st.AppendValidator(&eth.Validator{}))
}
assert.Equal(t, false, st.rebuildTrie[validators])
assert.NotEqual(t, len(st.dirtyIndices[validators]), 0)
for i := 0; i < indicesLimit; i++ {
assert.NoError(t, st.AppendValidator(&eth.Validator{}))
}
assert.Equal(t, true, st.rebuildTrie[validators])
assert.Equal(t, len(st.dirtyIndices[validators]), 0)
}
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)
bals := make([]uint64, 0, count)
for i := uint64(1); i < count; i++ {
someRoot := [32]byte{}
someKey := [48]byte{}
copy(someRoot[:], strconv.Itoa(int(i)))
copy(someKey[:], strconv.Itoa(int(i)))
vals = append(vals, &ethpb.Validator{
PublicKey: someKey[:],
WithdrawalCredentials: someRoot[:],
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: 1,
ActivationEpoch: 1,
ExitEpoch: 1,
WithdrawableEpoch: 1,
})
bals = append(bals, params.BeaconConfig().MaxEffectiveBalance)
}
zeroHash := params.BeaconConfig().ZeroHash
mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockblockRoots); i++ {
mockblockRoots[i] = zeroHash[:]
}
mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(mockstateRoots); i++ {
mockstateRoots[i] = zeroHash[:]
}
mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(mockrandaoMixes); i++ {
mockrandaoMixes[i] = zeroHash[:]
}
var pubKeys [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
}
payload := &ethpb.ExecutionPayloadHeader{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
Random: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
}
st, err := InitializeFromProto(&ethpb.BeaconStateMerge{
Slot: 1,
GenesisValidatorsRoot: make([]byte, 32),
Fork: &ethpb.Fork{
PreviousVersion: make([]byte, 4),
CurrentVersion: make([]byte, 4),
Epoch: 0,
},
LatestBlockHeader: &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
CurrentEpochParticipation: []byte{},
PreviousEpochParticipation: []byte{},
Validators: vals,
Balances: bals,
Eth1Data: &eth.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
BlockRoots: mockblockRoots,
StateRoots: mockstateRoots,
RandaoMixes: mockrandaoMixes,
JustificationBits: bitfield.NewBitvector4(),
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
CurrentSyncCommittee: &ethpb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, 48),
},
NextSyncCommittee: &ethpb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, 48),
},
LatestExecutionPayloadHeader: payload,
})
assert.NoError(t, err)
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
for i := 0; i < 100; i++ {
if i%2 == 0 {
assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000))
}
if i%3 == 0 {
assert.NoError(t, st.AppendBalance(1000))
}
}
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances])
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances)
assert.NoError(t, err)
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
}

View File

@@ -1,14 +1,24 @@
package v3
import (
"context"
"runtime"
"sort"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/slice"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
@@ -31,7 +41,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateMerge) (*BeaconState, error)
return nil, errors.New("received nil state")
}
fieldCount := params.BeaconConfig().BeaconStateAltairFieldCount
fieldCount := params.BeaconConfig().BeaconStateMergeFieldCount
b := &BeaconState{
state: st,
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
@@ -65,7 +75,319 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateMerge) (*BeaconState, error)
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1) // New in Altair.
b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[latestExecutionPayloadHeader] = stateutil.NewRef(1) // New in Merge.
stateCount.Inc()
return b, nil
}
// Copy returns a deep copy of the beacon state.
func (b *BeaconState) Copy() state.BeaconState {
if !b.hasInnerState() {
return nil
}
b.lock.RLock()
defer b.lock.RUnlock()
fieldCount := params.BeaconConfig().BeaconStateMergeFieldCount
dst := &BeaconState{
state: &ethpb.BeaconStateMerge{
// Primitive types, safe to copy.
GenesisTime: b.state.GenesisTime,
Slot: b.state.Slot,
Eth1DepositIndex: b.state.Eth1DepositIndex,
// Large arrays, infrequently changed, constant size.
RandaoMixes: b.state.RandaoMixes,
StateRoots: b.state.StateRoots,
BlockRoots: b.state.BlockRoots,
Slashings: b.state.Slashings,
Eth1DataVotes: b.state.Eth1DataVotes,
// Large arrays, increase over time.
Validators: b.state.Validators,
Balances: b.state.Balances,
HistoricalRoots: b.state.HistoricalRoots,
PreviousEpochParticipation: b.state.PreviousEpochParticipation,
CurrentEpochParticipation: b.state.CurrentEpochParticipation,
InactivityScores: b.state.InactivityScores,
// Everything else, too small to be concerned about, constant size.
Fork: b.fork(),
LatestBlockHeader: b.latestBlockHeader(),
Eth1Data: b.eth1Data(),
JustificationBits: b.justificationBits(),
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint(),
FinalizedCheckpoint: b.finalizedCheckpoint(),
GenesisValidatorsRoot: b.genesisValidatorRoot(),
CurrentSyncCommittee: b.currentSyncCommittee(),
NextSyncCommittee: b.nextSyncCommittee(),
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader(),
},
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference, 11),
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
// Copy on write validator index map.
valMapHandler: b.valMapHandler,
}
for field, ref := range b.sharedFieldReferences {
ref.AddRef()
dst.sharedFieldReferences[field] = ref
}
// Increment ref for validator map
b.valMapHandler.AddRef()
for i := range b.dirtyFields {
dst.dirtyFields[i] = true
}
for i := range b.dirtyIndices {
indices := make([]uint64, len(b.dirtyIndices[i]))
copy(indices, b.dirtyIndices[i])
dst.dirtyIndices[i] = indices
}
for i := range b.rebuildTrie {
dst.rebuildTrie[i] = true
}
for fldIdx, fieldTrie := range b.stateFieldLeaves {
dst.stateFieldLeaves[fldIdx] = fieldTrie
if fieldTrie.FieldReference() != nil {
fieldTrie.Lock()
fieldTrie.FieldReference().AddRef()
fieldTrie.Unlock()
}
}
if b.merkleLayers != nil {
dst.merkleLayers = make([][][]byte, len(b.merkleLayers))
for i, layer := range b.merkleLayers {
dst.merkleLayers[i] = make([][]byte, len(layer))
for j, content := range layer {
dst.merkleLayers[i][j] = make([]byte, len(content))
copy(dst.merkleLayers[i][j], content)
}
}
}
stateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(dst, func(b *BeaconState) {
for field, v := range b.sharedFieldReferences {
v.MinusRef()
if b.stateFieldLeaves[field].FieldReference() != nil {
b.stateFieldLeaves[field].FieldReference().MinusRef()
}
}
for i := 0; i < fieldCount; i++ {
field := types.FieldIndex(i)
delete(b.stateFieldLeaves, field)
delete(b.dirtyIndices, field)
delete(b.dirtyFields, field)
delete(b.sharedFieldReferences, field)
}
stateCount.Sub(1)
})
return dst
}
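Copy avoids deep-copying the large slices by sharing them and bumping per-field reference counts; a writer checks Refs() and clones only when a field is shared. A minimal sketch of the Reference semantics this code assumes (stateutil.Reference's real implementation may differ):

package v3sketch

import "sync"

// reference is a minimal stand-in for stateutil.Reference: a counter of
// how many states currently share one underlying field value.
type reference struct {
	mu   sync.Mutex
	refs uint
}

func newRef(n uint) *reference { return &reference{refs: n} }

func (r *reference) AddRef() { r.mu.Lock(); r.refs++; r.mu.Unlock() }

func (r *reference) MinusRef() {
	r.mu.Lock()
	if r.refs > 0 {
		r.refs--
	}
	r.mu.Unlock()
}

// Refs > 1 means the field is shared: mutate a private copy instead.
func (r *reference) Refs() uint { r.mu.Lock(); defer r.mu.Unlock(); return r.refs }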
// HashTreeRoot of the beacon state retrieves the Merkle root of the trie
// representation of the beacon state based on the eth2 Simple Serialize specification.
func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconStateMerge.HashTreeRoot")
defer span.End()
b.lock.Lock()
defer b.lock.Unlock()
if len(b.merkleLayers) == 0 {
fieldRoots, err := computeFieldRoots(ctx, b.state)
if err != nil {
return [32]byte{}, err
}
layers := stateutil.Merkleize(fieldRoots)
b.merkleLayers = layers
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateMergeFieldCount)
}
for field := range b.dirtyFields {
root, err := b.rootSelector(field)
if err != nil {
return [32]byte{}, err
}
b.merkleLayers[0][field] = root[:]
b.recomputeRoot(int(field))
delete(b.dirtyFields, field)
}
return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil
}
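After the first full merkleization, HashTreeRoot only re-hashes the branch above each dirty field (recomputeRoot). A sketch of that branch walk over cached layers, assuming every layer is padded to an even length; this is the idea, not the actual recomputeRoot implementation:

package v3sketch

import "crypto/sha256"

// recomputeBranch re-hashes the path from a changed leaf to the root,
// reusing untouched siblings from the cached layers.
func recomputeBranch(layers [][][]byte, idx int) {
	for depth := 0; depth < len(layers)-1; depth++ {
		sib := idx ^ 1 // sibling index at this depth
		left, right := layers[depth][idx], layers[depth][sib]
		if idx%2 == 1 {
			left, right = right, left // keep left/right ordering
		}
		parent := sha256.Sum256(append(append([]byte{}, left...), right...))
		idx /= 2
		layers[depth+1][idx] = parent[:]
	}
}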
// FieldReferencesCount returns the reference count held by each field. This
// also includes the field trie held by each field.
func (b *BeaconState) FieldReferencesCount() map[string]uint64 {
refMap := make(map[string]uint64)
b.lock.RLock()
defer b.lock.RUnlock()
for i, f := range b.sharedFieldReferences {
refMap[i.String(b.Version())] = uint64(f.Refs())
}
for i, f := range b.stateFieldLeaves {
numOfRefs := uint64(f.FieldReference().Refs())
f.RLock()
if !f.Empty() {
refMap[i.String(b.Version())+"_trie"] = numOfRefs
}
f.RUnlock()
}
return refMap
}
// IsNil checks if the state and the underlying proto
// object are nil.
func (b *BeaconState) IsNil() bool {
return b == nil || b.state == nil
}
func (b *BeaconState) rootSelector(field types.FieldIndex) ([32]byte, error) {
hasher := hash.CustomSHA256Hasher()
switch field {
case genesisTime:
return ssz.Uint64Root(b.state.GenesisTime), nil
case genesisValidatorRoot:
return bytesutil.ToBytes32(b.state.GenesisValidatorsRoot), nil
case slot:
return ssz.Uint64Root(uint64(b.state.Slot)), nil
case eth1DepositIndex:
return ssz.Uint64Root(b.state.Eth1DepositIndex), nil
case fork:
return ssz.ForkRoot(b.state.Fork)
case latestBlockHeader:
return stateutil.BlockHeaderRoot(b.state.LatestBlockHeader)
case blockRoots:
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(blockRoots, b.state.BlockRoots)
case stateRoots:
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(stateRoots, b.state.StateRoots)
case historicalRoots:
return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
case eth1Data:
return stateutil.Eth1Root(hasher, b.state.Eth1Data)
case eth1DataVotes:
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.Eth1DataVotes, uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod))))
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(field, b.state.Eth1DataVotes)
case validators:
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.Validators, params.BeaconConfig().ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[validators] = []uint64{}
delete(b.rebuildTrie, validators)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(validators, b.state.Validators)
case balances:
return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
case randaoMixes:
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector))
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(randaoMixes, b.state.RandaoMixes)
case slashings:
return ssz.SlashingsRoot(b.state.Slashings)
case previousEpochParticipationBits:
return stateutil.ParticipationBitsRoot(b.state.PreviousEpochParticipation)
case currentEpochParticipationBits:
return stateutil.ParticipationBitsRoot(b.state.CurrentEpochParticipation)
case justificationBits:
return bytesutil.ToBytes32(b.state.JustificationBits), nil
case previousJustifiedCheckpoint:
return ssz.CheckpointRoot(hasher, b.state.PreviousJustifiedCheckpoint)
case currentJustifiedCheckpoint:
return ssz.CheckpointRoot(hasher, b.state.CurrentJustifiedCheckpoint)
case finalizedCheckpoint:
return ssz.CheckpointRoot(hasher, b.state.FinalizedCheckpoint)
case inactivityScores:
return stateutil.Uint64ListRootWithRegistryLimit(b.state.InactivityScores)
case currentSyncCommittee:
return stateutil.SyncCommitteeRoot(b.state.CurrentSyncCommittee)
case nextSyncCommittee:
return stateutil.SyncCommitteeRoot(b.state.NextSyncCommittee)
case latestExecutionPayloadHeader:
return b.state.LatestExecutionPayloadHeader.HashTreeRoot()
}
return [32]byte{}, errors.New("invalid field index provided")
}
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
fTrie := b.stateFieldLeaves[index]
if fTrie.FieldReference().Refs() > 1 {
fTrie.Lock()
defer fTrie.Unlock()
fTrie.FieldReference().MinusRef()
newTrie := fTrie.CopyTrie()
b.stateFieldLeaves[index] = newTrie
fTrie = newTrie
}
// remove duplicate indexes
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
// sort indexes again
sort.Slice(b.dirtyIndices[index], func(i int, j int) bool {
return b.dirtyIndices[index][i] < b.dirtyIndices[index][j]
})
root, err := fTrie.RecomputeTrie(b.dirtyIndices[index], elements)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[index] = []uint64{}
return root, nil
}
func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{}, length uint64) error {
fTrie, err := fieldtrie.NewFieldTrie(index, fieldMap[index], elements, length)
if err != nil {
return err
}
b.stateFieldLeaves[index] = fTrie
b.dirtyIndices[index] = []uint64{}
return nil
}

View File

@@ -0,0 +1,167 @@
package v3
import (
"strconv"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestMain(m *testing.M) {
resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true})
defer resetCfg()
m.Run()
}
func TestValidatorMap_DistinctCopy(t *testing.T) {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)
for i := uint64(1); i < count; i++ {
someRoot := [32]byte{}
someKey := [48]byte{}
copy(someRoot[:], strconv.Itoa(int(i)))
copy(someKey[:], strconv.Itoa(int(i)))
vals = append(vals, &ethpb.Validator{
PublicKey: someKey[:],
WithdrawalCredentials: someRoot[:],
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: 1,
ActivationEpoch: 1,
ExitEpoch: 1,
WithdrawableEpoch: 1,
})
}
handler := stateutil.NewValMapHandler(vals)
newHandler := handler.Copy()
wantedPubkey := strconv.Itoa(22)
handler.Set(bytesutil.ToBytes48([]byte(wantedPubkey)), 27)
val1, _ := handler.Get(bytesutil.ToBytes48([]byte(wantedPubkey)))
val2, _ := newHandler.Get(bytesutil.ToBytes48([]byte(wantedPubkey)))
assert.NotEqual(t, val1, val2, "Values are supposed to be unequal due to copy")
}
func TestInitializeFromProto(t *testing.T) {
type test struct {
name string
state *ethpb.BeaconStateMerge
error string
}
initTests := []test{
{
name: "nil state",
state: nil,
error: "received nil state",
},
{
name: "nil validators",
state: &ethpb.BeaconStateMerge{
Slot: 4,
Validators: nil,
},
},
{
name: "empty state",
state: &ethpb.BeaconStateMerge{},
},
}
for _, tt := range initTests {
t.Run(tt.name, func(t *testing.T) {
_, err := InitializeFromProto(tt.state)
if tt.error != "" {
require.ErrorContains(t, tt.error, err)
} else {
require.NoError(t, err)
}
})
}
}
func TestBeaconState_NoDeadlock(t *testing.T) {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)
for i := uint64(1); i < count; i++ {
someRoot := [32]byte{}
someKey := [48]byte{}
copy(someRoot[:], strconv.Itoa(int(i)))
copy(someKey[:], strconv.Itoa(int(i)))
vals = append(vals, &ethpb.Validator{
PublicKey: someKey[:],
WithdrawalCredentials: someRoot[:],
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: 1,
ActivationEpoch: 1,
ExitEpoch: 1,
WithdrawableEpoch: 1,
})
}
st, err := InitializeFromProtoUnsafe(&ethpb.BeaconStateMerge{
Validators: vals,
})
assert.NoError(t, err)
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
// Continuously lock and unlock each field trie
// while mutating and referencing it.
for i := 0; i < 1000; i++ {
for _, f := range st.stateFieldLeaves {
f.Lock()
if f.Empty() {
f.InsertFieldLayer(make([][]*[32]byte, 10))
}
f.Unlock()
f.FieldReference().AddRef()
}
}
wg.Done()
}()
// Constantly read from the offending portion
// of the code to ensure there is no possible
// recursive read locking.
for i := 0; i < 1000; i++ {
go func() {
_ = st.FieldReferencesCount()
}()
}
// Test will not terminate in the event of a deadlock.
wg.Wait()
}
func TestInitializeFromProtoUnsafe(t *testing.T) {
type test struct {
name string
state *ethpb.BeaconStateMerge
error string
}
initTests := []test{
{
name: "nil state",
state: nil,
error: "received nil state",
},
{
name: "nil validators",
state: &ethpb.BeaconStateMerge{
Slot: 4,
Validators: nil,
},
},
{
name: "empty state",
state: &ethpb.BeaconStateMerge{},
},
// TODO: Add full state. Blocked by testutil migration.
}
_ = initTests
}

View File

@@ -63,7 +63,7 @@ func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID)
// waitForMinimumPeers spins and waits up until enough peers are available.
func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
required := uint64(params.BeaconConfig().MaxPeersToSync)
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
@@ -79,7 +79,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
headEpoch := slots.ToEpoch(f.chain.HeadSlot())
_, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
}
if len(peers) >= required {
if uint64(len(peers)) >= required {
return peers, nil
}
log.WithFields(logrus.Fields{
@@ -123,14 +123,14 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP
// trimPeers limits peer list, returning only specified percentage of peers.
// Takes system constraints into account (min/max peers to sync).
func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
required := params.BeaconConfig().MaxPeersToSync
required := uint64(params.BeaconConfig().MaxPeersToSync)
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
// Weak/slow peers will be pushed down the list and trimmed, since only a percentage of peers is selected.
limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
// Limit cannot be less than the minimum number of peers required by the sync mechanism.
limit = mathutil.Max(limit, uint64(required))
limit = mathutil.Max(limit, required)
// Limit cannot be higher than number of peers available (safe-guard).
limit = mathutil.Min(limit, uint64(len(peers)))
return peers[:limit]
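
The clamping above reduces to limit = min(max(round(len(peers) * percentage), required), len(peers)). A standalone check of that arithmetic with hypothetical inputs:

package main

import (
	"fmt"
	"math"
)

func clampLimit(numPeers int, pct float64, required uint64) uint64 {
	limit := uint64(math.Round(float64(numPeers) * pct))
	if limit < required {
		limit = required // never trim below the sync minimum
	}
	if limit > uint64(numPeers) {
		limit = uint64(numPeers) // never exceed available peers
	}
	return limit
}

func main() {
	fmt.Println(clampLimit(40, 0.05, 5)) // round(2.0) raised to 5
	fmt.Println(clampLimit(3, 0.05, 5))  // raised to 5, capped back to 3
}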

View File

@@ -118,7 +118,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) {
capacityWeight float64
}
batchSize := uint64(flags.Get().BlockBatchLimit)
batchSize := flags.Get().BlockBatchLimit
tests := []struct {
name string
args args

View File

@@ -372,7 +372,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
}
func TestBlocksFetcher_scheduleRequest(t *testing.T) {
blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
blockBatchLimit := flags.Get().BlockBatchLimit
t.Run("context cancellation", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{})
@@ -426,7 +426,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
})
cancel()
response := fetcher.handleRequest(ctx, 1, uint64(blockBatchLimit))
response := fetcher.handleRequest(ctx, 1, blockBatchLimit)
assert.ErrorContains(t, "context canceled", response.err)
})
@@ -441,7 +441,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second)
defer reqCancel()
go func() {
response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */)
response := fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchLimit /* count */)
select {
case <-ctx.Done():
case fetcher.fetchResponses <- response:
@@ -459,7 +459,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
blocks = resp.blocks
}
}
if uint64(len(blocks)) != uint64(blockBatchLimit) {
if uint64(len(blocks)) != blockBatchLimit {
t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
}
@@ -510,11 +510,11 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
req := &p2ppb.BeaconBlocksByRangeRequest{
StartSlot: 1,
Step: 1,
Count: uint64(blockBatchLimit),
Count: blockBatchLimit,
}
blocks, err := fetcher.requestBlocks(ctx, req, peerIDs[0])
assert.NoError(t, err)
assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned")
assert.Equal(t, blockBatchLimit, uint64(len(blocks)), "Incorrect number of blocks returned")
// Test context cancellation.
ctx, cancel = context.WithCancel(context.Background())
@@ -717,10 +717,9 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
blk.Block.Slot = req.StartSlot - 1
assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
break
} else {
blk.Block.Slot = i
assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
}
blk.Block.Slot = i
assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
}
}
},
@@ -749,10 +748,9 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
blk.Block.Slot = req.StartSlot.Add(req.Count * req.Step)
assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
break
} else {
blk.Block.Slot = i
assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
}
blk.Block.Slot = i
assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
}
}
},

View File

@@ -201,7 +201,7 @@ func TestBlocksFetcher_findFork(t *testing.T) {
peers = append(peers, connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers()))
}
blockBatchLimit := uint64(flags.Get().BlockBatchLimit) * 2
blockBatchLimit := flags.Get().BlockBatchLimit * 2
pidInd := 0
for i := uint64(1); i < uint64(len(chain1)); i += blockBatchLimit {
req := &p2ppb.BeaconBlocksByRangeRequest{

View File

@@ -163,13 +163,13 @@ func (s *Service) Resync() error {
}
func (s *Service) waitForMinimumPeers() {
required := params.BeaconConfig().MaxPeersToSync
required := uint64(params.BeaconConfig().MaxPeersToSync)
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
for {
_, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, s.cfg.Chain.FinalizedCheckpt().Epoch)
if len(peers) >= required {
if uint64(len(peers)) >= required {
break
}
log.WithFields(logrus.Fields{

View File

@@ -27,7 +27,7 @@ import (
)
func TestService_Constants(t *testing.T) {
if params.BeaconConfig().MaxPeersToSync*flags.Get().BlockBatchLimit > 1000 {
if uint64(params.BeaconConfig().MaxPeersToSync)*flags.Get().BlockBatchLimit > uint64(1000) {
t.Fatal("rpc rejects requests over 1000 range slots")
}
}

View File

@@ -43,7 +43,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
// The initial count for the first batch to be returned.
count := m.Count
allowedBlocksPerSecond := uint64(flags.Get().BlockBatchLimit)
allowedBlocksPerSecond := flags.Get().BlockBatchLimit
if count > allowedBlocksPerSecond {
count = allowedBlocksPerSecond
}

View File

@@ -394,11 +394,11 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
req := &pb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Step: 1,
Count: uint64(flags.Get().BlockBatchLimit),
Count: flags.Get().BlockBatchLimit,
}
saveBlocks(req)
for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
for i := uint64(0); i < flags.Get().BlockBatchLimitBurstFactor; i++ {
assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
}

View File

@@ -641,7 +641,7 @@ func (s *Service) unSubscribeFromTopic(topic string) {
// validPeersExist checks whether we have peers subscribed to the given subnet topic.
func (s *Service) validPeersExist(subnetTopic string) bool {
numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
return uint64(len(numOfPeers)) >= flags.Get().MinimumPeersPerSubnet
}
func (s *Service) retrievePersistentSubs(currSlot types.Slot) []uint64 {
@@ -682,7 +682,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
for _, sub := range wantedSubs {
subnetTopic := fmt.Sprintf(topic, digest, sub) + s.cfg.p2p.Encoding().ProtocolSuffix()
peers := s.cfg.p2p.PubSub().ListPeers(subnetTopic)
if len(peers) > flags.Get().MinimumPeersPerSubnet {
if uint64(len(peers)) > flags.Get().MinimumPeersPerSubnet {
// In the event we have more than the minimum, we can
// mark the remaining as viable for pruning.
peers = peers[:flags.Get().MinimumPeersPerSubnet]

View File

@@ -489,7 +489,7 @@ func TestFilterSubnetPeers(t *testing.T) {
// Try with only peers from subnet 20.
wantedPeers = []peer.ID{p2.BHost.ID()}
// Connect an excess amount of peers in the particular subnet.
for i := 1; i <= flags.Get().MinimumPeersPerSubnet; i++ {
for i := uint64(1); i <= flags.Get().MinimumPeersPerSubnet; i++ {
nPeer := createPeer(t, subnet20)
p.Connect(nPeer)
wantedPeers = append(wantedPeers, nPeer.BHost.ID())

View File

@@ -93,6 +93,15 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
}
// Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally.
seen, err := s.cfg.attPool.HasAggregatedAttestation(m.Message.Aggregate)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
if seen {
return pubsub.ValidationIgnore, nil
}
if !s.validateBlockInAttestation(ctx, m) {
return pubsub.ValidationIgnore, nil
}

View File

@@ -88,7 +88,7 @@ var (
}
// MinSyncPeers specifies the required number of successful peer handshakes in order
// to start syncing with external peers.
MinSyncPeers = &cli.IntFlag{
MinSyncPeers = &cli.Uint64Flag{
Name: "min-sync-peers",
Usage: "The required number of valid peers to connect with before syncing.",
Value: 3,
@@ -123,13 +123,13 @@ var (
Usage: "Does not run the discoveryV5 dht.",
}
// BlockBatchLimit specifies the requested block batch size.
BlockBatchLimit = &cli.IntFlag{
BlockBatchLimit = &cli.Uint64Flag{
Name: "block-batch-limit",
Usage: "The amount of blocks the local peer is bounded to request and respond to in a batch.",
Value: 64,
}
// BlockBatchLimitBurstFactor specifies the factor by which block batch size may increase.
BlockBatchLimitBurstFactor = &cli.IntFlag{
BlockBatchLimitBurstFactor = &cli.Uint64Flag{
Name: "block-batch-limit-burst-factor",
Usage: "The factor by which block batch limit may increase on burst.",
Value: 10,

View File

@@ -12,10 +12,10 @@ type GlobalFlags struct {
DisableSync bool
DisableDiscv5 bool
SubscribeToAllSubnets bool
MinimumSyncPeers int
MinimumPeersPerSubnet int
BlockBatchLimit int
BlockBatchLimitBurstFactor int
MinimumSyncPeers uint64
MinimumPeersPerSubnet uint64
BlockBatchLimit uint64
BlockBatchLimitBurstFactor uint64
}
var globalConfig *GlobalFlags
@@ -50,17 +50,17 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
cfg.SubscribeToAllSubnets = true
}
cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
cfg.BlockBatchLimit = ctx.Uint64(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Uint64(BlockBatchLimitBurstFactor.Name)
cfg.MinimumPeersPerSubnet = ctx.Uint64(MinPeersPerSubnet.Name)
configureMinimumPeers(ctx, cfg)
Init(cfg)
}
func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
cfg.MinimumSyncPeers = ctx.Uint64(MinSyncPeers.Name)
maxPeers := ctx.Uint64(cmd.P2PMaxPeers.Name)
if cfg.MinimumSyncPeers > maxPeers {
log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
cfg.MinimumSyncPeers = maxPeers

View File

@@ -118,6 +118,7 @@ var appFlags = []cli.Flag{
cmd.RestoreSourceFileFlag,
cmd.RestoreTargetDirFlag,
cmd.BoltMMapInitialSizeFlag,
cmd.ValidatorMonitorIndicesFlag,
}
func init() {

View File

@@ -73,6 +73,7 @@ var appHelpFlagGroups = []flagGroup{
cmd.RestoreSourceFileFlag,
cmd.RestoreTargetDirFlag,
cmd.BoltMMapInitialSizeFlag,
cmd.ValidatorMonitorIndicesFlag,
},
},
{

Some files were not shown because too many files have changed in this diff.