Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: e2ez...v2.0.0-rc. — 317 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 4db77ce691 |  |
|  | 1919484cae |  |
|  | 89637634f6 |  |
|  | b870b126f1 |  |
|  | 873335c93f |  |
|  | 8e9976be54 |  |
|  | aa2f058a8e |  |
|  | 61702e101b |  |
|  | bcaefd34e7 |  |
|  | df2cebb4e3 |  |
|  | 8813ed35d4 |  |
|  | e80ce8be95 |  |
|  | 75a4ad364d |  |
|  | acdf29ef44 |  |
|  | 1691cea615 |  |
|  | 621aff1b86 |  |
|  | aad0dd80d1 |  |
|  | 6dc70c53ec |  |
|  | 17f3816976 |  |
|  | f05ac82c6f |  |
|  | e78e554d27 |  |
|  | 6c435382dc |  |
|  | 1aa1165428 |  |
|  | 6c086080c5 |  |
|  | c9dd3faf7a |  |
|  | 86259e76de |  |
|  | d26f52a7cc |  |
|  | f3a8397f75 |  |
|  | 5212cc649a |  |
|  | 043850d972 |  |
|  | e2238bd6d1 |  |
|  | df291e2ffb |  |
|  | 5ba5b303d3 |  |
|  | f2ce4dcab3 |  |
|  | 8765c3ac42 |  |
|  | 57fff2d88e |  |
|  | c010a972e7 |  |
|  | c02ed805b0 |  |
|  | 93adf4980a |  |
|  | 3fe969992a |  |
|  | 2135108830 |  |
|  | 4c146dc896 |  |
|  | 042a3cda02 |  |
|  | b8676480f0 |  |
|  | 711022d34e |  |
|  | eec93be4ed |  |
|  | 21d096622f |  |
|  | 62846d61b8 |  |
|  | a228a407be |  |
|  | f527b676da |  |
|  | 5bd4e10dd6 |  |
|  | d19e13352b |  |
|  | 6bda9a0bf2 |  |
|  | 2da6b7bb97 |  |
|  | 7faed861c4 |  |
|  | 8b9129d84e |  |
|  | 8b219b14da |  |
|  | 5bf9bd3d73 |  |
|  | 59f12c8ac1 |  |
|  | 1094ca0838 |  |
|  | ebe4b309c0 |  |
|  | ea94f0e70d |  |
|  | 47443e130d |  |
|  | 4dfa5c2757 |  |
|  | 1851d40f74 |  |
|  | eee1d47655 |  |
|  | 7ce76652fb |  |
|  | 19e6f0c19a |  |
|  | 6470e2718a |  |
|  | 30cd5c076e |  |
|  | 03d8af5cda |  |
|  | 194f0cb76d |  |
|  | 2a0e8510d4 |  |
|  | 5e35f778b9 |  |
|  | 972ae7f169 |  |
|  | 80fafaddff |  |
|  | e6ecdfde0d |  |
|  | 1daf51788d |  |
|  | 35055539a7 |  |
|  | 81ab3ca46c |  |
|  | 5895b10678 |  |
|  | 7ea645ed37 |  |
|  | a900792160 |  |
|  | cd87bfd8ab |  |
|  | 98477a0286 |  |
|  | 2d1a63d9f4 |  |
|  | 52be270f0a |  |
|  | 895a86fd53 |  |
|  | af6246a5f9 |  |
|  | 5e80ceeff9 |  |
|  | ee661971f0 |  |
|  | cc7e36776d |  |
|  | 14a9d9a1ad |  |
|  | 2b9fb29ed2 |  |
|  | 9300d1026f |  |
|  | 48345eb68e |  |
|  | 60d14f1806 |  |
|  | d80d4d01a6 |  |
|  | 275192680f |  |
|  | 9ca958064e |  |
|  | 604958da6c |  |
|  | ade94444f2 |  |
|  | 4df2f4c790 |  |
|  | 3f8f5edb3f |  |
|  | 0ad4e433a5 |  |
|  | 1be2503e82 |  |
|  | 0dd228bb94 |  |
|  | 78450ea557 |  |
|  | f0e6d4a0bd |  |
|  | 97901c90a5 |  |
|  | 1379dbfc23 |  |
|  | 19dbc7e249 |  |
|  | 76a70065f2 |  |
|  | 51f513b246 |  |
|  | 2b349a1b06 |  |
|  | a819caca16 |  |
|  | e7116d4ea8 |  |
|  | f8cd989161 |  |
|  | 4c19265ac5 |  |
|  | f361bf781f |  |
|  | a458e556e0 |  |
|  | 773b259cd5 |  |
|  | 2bb3da1ba3 |  |
|  | 47367d98b4 |  |
|  | 1ff18c07a4 |  |
|  | 279a95deba |  |
|  | c0bfa6ef79 |  |
|  | 7e961c2be9 |  |
|  | c7c7f9bf1b |  |
|  | 7ce85cac31 |  |
|  | 2d836f485d |  |
|  | 780253b786 |  |
|  | 710bb98575 |  |
|  | d5387851d0 |  |
|  | ab8dd3788f |  |
|  | bf1b550b7d |  |
|  | 705564108c |  |
|  | 3df82e7540 |  |
|  | d0a749ce4b |  |
|  | 081c80998c |  |
|  | 8c62f10b74 |  |
|  | e232b3ce30 |  |
|  | 17153bb4e9 |  |
|  | 329a45c06a |  |
|  | 1c82394a69 |  |
|  | 856081c80c |  |
|  | 6fff327864 |  |
|  | e2879f8352 |  |
|  | 523fe58f61 |  |
|  | 04a303c8d2 |  |
|  | 0844bd62ea |  |
|  | 816dc47b17 |  |
|  | caeec851d4 |  |
|  | 062933af35 |  |
|  | 169573c32e |  |
|  | e7a7b2861e |  |
|  | fe6c80fe95 |  |
|  | 2f52dfe96e |  |
|  | 93a7b96f16 |  |
|  | f078b62c3e |  |
|  | f476c39708 |  |
|  | c02e507422 |  |
|  | ece07e5fbb |  |
|  | 6bbe3dbd10 |  |
|  | 18bfc2a34e |  |
|  | b774af9535 |  |
|  | 719a5fca02 |  |
|  | b4a0e4375a |  |
|  | 4d276d2fdf |  |
|  | 8797179cfb |  |
|  | 7cc38108aa |  |
|  | 365ced285e |  |
|  | 97e5730fd9 |  |
|  | 3919b49000 |  |
|  | 7f13396e44 |  |
|  | ae3e5718e6 |  |
|  | 97a49240ba |  |
|  | 232d519445 |  |
|  | afd815bb5d |  |
|  | 56d383a354 |  |
|  | 733023df03 |  |
|  | 9d9ce13753 |  |
|  | 975e7a76bf |  |
|  | 20ae23bd42 |  |
|  | 63cf429fa0 |  |
|  | 2aab4e2efe |  |
|  | e2156f25e0 |  |
|  | a537833f75 |  |
|  | 61bf95e4e2 |  |
|  | 357d3f3b6a |  |
|  | b0bbfcab7f |  |
|  | 06801a5230 |  |
|  | 278857d576 |  |
|  | 9c5c70fb32 |  |
|  | 54326af141 |  |
|  | 6020682ad1 |  |
|  | 3f45d54986 |  |
|  | ecb51dc55d |  |
|  | cbf4aeb859 |  |
|  | b107bd2a5a |  |
|  | a5e2c3f551 |  |
|  | efffaeb359 |  |
|  | 144576cf36 |  |
|  | fcf2be08d8 |  |
|  | 2b5cd139f0 |  |
|  | 77a4fdb509 |  |
|  | a14d37b0ad |  |
|  | 38e28af51e |  |
|  | 6dbe6cfd8c |  |
|  | c156c1fb91 |  |
|  | 393a744091 |  |
|  | f83993b211 |  |
|  | 41433f8b2e |  |
|  | 95f62de465 |  |
|  | fbb140eff7 |  |
|  | 22483a285a |  |
|  | 1d835d9859 |  |
|  | 98f8ab331a |  |
|  | 0e88418b12 |  |
|  | ef3ff6f1d5 |  |
|  | 7c22496c65 |  |
|  | c5256d09e0 |  |
|  | d3d1eb833e |  |
|  | 2d9fd4ea29 |  |
|  | be168e4034 |  |
|  | f7c2b9c197 |  |
|  | 89b7cf9be3 |  |
|  | 3591f85a66 |  |
|  | 6ba5ad0325 |  |
|  | 76b16a8989 |  |
|  | 74a19741b4 |  |
|  | fdb68c482e |  |
|  | b51729bd2f |  |
|  | aef1269223 |  |
|  | 9fc1683ec7 |  |
|  | 3790c5edb2 |  |
|  | c6c7f8234d |  |
|  | c66ea88da8 |  |
|  | 9a8facd76b |  |
|  | ca7e0e4807 |  |
|  | 6acedb7dfd |  |
|  | a3183bc33e |  |
|  | f6caf627e1 |  |
|  | fa696a883d |  |
|  | cbbf188637 |  |
|  | 4d3e65bdcd |  |
|  | 0be2bde4cc |  |
|  | d5662556bc |  |
|  | 26a10ca56e |  |
|  | bda70352ca |  |
|  | fbd45dbf50 |  |
|  | 119ef0f8fa |  |
|  | 04f38324ba |  |
|  | fa27b6e24c |  |
|  | fe647e99fc |  |
|  | bbcaa7eaf2 |  |
|  | 1c1b2eb811 |  |
|  | 427e792073 |  |
|  | 463481febe |  |
|  | 6e41923388 |  |
|  | 17798f878a |  |
|  | d502f0825a |  |
|  | 96fe2b76bf |  |
|  | a51a4ca9eb |  |
|  | 9dd8a1737c |  |
|  | c97f74ccef |  |
|  | 806a923974 |  |
|  | 4b4c2b97b7 |  |
|  | 9d22ea840e |  |
|  | 8a507d749a |  |
|  | 2850581611 |  |
|  | 59bc0c679c |  |
|  | 969dec8ad2 |  |
|  | 91fb8eea8c |  |
|  | e7ebdb11be |  |
|  | ff3bb0aa8a |  |
|  | 5945849cb4 |  |
|  | 3435a61413 |  |
|  | a3b69600ef |  |
|  | 01841434ec |  |
|  | f60edb055c |  |
|  | ee3d106a36 |  |
|  | 9b41a069eb |  |
|  | dc1d5b778b |  |
|  | 224b92781f |  |
|  | 6f54a9d057 |  |
|  | 7906e571a8 |  |
|  | 458817d5ad |  |
|  | 06290c6805 |  |
|  | 1adf1f1bef |  |
|  | af57cf5e96 |  |
|  | d59ba818f0 |  |
|  | 9aa2dd1ae6 |  |
|  | f3abe70838 |  |
|  | fe4a852e78 |  |
|  | 6af0f619c9 |  |
|  | 3d405910e7 |  |
|  | 2779daee32 |  |
|  | a0ba4a8563 |  |
|  | 926b3725a1 |  |
|  | 5cc9f4df0b |  |
|  | fd297999b8 |  |
|  | 0d45eeac56 |  |
|  | e2fcd25039 |  |
|  | 2436d84370 |  |
|  | 5418d8c367 |  |
|  | 55e5dee7ab |  |
|  | 6a06a4bf98 |  |
|  | a9d981dce1 |  |
|  | a69947ba51 |  |
|  | 6a32b18ca9 |  |
|  | 9ebf8651b4 |  |
|  | 8467485aec |  |
|  | fdb6cf9b57 |  |
|  | 3da55ad7a4 |  |
|  | 773d561361 |  |
|  | 7f6d3ccb36 |  |
.bazelignore (new file, 0 lines)

WORKSPACE (30 changed lines)
@@ -197,6 +197,8 @@ filegroup(
    url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

eth2_spec_version = "v1.1.0-beta.1"

http_archive(
    name = "eth2_spec_tests_general",
    build_file_content = """
@@ -209,8 +211,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "deacc076365c727d653ac064894ecf0d1b0a675d86704dc8de271259f6a7314b",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/general.tar.gz",
    sha256 = "e9b4cc60a3e676c6b4a9348424e44cff1ebada603ffb31b0df600dbd70e7fbf6",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/general.tar.gz" % eth2_spec_version,
)

http_archive(
@@ -225,8 +227,8 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "6e9886af3d2f024e563249d70388129e28e3e92f742f289238ed9b7ec7a7f930",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/minimal.tar.gz",
    sha256 = "cf82dc729ffe7b924f852e57d1973e1a6377c5b52acc903c953277fa9b4e6de8",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/minimal.tar.gz" % eth2_spec_version,
)

http_archive(
@@ -241,8 +243,24 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "a7b3d0ffc02a567250f424d69b2474fdc9477cd56eada60af7474560b46a8527",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/mainnet.tar.gz",
    sha256 = "6c6792375b81858037014e282d28a64b0cf12e12daf16054265c85403b8b329f",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/mainnet.tar.gz" % eth2_spec_version,
)

http_archive(
    name = "eth2_spec",
    build_file_content = """
filegroup(
    name = "spec_data",
    srcs = glob([
        "**/*.yaml",
    ]),
    visibility = ["//visibility:public"],
)
""",
    sha256 = "16094dad1bab4e8ab3adb60c10e311cd1e294cd7bbf5a89505f24bebd3d0e513",
    strip_prefix = "eth2.0-specs-" + eth2_spec_version[1:],
    url = "https://github.com/ethereum/eth2.0-specs/archive/refs/tags/%s.tar.gz" % eth2_spec_version,
)

http_archive(
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "chain_info.go",
        "head.go",
        "head_sync_committee_info.go",
        "info.go",
        "init_sync_process_block.go",
        "log.go",
@@ -26,6 +27,7 @@ go_library(
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
@@ -42,6 +44,7 @@ go_library(
        "//beacon-chain/powchain:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/state/v2:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -56,7 +59,9 @@ go_library(
        "//shared/slotutil:go_default_library",
        "//shared/timeutils:go_default_library",
        "//shared/traceutil:go_default_library",
        "//shared/version:go_default_library",
        "@com_github_emicklei_dot//:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -24,6 +24,8 @@ type ChainInfoFetcher interface {
    GenesisFetcher
    CanonicalFetcher
    ForkFetcher
    TimeFetcher
    HeadDomainFetcher
}

// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -48,8 +50,12 @@ type HeadFetcher interface {
    HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
    HeadGenesisValidatorRoot() [32]byte
    HeadETH1Data() *ethpb.Eth1Data
    HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
    HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
    ProtoArrayStore() *protoarray.Store
    ChainHeads() ([][32]byte, []types.Slot)
    HeadSyncCommitteeFetcher
    HeadDomainFetcher
}

// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
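A minimal sketch (not part of this diff) of how a caller, e.g. an RPC handler, might combine the new `HeadFetcher` methods: resolve a validator index from its public key against the head state, then ask for that validator's current sync subcommittee positions. The helper name, package, and wiring are hypothetical; only the `blockchain.HeadFetcher` methods themselves come from the change above.

```go
package rpcexample

import (
	"context"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
)

// currentSyncDuties is a hypothetical helper built on the extended HeadFetcher interface.
func currentSyncDuties(ctx context.Context, fetcher blockchain.HeadFetcher, pubKey [48]byte, slot types.Slot) ([]types.CommitteeIndex, error) {
	// Resolve the validator index for this key against the current head state.
	idx, ok := fetcher.HeadPublicKeyToValidatorIndex(ctx, pubKey)
	if !ok {
		return nil, errors.New("public key not found in head state")
	}
	// HeadSyncCommitteeFetcher is embedded in HeadFetcher, so the sync committee
	// lookup is available on the same interface value.
	return fetcher.HeadCurrentSyncCommitteeIndices(ctx, idx, slot)
}
```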

beacon-chain/blockchain/head_sync_committee_info.go (new file, 188 lines)
@@ -0,0 +1,188 @@
package blockchain

import (
    "context"
    "sync"

    lru "github.com/hashicorp/golang-lru"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/params"
)

// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
type HeadSyncCommitteeFetcher interface {
    HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
    HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
    HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
}

// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
// The head sync committee domain functions return callers domain data with respect to slot and head state.
type HeadDomainFetcher interface {
    HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
    HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
    HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
}

// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
}

// HeadSyncSelectionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
}

// HeadSyncContributionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
}

// HeadCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    headState, err := s.getSyncCommitteeHeadState(ctx, slot)
    if err != nil {
        return nil, err
    }
    return helpers.CurrentPeriodSyncSubcommitteeIndices(headState, index)
}

// HeadNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    headState, err := s.getSyncCommitteeHeadState(ctx, slot)
    if err != nil {
        return nil, err
    }
    return helpers.NextPeriodSyncSubcommitteeIndices(headState, index)
}

// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
    s.headLock.RLock()
    defer s.headLock.RUnlock()

    headState, err := s.getSyncCommitteeHeadState(ctx, slot)
    if err != nil {
        return nil, err
    }

    nextSlotEpoch := helpers.SlotToEpoch(headState.Slot() + 1)
    currEpoch := helpers.SlotToEpoch(headState.Slot())

    var syncCommittee *ethpb.SyncCommittee
    if helpers.SyncCommitteePeriod(currEpoch) == helpers.SyncCommitteePeriod(nextSlotEpoch) {
        syncCommittee, err = headState.CurrentSyncCommittee()
        if err != nil {
            return nil, err
        }
    } else {
        syncCommittee, err = headState.NextSyncCommittee()
        if err != nil {
            return nil, err
        }
    }

    return altair.SyncSubCommitteePubkeys(syncCommittee, committeeIndex)
}

// returns calculated domain using input `domain` and `slot`.
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
    headState, err := s.getSyncCommitteeHeadState(ctx, slot)
    if err != nil {
        return nil, err
    }
    return helpers.Domain(headState.Fork(), helpers.SlotToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
}

// returns the head state that is advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving using `slot` as key.
// For the cache miss, it processes head state up to slot and fill the cache with `slot` as key.
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
    var headState state.BeaconState
    var err error

    // If there's already a head state exists with the request slot, we don't need to process slots.
    cachedState := syncCommitteeHeadStateCache.get(slot)
    if cachedState != nil && !cachedState.IsNil() {
        syncHeadStateHit.Inc()
        headState = cachedState
    } else {
        headState, err = s.HeadState(ctx)
        if err != nil {
            return nil, err
        }
        if slot > headState.Slot() {
            headState, err = core.ProcessSlots(ctx, headState, slot)
            if err != nil {
                return nil, err
            }
        }
        syncHeadStateMiss.Inc()
        syncCommitteeHeadStateCache.add(slot, headState)
    }

    return headState, nil
}

var syncCommitteeHeadStateCache = newSyncCommitteeHeadState()

// syncCommitteeHeadState to caches latest head state requested by the sync committee participant.
type syncCommitteeHeadState struct {
    cache *lru.Cache
    lock  sync.RWMutex
}

// newSyncCommitteeHeadState initializes the lru cache for `syncCommitteeHeadState` with size of 1.
func newSyncCommitteeHeadState() *syncCommitteeHeadState {
    c, err := lru.New(1) // only need size of 1 to avoid redundant state copy, HTR, and process slots.
    if err != nil {
        panic(err)
    }
    return &syncCommitteeHeadState{cache: c}
}

// add `slot` as key and `state` as value onto the lru cache.
func (c *syncCommitteeHeadState) add(slot types.Slot, state state.BeaconState) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.cache.Add(slot, state)
}

// get `state` using `slot` as key. Return nil if nothing is found.
func (c *syncCommitteeHeadState) get(slot types.Slot) state.BeaconState {
    c.lock.RLock()
    defer c.lock.RUnlock()
    val, exists := c.cache.Get(slot)
    if !exists {
        return nil
    }
    if val == nil {
        return nil
    }
    return val.(*stateAltair.BeaconState)
}
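A standalone sketch of the size-1 LRU pattern that `syncCommitteeHeadState` above relies on; the string value stands in for a beacon state so the example has no Prysm dependencies. The point of the size-1 cache is that only the most recently requested slot's advanced head state is worth keeping.

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// One entry: keeping more than the latest slot's state would only waste
	// memory, since sync committee callers all ask about the same head slot.
	c, err := lru.New(1)
	if err != nil {
		panic(err)
	}
	c.Add(uint64(100), "state@100")
	if v, ok := c.Get(uint64(100)); ok {
		fmt.Println("hit:", v) // hit: state@100
	}
	c.Add(uint64(101), "state@101") // evicts the entry for slot 100
	if _, ok := c.Get(uint64(100)); !ok {
		fmt.Println("miss: slot 100 was evicted")
	}
}
```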

beacon-chain/blockchain/head_sync_committee_info_test.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package blockchain

import (
    "context"
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestService_HeadCurrentSyncCommitteeIndices(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
    c := &Service{}
    c.head = &head{state: s}

    // Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
    slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
    indices, err := c.HeadCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
    require.NoError(t, err)

    // NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
    require.Equal(t, 0, len(indices))
}

func TestService_HeadNextSyncCommitteeIndices(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
    c := &Service{}
    c.head = &head{state: s}

    // Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
    slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
    indices, err := c.HeadNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
    require.NoError(t, err)

    // NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
    require.NotEqual(t, 0, len(indices))
}

func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
    c := &Service{}
    c.head = &head{state: s}

    // Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
    slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
    pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
    require.NoError(t, err)

    // Any subcommittee should match the subcommittee size.
    subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
    require.Equal(t, int(subCommitteeSize), len(pubkeys))
}

func TestService_HeadSyncCommitteeDomain(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
    c := &Service{}
    c.head = &head{state: s}

    wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
    require.NoError(t, err)

    d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
    require.NoError(t, err)

    require.DeepEqual(t, wanted, d)
}

func TestService_HeadSyncContributionProofDomain(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
    c := &Service{}
    c.head = &head{state: s}

    wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
    require.NoError(t, err)

    d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
    require.NoError(t, err)

    require.DeepEqual(t, wanted, d)
}

func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
    c := &Service{}
    c.head = &head{state: s}

    wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
    require.NoError(t, err)

    d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
    require.NoError(t, err)

    require.DeepEqual(t, wanted, d)
}

func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
    c := newSyncCommitteeHeadState()
    beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
    require.NoError(t, beaconState.SetSlot(100))
    cachedState := c.get(101)
    require.Equal(t, nil, cachedState)
    c.add(101, beaconState)
    cachedState = c.get(101)
    require.DeepEqual(t, beaconState, cachedState)
}
@@ -10,6 +10,7 @@ import (
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/timeutils"
    "github.com/prysmaticlabs/prysm/shared/version"
    "github.com/sirupsen/logrus"
)

@@ -33,6 +34,12 @@ func logStateTransitionData(b block.BeaconBlock) {
    if len(b.Body().VoluntaryExits()) > 0 {
        log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
    }
    if b.Version() == version.Altair {
        agg, err := b.Body().SyncAggregate()
        if err == nil {
            log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
        }
    }
    log.Info("Finished applying state transition")
}
@@ -3,15 +3,18 @@ package blockchain
import (
    "context"

    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/version"
)

var (
@@ -107,6 +110,14 @@ var (
        Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
    },
    )
    syncHeadStateMiss = promauto.NewCounter(prometheus.CounterOpts{
        Name: "sync_head_state_miss",
        Help: "The number of sync head state requests that are present in the cache.",
    })
    syncHeadStateHit = promauto.NewCounter(prometheus.CounterOpts{
        Name: "sync_head_state_hit",
        Help: "The number of sync head state requests that are not present in the cache.",
    })
)

// reportSlotMetrics reports slot related metrics.
@@ -206,14 +217,31 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
    beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
    currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))

    // Validator participation should be viewed on the canonical chain.
    v, b, err := precompute.New(ctx, headState)
    if err != nil {
        return err
    }
    _, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
    if err != nil {
        return err
    var b *precompute.Balance
    var v []*precompute.Validator
    var err error
    switch headState.Version() {
    case version.Phase0:
        // Validator participation should be viewed on the canonical chain.
        v, b, err = precompute.New(ctx, headState)
        if err != nil {
            return err
        }
        _, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
        if err != nil {
            return err
        }
    case version.Altair:
        v, b, err = altair.InitializeEpochValidators(ctx, headState)
        if err != nil {
            return err
        }
        _, b, err = altair.ProcessEpochParticipation(ctx, headState, b, v)
        if err != nil {
            return err
        }
    default:
        return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
    }
    prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
    prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
@@ -62,6 +62,11 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
    return nil
}

func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
    mb.broadcastCalled = true
    return nil
}

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
@@ -21,7 +21,6 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/event"
@@ -42,7 +41,7 @@ type ChainService struct {
    Genesis        time.Time
    ValidatorsRoot [32]byte
    CanonicalRoots map[[32]byte]bool
    Fork           *statepb.Fork
    Fork           *ethpb.Fork
    ETH1Data       *ethpb.Eth1Data
    DB             db.Database
    stateNotifier  statefeed.Notifier
@@ -52,6 +51,13 @@ type ChainService struct {
    ForkChoiceStore             *protoarray.Store
    VerifyBlkDescendantErr      error
    Slot                        *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
    CurrentSyncCommitteeIndices []types.CommitteeIndex
    NextSyncCommitteeIndices    []types.CommitteeIndex
    SyncCommitteeDomain         []byte
    SyncSelectionProofDomain    []byte
    SyncContributionProofDomain []byte
    PublicKey                   [48]byte
    SyncCommitteePubkeys        [][]byte
}

// StateNotifier mocks the same method in the chain service.
@@ -259,7 +265,7 @@ func (s *ChainService) HeadState(context.Context) (state.BeaconState, error) {
}

// CurrentFork mocks HeadState method in chain service.
func (s *ChainService) CurrentFork() *statepb.Fork {
func (s *ChainService) CurrentFork() *ethpb.Fork {
    return s.Fork
}

@@ -393,3 +399,43 @@ func (s *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
        },
        []types.Slot{0, 1}
}

// HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always return 0 and true.
func (s *ChainService) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
    return 0, true
}

// HeadValidatorIndexToPublicKey mocks HeadValidatorIndexToPublicKey and always return empty and nil.
func (s *ChainService) HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error) {
    return s.PublicKey, nil
}

// HeadCurrentSyncCommitteeIndices mocks HeadCurrentSyncCommitteeIndices and always return `CurrentSyncCommitteeIndices`.
func (s *ChainService) HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
    return s.CurrentSyncCommitteeIndices, nil
}

// HeadNextSyncCommitteeIndices mocks HeadNextSyncCommitteeIndices and always return `HeadNextSyncCommitteeIndices`.
func (s *ChainService) HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
    return s.NextSyncCommitteeIndices, nil
}

// HeadSyncCommitteePubKeys mocks HeadSyncCommitteePubKeys and always return empty nil.
func (s *ChainService) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
    return s.SyncCommitteePubkeys, nil
}

// HeadSyncCommitteeDomain mocks HeadSyncCommitteeDomain and always return empty nil.
func (s *ChainService) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
    return s.SyncCommitteeDomain, nil
}

// HeadSyncSelectionProofDomain mocks HeadSyncSelectionProofDomain and always return empty nil.
func (s *ChainService) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
    return s.SyncSelectionProofDomain, nil
}

// HeadSyncContributionProofDomain mocks HeadSyncContributionProofDomain and always return empty nil.
func (s *ChainService) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
    return s.SyncContributionProofDomain, nil
}
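A hypothetical test-wiring sketch (not part of this diff) showing how a test might populate the mock ChainService's new sync committee fields and hand it to code that expects the `blockchain.HeadSyncCommitteeFetcher` interface. The import path for the mock package and the test name are assumptions based on the usual Prysm layout of `beacon-chain/blockchain/testing`.

```go
package example_test

import (
	"context"
	"testing"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" // assumed path of the mock ChainService
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestMockSyncCommitteeWiring(t *testing.T) {
	chain := &mock.ChainService{
		// The mock simply echoes back whatever the test configures here.
		CurrentSyncCommitteeIndices: []types.CommitteeIndex{1, 5},
		SyncCommitteeDomain:         make([]byte, 32),
	}
	// Compiles because the mock now implements the interface added in this change set.
	var fetcher blockchain.HeadSyncCommitteeFetcher = chain
	indices, err := fetcher.HeadCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(0))
	require.NoError(t, err)
	require.Equal(t, 2, len(indices))
}
```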
@@ -7,19 +7,26 @@ go_library(
        "block.go",
        "deposit.go",
        "epoch_precompute.go",
        "epoch_spec.go",
        "reward.go",
        "sync_committee.go",
        "transition.go",
        "upgrade.go",
        "validator.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/altair",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//shared/testutil:__pkg__",
        "//spectest:__subpackages__",
        "//validator/client:__pkg__",
    ],
    deps = [
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/epoch:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v2:go_default_library",
@@ -45,9 +52,12 @@ go_test(
        "deposit_fuzz_test.go",
        "deposit_test.go",
        "epoch_precompute_test.go",
        "epoch_spec_test.go",
        "reward_test.go",
        "sync_committee_test.go",
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -71,5 +81,6 @@ go_test(
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -5,11 +5,13 @@ import (
    "fmt"
    "testing"

    fuzz "github.com/google/gofuzz"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/attestationutil"
@@ -379,6 +381,28 @@ func TestValidatorFlag_Add(t *testing.T) {
    }
}

func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
    fuzzer := fuzz.NewWithSeed(0)
    state := &ethpb.BeaconStateAltair{}
    b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
    ctx := context.Background()
    for i := 0; i < 10000; i++ {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(b)
        if b.Block == nil {
            b.Block = &ethpb.BeaconBlockAltair{}
        }
        s, err := stateAltair.InitializeFromProtoUnsafe(state)
        require.NoError(t, err)
        wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
        require.NoError(t, err)
        r, err := altair.ProcessAttestationsNoVerifySignature(ctx, s, wsb)
        if err != nil && r != nil {
            t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
        }
    }
}

func TestSetParticipationAndRewardProposer(t *testing.T) {
    cfg := params.BeaconConfig()
    sourceFlagIndex := cfg.TimelySourceFlagIndex

beacon-chain/core/altair/epoch_spec.go (new file, 116 lines)
@@ -0,0 +1,116 @@
package altair

import (
    "context"

    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/mathutil"
    "github.com/prysmaticlabs/prysm/shared/params"
)

// ProcessSyncCommitteeUpdates processes sync client committee updates for the beacon state.
//
// Spec code:
// def process_sync_committee_updates(state: BeaconState) -> None:
//    next_epoch = get_current_epoch(state) + Epoch(1)
//    if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
//        state.current_sync_committee = state.next_sync_committee
//        state.next_sync_committee = get_next_sync_committee(state)
func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
    nextEpoch := helpers.NextEpoch(beaconState)
    if nextEpoch%params.BeaconConfig().EpochsPerSyncCommitteePeriod == 0 {
        currentSyncCommittee, err := beaconState.NextSyncCommittee()
        if err != nil {
            return nil, err
        }
        if err := beaconState.SetCurrentSyncCommittee(currentSyncCommittee); err != nil {
            return nil, err
        }
        nextCommittee, err := NextSyncCommittee(ctx, beaconState)
        if err != nil {
            return nil, err
        }
        if err := beaconState.SetNextSyncCommittee(nextCommittee); err != nil {
            return nil, err
        }
        if err := helpers.UpdateSyncCommitteeCache(beaconState); err != nil {
            return nil, err
        }
    }
    return beaconState, nil
}

// ProcessParticipationFlagUpdates processes participation flag updates by rotating current to previous.
//
// Spec code:
// def process_participation_flag_updates(state: BeaconState) -> None:
//    state.previous_epoch_participation = state.current_epoch_participation
//    state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
func ProcessParticipationFlagUpdates(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
    c, err := beaconState.CurrentEpochParticipation()
    if err != nil {
        return nil, err
    }
    if err := beaconState.SetPreviousParticipationBits(c); err != nil {
        return nil, err
    }
    if err := beaconState.SetCurrentParticipationBits(make([]byte, beaconState.NumValidators())); err != nil {
        return nil, err
    }
    return beaconState, nil
}

// ProcessSlashings processes the slashed validators during epoch processing,
// The function is modified to use PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR.
//
// Spec code:
// def process_slashings(state: BeaconState) -> None:
//    epoch = get_current_epoch(state)
//    total_balance = get_total_active_balance(state)
//    adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
//    for index, validator in enumerate(state.validators):
//        if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
//            increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
//            penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
//            penalty = penalty_numerator // total_balance * increment
//            decrease_balance(state, ValidatorIndex(index), penalty)
func ProcessSlashings(state state.BeaconState) (state.BeaconState, error) {
    currentEpoch := helpers.CurrentEpoch(state)
    totalBalance, err := helpers.TotalActiveBalance(state)
    if err != nil {
        return nil, errors.Wrap(err, "could not get total active balance")
    }

    // Compute slashed balances in the current epoch
    exitLength := params.BeaconConfig().EpochsPerSlashingsVector

    // Compute the sum of state slashings
    slashings := state.Slashings()
    totalSlashing := uint64(0)
    for _, slashing := range slashings {
        totalSlashing += slashing
    }

    // a callback is used here to apply the following actions to all validators
    // below equally.
    increment := params.BeaconConfig().EffectiveBalanceIncrement
    minSlashing := mathutil.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplierAltair, totalBalance)
    err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
        correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
        if val.Slashed && correctEpoch {
            penaltyNumerator := val.EffectiveBalance / increment * minSlashing
            penalty := penaltyNumerator / totalBalance * increment
            if err := helpers.DecreaseBalance(state, types.ValidatorIndex(idx), penalty); err != nil {
                return false, val, err
            }
            return true, val, nil
        }
        return false, val, nil
    })
    return state, err
}
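A worked sketch (not part of this diff) of the `ProcessSlashings` penalty arithmetic above, assuming mainnet parameters: `MAX_EFFECTIVE_BALANCE = 32e9` Gwei, `EFFECTIVE_BALANCE_INCREMENT = 1e9` Gwei, and `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR = 2`. The numbers mirror the first case of `TestProcessSlashings_SlashedLess` in the test file below: one slashed 32 ETH validator, one active 32 ETH validator, and 1e9 Gwei of recorded slashings.

```go
package main

import "fmt"

func main() {
	const (
		increment     = uint64(1_000_000_000)  // EFFECTIVE_BALANCE_INCREMENT (assumed mainnet value)
		effBalance    = uint64(32_000_000_000) // slashed validator's effective balance
		totalBalance  = uint64(32_000_000_000) // total active balance (one active 32 ETH validator)
		totalSlashing = uint64(1_000_000_000)  // sum(state.slashings)
		multiplier    = uint64(2)              // PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR
	)
	// adjusted_total_slashing_balance = min(totalSlashing * multiplier, totalBalance)
	adjusted := totalSlashing * multiplier
	if adjusted > totalBalance {
		adjusted = totalBalance
	}
	// The increment is factored out of the numerator to avoid uint64 overflow, as in the spec.
	penaltyNumerator := effBalance / increment * adjusted
	penalty := penaltyNumerator / totalBalance * increment
	fmt.Println(effBalance - penalty) // 30000000000 — the balance the test expects
}
```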

beacon-chain/core/altair/epoch_spec_test.go (new file, 181 lines)
@@ -0,0 +1,181 @@
package altair_test

import (
    "context"
    "fmt"
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/assert"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
    "google.golang.org/protobuf/proto"
)

func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    h := &ethpb.BeaconBlockHeader{
        StateRoot:  bytesutil.PadTo([]byte{'a'}, 32),
        ParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
        BodyRoot:   bytesutil.PadTo([]byte{'c'}, 32),
    }
    require.NoError(t, s.SetLatestBlockHeader(h))
    postState, err := altair.ProcessSyncCommitteeUpdates(context.Background(), s)
    require.NoError(t, err)
    current, err := postState.CurrentSyncCommittee()
    require.NoError(t, err)
    next, err := postState.NextSyncCommittee()
    require.NoError(t, err)
    require.DeepEqual(t, current, next)

    require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
    postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
    require.NoError(t, err)
    c, err := postState.CurrentSyncCommittee()
    require.NoError(t, err)
    n, err := postState.NextSyncCommittee()
    require.NoError(t, err)
    require.DeepEqual(t, current, c)
    require.DeepEqual(t, next, n)

    require.NoError(t, s.SetSlot(types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch-1))
    postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
    require.NoError(t, err)
    c, err = postState.CurrentSyncCommittee()
    require.NoError(t, err)
    n, err = postState.NextSyncCommittee()
    require.NoError(t, err)
    require.NotEqual(t, current, c)
    require.NotEqual(t, next, n)
    require.DeepEqual(t, next, c)

    // Test boundary condition.
    slot := params.BeaconConfig().SlotsPerEpoch * types.Slot(helpers.CurrentEpoch(s)+params.BeaconConfig().EpochsPerSyncCommitteePeriod)
    require.NoError(t, s.SetSlot(slot))
    boundaryCommittee, err := altair.NextSyncCommittee(context.Background(), s)
    require.NoError(t, err)
    require.DeepNotEqual(t, boundaryCommittee, n)
}

func TestProcessParticipationFlagUpdates_CanRotate(t *testing.T) {
    s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    c, err := s.CurrentEpochParticipation()
    require.NoError(t, err)
    require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), c)
    p, err := s.PreviousEpochParticipation()
    require.NoError(t, err)
    require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), p)

    newC := []byte{'a'}
    newP := []byte{'b'}
    require.NoError(t, s.SetCurrentParticipationBits(newC))
    require.NoError(t, s.SetPreviousParticipationBits(newP))
    c, err = s.CurrentEpochParticipation()
    require.NoError(t, err)
    require.DeepEqual(t, newC, c)
    p, err = s.PreviousEpochParticipation()
    require.NoError(t, err)
    require.DeepEqual(t, newP, p)

    s, err = altair.ProcessParticipationFlagUpdates(s)
    require.NoError(t, err)
    c, err = s.CurrentEpochParticipation()
    require.NoError(t, err)
    require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), c)
    p, err = s.PreviousEpochParticipation()
    require.NoError(t, err)
    require.DeepEqual(t, newC, p)
}

func TestProcessSlashings_NotSlashed(t *testing.T) {
    base := &ethpb.BeaconStateAltair{
        Slot:       0,
        Validators: []*ethpb.Validator{{Slashed: true}},
        Balances:   []uint64{params.BeaconConfig().MaxEffectiveBalance},
        Slashings:  []uint64{0, 1e9},
    }
    s, err := stateAltair.InitializeFromProto(base)
    require.NoError(t, err)
    newState, err := altair.ProcessSlashings(s)
    require.NoError(t, err)
    wanted := params.BeaconConfig().MaxEffectiveBalance
    assert.Equal(t, wanted, newState.Balances()[0], "Unexpected slashed balance")
}

func TestProcessSlashings_SlashedLess(t *testing.T) {
    tests := []struct {
        state *ethpb.BeaconStateAltair
        want  uint64
    }{
        {
            state: &ethpb.BeaconStateAltair{
                Validators: []*ethpb.Validator{
                    {Slashed: true,
                        WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance},
                    {ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance}},
                Balances:  []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
                Slashings: []uint64{0, 1e9},
            },
            want: uint64(30000000000),
        },
        {
            state: &ethpb.BeaconStateAltair{
                Validators: []*ethpb.Validator{
                    {Slashed: true,
                        WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance},
                    {ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
                    {ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
                },
                Balances:  []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
                Slashings: []uint64{0, 1e9},
            },
            want: uint64(31000000000),
        },
        {
            state: &ethpb.BeaconStateAltair{
                Validators: []*ethpb.Validator{
                    {Slashed: true,
                        WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance},
                    {ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
                    {ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
                },
                Balances:  []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
                Slashings: []uint64{0, 2 * 1e9},
            },
            want: uint64(30000000000),
        },
        {
            state: &ethpb.BeaconStateAltair{
                Validators: []*ethpb.Validator{
                    {Slashed: true,
                        WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement},
                    {ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement}},
                Balances:  []uint64{params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement},
                Slashings: []uint64{0, 1e9},
            },
            want: uint64(29000000000),
        },
    }

    for i, tt := range tests {
        t.Run(fmt.Sprint(i), func(t *testing.T) {
            helpers.ClearCache()
            original := proto.Clone(tt.state)
            s, err := stateAltair.InitializeFromProto(tt.state)
            require.NoError(t, err)
            newState, err := altair.ProcessSlashings(s)
            require.NoError(t, err)
            assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
        })
    }
}
@@ -5,7 +5,7 @@ import (
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    altair "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"

beacon-chain/core/altair/transition.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package altair

import (
    "context"

    "github.com/pkg/errors"
    e "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "go.opencensus.io/trace"
)

// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
// It's optimized by pre computing validator attested info and epoch total/attested balances upfront.
//
// Spec code:
// def process_epoch(state: BeaconState) -> None:
//    process_justification_and_finalization(state)  # [Modified in Altair]
//    process_inactivity_updates(state)  # [New in Altair]
//    process_rewards_and_penalties(state)  # [Modified in Altair]
//    process_registry_updates(state)
//    process_slashings(state)  # [Modified in Altair]
//    process_eth1_data_reset(state)
//    process_effective_balance_updates(state)
//    process_slashings_reset(state)
//    process_randao_mixes_reset(state)
//    process_historical_roots_update(state)
//    process_participation_flag_updates(state)  # [New in Altair]
//    process_sync_committee_updates(state)  # [New in Altair]
func ProcessEpoch(ctx context.Context, state state.BeaconStateAltair) (state.BeaconStateAltair, error) {
    ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch")
    defer span.End()

    if state == nil || state.IsNil() {
        return nil, errors.New("nil state")
    }
    vp, bp, err := InitializeEpochValidators(ctx, state)
    if err != nil {
        return nil, err
    }

    // New in Altair.
    vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
    if err != nil {
        return nil, err
    }

    state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
    if err != nil {
        return nil, errors.Wrap(err, "could not process justification")
    }

    // New in Altair.
    state, vp, err = ProcessInactivityScores(ctx, state, vp)
    if err != nil {
        return nil, errors.Wrap(err, "could not process inactivity updates")
    }

    // New in Altair.
    state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
    if err != nil {
        return nil, errors.Wrap(err, "could not process rewards and penalties")
    }

    state, err = e.ProcessRegistryUpdates(state)
    if err != nil {
        return nil, errors.Wrap(err, "could not process registry updates")
    }

    // Modified in Altair.
    state, err = ProcessSlashings(state)
    if err != nil {
        return nil, err
    }

    state, err = e.ProcessEth1DataReset(state)
    if err != nil {
        return nil, err
    }
    state, err = e.ProcessEffectiveBalanceUpdates(state)
    if err != nil {
        return nil, err
    }
    state, err = e.ProcessSlashingsReset(state)
    if err != nil {
        return nil, err
    }
    state, err = e.ProcessRandaoMixesReset(state)
    if err != nil {
        return nil, err
    }
    state, err = e.ProcessHistoricalRootsUpdate(state)
    if err != nil {
        return nil, err
    }

    // New in Altair.
    state, err = ProcessParticipationFlagUpdates(state)
    if err != nil {
        return nil, err
    }

    // New in Altair.
    state, err = ProcessSyncCommitteeUpdates(ctx, state)
    if err != nil {
        return nil, err
    }

    return state, nil
}

beacon-chain/core/altair/transition_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package altair_test

import (
    "context"
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestProcessEpoch_CanProcess(t *testing.T) {
    epoch := types.Epoch(1)
    slashing := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
    base := &ethpb.BeaconStateAltair{
        Slot:                       params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)) + 1,
        BlockRoots:                 make([][]byte, 128),
        Slashings:                  slashing,
        RandaoMixes:                make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
        FinalizedCheckpoint:        &ethpb.Checkpoint{Root: make([]byte, 32)},
        JustificationBits:          bitfield.Bitvector4{0x00},
        CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
    }
    s, err := stateAltair.InitializeFromProto(base)
    require.NoError(t, err)
    require.NoError(t, s.SetValidators([]*ethpb.Validator{}))
    newState, err := altair.ProcessEpoch(context.Background(), s)
    require.NoError(t, err)
    require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
}
beacon-chain/core/altair/validator.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package altair

import (
	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// SlashValidator slashes the validator at the given slashed index.
// Compared to phase 0, the function uses MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR and applies PROPOSER_WEIGHT when calculating the proposer reward.
//
// def slash_validator(state: BeaconState,
//                     slashed_index: ValidatorIndex,
//                     whistleblower_index: ValidatorIndex=None) -> None:
//    """
//    Slash the validator with index ``slashed_index``.
//    """
//    epoch = get_current_epoch(state)
//    initiate_validator_exit(state, slashed_index)
//    validator = state.validators[slashed_index]
//    validator.slashed = True
//    validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
//    state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
//    decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR)
//
//    # Apply proposer and whistleblower rewards
//    proposer_index = get_beacon_proposer_index(state)
//    if whistleblower_index is None:
//        whistleblower_index = proposer_index
//    whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
//    proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR)
//    increase_balance(state, proposer_index, proposer_reward)
//    increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
func SlashValidator(state state.BeaconState, slashedIdx types.ValidatorIndex, penaltyQuotient uint64,
	proposerRewardQuotient uint64) (state.BeaconState, error) {
	state, err := validators.InitiateValidatorExit(state, slashedIdx)
	if err != nil {
		return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
	}
	currentEpoch := helpers.SlotToEpoch(state.Slot())
	validator, err := state.ValidatorAtIndex(slashedIdx)
	if err != nil {
		return nil, err
	}
	validator.Slashed = true
	maxWithdrawableEpoch := types.MaxEpoch(validator.WithdrawableEpoch, currentEpoch+params.BeaconConfig().EpochsPerSlashingsVector)
	validator.WithdrawableEpoch = maxWithdrawableEpoch

	if err := state.UpdateValidatorAtIndex(slashedIdx, validator); err != nil {
		return nil, err
	}

	// The slashed validator's effective balance is added to the slashings accumulator
	// for the current epoch (indexed modulo EPOCHS_PER_SLASHINGS_VECTOR).
	slashings := state.Slashings()
	currentSlashing := slashings[currentEpoch%params.BeaconConfig().EpochsPerSlashingsVector]
	if err := state.UpdateSlashingsAtIndex(
		uint64(currentEpoch%params.BeaconConfig().EpochsPerSlashingsVector),
		currentSlashing+validator.EffectiveBalance,
	); err != nil {
		return nil, err
	}
	if err := helpers.DecreaseBalance(state, slashedIdx, validator.EffectiveBalance/penaltyQuotient); err != nil {
		return nil, err
	}

	proposerIdx, err := helpers.BeaconProposerIndex(state)
	if err != nil {
		return nil, errors.Wrap(err, "could not get proposer idx")
	}

	// In this implementation, the proposer is treated as the whistleblower.
	whistleBlowerIdx := proposerIdx
	whistleblowerReward := validator.EffectiveBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
	proposerReward := whistleblowerReward * params.BeaconConfig().ProposerWeight / params.BeaconConfig().WeightDenominator
	err = helpers.IncreaseBalance(state, proposerIdx, proposerReward)
	if err != nil {
		return nil, err
	}
	err = helpers.IncreaseBalance(state, whistleBlowerIdx, whistleblowerReward-proposerReward)
	if err != nil {
		return nil, err
	}
	return state, nil
}
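The block-processing hunks later in this diff select the penalty quotient from the state version before invoking a slashing routine with this signature. Below is a condensed sketch of that call pattern; the identifiers inside it (Version, version.Altair, the config quotients, SlashValidator) all appear in this diff, while the wrapper itself is illustrative and not part of the change.

// Illustrative wrapper (assumption): pick the fork-appropriate quotient, then slash.
package example

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/version"
)

func slashWithForkQuotient(st state.BeaconState, idx types.ValidatorIndex) (state.BeaconState, error) {
	cfg := params.BeaconConfig()
	// Phase 0 states keep the original quotient; Altair states switch to the Altair-specific one.
	slashingQuotient := cfg.MinSlashingPenaltyQuotient
	if st.Version() == version.Altair {
		slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
	}
	return altair.SlashValidator(st, idx, slashingQuotient, cfg.ProposerRewardQuotient)
}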
beacon-chain/core/altair/validator_test.go (new file, 69 lines)
@@ -0,0 +1,69 @@
package altair_test

import (
	"testing"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestSlashValidator_OK(t *testing.T) {
	validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
	registry := make([]*ethpb.Validator, 0, validatorCount)
	balances := make([]uint64, 0, validatorCount)
	for i := uint64(0); i < validatorCount; i++ {
		registry = append(registry, &ethpb.Validator{
			ActivationEpoch:  0,
			ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
		})
		balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
	}

	base := &ethpb.BeaconStateAltair{
		Validators:  registry,
		Slashings:   make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		Balances:    balances,
	}
	state, err := stateAltair.InitializeFromProto(base)
	require.NoError(t, err)

	slashedIdx := types.ValidatorIndex(2)

	proposer, err := helpers.BeaconProposerIndex(state)
	require.NoError(t, err, "Could not get proposer")
	proposerBal, err := state.BalanceAtIndex(proposer)
	require.NoError(t, err)
	cfg := params.BeaconConfig()
	slashedState, err := altair.SlashValidator(state, slashedIdx, cfg.MinSlashingPenaltyQuotientAltair, cfg.ProposerRewardQuotient)
	require.NoError(t, err, "Could not slash validator")
	state, ok := slashedState.(*stateAltair.BeaconState)
	require.Equal(t, true, ok)

	v, err := state.ValidatorAtIndex(slashedIdx)
	require.NoError(t, err)
	assert.Equal(t, true, v.Slashed, "Validator not marked as slashed despite being expected to be slashed")
	assert.Equal(t, helpers.CurrentEpoch(state)+params.BeaconConfig().EpochsPerSlashingsVector, v.WithdrawableEpoch, "Withdrawable epoch not the expected value")

	maxBalance := params.BeaconConfig().MaxEffectiveBalance
	slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
	assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")

	whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
	bal, err := state.BalanceAtIndex(proposer)
	require.NoError(t, err)
	// The proposer is the whistleblower in this implementation.
	assert.Equal(t, proposerBal+whistleblowerReward, bal, "Did not get expected balance for proposer")
	bal, err = state.BalanceAtIndex(slashedIdx)
	require.NoError(t, err)
	v, err = state.ValidatorAtIndex(slashedIdx)
	require.NoError(t, err)
	assert.Equal(t, maxBalance-(v.EffectiveBalance/params.BeaconConfig().MinSlashingPenaltyQuotientAltair), bal, "Did not get expected balance for slashed validator")
}
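For concreteness, the arithmetic behind the assertions above, assuming mainnet config values (MaxEffectiveBalance 32e9 Gwei, WhistleBlowerRewardQuotient 512, ProposerWeight 8, WeightDenominator 64, MinSlashingPenaltyQuotientAltair 64). These constants are assumptions about the config, not stated in the diff, and the snippet is a standalone illustration.

// Worked numbers behind TestSlashValidator_OK under the assumed mainnet config.
package main

import "fmt"

func main() {
	const (
		maxEffectiveBalance          = uint64(32 * 1e9) // Gwei
		whistleBlowerRewardQuotient  = uint64(512)
		proposerWeight               = uint64(8)
		weightDenominator            = uint64(64)
		minSlashingPenaltyQuotAltair = uint64(64)
	)
	whistleblowerReward := maxEffectiveBalance / whistleBlowerRewardQuotient   // 62,500,000 Gwei
	proposerReward := whistleblowerReward * proposerWeight / weightDenominator // 7,812,500 Gwei
	penalty := maxEffectiveBalance / minSlashingPenaltyQuotAltair              // 500,000,000 Gwei
	// The proposer doubles as whistleblower here, so its balance grows by the
	// full whistleblower reward (proposerReward + the remainder).
	fmt.Println(whistleblowerReward, proposerReward, penalty)
}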
@@ -36,10 +36,12 @@ go_library(
|
||||
"//shared/depositutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/p2putils:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/slashutil:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
@@ -65,6 +67,7 @@ go_test(
|
||||
"proposer_slashing_regression_test.go",
|
||||
"proposer_slashing_test.go",
|
||||
"randao_test.go",
|
||||
"signature_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
)
|
||||
|
||||
// ProcessAttesterSlashings is one of the operations performed
|
||||
@@ -59,7 +60,11 @@ func ProcessAttesterSlashings(
|
||||
}
|
||||
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
|
||||
cfg := params.BeaconConfig()
|
||||
beaconState, err = slashFunc(beaconState, types.ValidatorIndex(validatorIndex), cfg.MinSlashingPenaltyQuotient, cfg.ProposerRewardQuotient)
|
||||
slashingQuotient := cfg.MinSlashingPenaltyQuotient
|
||||
if beaconState.Version() == version.Altair {
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
}
|
||||
beaconState, err = slashFunc(beaconState, types.ValidatorIndex(validatorIndex), slashingQuotient, cfg.ProposerRewardQuotient)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not slash validator index %d",
|
||||
validatorIndex)
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -55,7 +56,11 @@ func ProcessProposerSlashings(
|
||||
return nil, errors.Wrapf(err, "could not verify proposer slashing %d", idx)
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
beaconState, err = slashFunc(beaconState, slashing.Header_1.Header.ProposerIndex, cfg.MinSlashingPenaltyQuotient, cfg.ProposerRewardQuotient)
|
||||
slashingQuotient := cfg.MinSlashingPenaltyQuotient
|
||||
if beaconState.Version() == version.Altair {
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
}
|
||||
beaconState, err = slashFunc(beaconState, slashing.Header_1.Header.ProposerIndex, slashingQuotient, cfg.ProposerRewardQuotient)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
|
||||
}
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/p2putils"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
@@ -21,7 +22,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
signingData := &statepb.SigningData{
|
||||
signingData := ðpb.SigningData{
|
||||
ObjectRoot: signedData,
|
||||
Domain: domain,
|
||||
}
|
||||
@@ -77,6 +78,27 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
	return helpers.VerifyBlockSigningRoot(proposerPubKey, sig, domain, rootFunc)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk block.SignedBeaconBlock) error {
	currentEpoch := helpers.SlotToEpoch(blk.Block().Slot())
	fork, err := p2putils.Fork(currentEpoch)
	if err != nil {
		return err
	}
	domain, err := helpers.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
	if err != nil {
		return err
	}
	proposer, err := beaconState.ValidatorAtIndex(blk.Block().ProposerIndex())
	if err != nil {
		return err
	}
	proposerPubKey := proposer.PublicKey
	return helpers.VerifyBlockSigningRoot(proposerPubKey, blk.Signature(), domain, blk.Block().HashTreeRoot)
}
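A hedged illustration of why this variant exists: the fork is resolved from the epoch-indexed schedule rather than read from the state, so a block proposed after a fork activates can still be verified against a pre-fork state. The helper below is illustrative only; it assumes helpers.Domain returns the raw domain bytes and reuses only calls that already appear in this hunk.

// Hypothetical sketch (assumption): derive the proposer domain from the block's
// epoch via the fork schedule, not from the state's stored fork.
package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/shared/p2putils"
	"github.com/prysmaticlabs/prysm/shared/params"
)

func proposerDomainForBlock(st state.ReadOnlyBeaconState, blk block.SignedBeaconBlock) ([]byte, error) {
	epoch := helpers.SlotToEpoch(blk.Block().Slot())
	// Epoch-derived fork: around a fork boundary this may differ from st's stored fork.
	fork, err := p2putils.Fork(epoch)
	if err != nil {
		return nil, err
	}
	return helpers.Domain(fork, epoch, params.BeaconConfig().DomainBeaconProposer, st.GenesisValidatorRoot())
}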
|
||||
|
||||
// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state.
|
||||
func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex types.ValidatorIndex,
|
||||
|
||||
beacon-chain/core/blocks/signature_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package blocks_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	wrapperv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	bCfg := params.BeaconConfig()
	bCfg.AltairForkEpoch = 100
	bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = 100
	params.OverrideBeaconConfig(bCfg)
	bState, keys := testutil.DeterministicGenesisState(t, 100)
	altairBlk := testutil.NewBeaconBlockAltair()
	altairBlk.Block.ProposerIndex = 0
	altairBlk.Block.Slot = params.BeaconConfig().SlotsPerEpoch * 100
	fData := &ethpb.Fork{
		Epoch:           100,
		CurrentVersion:  params.BeaconConfig().AltairForkVersion,
		PreviousVersion: params.BeaconConfig().GenesisForkVersion,
	}
	domain, err := helpers.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorRoot())
	assert.NoError(t, err)
	rt, err := helpers.ComputeSigningRoot(altairBlk.Block, domain)
	assert.NoError(t, err)
	sig := keys[0].Sign(rt[:]).Marshal()
	altairBlk.Signature = sig
	wsb, err := wrapperv2.WrappedAltairSignedBeaconBlock(altairBlk)
	require.NoError(t, err)
	assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
}
@@ -51,6 +51,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -75,6 +76,7 @@ go_test(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
|
||||
@@ -17,10 +17,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var committeeCache = cache.NewCommitteesCache()
|
||||
var proposerIndicesCache = cache.NewProposerIndicesCache()
|
||||
var syncCommitteeCache = cache.NewSyncCommittee()
|
||||
|
||||
// SlotCommitteeCount returns the number of crosslink committees of a slot. The
|
||||
// active validator count is provided as an argument rather than a imported implementation
|
||||
@@ -271,26 +273,21 @@ func VerifyAttestationBitfieldLengths(state state.ReadOnlyBeaconState, att *ethp
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShuffledIndices uses input beacon state and returns the shuffled indices of the input epoch,
|
||||
// the shuffled indices then can be used to break up into committees.
|
||||
func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.ValidatorIndex, error) {
|
||||
seed, err := Seed(s, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get seed for epoch %d", epoch)
|
||||
}
|
||||
|
||||
// Returns the active indices and the total active balance of the validators in input `state` and during input `epoch`.
|
||||
func activeIndicesAndBalance(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.ValidatorIndex, uint64, error) {
|
||||
balances := uint64(0)
|
||||
indices := make([]types.ValidatorIndex, 0, s.NumValidators())
|
||||
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if IsActiveValidatorUsingTrie(val, epoch) {
|
||||
balances += val.EffectiveBalance()
|
||||
indices = append(indices, types.ValidatorIndex(idx))
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
// UnshuffleList is used as an optimized implementation for raw speed.
|
||||
return UnshuffleList(indices, seed)
|
||||
return indices, balances, nil
|
||||
}
|
||||
|
||||
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
|
||||
@@ -306,7 +303,13 @@ func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) er
|
||||
return nil
|
||||
}
|
||||
|
||||
shuffledIndices, err := ShuffledIndices(state, e)
|
||||
indices, balance, err := activeIndicesAndBalance(state, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the shuffled indices based on the seed.
|
||||
shuffledIndices, err := UnshuffleList(indices, seed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -322,11 +325,23 @@ func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) er
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
|
||||
// Only update active balance field in cache if it's current epoch.
|
||||
// Using current epoch state to update next epoch field will cause insert an invalid
|
||||
// active balance value.
|
||||
b := &cache.Balance{}
|
||||
if e == epoch {
|
||||
b = &cache.Balance{
|
||||
Exist: true,
|
||||
Total: balance,
|
||||
}
|
||||
}
|
||||
|
||||
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
|
||||
ShuffledIndices: shuffledIndices,
|
||||
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
|
||||
Seed: seed,
|
||||
SortedIndices: sortedIndices,
|
||||
ActiveBalance: b,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -383,10 +398,200 @@ func UpdateProposerIndicesInCache(state state.ReadOnlyBeaconState) error {
|
||||
})
|
||||
}
|
||||
|
||||
// ClearCache clears the committee cache
|
||||
// ClearCache clears the beacon committee cache and sync committee cache.
|
||||
func ClearCache() {
|
||||
committeeCache = cache.NewCommitteesCache()
|
||||
proposerIndicesCache = cache.NewProposerIndicesCache()
|
||||
syncCommitteeCache = cache.NewSyncCommittee()
|
||||
}
|
||||
|
||||
// IsCurrentPeriodSyncCommittee returns true if the input validator index belongs in the current period sync committee
|
||||
// along with the sync committee root.
|
||||
// 1.) Checks if the public key exists in the sync committee cache
|
||||
// 2.) If 1 fails, checks if the public key exists in the input current sync committee object
|
||||
func IsCurrentPeriodSyncCommittee(
|
||||
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
|
||||
) (bool, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
committee, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Fill in the cache on miss.
|
||||
go func() {
|
||||
if err := syncCommitteeCache.UpdatePositionsInCommittee(bytesutil.ToBytes32(root), st); err != nil {
|
||||
log.Errorf("Could not fill sync committee cache on miss: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(indices) > 0, nil
|
||||
}
|
||||
|
||||
// IsNextPeriodSyncCommittee returns true if the input validator index belongs in the next period sync committee
|
||||
// along with the sync period boundary root.
|
||||
// 1.) Checks if the public key exists in the sync committee cache
|
||||
// 2.) If 1 fails, checks if the public key exists in the input next sync committee object
|
||||
func IsNextPeriodSyncCommittee(
|
||||
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
|
||||
) (bool, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
committee, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(indices) > 0, nil
|
||||
}
|
||||
|
||||
// CurrentPeriodSyncSubcommitteeIndices returns the subcommittee indices of the
|
||||
// current period sync committee for input validator.
|
||||
func CurrentPeriodSyncSubcommitteeIndices(
|
||||
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
|
||||
) ([]types.CommitteeIndex, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
committee, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Fill in the cache on miss.
|
||||
go func() {
|
||||
if err := syncCommitteeCache.UpdatePositionsInCommittee(bytesutil.ToBytes32(root), st); err != nil {
|
||||
log.Errorf("Could not fill sync committee cache on miss: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return indices, nil
|
||||
}
|
||||
|
||||
// NextPeriodSyncSubcommitteeIndices returns the subcommittee indices of the next period sync committee for input validator.
|
||||
func NextPeriodSyncSubcommitteeIndices(
|
||||
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
|
||||
) ([]types.CommitteeIndex, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
|
||||
if err == cache.ErrNonExistingSyncCommitteeKey {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
committee, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return indices, nil
|
||||
}
|
||||
|
||||
// UpdateSyncCommitteeCache updates sync committee cache.
|
||||
// It uses `state`'s latest block header root as key. To avoid miss usage, it disallows
|
||||
// block header with state root zeroed out.
|
||||
func UpdateSyncCommitteeCache(st state.BeaconStateAltair) error {
|
||||
nextSlot := st.Slot() + 1
|
||||
if nextSlot%params.BeaconConfig().SlotsPerEpoch != 0 {
|
||||
return errors.New("not at the end of the epoch to update cache")
|
||||
}
|
||||
if SlotToEpoch(nextSlot)%params.BeaconConfig().EpochsPerSyncCommitteePeriod != 0 {
|
||||
return errors.New("not at sync committee period boundary to update cache")
|
||||
}
|
||||
|
||||
header := st.LatestBlockHeader()
|
||||
if bytes.Equal(header.StateRoot, params.BeaconConfig().ZeroHash[:]) {
|
||||
return errors.New("zero hash state root can't be used to update cache")
|
||||
}
|
||||
|
||||
prevBlockRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return syncCommitteeCache.UpdatePositionsInCommittee(prevBlockRoot, st)
|
||||
}
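A hedged sketch of the call timing the guards above enforce: the cache refresh is meant to run in the slot that closes an epoch which is also a sync committee period boundary. The wrapper below is illustrative and not part of the change; only the calls inside it come from this diff.

// Hypothetical wrapper (assumption): refresh the sync committee cache only at
// the last slot before a sync committee period boundary, mirroring the checks
// inside UpdateSyncCommitteeCache.
package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/params"
)

func maybeUpdateSyncCommitteeCache(st state.BeaconStateAltair) error {
	nextSlot := st.Slot() + 1
	if nextSlot%params.BeaconConfig().SlotsPerEpoch != 0 {
		return nil // not the end of an epoch
	}
	if helpers.SlotToEpoch(nextSlot)%params.BeaconConfig().EpochsPerSyncCommitteePeriod != 0 {
		return nil // not a sync committee period boundary
	}
	return helpers.UpdateSyncCommitteeCache(st)
}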
|
||||
|
||||
// Loop through `pubKeys` for matching `pubKey` and get the indices where it matches.
|
||||
func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []types.CommitteeIndex {
|
||||
var indices []types.CommitteeIndex
|
||||
for i, k := range pubKeys {
|
||||
if bytes.Equal(k, pubKey) {
|
||||
indices = append(indices, types.CommitteeIndex(i))
|
||||
}
|
||||
}
|
||||
return indices
|
||||
}
|
||||
|
||||
// Retrieve the current sync period boundary root by calculating sync period start epoch
|
||||
// and calling `BlockRoot`.
|
||||
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
|
||||
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([]byte, error) {
|
||||
// Can't call `BlockRoot` until the first slot.
|
||||
if st.Slot() == params.BeaconConfig().GenesisSlot {
|
||||
return params.BeaconConfig().ZeroHash[:], nil
|
||||
}
|
||||
|
||||
startEpoch, err := SyncCommitteePeriodStartEpoch(CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startEpochSlot, err := StartSlot(startEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prevent underflow
|
||||
if startEpochSlot >= 1 {
|
||||
startEpochSlot--
|
||||
}
|
||||
|
||||
return BlockRootAtSlot(st, startEpochSlot)
|
||||
}
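To tie the helpers above together, an illustrative duty lookup for a single validator. The wrapper name and return shape are assumptions; the two helpers are the ones introduced in this diff, and on a cache miss they fall back to scanning the committee stored in the state while refilling the cache in the background.

// Illustrative only (assumption): query current-period sync duty for one validator.
package example

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// currentSyncDuty reports whether valIdx serves in the current sync committee
// period and, if so, at which subcommittee positions.
func currentSyncDuty(st state.BeaconStateAltair, valIdx types.ValidatorIndex) (bool, []types.CommitteeIndex, error) {
	inCommittee, err := helpers.IsCurrentPeriodSyncCommittee(st, valIdx)
	if err != nil {
		return false, nil, err
	}
	if !inCommittee {
		return false, nil, nil
	}
	positions, err := helpers.CurrentPeriodSyncSubcommitteeIndices(st, valIdx)
	if err != nil {
		return false, nil, err
	}
	return true, positions, nil
}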
|
||||
|
||||
// This computes proposer indices of the current epoch and returns a list of proposer indices,
|
||||
|
||||
@@ -2,15 +2,16 @@ package helpers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -35,7 +36,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
Slot: 200,
|
||||
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
@@ -91,7 +92,7 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
|
||||
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
|
||||
ClearCache()
|
||||
epoch := types.Epoch(1)
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: 0, // Epoch 0.
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -111,7 +112,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
@@ -142,7 +143,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
@@ -219,7 +220,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
@@ -243,7 +244,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
@@ -280,7 +281,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: activeRoots,
|
||||
})
|
||||
@@ -368,38 +369,6 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestShuffledIndices_ShuffleRightLength(t *testing.T) {
|
||||
valiatorCount := 1000
|
||||
validators := make([]*ethpb.Validator, valiatorCount)
|
||||
indices := make([]uint64, valiatorCount)
|
||||
for i := 0; i < valiatorCount; i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
indices[i] = uint64(i)
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Test for current epoch
|
||||
shuffledIndices, err := ShuffledIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, valiatorCount, len(shuffledIndices), "Incorrect shuffled indices count")
|
||||
if reflect.DeepEqual(indices, shuffledIndices) {
|
||||
t.Error("Shuffling did not happen")
|
||||
}
|
||||
|
||||
// Test for next epoch
|
||||
shuffledIndices, err = ShuffledIndices(state, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, valiatorCount, len(shuffledIndices), "Incorrect shuffled indices count")
|
||||
if reflect.DeepEqual(indices, shuffledIndices) {
|
||||
t.Error("Shuffling did not happen")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
|
||||
ClearCache()
|
||||
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
|
||||
@@ -407,11 +376,12 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
|
||||
indices := make([]types.ValidatorIndex, validatorCount)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < validatorCount; i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: 1,
|
||||
}
|
||||
indices[i] = i
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -426,6 +396,13 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
|
||||
indices, err = committeeCache.Committee(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
|
||||
|
||||
// Total active balance should be `MinGenesisActiveValidatorCount` given each validator has effective balance of 1.
|
||||
seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
balance, err := committeeCache.ActiveBalance(seed)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, validatorCount, balance)
|
||||
}
|
||||
|
||||
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
@@ -435,7 +412,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -469,7 +446,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -503,7 +480,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -538,7 +515,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -573,7 +550,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -610,7 +587,7 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
@@ -635,7 +612,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -659,3 +636,373 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
|
||||
}
|
||||
assert.DeepEqual(t, wantedProposerIndices, proposerIndices, "Did not precompute proposer indices correctly")
|
||||
}
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ClearCache()
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ClearCache()
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ClearCache()
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
root, err := syncPeriodBoundaryRoot(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test that cache was empty.
|
||||
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), 0)
|
||||
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
|
||||
|
||||
// Test that helper can retrieve the index given empty cache.
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
|
||||
|
||||
// Test that cache was able to fill on miss.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ClearCache()
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: k,
|
||||
}
|
||||
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = UpdateSyncCommitteeCache(state)
|
||||
require.ErrorContains(t, "not at the end of the epoch to update cache", err)
|
||||
|
||||
state, err = v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch - 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = UpdateSyncCommitteeCache(state)
|
||||
require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
|
||||
}
|
||||
|
||||
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = UpdateSyncCommitteeCache(state)
|
||||
require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
|
||||
}
|
||||
|
||||
@@ -47,8 +47,11 @@ func TotalBalance(state state.ReadOnlyValidators, indices []types.ValidatorIndex
|
||||
// """
|
||||
// return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state))))
|
||||
func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
|
||||
total := uint64(0)
|
||||
// Check if the active balance exists in cache.
|
||||
epoch := SlotToEpoch(s.Slot())
|
||||
|
||||
// Cache miss. Manually compute the active balance and fill the cache.
|
||||
total := uint64(0)
|
||||
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if IsActiveValidatorUsingTrie(val, epoch) {
|
||||
total += val.EffectiveBalance()
|
||||
@@ -57,6 +60,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
|
||||
}); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return total, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -4,16 +4,16 @@ import (
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestTotalBalance_OK(t *testing.T) {
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
|
||||
{EffectiveBalance: 32 * 1e9}, {EffectiveBalance: 40 * 1e9},
|
||||
}})
|
||||
@@ -26,7 +26,7 @@ func TestTotalBalance_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{}})
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{Validators: []*ethpb.Validator{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
balance := TotalBalance(state, []types.ValidatorIndex{})
|
||||
@@ -35,7 +35,7 @@ func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTotalActiveBalance_OK(t *testing.T) {
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{
|
||||
EffectiveBalance: 32 * 1e9,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -55,6 +55,14 @@ func TestTotalActiveBalance_OK(t *testing.T) {
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Validate that cache miss to start with.
|
||||
epoch := SlotToEpoch(state.Slot())
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
_, err = committeeCache.ActiveBalance(seed)
|
||||
require.Equal(t, cache.ErrNonCommitteeKey, err)
|
||||
|
||||
// Validate manual calculation passes.
|
||||
balance, err := TotalActiveBalance(state)
|
||||
assert.NoError(t, err)
|
||||
wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
|
||||
@@ -74,7 +82,7 @@ func TestGetBalance_OK(t *testing.T) {
|
||||
{i: 2, b: []uint64{0, 0, 0}},
|
||||
}
|
||||
for _, test := range tests {
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{Balances: test.b})
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{Balances: test.b})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.b[test.i], state.Balances()[test.i], "Incorrect Validator balance")
|
||||
}
|
||||
@@ -92,7 +100,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
|
||||
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
|
||||
}
|
||||
for _, test := range tests {
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},
|
||||
Balances: test.b,
|
||||
@@ -116,7 +124,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
|
||||
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
|
||||
}
|
||||
for _, test := range tests {
|
||||
state, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 3}},
|
||||
Balances: test.b,
|
||||
@@ -179,7 +187,7 @@ func TestIsInInactivityLeak(t *testing.T) {
|
||||
assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
|
||||
}
|
||||
|
||||
func buildState(slot types.Slot, validatorCount uint64) *statepb.BeaconState {
|
||||
func buildState(slot types.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -205,7 +213,7 @@ func buildState(slot types.Slot, validatorCount uint64) *statepb.BeaconState {
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
return &statepb.BeaconState{
|
||||
return ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Balances: validatorBalances,
|
||||
Validators: validators,
|
||||
|
||||
@@ -208,8 +208,8 @@ func PrevSlot(slot types.Slot) types.Slot {
// Spec code:
// def compute_sync_committee_period(epoch: Epoch) -> uint64:
//    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
func SyncCommitteePeriod(e types.Epoch) uint64 {
return uint64(e / params.BeaconConfig().EpochsPerSyncCommitteePeriod)
func SyncCommitteePeriod(epoch types.Epoch) uint64 {
return uint64(epoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod)
}

// SyncCommitteePeriodStartEpoch returns the start epoch of a sync committee period.

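The parameter rename keeps the Go code aligned with the spec pseudocode above: a sync committee period is just the epoch divided by EPOCHS_PER_SYNC_COMMITTEE_PERIOD, and the period's start epoch is that quotient multiplied back out. A minimal standalone sketch of the arithmetic (256 is the mainnet value and is assumed here, not taken from the diff):

package main

import "fmt"

// epochsPerSyncCommitteePeriod mirrors EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
// 256 is the mainnet value and is assumed here for illustration.
const epochsPerSyncCommitteePeriod uint64 = 256

// syncCommitteePeriod returns the sync committee period an epoch falls in.
func syncCommitteePeriod(epoch uint64) uint64 {
	return epoch / epochsPerSyncCommitteePeriod
}

// syncCommitteePeriodStartEpoch returns the first epoch of that period.
func syncCommitteePeriodStartEpoch(epoch uint64) uint64 {
	return syncCommitteePeriod(epoch) * epochsPerSyncCommitteePeriod
}

func main() {
	fmt.Println(syncCommitteePeriod(300))           // 1
	fmt.Println(syncCommitteePeriodStartEpoch(300)) // 256
}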
@@ -192,6 +192,7 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
got, err := helpers.IsWithinWeakSubjectivityPeriod(tt.epoch, tt.genWsState(), tt.genWsCheckpoint())
|
||||
if tt.wantedErr != "" {
|
||||
assert.Equal(t, false, got)
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
@@ -43,6 +44,7 @@ go_library(
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -56,6 +58,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"altair_transition_no_verify_sig_test.go",
|
||||
"benchmarks_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"state_fuzz_test.go",
|
||||
@@ -71,8 +74,10 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 3,
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -81,6 +86,7 @@ go_test(
|
||||
"//shared/benchutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/copyutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
|
||||
221
beacon-chain/core/state/altair_transition_no_verify_sig_test.go
Normal file
@@ -0,0 +1,221 @@
|
||||
package state_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/copyutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositCount: 100,
|
||||
DepositRoot: bytesutil.PadTo([]byte{2}, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
}
|
||||
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
|
||||
e := beaconState.Eth1Data()
|
||||
e.DepositCount = 100
|
||||
require.NoError(t, beaconState.SetEth1Data(e))
|
||||
bh := beaconState.LatestBlockHeader()
|
||||
bh.Slot = beaconState.Slot()
|
||||
require.NoError(t, beaconState.SetLatestBlockHeader(bh))
|
||||
require.NoError(t, beaconState.SetEth1DataVotes([]*ethpb.Eth1Data{eth1Data}))
|
||||
|
||||
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+1))
|
||||
epoch := helpers.CurrentEpoch(beaconState)
|
||||
randaoReveal, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
|
||||
|
||||
nextSlotState, err := core.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
|
||||
require.NoError(t, err)
|
||||
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(nextSlotState)
|
||||
require.NoError(t, err)
|
||||
block := testutil.NewBeaconBlockAltair()
|
||||
block.Block.ProposerIndex = proposerIdx
|
||||
block.Block.Slot = beaconState.Slot() + 1
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
block.Block.Body.RandaoReveal = randaoReveal
|
||||
block.Block.Body.Eth1Data = eth1Data
|
||||
|
||||
syncBits := bitfield.NewBitvector512()
|
||||
for i := range syncBits {
|
||||
syncBits[i] = 0xff
|
||||
}
|
||||
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
h := copyutil.CopyBeaconBlockHeader(beaconState.LatestBlockHeader())
|
||||
prevStateRoot, err := beaconState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
h.StateRoot = prevStateRoot[:]
|
||||
pbr, err := h.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
syncSigs := make([]bls.Signature, len(indices))
|
||||
for i, indice := range indices {
|
||||
b := p2pType.SSZBytes(pbr[:])
|
||||
sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
syncSigs[i] = sig
|
||||
}
|
||||
aggregatedSig := bls.AggregateSignatures(syncSigs).Marshal()
|
||||
syncAggregate := ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: syncBits,
|
||||
SyncCommitteeSignature: aggregatedSig,
|
||||
}
|
||||
block.Block.Body.SyncAggregate = syncAggregate
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := core.CalculateStateRoot(context.Background(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
|
||||
c := beaconState.Copy()
|
||||
sig, err := testutil.BlockSignatureAltair(c, block.Block, privKeys)
|
||||
require.NoError(t, err)
|
||||
block.Signature = sig.Marshal()
|
||||
|
||||
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
set, _, err := core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, "Could not verify signature set")
|
||||
}
|
||||
|
||||
func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositCount: 100,
|
||||
DepositRoot: bytesutil.PadTo([]byte{2}, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
}
|
||||
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
|
||||
e := beaconState.Eth1Data()
|
||||
e.DepositCount = 100
|
||||
require.NoError(t, beaconState.SetEth1Data(e))
|
||||
bh := beaconState.LatestBlockHeader()
|
||||
bh.Slot = beaconState.Slot()
|
||||
require.NoError(t, beaconState.SetLatestBlockHeader(bh))
|
||||
require.NoError(t, beaconState.SetEth1DataVotes([]*ethpb.Eth1Data{eth1Data}))
|
||||
|
||||
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+1))
|
||||
epoch := helpers.CurrentEpoch(beaconState)
|
||||
randaoReveal, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
|
||||
|
||||
nextSlotState, err := core.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
|
||||
require.NoError(t, err)
|
||||
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(nextSlotState)
|
||||
require.NoError(t, err)
|
||||
block := testutil.NewBeaconBlockAltair()
|
||||
block.Block.ProposerIndex = proposerIdx
|
||||
block.Block.Slot = beaconState.Slot() + 1
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
block.Block.Body.RandaoReveal = randaoReveal
|
||||
block.Block.Body.Eth1Data = eth1Data
|
||||
|
||||
syncBits := bitfield.NewBitvector512()
|
||||
for i := range syncBits {
|
||||
syncBits[i] = 0xff
|
||||
}
|
||||
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
h := copyutil.CopyBeaconBlockHeader(beaconState.LatestBlockHeader())
|
||||
prevStateRoot, err := beaconState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
h.StateRoot = prevStateRoot[:]
|
||||
pbr, err := h.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
syncSigs := make([]bls.Signature, len(indices))
|
||||
for i, indice := range indices {
|
||||
b := p2pType.SSZBytes(pbr[:])
|
||||
sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
syncSigs[i] = sig
|
||||
}
|
||||
aggregatedSig := bls.AggregateSignatures(syncSigs).Marshal()
|
||||
syncAggregate := ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: syncBits,
|
||||
SyncCommitteeSignature: aggregatedSig,
|
||||
}
|
||||
block.Block.Body.SyncAggregate = syncAggregate
|
||||
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := core.CalculateStateRoot(context.Background(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
block.Block.StateRoot = stateRoot[:]
|
||||
|
||||
c := beaconState.Copy()
|
||||
sig, err := testutil.BlockSignatureAltair(c, block.Block, privKeys)
|
||||
require.NoError(t, err)
|
||||
block.Signature = sig.Marshal()
|
||||
|
||||
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, _, err = core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, "could not validate state root", err)
|
||||
}
|
||||
|
||||
func TestExecuteStateTransitionNoVerifyAnySig_PassesProcessingConditions(t *testing.T) {
|
||||
beaconState, block := createFullAltairBlockWithOperations(t)
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
set, _, err := core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
// Test Signature set verifies.
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, "Could not verify signature set")
|
||||
}
|
||||
|
||||
func createFullAltairBlockWithOperations(t *testing.T) (state.BeaconStateAltair,
|
||||
*ethpb.SignedBeaconBlockAltair) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 32)
|
||||
sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
|
||||
tState := beaconState.Copy()
|
||||
blk, err := testutil.GenerateFullBlockAltair(tState, privKeys,
|
||||
&testutil.BlockGenConfig{NumAttestations: 1, NumVoluntaryExits: 0, NumDeposits: 0}, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
return beaconState, blk
|
||||
}
|
||||
@@ -19,7 +19,7 @@ var SkipSlotCache = cache.NewSkipSlotCache()
// The key for skip slot cache is mixed between state root and state slot.
// state root is in the mix to defend against different forks with same skip slots
// to hit the same cache. We don't want beacon states mixed up between different chains.
func cacheKey(ctx context.Context, state state.ReadOnlyBeaconState) ([32]byte, error) {
func CacheKey(ctx context.Context, state state.ReadOnlyBeaconState) ([32]byte, error) {
bh := state.LatestBlockHeader()
if bh == nil {
return [32]byte{}, errors.New("block head in state can't be nil")

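Exporting CacheKey lets callers outside the package compute the same skip-slot-cache key. Conceptually the key mixes the latest block header (hence the state root) with the slot, so two forks with identical skip slots never collide. A standalone sketch of that idea only; the real helper works on prysm's state objects, so the inputs below are simplified assumptions:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// cacheKey mixes a state root with a slot so that states from different
// forks (same skip slots, different roots) never share a cache entry.
func cacheKey(stateRoot [32]byte, slot uint64) [32]byte {
	var slotBytes [8]byte
	binary.LittleEndian.PutUint64(slotBytes[:], slot)
	return sha256.Sum256(append(stateRoot[:], slotBytes[:]...))
}

func main() {
	rootA := sha256.Sum256([]byte("fork-a"))
	rootB := sha256.Sum256([]byte("fork-b"))
	fmt.Println(cacheKey(rootA, 42) == cacheKey(rootB, 42)) // false: different forks, same slot
}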
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
e "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -183,7 +185,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
}

highestSlot := state.Slot()
key, err := cacheKey(ctx, state)
key, err := CacheKey(ctx, state)
if err != nil {
return nil, err
}
@@ -234,16 +236,35 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
return nil, errors.Wrap(err, "could not process slot")
}
if CanProcessEpoch(state) {
state, err = ProcessEpochPrecompute(ctx, state)
if err != nil {
traceutil.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch with optimizations")
switch state.Version() {
case version.Phase0:
state, err = ProcessEpochPrecompute(ctx, state)
if err != nil {
traceutil.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch with optimizations")
}
case version.Altair:
state, err = altair.ProcessEpoch(ctx, state)
if err != nil {
traceutil.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch")
}
default:
return nil, errors.New("beacon state should have a version")
}
}
if err := state.SetSlot(state.Slot() + 1); err != nil {
traceutil.AnnotateError(span, err)
return nil, errors.Wrap(err, "failed to increment state slot")
}

// Transition to Altair state.
if helpers.IsEpochStart(state.Slot()) && helpers.SlotToEpoch(state.Slot()) == params.BeaconConfig().AltairForkEpoch {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
return nil, err
}
}
}

if highestSlot < state.Slot() {

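ProcessSlots now picks the epoch-processing routine from the state's version and, after bumping the slot, upgrades a phase 0 state to Altair exactly at the configured fork epoch. A toy sketch of that control flow; the constants, state type and empty processing branches are stand-ins, not the prysm APIs:

package main

import (
	"errors"
	"fmt"
)

const (
	phase0 = iota
	altair
)

const slotsPerEpoch, altairForkEpoch uint64 = 32, 5

// miniState is a toy stand-in for the beacon state used by this sketch.
type miniState struct {
	version int
	slot    uint64
}

func processSlots(st *miniState, target uint64) (*miniState, error) {
	for st.slot < target {
		// At an epoch boundary, dispatch on the state version.
		if (st.slot+1)%slotsPerEpoch == 0 {
			switch st.version {
			case phase0:
				// phase 0 epoch processing would run here.
			case altair:
				// altair epoch processing would run here.
			default:
				return nil, errors.New("beacon state should have a version")
			}
		}
		st.slot++
		// Upgrade to Altair exactly at the start of the fork epoch.
		if st.slot%slotsPerEpoch == 0 && st.slot/slotsPerEpoch == altairForkEpoch {
			st.version = altair
		}
	}
	return st, nil
}

func main() {
	st, err := processSlots(&miniState{version: phase0}, altairForkEpoch*slotsPerEpoch+1)
	fmt.Println(st.version == altair, err) // true <nil>
}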
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state/interop"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -150,6 +152,16 @@ func CalculateStateRoot(
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block")
}
if signed.Version() == version.Altair {
sa, err := signed.Block().Body().SyncAggregate()
if err != nil {
return [32]byte{}, err
}
state, err = altair.ProcessSyncAggregate(state, sa)
if err != nil {
return [32]byte{}, err
}
}

return state.HashTreeRoot(ctx)
}
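For Altair blocks the sync aggregate from the block body is applied before the state root is hashed. An aggregate is essentially a participation bitvector plus one aggregated BLS signature; the sketch below only shows the participation count that the processing revolves around (512 is the mainnet SYNC_COMMITTEE_SIZE and is assumed here; reward bookkeeping is omitted):

package main

import (
	"fmt"
	"math/bits"
)

// syncCommitteeSize mirrors SYNC_COMMITTEE_SIZE; 512 is the mainnet value
// and is assumed here for illustration.
const syncCommitteeSize = 512

// participantCount counts the set bits of a sync committee bitvector,
// which is what drives participant and proposer rewards when the
// aggregate is applied to the state.
func participantCount(bitsVec []byte) int {
	n := 0
	for _, b := range bitsVec {
		n += bits.OnesCount8(b)
	}
	return n
}

func main() {
	full := make([]byte, syncCommitteeSize/8)
	for i := range full {
		full[i] = 0xff
	}
	fmt.Println(participantCount(full)) // 512
}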
@@ -182,6 +194,16 @@ func ProcessBlockNoVerifyAnySig(
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if signed.Version() == version.Altair {
|
||||
sa, err := signed.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
state, err = altair.ProcessSyncAggregate(state, sa)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
bSet, err := b.BlockSignatureSet(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
|
||||
if err != nil {
|
||||
@@ -240,22 +262,22 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
return nil, errors.Wrap(err, "could not verify operation lengths")
|
||||
}
|
||||
|
||||
state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block proposer slashings")
|
||||
}
|
||||
state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block attester slashings")
|
||||
}
|
||||
state, err = b.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block attestations")
|
||||
}
|
||||
state, err = b.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block validator deposits")
|
||||
var err error
|
||||
switch signedBeaconBlock.Version() {
|
||||
case version.Phase0:
|
||||
state, err = phase0Operations(ctx, state, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case version.Altair:
|
||||
state, err = altairOperations(ctx, state, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("block does not have correct version")
|
||||
}
|
||||
|
||||
state, err = b.ProcessVoluntaryExits(ctx, state, signedBeaconBlock.Block().Body().VoluntaryExits())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process validator exits")
|
||||
@@ -309,3 +331,43 @@ func ProcessBlockForStateRoot(

return state, nil
}

// This calls altair specific block operations.
func altairOperations(
ctx context.Context,
state state.BeaconState,
signedBeaconBlock block.SignedBeaconBlock) (state.BeaconState, error) {
state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), altair.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process block proposer slashings")
}
state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), altair.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process block attester slashings")
}
state, err = altair.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
if err != nil {
return nil, errors.Wrap(err, "could not process block attestations")
}
return altair.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits())
}

// This calls phase 0 specific block operations.
func phase0Operations(
ctx context.Context,
state state.BeaconStateAltair,
signedBeaconBlock block.SignedBeaconBlock) (state.BeaconState, error) {
state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process block proposer slashings")
}
state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process block attester slashings")
}
state, err = b.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
if err != nil {
return nil, errors.Wrap(err, "could not process block attestations")
}
return b.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits())
}

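Both helpers run the same pipeline, differing only in which SlashValidator and attestation/deposit routines they plug in. A standalone sketch of the pattern, applying operations in order and wrapping the first failure with the step name; the toy state and step functions are placeholders:

package main

import (
	"errors"
	"fmt"
)

// op is one block operation applied to a (toy) state value.
type op struct {
	name string
	fn   func(int) (int, error)
}

// runOps applies operations in order, wrapping the first failure with the
// operation name, mirroring how each step wraps its error in the diff.
func runOps(state int, ops []op) (int, error) {
	var err error
	for _, o := range ops {
		state, err = o.fn(state)
		if err != nil {
			return 0, fmt.Errorf("could not process %s: %w", o.name, err)
		}
	}
	return state, nil
}

func main() {
	altairOps := []op{
		{"block proposer slashings", func(s int) (int, error) { return s + 1, nil }},
		{"block attester slashings", func(s int) (int, error) { return s + 1, nil }},
		{"block attestations", func(s int) (int, error) { return 0, errors.New("bad bitfield") }},
		{"block validator deposits", func(s int) (int, error) { return s + 1, nil }},
	}
	_, err := runOps(0, altairOps)
	fmt.Println(err) // could not process block attestations: bad bitfield
}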
@@ -26,6 +26,7 @@ go_library(
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
@@ -59,23 +60,24 @@ const testSkipPowFlag = "test-skip-pow"
|
||||
// full PoS node. It handles the lifecycle of the entire system and registers
|
||||
// services to a service registry.
|
||||
type BeaconNode struct {
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *shared.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
depositCache *depositcache.DepositCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
forkChoiceStore forkchoice.ForkChoicer
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *shared.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
depositCache *depositcache.DepositCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
forkChoiceStore forkchoice.ForkChoicer
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -95,21 +97,25 @@ func New(cliCtx *cli.Context) (*BeaconNode, error) {
|
||||
configureNetwork(cliCtx)
|
||||
configureInteropConfig(cliCtx)
|
||||
|
||||
// Initializes any forks here.
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
registry := shared.NewServiceRegistry()
|
||||
|
||||
ctx, cancel := context.WithCancel(cliCtx.Context)
|
||||
beacon := &BeaconNode{
|
||||
cliCtx: cliCtx,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
services: registry,
|
||||
stop: make(chan struct{}),
|
||||
stateFeed: new(event.Feed),
|
||||
blockFeed: new(event.Feed),
|
||||
opFeed: new(event.Feed),
|
||||
attestationPool: attestations.NewPool(),
|
||||
exitPool: voluntaryexits.NewPool(),
|
||||
slashingsPool: slashings.NewPool(),
|
||||
cliCtx: cliCtx,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
services: registry,
|
||||
stop: make(chan struct{}),
|
||||
stateFeed: new(event.Feed),
|
||||
blockFeed: new(event.Feed),
|
||||
opFeed: new(event.Feed),
|
||||
attestationPool: attestations.NewPool(),
|
||||
exitPool: voluntaryexits.NewPool(),
|
||||
slashingsPool: slashings.NewPool(),
|
||||
syncCommitteePool: synccommittee.NewPool(),
|
||||
}
|
||||
|
||||
depositAddress, err := registration.DepositContractAddress()
|
||||
@@ -500,6 +506,7 @@ func (b *BeaconNode) registerSyncService() error {
AttPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingPool: b.slashingsPool,
SyncCommsPool: b.syncCommitteePool,
StateGen: b.stateGen,
})

@@ -588,6 +595,7 @@ func (b *BeaconNode) registerRPCService() error {
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,
SyncCommitteeObjectPool: b.syncCommitteePool,
POWChainService: web3Service,
ChainStartFetcher: chainStartFetcher,
MockEth1Votes: mockEth1DataVotes,

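The node now owns a single sync committee message pool and hands it to both the sync service and the RPC service. As a rough mental model only, such a pool is a mutex-guarded, slot-indexed store; the sketch below is a toy and not the synccommittee package's actual API:

package main

import (
	"fmt"
	"sync"
)

// message is a trimmed-down sync committee message for the sketch.
type message struct {
	Slot           uint64
	ValidatorIndex uint64
}

// pool is a toy, mutex-guarded slot-indexed store.
type pool struct {
	mu   sync.RWMutex
	msgs map[uint64][]*message
}

func newPool() *pool { return &pool{msgs: map[uint64][]*message{}} }

func (p *pool) save(m *message) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.msgs[m.Slot] = append(p.msgs[m.Slot], m)
}

func (p *pool) bySlot(slot uint64) []*message {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.msgs[slot]
}

func main() {
	p := newPool()
	p.save(&message{Slot: 7, ValidatorIndex: 3})
	fmt.Println(len(p.bySlot(7))) // 1
}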
@@ -11,6 +11,7 @@ go_library(
|
||||
"discovery.go",
|
||||
"doc.go",
|
||||
"fork.go",
|
||||
"fork_watcher.go",
|
||||
"gossip_scoring_params.go",
|
||||
"gossip_topic_mappings.go",
|
||||
"handshake.go",
|
||||
@@ -39,6 +40,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -69,8 +71,10 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_ipfs_go_ipfs_addr//:go_default_library",
|
||||
"@com_github_kevinms_leakybucket_go//:go_default_library",
|
||||
"@com_github_kr_pretty//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//config:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library",
|
||||
@@ -148,6 +152,7 @@ go_test(
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"//shared/timeutils:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
|
||||
@@ -7,8 +7,12 @@ import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
ssz "github.com/ferranbt/fastssz"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
@@ -20,7 +24,8 @@ import (
// GossipTypeMapping.
var ErrMessageNotMapped = errors.New("message type is not mapped to a PubSub topic")

// Broadcast a message to the p2p network.
// Broadcasts a message to the p2p network, the message is assumed to be
// broadcasted to the current fork.
func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
ctx, span := trace.StartSpan(ctx, "p2p.Broadcast")
defer span.End()
@@ -41,10 +46,15 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
traceutil.AnnotateError(span, ErrMessageNotMapped)
return ErrMessageNotMapped
}
return s.broadcastObject(ctx, msg, fmt.Sprintf(topic, forkDigest))
castMsg, ok := msg.(ssz.Marshaler)
if !ok {
return errors.Errorf("message of %T does not support marshaller interface", msg)
}
return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))
}

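Broadcast now rejects anything that is not SSZ-marshalable before it ever reaches the encoder, instead of failing deep inside broadcastObject. A standalone sketch of that guard; the one-method interface stands in for fastssz's Marshaler, which carries additional methods:

package main

import (
	"fmt"
)

// sszMarshaler mirrors the method the broadcast path relies on; the real
// constraint is fastssz's Marshaler interface.
type sszMarshaler interface {
	MarshalSSZ() ([]byte, error)
}

type ping uint64

func (p ping) MarshalSSZ() ([]byte, error) { return []byte{byte(p)}, nil }

// broadcast rejects anything that cannot be SSZ-marshaled up front instead
// of failing later inside the encoder, which is what the type assertion in
// Broadcast now does.
func broadcast(msg interface{}) error {
	m, ok := msg.(sszMarshaler)
	if !ok {
		return fmt.Errorf("message of %T does not support marshaller interface", msg)
	}
	b, err := m.MarshalSSZ()
	if err != nil {
		return err
	}
	fmt.Printf("publishing %d byte(s)\n", len(b))
	return nil
}

func main() {
	fmt.Println(broadcast(ping(7)))   // prints "publishing 1 byte(s)" then <nil>
	fmt.Println(broadcast("not-ssz")) // error: message of string does not support marshaller interface
}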
// BroadcastAttestation broadcasts an attestation to the p2p network.
|
||||
// BroadcastAttestation broadcasts an attestation to the p2p network, the message is assumed to be
|
||||
// broadcasted to the current fork.
|
||||
func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *eth.Attestation) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastAttestation")
|
||||
defer span.End()
|
||||
@@ -61,6 +71,24 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage broadcasts a sync committee message to the p2p network, the message is
// assumed to be broadcasted to the current fork.
func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastSyncCommitteeMessage")
|
||||
defer span.End()
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not retrieve fork digest")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
|
||||
go s.broadcastSyncCommittee(ctx, subnet, sMsg, forkDigest)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *eth.Attestation, forkDigest [4]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
|
||||
defer span.End()
|
||||
@@ -100,6 +128,13 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
traceutil.AnnotateError(span, err)
}
}
// In the event our attestation is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
currSlot := helpers.CurrentSlot(uint64(s.genesisTime.Unix()))
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.Warnf("Attestation is too old to broadcast, discarding it. Current Slot: %d , Attestation Slot: %d", currSlot, att.Data.Slot)
return
}

if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast attestation")
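The new guard drops attestations more than one epoch behind the current slot rather than gossiping stale data. The check is plain slot arithmetic, sketched standalone below (32 slots per epoch is the mainnet value and is assumed here):

package main

import "fmt"

const slotsPerEpoch uint64 = 32 // mainnet value, assumed for illustration

// tooOldToBroadcast mirrors the new guard: anything older than one epoch
// behind the current slot is dropped instead of being gossiped.
func tooOldToBroadcast(attSlot, currSlot uint64) bool {
	return attSlot+slotsPerEpoch < currSlot
}

func main() {
	fmt.Println(tooOldToBroadcast(100, 120)) // false: still within an epoch
	fmt.Println(tooOldToBroadcast(100, 140)) // true: more than an epoch old
}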
@@ -107,8 +142,63 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
|
||||
oneSlot := time.Duration(1*params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
ctx, cancel := context.WithTimeout(ctx, oneSlot)
|
||||
defer cancel()
|
||||
|
||||
// Ensure we have peers with this subnet.
|
||||
// This adds in a special value to the subnet
|
||||
// to ensure that we can re-use the same subnet locker.
|
||||
wrappedSubIdx := subnet + syncLockerVal
|
||||
s.subnetLocker(wrappedSubIdx).RLock()
|
||||
hasPeer := s.hasPeerWithSubnet(syncCommitteeToTopic(subnet, forkDigest))
|
||||
s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
span.AddAttributes(
|
||||
trace.BoolAttribute("hasPeer", hasPeer),
|
||||
trace.Int64Attribute("slot", int64(sMsg.Slot)),
|
||||
trace.Int64Attribute("subnet", int64(subnet)),
|
||||
)
|
||||
|
||||
if !hasPeer {
|
||||
syncCommitteeBroadcastAttempts.Inc()
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
ok, err := s.FindPeersWithSubnet(ctx, syncCommitteeToTopic(subnet, forkDigest), subnet, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
savedSyncCommitteeBroadcasts.Inc()
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
}(); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers")
|
||||
traceutil.AnnotateError(span, err)
|
||||
}
|
||||
}
|
||||
// In the event our sync message is outdated and beyond the
|
||||
// acceptable threshold, we exit early and do not broadcast it.
|
||||
if err := altair.ValidateSyncMessageTime(sMsg.Slot, s.genesisTime, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
log.Warnf("Sync Committee Message is too old to broadcast, discarding it. %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, sMsg, syncCommitteeToTopic(subnet, forkDigest)); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast sync committee message")
|
||||
traceutil.AnnotateError(span, err)
|
||||
}
|
||||
}
|
||||
|
||||
// method to broadcast messages to other peers in our gossip mesh.
|
||||
func (s *Service) broadcastObject(ctx context.Context, obj interface{}, topic string) error {
|
||||
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastObject")
|
||||
defer span.End()
|
||||
|
||||
@@ -126,7 +216,6 @@ func (s *Service) broadcastObject(ctx context.Context, obj interface{}, topic st
|
||||
messageLen := int64(buf.Len())
|
||||
span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/)
|
||||
}
|
||||
|
||||
if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
traceutil.AnnotateError(span, err)
|
||||
@@ -138,3 +227,7 @@ func (s *Service) broadcastObject(ctx context.Context, obj interface{}, topic st
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

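Subnet topics are plain format strings filled with the fork digest and the subnet index, which is why sync committee messages only needed a second helper. A standalone sketch; the format constants below mirror the shape of the real topic constants but are assumptions, not quotes from the diff:

package main

import "fmt"

// Format strings of the same shape as the attestation and sync committee
// subnet topic constants; the exact prysm constants are assumed, not quoted.
const (
	attSubnetTopicFormat  = "/eth2/%x/beacon_attestation_%d"
	syncSubnetTopicFormat = "/eth2/%x/sync_committee_%d"
)

func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
	return fmt.Sprintf(attSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
	return fmt.Sprintf(syncSubnetTopicFormat, forkDigest, subnet)
}

func main() {
	digest := [4]byte{0xaf, 0xca, 0xba, 0x0e}
	fmt.Println(attestationToTopic(5, digest))   // /eth2/afcaba0e/beacon_attestation_5
	fmt.Println(syncCommitteeToTopic(5, digest)) // /eth2/afcaba0e/sync_committee_5
}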
@@ -18,8 +18,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
testpb "github.com/prysmaticlabs/prysm/proto/testing"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
@@ -46,7 +46,7 @@ func TestService_Broadcast(t *testing.T) {
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
}
|
||||
|
||||
msg := &statepb.Fork{
|
||||
msg := ðpb.Fork{
|
||||
Epoch: 55,
|
||||
CurrentVersion: []byte("fooo"),
|
||||
PreviousVersion: []byte("barr"),
|
||||
@@ -77,7 +77,7 @@ func TestService_Broadcast(t *testing.T) {
|
||||
incomingMessage, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := &statepb.Fork{}
|
||||
result := ðpb.Fork{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
|
||||
if !proto.Equal(result, msg) {
|
||||
tt.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg)
|
||||
@@ -365,3 +365,66 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
t.Error("Failed to receive pubsub within 4s")
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_BroadcastSyncCommittee(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) == 0 {
|
||||
t.Fatal("No peers")
|
||||
}
|
||||
|
||||
p := &Service{
|
||||
host: p1.BHost,
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: &Config{},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
}
|
||||
|
||||
msg := testutil.HydrateSyncCommittee(&pb.SyncCommitteeMessage{})
|
||||
subnet := uint64(5)
|
||||
|
||||
topic := SyncCommitteeSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = topic
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest, subnet)
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func(tt *testing.T) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
incomingMessage, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := &pb.SyncCommitteeMessage{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
|
||||
if !proto.Equal(result, msg) {
|
||||
tt.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg)
|
||||
}
|
||||
}(t)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
require.NoError(t, p.BroadcastSyncCommitteeMessage(context.Background(), subnet, msg))
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Error("Failed to receive pubsub within 1s")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,9 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
)
|
||||
|
||||
// Listener defines the discovery V5 network interface that is used
|
||||
@@ -36,7 +39,7 @@ type Listener interface {
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
func (s *Service) RefreshENR() {
|
||||
// return early if discv5 isnt running
|
||||
if s.dv5Listener == nil {
|
||||
if s.dv5Listener == nil || !s.isInitialized() {
|
||||
return
|
||||
}
|
||||
bitV := bitfield.NewBitvector64()
|
||||
@@ -44,16 +47,43 @@ func (s *Service) RefreshENR() {
for _, idx := range committees {
bitV.SetBitAt(idx, true)
}
currentBitV, err := bitvector(s.dv5Listener.Self().Record())
currentBitV, err := attBitvector(s.dv5Listener.Self().Record())
if err != nil {
log.Errorf("Could not retrieve bitfield: %v", err)
log.Errorf("Could not retrieve att bitfield: %v", err)
return
}
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
// Compare current epoch with our fork epochs
currEpoch := helpers.SlotToEpoch(helpers.CurrentSlot(uint64(s.genesisTime.Unix())))
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
switch {
// Altair Behaviour
case currEpoch >= altairForkEpoch:
// Retrieve sync subnets from application level
// cache.
bitS := bitfield.Bitvector4{byte(0x00)}
committees = cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
for _, idx := range committees {
bitS.SetBitAt(idx, true)
}
currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
if err != nil {
log.Errorf("Could not retrieve sync bitfield: %v", err)
return
}
if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
s.Metadata().Version() == version.Altair {
// return early if bitfields haven't changed
return
}
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
default:
// Phase 0 behaviour.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
s.updateSubnetRecordWithMetadata(bitV)
}
s.updateSubnetRecordWithMetadata(bitV)
// ping all peers to inform them of new metadata
s.pingPeers()
}
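After the fork, RefreshENR must keep two bitfields (attnets and syncnets) plus the metadata version in sync, whereas before the fork only the attestation bitfield mattered. The decision itself reduces to byte-slice comparisons, sketched standalone below with plain []byte bitfields:

package main

import (
	"bytes"
	"fmt"
)

// needsUpdate reports whether the node's ENR/metadata must be refreshed:
// after the Altair fork both the attestation and sync committee bitfields
// (and the metadata version) have to match, before the fork only the
// attestation bitfield does.
func needsUpdate(postAltair bool, attnets, currAttnets, syncnets, currSyncnets []byte, versionIsAltair bool) bool {
	if postAltair {
		return !(bytes.Equal(attnets, currAttnets) && bytes.Equal(syncnets, currSyncnets) && versionIsAltair)
	}
	return !bytes.Equal(attnets, currAttnets)
}

func main() {
	att := []byte{0x0e, 0, 0, 0, 0, 0, 0, 0}
	fmt.Println(needsUpdate(true, att, att, []byte{0x03}, []byte{0x00}, true)) // true: syncnets changed
	fmt.Println(needsUpdate(false, att, att, nil, nil, false))                 // false: attnets unchanged
}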
@@ -206,7 +236,8 @@ func (s *Service) createLocalNode(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
|
||||
}
|
||||
return intializeAttSubnets(localNode), nil
|
||||
localNode = initializeAttSubnets(localNode)
|
||||
return initializeSyncCommSubnets(localNode), nil
|
||||
}
|
||||
|
||||
func (s *Service) startDiscoveryV5(
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
@@ -33,8 +34,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/iputils"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -340,3 +343,163 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
|
||||
}))
|
||||
return id
|
||||
}
|
||||
|
||||
func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
// Clean up caches after usage.
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
svcBuilder func(t *testing.T) *Service
|
||||
postValidation func(t *testing.T, s *Service)
|
||||
}{
|
||||
{
|
||||
name: "metadata no change",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.DeepEqual(t, bitfield.NewBitvector64(), s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]byte{'A'}, []uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated at fork epoch",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]byte{'A'}, []uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated at fork epoch with no bitfield",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated past fork epoch with bitfields",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-6 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]byte{'A'}, []uint64{1, 2, 3, 23}, 0)
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'A'}, 0, []uint64{0, 1}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x03}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.svcBuilder(t)
|
||||
s.RefreshENR()
|
||||
tt.postValidation(t, s)
|
||||
s.dv5Listener.Close()
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,20 +2,22 @@ package encoder

import (
"io"

ssz "github.com/ferranbt/fastssz"
)

// NetworkEncoding represents an encoder compatible with Ethereum consensus p2p.
type NetworkEncoding interface {
// DecodeGossip to the provided gossip message. The interface must be a pointer to the decoding destination.
DecodeGossip([]byte, interface{}) error
DecodeGossip([]byte, ssz.Unmarshaler) error
// DecodeWithMaxLength a bytes from a reader with a varint length prefix. The interface must be a pointer to the
// decoding destination. The length of the message should not be more than the provided limit.
DecodeWithMaxLength(io.Reader, interface{}) error
DecodeWithMaxLength(io.Reader, ssz.Unmarshaler) error
// EncodeGossip an arbitrary gossip message to the provided writer. The interface must be a pointer object to encode.
EncodeGossip(io.Writer, interface{}) (int, error)
EncodeGossip(io.Writer, ssz.Marshaler) (int, error)
// EncodeWithMaxLength an arbitrary message to the provided writer with a varint length prefix. The interface must be
// a pointer object to encode. The encoded message should not be bigger than the provided limit.
EncodeWithMaxLength(io.Writer, interface{}) (int, error)
EncodeWithMaxLength(io.Writer, ssz.Marshaler) (int, error)
// ProtocolSuffix returns the last part of the protocol ID to indicate the encoding scheme.
ProtocolSuffix() string
}

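Tightening the interface from interface{} to fastssz's Marshaler/Unmarshaler moves the "does this type support SSZ?" check to compile time. The sketch below shows the same idea end to end with a toy fixed-size object and a varint length prefix; snappy framing and size limits are omitted, and the two one-method interfaces are simplified stand-ins for the real ones:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// Marshaler and Unmarshaler mirror the subset of fastssz's interfaces the
// diff switches to; the real interfaces carry a few more methods.
type Marshaler interface {
	MarshalSSZ() ([]byte, error)
}
type Unmarshaler interface {
	UnmarshalSSZ([]byte) error
}

// fork is a toy SSZ-style object: 4+4+8 fixed-size bytes.
type fork struct {
	Prev, Curr [4]byte
	Epoch      uint64
}

func (f *fork) MarshalSSZ() ([]byte, error) {
	b := make([]byte, 16)
	copy(b[0:4], f.Prev[:])
	copy(b[4:8], f.Curr[:])
	binary.LittleEndian.PutUint64(b[8:], f.Epoch)
	return b, nil
}

func (f *fork) UnmarshalSSZ(b []byte) error {
	if len(b) != 16 {
		return fmt.Errorf("wrong length %d", len(b))
	}
	copy(f.Prev[:], b[0:4])
	copy(f.Curr[:], b[4:8])
	f.Epoch = binary.LittleEndian.Uint64(b[8:])
	return nil
}

// encodeWithLength writes a varint length prefix followed by the SSZ bytes,
// the same framing idea as EncodeWithMaxLength (snappy compression omitted).
func encodeWithLength(w io.Writer, msg Marshaler) (int, error) {
	b, err := msg.MarshalSSZ()
	if err != nil {
		return 0, err
	}
	var prefix [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(prefix[:], uint64(len(b)))
	if _, err := w.Write(prefix[:n]); err != nil {
		return 0, err
	}
	return w.Write(b)
}

func decodeWithLength(r *bytes.Reader, to Unmarshaler) error {
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return err
	}
	buf := make([]byte, size)
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	return to.UnmarshalSSZ(buf)
}

func main() {
	var buf bytes.Buffer
	msg := &fork{Prev: [4]byte{'f', 'o', 'o', 'o'}, Curr: [4]byte{'b', 'a', 'r', 'r'}, Epoch: 9001}
	if _, err := encodeWithLength(&buf, msg); err != nil {
		panic(err)
	}
	out := &fork{}
	if err := decodeWithLength(bytes.NewReader(buf.Bytes()), out); err != nil {
		panic(err)
	}
	fmt.Println(out.Epoch) // 9001
}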
@@ -33,15 +33,12 @@ type SszNetworkEncoder struct{}
|
||||
// ProtocolSuffixSSZSnappy is the last part of the topic string to identify the encoding protocol.
|
||||
const ProtocolSuffixSSZSnappy = "ssz_snappy"
|
||||
|
||||
func (e SszNetworkEncoder) doEncode(msg interface{}) ([]byte, error) {
|
||||
if v, ok := msg.(fastssz.Marshaler); ok {
|
||||
return v.MarshalSSZ()
|
||||
}
|
||||
return nil, errors.Errorf("non-supported type: %T", msg)
|
||||
func (e SszNetworkEncoder) doEncode(msg fastssz.Marshaler) ([]byte, error) {
|
||||
return msg.MarshalSSZ()
|
||||
}
|
||||
|
||||
// EncodeGossip the proto gossip message to the io.Writer.
|
||||
func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg interface{}) (int, error) {
|
||||
func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg fastssz.Marshaler) (int, error) {
|
||||
if msg == nil {
|
||||
return 0, nil
|
||||
}
|
||||
@@ -58,7 +55,7 @@ func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg interface{}) (int, erro
|
||||
|
||||
// EncodeWithMaxLength the proto message to the io.Writer. This encoding prefixes the byte slice with a protobuf varint
|
||||
// to indicate the size of the message. This checks that the encoded message isn't larger than the provided max limit.
|
||||
func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}) (int, error) {
|
||||
func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg fastssz.Marshaler) (int, error) {
|
||||
if msg == nil {
|
||||
return 0, nil
|
||||
}
|
||||
@@ -81,15 +78,12 @@ func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}) (in
|
||||
return writeSnappyBuffer(w, b)
|
||||
}
|
||||
|
||||
func (e SszNetworkEncoder) doDecode(b []byte, to interface{}) error {
|
||||
if v, ok := to.(fastssz.Unmarshaler); ok {
|
||||
return v.UnmarshalSSZ(b)
|
||||
}
|
||||
return errors.Errorf("non-supported type: %T", to)
|
||||
func (e SszNetworkEncoder) doDecode(b []byte, to fastssz.Unmarshaler) error {
|
||||
return to.UnmarshalSSZ(b)
|
||||
}
|
||||
|
||||
// DecodeGossip decodes the bytes to the protobuf gossip message provided.
|
||||
func (e SszNetworkEncoder) DecodeGossip(b []byte, to interface{}) error {
|
||||
func (e SszNetworkEncoder) DecodeGossip(b []byte, to fastssz.Unmarshaler) error {
|
||||
b, err := DecodeSnappy(b, MaxGossipSize)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -115,7 +109,7 @@ func DecodeSnappy(msg []byte, maxSize uint64) ([]byte, error) {
|
||||
|
||||
// DecodeWithMaxLength the bytes from io.Reader to the protobuf message provided.
|
||||
// This checks that the decoded message isn't larger than the provided max limit.
|
||||
func (e SszNetworkEncoder) DecodeWithMaxLength(r io.Reader, to interface{}) error {
|
||||
func (e SszNetworkEncoder) DecodeWithMaxLength(r io.Reader, to fastssz.Unmarshaler) error {
|
||||
msgLen, err := readVarint(r)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
gogo "github.com/gogo/protobuf/proto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
@@ -26,7 +26,7 @@ func TestSszNetworkEncoder_RoundTrip(t *testing.T) {
|
||||
|
||||
func TestSszNetworkEncoder_FailsSnappyLength(t *testing.T) {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
att := &statepb.Fork{}
|
||||
att := ðpb.Fork{}
|
||||
data := make([]byte, 32)
|
||||
binary.PutUvarint(data, encoder.MaxGossipSize+32)
|
||||
err := e.DecodeGossip(data, att)
|
||||
@@ -35,14 +35,14 @@ func TestSszNetworkEncoder_FailsSnappyLength(t *testing.T) {
|
||||
|
||||
func testRoundTripWithLength(t *testing.T, e *encoder.SszNetworkEncoder) {
|
||||
buf := new(bytes.Buffer)
|
||||
msg := &statepb.Fork{
|
||||
msg := ðpb.Fork{
|
||||
PreviousVersion: []byte("fooo"),
|
||||
CurrentVersion: []byte("barr"),
|
||||
Epoch: 9001,
|
||||
}
|
||||
_, err := e.EncodeWithMaxLength(buf, msg)
|
||||
require.NoError(t, err)
|
||||
decoded := &statepb.Fork{}
|
||||
decoded := ðpb.Fork{}
|
||||
require.NoError(t, e.DecodeWithMaxLength(buf, decoded))
|
||||
if !proto.Equal(decoded, msg) {
|
||||
t.Logf("decoded=%+v\n", decoded)
|
||||
@@ -52,14 +52,14 @@ func testRoundTripWithLength(t *testing.T, e *encoder.SszNetworkEncoder) {
|
||||
|
||||
func testRoundTripWithGossip(t *testing.T, e *encoder.SszNetworkEncoder) {
|
||||
buf := new(bytes.Buffer)
|
||||
msg := &statepb.Fork{
|
||||
msg := ðpb.Fork{
|
||||
PreviousVersion: []byte("fooo"),
|
||||
CurrentVersion: []byte("barr"),
|
||||
Epoch: 9001,
|
||||
}
|
||||
_, err := e.EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
decoded := &statepb.Fork{}
|
||||
decoded := ðpb.Fork{}
|
||||
require.NoError(t, e.DecodeGossip(buf.Bytes(), decoded))
|
||||
if !proto.Equal(decoded, msg) {
|
||||
t.Logf("decoded=%+v\n", decoded)
|
||||
@@ -69,7 +69,7 @@ func testRoundTripWithGossip(t *testing.T, e *encoder.SszNetworkEncoder) {
|
||||
|
||||
func TestSszNetworkEncoder_EncodeWithMaxLength(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
msg := &statepb.Fork{
|
||||
msg := ðpb.Fork{
|
||||
PreviousVersion: []byte("fooo"),
|
||||
CurrentVersion: []byte("barr"),
|
||||
Epoch: 9001,
|
||||
@@ -86,7 +86,7 @@ func TestSszNetworkEncoder_EncodeWithMaxLength(t *testing.T) {
|
||||
|
||||
func TestSszNetworkEncoder_DecodeWithMaxLength(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
msg := &statepb.Fork{
|
||||
msg := ðpb.Fork{
|
||||
PreviousVersion: []byte("fooo"),
|
||||
CurrentVersion: []byte("barr"),
|
||||
Epoch: 4242,
|
||||
@@ -99,7 +99,7 @@ func TestSszNetworkEncoder_DecodeWithMaxLength(t *testing.T) {
|
||||
params.OverrideBeaconNetworkConfig(c)
|
||||
_, err := e.EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
decoded := &statepb.Fork{}
|
||||
decoded := ðpb.Fork{}
|
||||
err = e.DecodeWithMaxLength(buf, decoded)
|
||||
wanted := fmt.Sprintf("goes over the provided max limit of %d", maxChunkSize)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
@@ -115,13 +115,13 @@ func TestSszNetworkEncoder_DecodeWithMultipleFrames(t *testing.T) {
|
||||
maxChunkSize := uint64(1 << 22)
|
||||
c.MaxChunkSize = maxChunkSize
|
||||
params.OverrideBeaconNetworkConfig(c)
|
||||
_, err := e.EncodeWithMaxLength(buf, st.InnerStateUnsafe())
|
||||
_, err := e.EncodeWithMaxLength(buf, st.InnerStateUnsafe().(*ethpb.BeaconState))
|
||||
require.NoError(t, err)
|
||||
// Max snappy block size
|
||||
if buf.Len() <= 76490 {
|
||||
t.Errorf("buffer smaller than expected, wanted > %d but got %d", 76490, buf.Len())
|
||||
}
|
||||
decoded := new(statepb.BeaconState)
|
||||
decoded := new(ethpb.BeaconState)
|
||||
err = e.DecodeWithMaxLength(buf, decoded)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
@@ -144,7 +144,7 @@ func TestSszNetworkEncoder_MaxInt64(t *testing.T) {
|
||||
func TestSszNetworkEncoder_DecodeWithBadSnappyStream(t *testing.T) {
|
||||
st := newBadSnappyStream()
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
decoded := new(statepb.Fork)
|
||||
decoded := new(ethpb.Fork)
|
||||
err := e.DecodeWithMaxLength(st, decoded)
|
||||
assert.ErrorContains(t, io.EOF.Error(), err)
|
||||
}
|
||||
|
||||
beacon-chain/p2p/fork_watcher.go (new file)
@@ -0,0 +1,30 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
)
|
||||
|
||||
// A background routine which listens for new and upcoming forks and
|
||||
// updates the node's discovery service to reflect any new fork version
|
||||
// changes.
|
||||
func (s *Service) forkWatcher() {
|
||||
slotTicker := slotutil.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
case currSlot := <-slotTicker.C():
|
||||
currEpoch := helpers.SlotToEpoch(currSlot)
|
||||
if currEpoch == params.BeaconConfig().AltairForkEpoch {
|
||||
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not add fork entry")
|
||||
}
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
slotTicker.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
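The same pattern can be exercised without Prysm's slotutil ticker: tick once per slot, derive the epoch, and call a refresh hook while the fork epoch is active. The slot/epoch arithmetic and constants below are illustrative stand-ins (12-second slots, 32-slot epochs), and the refresh callback stands in for addForkEntry.

package main

import (
	"context"
	"log"
	"time"
)

const (
	secondsPerSlot = 12 // mainnet value, used here only for illustration
	slotsPerEpoch  = 32
)

// watchForFork ticks every slot and invokes refresh on each tick whose epoch
// equals forkEpoch, mirroring the equality check in forkWatcher above.
func watchForFork(ctx context.Context, genesis time.Time, forkEpoch uint64, refresh func() error) {
	ticker := time.NewTicker(secondsPerSlot * time.Second)
	defer ticker.Stop()
	for {
		select {
		case now := <-ticker.C:
			currSlot := uint64(now.Sub(genesis).Seconds()) / secondsPerSlot
			currEpoch := currSlot / slotsPerEpoch
			if currEpoch == forkEpoch {
				if err := refresh(); err != nil {
					log.Printf("could not add fork entry: %v", err)
				}
			}
		case <-ctx.Done():
			log.Println("context closed, exiting goroutine")
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	watchForFork(ctx, time.Now(), 74240 /* e.g. the Altair fork epoch */, func() error {
		log.Println("refreshing ENR fork entry")
		return nil
	})
}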
|
||||
@@ -21,9 +21,15 @@ const (
|
||||
// aggregateWeight specifies the scoring weight that we apply to
|
||||
// our aggregate topic.
|
||||
aggregateWeight = 0.5
|
||||
// syncContributionWeight specifies the scoring weight that we apply to
|
||||
// our sync contribution topic.
|
||||
syncContributionWeight = 0.2
|
||||
// attestationTotalWeight specifies the scoring weight that we apply to
|
||||
// our attestation subnet topic.
|
||||
attestationTotalWeight = 1
|
||||
// syncCommitteesTotalWeight specifies the scoring weight that we apply to
|
||||
// our sync subnet topic.
|
||||
syncCommitteesTotalWeight = 0.4
|
||||
// attesterSlashingWeight specifies the scoring weight that we apply to
|
||||
// our attester slashing topic.
|
||||
attesterSlashingWeight = 0.05
|
||||
@@ -92,6 +98,10 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultAggregateTopicParams(activeValidators)
|
||||
case strings.Contains(topic, "beacon_attestation"):
|
||||
return defaultAggregateSubnetTopicParams(activeValidators)
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
return defaultSyncSubnetTopicParams(activeValidators)
|
||||
case strings.Contains(topic, "sync_committee_contribution_and_proof"):
|
||||
return defaultSyncContributionTopicParams()
|
||||
case strings.Contains(topic, "voluntary_exit"):
|
||||
return defaultVoluntaryExitTopicParams(), nil
|
||||
case strings.Contains(topic, "proposer_slashing"):
|
||||
@@ -215,6 +225,48 @@ func defaultAggregateTopicParams(activeValidators uint64) (*pubsub.TopicScorePar
|
||||
}, nil
|
||||
}
|
||||
|
||||
func defaultSyncContributionTopicParams() (*pubsub.TopicScoreParams, error) {
|
||||
// Determine the expected message rate for the particular gossip topic.
|
||||
aggPerSlot := params.BeaconConfig().SyncCommitteeSubnetCount * params.BeaconConfig().TargetAggregatorsPerSyncSubcommittee
|
||||
firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
|
||||
if err != nil {
|
||||
log.Warnf("skipping initializing topic scoring: %v", err)
|
||||
return nil, nil
|
||||
}
|
||||
firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
|
||||
meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
|
||||
if err != nil {
|
||||
log.Warnf("skipping initializing topic scoring: %v", err)
|
||||
return nil, nil
|
||||
}
|
||||
meshWeight := -scoreByWeight(syncContributionWeight, meshThreshold)
|
||||
meshCap := 4 * meshThreshold
|
||||
if !meshDeliveryIsScored {
|
||||
// Set the mesh weight as zero as a temporary measure, so as to prevent
|
||||
// the average node from being penalised.
|
||||
meshWeight = 0
|
||||
}
|
||||
return &pubsub.TopicScoreParams{
|
||||
TopicWeight: syncContributionWeight,
|
||||
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
|
||||
TimeInMeshQuantum: inMeshTime(),
|
||||
TimeInMeshCap: inMeshCap(),
|
||||
FirstMessageDeliveriesWeight: firstMessageWeight,
|
||||
FirstMessageDeliveriesDecay: scoreDecay(1 * oneEpochDuration()),
|
||||
FirstMessageDeliveriesCap: firstMessageCap,
|
||||
MeshMessageDeliveriesWeight: meshWeight,
|
||||
MeshMessageDeliveriesDecay: scoreDecay(1 * oneEpochDuration()),
|
||||
MeshMessageDeliveriesCap: meshCap,
|
||||
MeshMessageDeliveriesThreshold: meshThreshold,
|
||||
MeshMessageDeliveriesWindow: 2 * time.Second,
|
||||
MeshMessageDeliveriesActivation: 1 * oneEpochDuration(),
|
||||
MeshFailurePenaltyWeight: meshWeight,
|
||||
MeshFailurePenaltyDecay: scoreDecay(1 * oneEpochDuration()),
|
||||
InvalidMessageDeliveriesWeight: -maxScore() / syncContributionWeight,
|
||||
InvalidMessageDeliveriesDecay: scoreDecay(50 * oneEpochDuration()),
|
||||
}, nil
|
||||
}
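The decayLimit/decayThreshold calls above reduce to a geometric-series steady state: a counter that gains rate per decay interval and is multiplied by decay each interval converges to rate/(1-decay). The helper below is an illustrative sketch of that cap, not Prysm's implementation.

package main

import "fmt"

// steadyState returns the value a decaying counter converges to when it is
// multiplied by decay and then incremented by rate once per interval:
// x = decay*x + rate  =>  x = rate / (1 - decay).
func steadyState(decay, rate float64) float64 {
	return rate / (1 - decay)
}

func main() {
	// Example: ~8 first-deliveries per decay interval with a 0.9 decay factor.
	decay, rate := 0.9, 8.0
	limit := steadyState(decay, rate)
	fmt.Printf("analytic cap: %.1f\n", limit) // 80.0

	// Simulate to confirm convergence toward the analytic cap.
	x := 0.0
	for i := 0; i < 200; i++ {
		x = x*decay + rate
	}
	fmt.Printf("simulated:    %.1f\n", x) // approaches 80.0
}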
|
||||
|
||||
func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) {
|
||||
subnetCount := params.BeaconNetworkConfig().AttestationSubnetCount
|
||||
// Get weight for each specific subnet.
|
||||
@@ -238,8 +290,13 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicSc
|
||||
firstDecay = 4
|
||||
meshDecay = 16
|
||||
}
|
||||
rate := numPerSlot * 2 / gossipSubD
|
||||
if rate == 0 {
|
||||
log.Warn("rate is 0, skipping initializing topic scoring")
|
||||
return nil, nil
|
||||
}
|
||||
// Determine expected first deliveries based on the message rate.
|
||||
firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(numPerSlot*2/gossipSubD))
|
||||
firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
|
||||
if err != nil {
|
||||
log.Warnf("skipping initializing topic scoring: %v", err)
|
||||
return nil, nil
|
||||
@@ -279,6 +336,69 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicSc
|
||||
}, nil
|
||||
}
|
||||
|
||||
func defaultSyncSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) {
|
||||
subnetCount := params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
// Get weight for each specific subnet.
|
||||
topicWeight := syncCommitteesTotalWeight / float64(subnetCount)
|
||||
syncComSize := params.BeaconConfig().SyncCommitteeSize
|
||||
// Set the max as the sync committee size
|
||||
if activeValidators > syncComSize {
|
||||
activeValidators = syncComSize
|
||||
}
|
||||
subnetWeight := activeValidators / subnetCount
|
||||
if subnetWeight == 0 {
|
||||
log.Warn("Subnet weight is 0, skipping initializing topic scoring")
|
||||
return nil, nil
|
||||
}
|
||||
firstDecay := time.Duration(1)
|
||||
meshDecay := time.Duration(4)
|
||||
|
||||
rate := subnetWeight * 2 / gossipSubD
|
||||
if rate == 0 {
|
||||
log.Warn("rate is 0, skipping initializing topic scoring")
|
||||
return nil, nil
|
||||
}
|
||||
// Determine expected first deliveries based on the message rate.
|
||||
firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
|
||||
if err != nil {
|
||||
log.Warnf("skipping initializing topic scoring: %v", err)
|
||||
return nil, nil
|
||||
}
|
||||
firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
|
||||
// Determine expected mesh deliveries based on message rate applied with a dampening factor.
|
||||
meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(subnetWeight)/dampeningFactor)
|
||||
if err != nil {
|
||||
log.Warnf("skipping initializing topic scoring: %v", err)
|
||||
return nil, nil
|
||||
}
|
||||
meshWeight := -scoreByWeight(topicWeight, meshThreshold)
|
||||
meshCap := 4 * meshThreshold
|
||||
if !meshDeliveryIsScored {
|
||||
// Set the mesh weight as zero as a temporary measure, so as to prevent
|
||||
// the average node from being penalised.
|
||||
meshWeight = 0
|
||||
}
|
||||
return &pubsub.TopicScoreParams{
|
||||
TopicWeight: topicWeight,
|
||||
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
|
||||
TimeInMeshQuantum: inMeshTime(),
|
||||
TimeInMeshCap: inMeshCap(),
|
||||
FirstMessageDeliveriesWeight: firstMessageWeight,
|
||||
FirstMessageDeliveriesDecay: scoreDecay(firstDecay * oneEpochDuration()),
|
||||
FirstMessageDeliveriesCap: firstMessageCap,
|
||||
MeshMessageDeliveriesWeight: meshWeight,
|
||||
MeshMessageDeliveriesDecay: scoreDecay(meshDecay * oneEpochDuration()),
|
||||
MeshMessageDeliveriesCap: meshCap,
|
||||
MeshMessageDeliveriesThreshold: meshThreshold,
|
||||
MeshMessageDeliveriesWindow: 2 * time.Second,
|
||||
MeshMessageDeliveriesActivation: 1 * oneEpochDuration(),
|
||||
MeshFailurePenaltyWeight: meshWeight,
|
||||
MeshFailurePenaltyDecay: scoreDecay(meshDecay * oneEpochDuration()),
|
||||
InvalidMessageDeliveriesWeight: -maxScore() / topicWeight,
|
||||
InvalidMessageDeliveriesDecay: scoreDecay(50 * oneEpochDuration()),
|
||||
}, nil
|
||||
}
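Because subnetWeight and rate above are computed with integer division, small validator counts silently collapse to zero, which is why both guards bail out early. A rough worked example under assumed values (512-member sync committee, 4 sync subnets, gossipsub mesh degree 8):

package main

import "fmt"

func main() {
	const (
		syncCommitteeSize uint64 = 512 // spec SYNC_COMMITTEE_SIZE (assumed here)
		syncSubnetCount   uint64 = 4   // spec SYNC_COMMITTEE_SUBNET_COUNT (assumed here)
		gossipMeshDegree  uint64 = 8   // illustrative gossipsub mesh degree D
	)
	for _, active := range []uint64{100000, 512, 20, 3} {
		v := active
		if v > syncCommitteeSize { // cap at the sync committee size, as above
			v = syncCommitteeSize
		}
		subnetWeight := v / syncSubnetCount
		rate := subnetWeight * 2 / gossipMeshDegree
		fmt.Printf("active=%6d  subnetWeight=%3d  rate=%2d  skip=%v\n",
			active, subnetWeight, rate, subnetWeight == 0 || rate == 0)
	}
}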
|
||||
|
||||
func defaultAttesterSlashingTopicParams() *pubsub.TopicScoreParams {
|
||||
return &pubsub.TopicScoreParams{
|
||||
TopicWeight: attesterSlashingWeight,
|
||||
@@ -401,8 +521,9 @@ func scoreByWeight(weight, threshold float64) float64 {
|
||||
|
||||
// maxScore attainable by a peer.
|
||||
func maxScore() float64 {
|
||||
totalWeight := beaconBlockWeight + aggregateWeight + attestationTotalWeight +
|
||||
attesterSlashingWeight + proposerSlashingWeight + voluntaryExitWeight
|
||||
totalWeight := beaconBlockWeight + aggregateWeight + syncContributionWeight +
|
||||
attestationTotalWeight + syncCommitteesTotalWeight + attesterSlashingWeight +
|
||||
proposerSlashingWeight + voluntaryExitWeight
|
||||
return (maxInMeshScore + maxFirstDeliveryScore) * totalWeight
|
||||
}
|
||||
|
||||
|
||||
@@ -3,27 +3,53 @@ package p2p
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// GossipTopicMappings represent the protocol ID to protobuf message type map for easy
|
||||
// gossipTopicMappings represent the protocol ID to protobuf message type map for easy
|
||||
// lookup.
|
||||
var GossipTopicMappings = map[string]proto.Message{
|
||||
BlockSubnetTopicFormat: &pb.SignedBeaconBlock{},
|
||||
AttestationSubnetTopicFormat: &pb.Attestation{},
|
||||
ExitSubnetTopicFormat: &pb.SignedVoluntaryExit{},
|
||||
ProposerSlashingSubnetTopicFormat: &pb.ProposerSlashing{},
|
||||
AttesterSlashingSubnetTopicFormat: &pb.AttesterSlashing{},
|
||||
AggregateAndProofSubnetTopicFormat: &pb.SignedAggregateAttestationAndProof{},
|
||||
var gossipTopicMappings = map[string]proto.Message{
|
||||
BlockSubnetTopicFormat: &pb.SignedBeaconBlock{},
|
||||
AttestationSubnetTopicFormat: &pb.Attestation{},
|
||||
ExitSubnetTopicFormat: &pb.SignedVoluntaryExit{},
|
||||
ProposerSlashingSubnetTopicFormat: &pb.ProposerSlashing{},
|
||||
AttesterSlashingSubnetTopicFormat: &pb.AttesterSlashing{},
|
||||
AggregateAndProofSubnetTopicFormat: &pb.SignedAggregateAttestationAndProof{},
|
||||
SyncContributionAndProofSubnetTopicFormat: ðpb.SignedContributionAndProof{},
|
||||
SyncCommitteeSubnetTopicFormat: ðpb.SyncCommitteeMessage{},
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
// versioned by epoch.
|
||||
func GossipTopicMappings(topic string, epoch types.Epoch) proto.Message {
|
||||
if topic == BlockSubnetTopicFormat && epoch >= params.BeaconConfig().AltairForkEpoch {
|
||||
return ðpb.SignedBeaconBlockAltair{}
|
||||
}
|
||||
return gossipTopicMappings[topic]
|
||||
}
|
||||
|
||||
// AllTopics returns all topics stored in our
|
||||
// gossip mapping.
|
||||
func AllTopics() []string {
|
||||
topics := []string{}
|
||||
for k := range gossipTopicMappings {
|
||||
topics = append(topics, k)
|
||||
}
|
||||
return topics
|
||||
}
|
||||
|
||||
// GossipTypeMapping is the inverse of GossipTopicMappings so that an arbitrary protobuf message
|
||||
// can be mapped to a protocol ID string.
|
||||
var GossipTypeMapping = make(map[reflect.Type]string, len(GossipTopicMappings))
|
||||
var GossipTypeMapping = make(map[reflect.Type]string, len(gossipTopicMappings))
|
||||
|
||||
func init() {
|
||||
for k, v := range GossipTopicMappings {
|
||||
for k, v := range gossipTopicMappings {
|
||||
GossipTypeMapping[reflect.TypeOf(v)] = k
|
||||
}
|
||||
// Specially handle Altair Objects.
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
|
||||
}
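A stripped-down sketch of the epoch-gated lookup that the GossipTopicMappings accessor performs: one base mapping, with the block topic overridden once the fork epoch is reached. The container types and fork epoch here are hypothetical placeholders.

package main

import "fmt"

const blockTopicFormat = "/eth2/%x/beacon_block"

type Epoch uint64

// Hypothetical stand-ins for the phase0 and Altair block container types.
type phase0Block struct{}
type altairBlock struct{}

var baseMappings = map[string]interface{}{
	blockTopicFormat: &phase0Block{},
}

// topicMessage mirrors the pattern above: return the Altair type for the block
// topic once the fork epoch is reached, and the base mapping otherwise.
func topicMessage(topic string, epoch, altairForkEpoch Epoch) interface{} {
	if topic == blockTopicFormat && epoch >= altairForkEpoch {
		return &altairBlock{}
	}
	return baseMappings[topic]
}

func main() {
	const altairForkEpoch Epoch = 100
	fmt.Printf("epoch 0:   %T\n", topicMessage(blockTopicFormat, 0, altairForkEpoch))   // *main.phase0Block
	fmt.Printf("epoch 100: %T\n", topicMessage(blockTopicFormat, 100, altairForkEpoch)) // *main.altairBlock
}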
|
||||
|
||||
@@ -3,14 +3,39 @@ package p2p
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
eth2types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
)
|
||||
|
||||
func TestMappingHasNoDuplicates(t *testing.T) {
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, v := range GossipTopicMappings {
|
||||
for _, v := range gossipTopicMappings {
|
||||
if _, ok := m[reflect.TypeOf(v)]; ok {
|
||||
t.Errorf("%T is duplicated in the topic mapping", v)
|
||||
}
|
||||
m[reflect.TypeOf(v)] = true
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig()
|
||||
forkEpoch := eth2types.Epoch(100)
|
||||
bCfg.AltairForkEpoch = forkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = eth2types.Epoch(100)
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
// Before Fork
|
||||
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
|
||||
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// After Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, forkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
@@ -35,6 +35,7 @@ type P2P interface {
|
||||
type Broadcaster interface {
|
||||
Broadcast(context.Context, proto.Message) error
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
|
||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
|
||||
@@ -25,6 +25,16 @@ var (
|
||||
Name: "p2p_attestation_subnet_attempted_broadcasts",
|
||||
Help: "The number of attestations that were attempted to be broadcast.",
|
||||
})
|
||||
savedSyncCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_sync_committee_subnet_recovered_broadcasts",
|
||||
Help: "The number of sync committee messages that were attempted to be broadcast with no peers on " +
|
||||
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
|
||||
"until a subnet peer can be found.",
|
||||
})
|
||||
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
|
||||
Help: "The number of sync committee that were attempted to be broadcast.",
|
||||
})
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
|
||||
@@ -235,7 +235,7 @@ func (p *Status) SetMetadata(pid peer.ID, metaData metadata.Metadata) {
|
||||
defer p.store.Unlock()
|
||||
|
||||
peerData := p.store.PeerDataGetOrCreate(pid)
|
||||
peerData.MetaData = metaData
|
||||
peerData.MetaData = metaData.Copy()
|
||||
}
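Storing metaData.Copy() rather than the caller's value keeps the peer store from aliasing mutable bitfields still owned by the caller. A small illustration of the aliasing hazard that the copy avoids; the Meta type here is hypothetical.

package main

import "fmt"

// Meta is a hypothetical metadata record with a mutable bitfield.
type Meta struct {
	SeqNumber uint64
	Attnets   []byte
}

// Copy returns a deep copy so later mutations by the caller are not observed.
func (m Meta) Copy() Meta {
	attnets := make([]byte, len(m.Attnets))
	copy(attnets, m.Attnets)
	return Meta{SeqNumber: m.SeqNumber, Attnets: attnets}
}

func main() {
	caller := Meta{SeqNumber: 1, Attnets: []byte{0x01}}
	aliased := caller       // shares the Attnets backing array
	stored := caller.Copy() // independent copy

	caller.Attnets[0] = 0xff
	fmt.Printf("aliased sees mutation: %#x\n", aliased.Attnets[0]) // 0xff
	fmt.Printf("copy is unaffected:    %#x\n", stored.Attnets[0])  // 0x1
}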
|
||||
|
||||
// Metadata returns a copy of the metadata corresponding to the provided
|
||||
|
||||
@@ -8,11 +8,18 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/shared/p2putils"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
var _ pubsub.SubscriptionFilter = (*Service)(nil)
|
||||
|
||||
const pubsubSubscriptionRequestLimit = 100
|
||||
// It is set at this limit to handle the possibility
|
||||
// of double topic subscriptions at fork boundaries.
|
||||
// -> 64 Attestation Subnets * 2.
|
||||
// -> 4 Sync Committee Subnets * 2.
|
||||
// -> Block,Aggregate,ProposerSlashing,AttesterSlashing,Exits,SyncContribution * 2.
|
||||
const pubsubSubscriptionRequestLimit = 200
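A quick check of the budget behind the new limit, using the subnet counts listed in the comment: doubling every topic across two fork digests stays well under 200.

package main

import "fmt"

func main() {
	const (
		attSubnets    = 64
		syncSubnets   = 4
		globalTopics  = 6 // block, aggregate, proposer/attester slashing, exits, sync contribution
		digestsAtFork = 2 // current fork digest plus the upcoming one
	)
	worstCase := (attSubnets + syncSubnets + globalTopics) * digestsAtFork
	fmt.Println(worstCase) // 148, within the 200 request limit
}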
|
||||
|
||||
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
|
||||
func (s *Service) CanSubscribe(topic string) bool {
|
||||
@@ -35,7 +42,12 @@ func (s *Service) CanSubscribe(topic string) bool {
|
||||
log.WithError(err).Error("Could not determine fork digest")
|
||||
return false
|
||||
}
|
||||
if parts[2] != fmt.Sprintf("%x", fd) {
|
||||
digest, err := p2putils.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine next fork digest")
|
||||
return false
|
||||
}
|
||||
if parts[2] != fmt.Sprintf("%x", fd) && parts[2] != fmt.Sprintf("%x", digest) {
|
||||
return false
|
||||
}
|
||||
if parts[4] != encoder.ProtocolSuffixSSZSnappy {
|
||||
@@ -43,7 +55,7 @@ func (s *Service) CanSubscribe(topic string) bool {
|
||||
}
|
||||
|
||||
// Check the incoming topic matches any topic mapping. This includes a check for part[3].
|
||||
for gt := range GossipTopicMappings {
|
||||
for gt := range gossipTopicMappings {
|
||||
if _, err := scanfcheck(strings.Join(parts[0:4], "/"), gt); err == nil {
|
||||
return true
|
||||
}
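A self-contained sketch of the check CanSubscribe performs on a gossip topic of the form /eth2/&lt;fork digest&gt;/&lt;message name&gt;/&lt;encoding&gt;, accepting either the current or the upcoming fork digest; the digest values below are made up, and the topic-mapping check is omitted.

package main

import (
	"fmt"
	"strings"
)

// canSubscribe mimics the structure of the check above: split the topic,
// verify the digest against the allowed set, and require the ssz_snappy suffix.
func canSubscribe(topic string, allowedDigests map[string]bool) bool {
	parts := strings.Split(topic, "/")
	// Splitting "/eth2/<digest>/<name>/<encoding>" yields a leading empty part.
	if len(parts) != 5 || parts[1] != "eth2" {
		return false
	}
	if !allowedDigests[parts[2]] {
		return false
	}
	return parts[4] == "ssz_snappy"
}

func main() {
	allowed := map[string]bool{
		"b5303f2a": true, // current fork digest (illustrative)
		"afcaaba0": true, // digest of the upcoming fork (illustrative)
	}
	fmt.Println(canSubscribe("/eth2/b5303f2a/beacon_block/ssz_snappy", allowed))     // true
	fmt.Println(canSubscribe("/eth2/afcaaba0/sync_committee_3/ssz_snappy", allowed)) // true
	fmt.Println(canSubscribe("/eth2/deadbeef/beacon_block/ssz_snappy", allowed))     // false
}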
|
||||
|
||||
@@ -86,11 +86,11 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
}
|
||||
|
||||
// Ensure all gossip topic mappings pass validation.
|
||||
for topic := range GossipTopicMappings {
|
||||
for _, topic := range AllTopics() {
|
||||
formatting := []interface{}{digest}
|
||||
|
||||
// Special case for attestation subnets which have a second formatting placeholder.
|
||||
if topic == AttestationSubnetTopicFormat {
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
|
||||
formatting = append(formatting, 0 /* some subnet ID */)
|
||||
}
|
||||
|
||||
@@ -193,7 +193,7 @@ func Test_scanfcheck(t *testing.T) {
|
||||
func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testing.T) {
|
||||
// scanfcheck only supports integer based substitutions at the moment. Any others will
|
||||
// inaccurately fail validation.
|
||||
for topic := range GossipTopicMappings {
|
||||
for _, topic := range AllTopics() {
|
||||
t.Run(topic, func(t *testing.T) {
|
||||
for i, c := range topic {
|
||||
if string(c) == "%" {
|
||||
@@ -356,5 +356,4 @@ func TestService_MonitorsStateForkUpdates(t *testing.T) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
require.True(t, s.isInitialized())
|
||||
require.NotEmpty(t, s.currentForkDigest)
|
||||
}
|
||||
|
||||
@@ -3,10 +3,14 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
|
||||
ssz "github.com/ferranbt/fastssz"
|
||||
"github.com/kr/pretty"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -23,6 +27,11 @@ func (s *Service) Send(ctx context.Context, message interface{}, baseTopic strin
|
||||
topic := baseTopic + s.Encoding().ProtocolSuffix()
|
||||
span.AddAttributes(trace.StringAttribute("topic", topic))
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"request": pretty.Sprint(message),
|
||||
}).Tracef("Sending RPC request to peer %s", pid.String())
|
||||
|
||||
// Apply max dial timeout when opening a new stream.
|
||||
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
|
||||
defer cancel()
|
||||
@@ -33,8 +42,12 @@ func (s *Service) Send(ctx context.Context, message interface{}, baseTopic strin
|
||||
return nil, err
|
||||
}
|
||||
// do not encode anything if we are sending a metadata request
|
||||
if baseTopic != RPCMetaDataTopicV1 {
|
||||
if _, err := s.Encoding().EncodeWithMaxLength(stream, message); err != nil {
|
||||
if baseTopic != RPCMetaDataTopicV1 && baseTopic != RPCMetaDataTopicV2 {
|
||||
castedMsg, ok := message.(ssz.Marshaler)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("%T does not support the ssz marshaller interface", message)
|
||||
}
|
||||
if _, err := s.Encoding().EncodeWithMaxLength(stream, castedMsg); err != nil {
|
||||
traceutil.AnnotateError(span, err)
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/shared"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -93,7 +94,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
cancel: cancel,
|
||||
cfg: cfg,
|
||||
isPreGenesis: true,
|
||||
joinedTopics: make(map[string]*pubsub.Topic, len(GossipTopicMappings)),
|
||||
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
}
|
||||
|
||||
@@ -128,7 +129,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
|
||||
s.host = h
|
||||
s.host.RemoveStreamHandler(identify.IDDelta)
|
||||
|
||||
// Gossipsub registration is done before we add in any new peers
|
||||
// due to libp2p's gossipsub implementation not taking into
|
||||
// account previously added peers when creating the gossipsub
|
||||
@@ -164,6 +164,9 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
},
|
||||
})
|
||||
|
||||
// Initialize Data maps.
|
||||
types.InitializeDataMaps()
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
@@ -217,6 +220,9 @@ func (s *Service) Start() {
|
||||
}
|
||||
s.connectWithAllPeers(addrs)
|
||||
}
|
||||
// Initialize metadata according to the
|
||||
// current epoch.
|
||||
s.RefreshENR()
|
||||
|
||||
// Periodic functions.
|
||||
runutil.RunEvery(s.ctx, params.BeaconNetworkConfig().TtfbTimeout, func() {
|
||||
@@ -250,6 +256,7 @@ func (s *Service) Start() {
|
||||
if p2pHostDNS != "" {
|
||||
logExternalDNSAddr(s.host.ID(), p2pHostDNS, p2pTCPPort)
|
||||
}
|
||||
go s.forkWatcher()
|
||||
}
|
||||
|
||||
// Stop the p2p service and terminate all peer connections.
|
||||
|
||||
@@ -2,10 +2,12 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -15,8 +17,15 @@ import (
|
||||
)
|
||||
|
||||
var attestationSubnetCount = params.BeaconNetworkConfig().AttestationSubnetCount
|
||||
var syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
|
||||
var attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
|
||||
var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
|
||||
|
||||
// The value used with the subnet, in order
// to create an appropriate key to retrieve
// the relevant lock.
|
||||
const syncLockerVal = 100
|
||||
|
||||
// FindPeersWithSubnet performs a network search for peers
|
||||
// subscribed to a particular subnet. Then we try to connect
|
||||
@@ -36,7 +45,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
|
||||
topic += s.Encoding().ProtocolSuffix()
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForSubnet(index))
|
||||
switch {
|
||||
case strings.Contains(topic, GossipAttestationMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
|
||||
default:
|
||||
return false, errors.New("no subnet exists for provided topic")
|
||||
}
|
||||
|
||||
currNum := uint64(len(s.pubsub.ListPeers(topic)))
|
||||
wg := new(sync.WaitGroup)
|
||||
@@ -69,7 +85,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
}
|
||||
|
||||
// returns a method with filters peers specifically for a particular attestation subnet.
|
||||
func (s *Service) filterPeerForSubnet(index uint64) func(node *enode.Node) bool {
|
||||
func (s *Service) filterPeerForAttSubnet(index uint64) func(node *enode.Node) bool {
|
||||
return func(node *enode.Node) bool {
|
||||
if !s.filterPeer(node) {
|
||||
return false
|
||||
@@ -89,6 +105,27 @@ func (s *Service) filterPeerForSubnet(index uint64) func(node *enode.Node) bool
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method with filters peers specifically for a particular sync subnet.
|
||||
func (s *Service) filterPeerForSyncSubnet(index uint64) func(node *enode.Node) bool {
|
||||
return func(node *enode.Node) bool {
|
||||
if !s.filterPeer(node) {
|
||||
return false
|
||||
}
|
||||
subnets, err := syncSubnets(node.Record())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
indExists := false
|
||||
for _, comIdx := range subnets {
|
||||
if comIdx == index {
|
||||
indExists = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return indExists
|
||||
}
|
||||
}
|
||||
|
||||
// lower threshold to broadcast object compared to searching
|
||||
// for a subnet. So that even in the event of poor peer
|
||||
// connectivity, we can still broadcast an attestation.
|
||||
@@ -109,22 +146,50 @@ func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) {
|
||||
})
|
||||
}
|
||||
|
||||
// Updates the service's discv5 listener record's attestation subnet
|
||||
// with a new value for a bitfield of subnets tracked. It also record's
|
||||
// the sync committee subnet in the enr. It also updates the node's
|
||||
// metadata by increasing the sequence number and the subnets tracked by the node.
|
||||
func (s *Service) updateSubnetRecordWithMetadataV2(bitV bitfield.Bitvector64, bitS bitfield.Bitvector4) {
|
||||
entry := enr.WithEntry(attSubnetEnrKey, &bitV)
|
||||
subEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitS)
|
||||
s.dv5Listener.LocalNode().Set(entry)
|
||||
s.dv5Listener.LocalNode().Set(subEntry)
|
||||
s.metaData = wrapper.WrappedMetadataV1(&pb.MetaDataV1{
|
||||
SeqNumber: s.metaData.SequenceNumber() + 1,
|
||||
Attnets: bitV,
|
||||
Syncnets: bitS,
|
||||
})
|
||||
}
|
||||
|
||||
// Initializes a bitvector of attestation subnets the beacon node is subscribed to
|
||||
// and creates a new ENR entry with its default value.
|
||||
func intializeAttSubnets(node *enode.LocalNode) *enode.LocalNode {
|
||||
func initializeAttSubnets(node *enode.LocalNode) *enode.LocalNode {
|
||||
bitV := bitfield.NewBitvector64()
|
||||
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
|
||||
node.Set(entry)
|
||||
return node
|
||||
}
|
||||
|
||||
// Initializes a bitvector of sync committee subnets the beacon node is subscribed to
|
||||
// and creates a new ENR entry with its default value.
|
||||
func initializeSyncCommSubnets(node *enode.LocalNode) *enode.LocalNode {
|
||||
bitV := bitfield.Bitvector4{byte(0x00)}
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, bitV.Bytes())
|
||||
node.Set(entry)
|
||||
return node
|
||||
}
|
||||
|
||||
// Reads the attestation subnets entry from a node's ENR and determines
|
||||
// the committee indices of the attestation subnets the node is subscribed to.
|
||||
func attSubnets(record *enr.Record) ([]uint64, error) {
|
||||
bitV, err := bitvector(record)
|
||||
bitV, err := attBitvector(record)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(bitV) != determineSize(int(attestationSubnetCount)) {
|
||||
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
|
||||
}
|
||||
var committeeIdxs []uint64
|
||||
for i := uint64(0); i < attestationSubnetCount; i++ {
|
||||
if bitV.BitAt(i) {
|
||||
@@ -134,9 +199,28 @@ func attSubnets(record *enr.Record) ([]uint64, error) {
|
||||
return committeeIdxs, nil
|
||||
}
|
||||
|
||||
// Reads the sync subnets entry from a node's ENR and determines
|
||||
// the committee indices of the sync subnets the node is subscribed to.
|
||||
func syncSubnets(record *enr.Record) ([]uint64, error) {
|
||||
bitV, err := syncBitvector(record)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(bitV) != determineSize(int(syncCommsSubnetCount)) {
|
||||
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
|
||||
}
|
||||
var committeeIdxs []uint64
|
||||
for i := uint64(0); i < syncCommsSubnetCount; i++ {
|
||||
if bitV.BitAt(i) {
|
||||
committeeIdxs = append(committeeIdxs, i)
|
||||
}
|
||||
}
|
||||
return committeeIdxs, nil
|
||||
}
|
||||
|
||||
// Parses the attestation subnets ENR entry in a node and extracts its value
|
||||
// as a bitvector for further manipulation.
|
||||
func bitvector(record *enr.Record) (bitfield.Bitvector64, error) {
|
||||
func attBitvector(record *enr.Record) (bitfield.Bitvector64, error) {
|
||||
bitV := bitfield.NewBitvector64()
|
||||
entry := enr.WithEntry(attSubnetEnrKey, &bitV)
|
||||
err := record.Load(entry)
|
||||
@@ -146,6 +230,25 @@ func bitvector(record *enr.Record) (bitfield.Bitvector64, error) {
|
||||
return bitV, nil
|
||||
}
|
||||
|
||||
// Parses the sync committee subnets ENR entry in a node and extracts its value
|
||||
// as a bitvector for further manipulation.
|
||||
func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {
|
||||
bitV := bitfield.Bitvector4{byte(0x00)}
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, &bitV)
|
||||
err := record.Load(entry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bitV, nil
|
||||
}
|
||||
|
||||
// The subnet locker is a map which keeps track of all
// mutexes stored per subnet. This locker is re-used
// between both the attestation and sync subnets. In
// order to differentiate between attestation and sync
// subnets, sync subnets are keyed as (subnet + syncLockerVal). This
// is to prevent conflicts while allowing both subnets
// to use a single locker.
|
||||
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
|
||||
s.subnetsLockLock.Lock()
|
||||
defer s.subnetsLockLock.Unlock()
|
||||
@@ -156,3 +259,11 @@ func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func determineSize(bitCount int) int {
|
||||
numOfBytes := bitCount / 8
|
||||
if bitCount%8 != 0 {
|
||||
numOfBytes++
|
||||
}
|
||||
return numOfBytes
|
||||
}
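The bitvector round-trip above can be exercised without a running node: pack subnet indices into a byte-backed bitfield, check its length against determineSize, and read the set bits back out. This sketch uses plain byte slices instead of the go-bitfield types, so the helpers are illustrative rather than Prysm's.

package main

import (
	"errors"
	"fmt"
)

// determineSize mirrors the helper above: bytes needed for bitCount bits.
func determineSize(bitCount int) int {
	n := bitCount / 8
	if bitCount%8 != 0 {
		n++
	}
	return n
}

// subnetsFromBits validates the bitvector length for subnetCount subnets and
// returns the indices of the set bits, as attSubnets/syncSubnets do.
func subnetsFromBits(bits []byte, subnetCount int) ([]uint64, error) {
	if len(bits) != determineSize(subnetCount) {
		return nil, errors.New("invalid bitvector provided")
	}
	var idxs []uint64
	for i := 0; i < subnetCount; i++ {
		if bits[i/8]&(1<<(uint(i)%8)) != 0 {
			idxs = append(idxs, uint64(i))
		}
	}
	return idxs, nil
}

func main() {
	// 4 sync committee subnets fit in a single byte; subscribe to subnets 0 and 2.
	bits := make([]byte, determineSize(4))
	bits[0] |= 1 << 0
	bits[0] |= 1 << 2
	idxs, err := subnetsFromBits(bits, 4)
	fmt.Println(idxs, err) // [0 2] <nil>
}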
|
||||
|
||||
@@ -2,11 +2,15 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p-core/crypto"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
@@ -135,3 +139,349 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
|
||||
assert.NoError(t, s.Stop())
|
||||
exitRoutine <- true
|
||||
}
|
||||
|
||||
func Test_AttSubnets(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
record func(t *testing.T) *enr.Record
|
||||
want []uint64
|
||||
wantErr bool
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "valid record",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
localNode = initializeAttSubnets(localNode)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "too small subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
entry := enr.WithEntry(attSubnetEnrKey, []byte{})
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{},
|
||||
wantErr: true,
|
||||
errContains: "invalid bitvector provided, it has a size of",
|
||||
},
|
||||
{
|
||||
name: "half sized subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, 4))
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{},
|
||||
wantErr: true,
|
||||
errContains: "invalid bitvector provided, it has a size of",
|
||||
},
|
||||
{
|
||||
name: "too large subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, determineSize(int(attestationSubnetCount))+1))
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{},
|
||||
wantErr: true,
|
||||
errContains: "invalid bitvector provided, it has a size of",
|
||||
},
|
||||
{
|
||||
name: "very large subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, determineSize(int(attestationSubnetCount))+100))
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{},
|
||||
wantErr: true,
|
||||
errContains: "invalid bitvector provided, it has a size of",
|
||||
},
|
||||
{
|
||||
name: "single subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
bitV := bitfield.NewBitvector64()
|
||||
bitV.SetBitAt(0, true)
|
||||
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{0},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple subnets",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for i := uint64(0); i < bitV.Len(); i++ {
|
||||
// skip 2 subnets
|
||||
if (i+1)%2 == 0 {
|
||||
continue
|
||||
}
|
||||
bitV.SetBitAt(i, true)
|
||||
}
|
||||
bitV.SetBitAt(0, true)
|
||||
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
|
||||
22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48,
|
||||
50, 52, 54, 56, 58, 60, 62},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "all subnets",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for i := uint64(0); i < bitV.Len(); i++ {
|
||||
bitV.SetBitAt(i, true)
|
||||
}
|
||||
entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes())
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
|
||||
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
||||
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := attSubnets(tt.record(t))
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if tt.wantErr {
|
||||
assert.ErrorContains(t, tt.errContains, err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("syncSubnets() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_SyncSubnets(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
record func(t *testing.T) *enr.Record
|
||||
want []uint64
|
||||
wantErr bool
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "valid record",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
localNode = initializeSyncCommSubnets(localNode)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "too small subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, []byte{})
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{},
|
||||
wantErr: true,
|
||||
errContains: "invalid bitvector provided, it has a size of",
|
||||
},
|
||||
{
|
||||
name: "too large subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, make([]byte, determineSize(int(syncCommsSubnetCount))+1))
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{},
|
||||
wantErr: true,
|
||||
errContains: "invalid bitvector provided, it has a size of",
|
||||
},
|
||||
{
|
||||
name: "very large subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, make([]byte, determineSize(int(syncCommsSubnetCount))+100))
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{},
|
||||
wantErr: true,
|
||||
errContains: "invalid bitvector provided, it has a size of",
|
||||
},
|
||||
{
|
||||
name: "single subnet",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
bitV := bitfield.Bitvector4{byte(0x00)}
|
||||
bitV.SetBitAt(0, true)
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, bitV.Bytes())
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{0},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple subnets",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
bitV := bitfield.Bitvector4{byte(0x00)}
|
||||
for i := uint64(0); i < bitV.Len(); i++ {
|
||||
// skip 2 subnets
|
||||
if (i+1)%2 == 0 {
|
||||
continue
|
||||
}
|
||||
bitV.SetBitAt(i, true)
|
||||
}
|
||||
bitV.SetBitAt(0, true)
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, bitV.Bytes())
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{0, 2},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "all subnets",
|
||||
record: func(t *testing.T) *enr.Record {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
assert.NoError(t, err)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
assert.NoError(t, err)
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
bitV := bitfield.Bitvector4{byte(0x00)}
|
||||
for i := uint64(0); i < bitV.Len(); i++ {
|
||||
bitV.SetBitAt(i, true)
|
||||
}
|
||||
entry := enr.WithEntry(syncCommsSubnetEnrKey, bitV.Bytes())
|
||||
localNode.Set(entry)
|
||||
return localNode.Node().Record()
|
||||
},
|
||||
want: []uint64{0, 1, 2, 3},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := syncSubnets(tt.record(t))
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if tt.wantErr {
|
||||
assert.ErrorContains(t, tt.errContains, err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("syncSubnets() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_blankhost//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//connmgr:go_default_library",
|
||||
|
||||
@@ -138,6 +138,11 @@ func (p *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage -- fake.
|
||||
func (b *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// InterceptPeerDial -- fake.
|
||||
func (p *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
return true
|
||||
|
||||
@@ -27,3 +27,9 @@ func (m *MockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, a *e
|
||||
m.BroadcastAttestations = append(m.BroadcastAttestations, a)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage records a broadcast occurred.
|
||||
func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
m.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
ssz "github.com/ferranbt/fastssz"
|
||||
bhost "github.com/libp2p/go-libp2p-blankhost"
|
||||
core "github.com/libp2p/go-libp2p-core"
|
||||
"github.com/libp2p/go-libp2p-core/control"
|
||||
@@ -99,7 +100,11 @@ func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
|
||||
}
|
||||
}()
|
||||
|
||||
n, err := p.Encoding().EncodeWithMaxLength(s, msg)
|
||||
castedMsg, ok := msg.(ssz.Marshaler)
|
||||
if !ok {
|
||||
p.t.Fatalf("%T doesnt support ssz marshaler", msg)
|
||||
}
|
||||
n, err := p.Encoding().EncodeWithMaxLength(s, castedMsg)
|
||||
if err != nil {
|
||||
_err := s.Reset()
|
||||
_ = _err
|
||||
@@ -127,8 +132,12 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
|
||||
// pick up the newly connected peer.
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
|
||||
castedMsg, ok := msg.(ssz.Marshaler)
|
||||
if !ok {
|
||||
p.t.Fatalf("%T doesnt support ssz marshaler", msg)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
|
||||
if _, err := p.Encoding().EncodeGossip(buf, castedMsg); err != nil {
|
||||
p.t.Fatalf("Failed to encode message: %v", err)
|
||||
}
|
||||
digest, err := p.ForkDigest()
|
||||
@@ -156,6 +165,12 @@ func (p *TestP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage broadcasts a sync committee message.
|
||||
func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
p.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStreamHandler for RPC.
|
||||
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
|
||||
p.BHost.SetStreamHandler(protocol.ID(topic), handler)
|
||||
@@ -292,8 +307,12 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if topic != "/eth2/beacon_chain/req/metadata/1" {
|
||||
if _, err := p.Encoding().EncodeWithMaxLength(stream, msg); err != nil {
|
||||
if topic != "/eth2/beacon_chain/req/metadata/1" && topic != "/eth2/beacon_chain/req/metadata/2" {
|
||||
castedMsg, ok := msg.(ssz.Marshaler)
|
||||
if !ok {
|
||||
p.t.Fatalf("%T doesnt support ssz marshaler", msg)
|
||||
}
|
||||
if _, err := p.Encoding().EncodeWithMaxLength(stream, castedMsg); err != nil {
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
return nil, err
|
||||
|
||||
@@ -1,8 +1,18 @@
|
||||
package p2p
|
||||
|
||||
const (
|
||||
// GossipAttestationMessage is the name for the attestation message type. It is
|
||||
// specially extracted so as to determine the correct message type from an attestation
|
||||
// subnet.
|
||||
GossipAttestationMessage = "beacon_attestation"
|
||||
// GossipSyncCommitteeMessage is the name for the sync committee message type. It is
|
||||
// specially extracted so as to determine the correct message type from a sync committee
|
||||
// subnet.
|
||||
GossipSyncCommitteeMessage = "sync_committee"
|
||||
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
|
||||
AttestationSubnetTopicFormat = "/eth2/%x/beacon_attestation_%d"
|
||||
AttestationSubnetTopicFormat = "/eth2/%x/" + GossipAttestationMessage + "_%d"
|
||||
// SyncCommitteeSubnetTopicFormat is the topic format for the sync committee subnet.
|
||||
SyncCommitteeSubnetTopicFormat = "/eth2/%x/" + GossipSyncCommitteeMessage + "_%d"
|
||||
// BlockSubnetTopicFormat is the topic format for the block subnet.
|
||||
BlockSubnetTopicFormat = "/eth2/%x/beacon_block"
|
||||
// ExitSubnetTopicFormat is the topic format for the voluntary exit subnet.
|
||||
@@ -13,4 +23,6 @@ const (
|
||||
AttesterSlashingSubnetTopicFormat = "/eth2/%x/attester_slashing"
|
||||
// AggregateAndProofSubnetTopicFormat is the topic format for the aggregate and proof subnet.
|
||||
AggregateAndProofSubnetTopicFormat = "/eth2/%x/beacon_aggregate_and_proof"
|
||||
// SyncContributionAndProofSubnetTopicFormat is the topic format for the sync aggregate and proof subnet.
|
||||
SyncContributionAndProofSubnetTopicFormat = "/eth2/%x/sync_committee_contribution_and_proof"
|
||||
)
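The constants above are format strings; a full gossip topic is built by substituting the fork digest (plus a subnet index where applicable) and appending the encoder's protocol suffix. A small sketch of that composition, where the digest and suffix values are illustrative:

package main

import "fmt"

const (
	// Mirrors the constants above.
	GossipSyncCommitteeMessage     = "sync_committee"
	SyncCommitteeSubnetTopicFormat = "/eth2/%x/" + GossipSyncCommitteeMessage + "_%d"
	protocolSuffixSSZSnappy        = "/ssz_snappy"
)

func main() {
	forkDigest := [4]byte{0xb5, 0x30, 0x3f, 0x2a} // illustrative digest
	subnet := uint64(3)
	topic := fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet) + protocolSuffixSSZSnappy
	fmt.Println(topic) // /eth2/b5303f2a/sync_committee_3/ssz_snappy
}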
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
|
||||
@@ -34,7 +34,7 @@ type singleAttestationVerificationFailure struct {
|
||||
// ListPoolAttestations retrieves attestations known by the node but
|
||||
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
|
||||
func (bs *Server) ListPoolAttestations(ctx context.Context, req *ethpb.AttestationsPoolRequest) (*ethpb.AttestationsPoolResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolAttestations")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttestations")
|
||||
defer span.End()
|
||||
|
||||
attestations := bs.AttestationsPool.AggregatedAttestations()
|
||||
@@ -70,7 +70,7 @@ func (bs *Server) ListPoolAttestations(ctx context.Context, req *ethpb.Attestati
|
||||
// SubmitAttestations submits Attestation object to node. If attestation passes all validation
|
||||
// constraints, node MUST publish attestation on appropriate subnet.
|
||||
func (bs *Server) SubmitAttestations(ctx context.Context, req *ethpb.SubmitAttestationsRequest) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitAttestation")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttestation")
|
||||
defer span.End()
|
||||
|
||||
var validAttestations []*eth.Attestation
|
||||
@@ -145,7 +145,7 @@ func (bs *Server) SubmitAttestations(ctx context.Context, req *ethpb.SubmitAttes
|
||||
// ListPoolAttesterSlashings retrieves attester slashings known by the node but
|
||||
// not necessarily incorporated into any block.
|
||||
func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, req *emptypb.Empty) (*ethpb.AttesterSlashingsPoolResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolAttesterSlashings")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttesterSlashings")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
@@ -167,7 +167,7 @@ func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, req *emptypb.Em
|
||||
// SubmitAttesterSlashing submits AttesterSlashing object to node's pool and
|
||||
// if passes validation node MUST broadcast it to network.
|
||||
func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpb.AttesterSlashing) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitAttesterSlashing")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
@@ -197,7 +197,7 @@ func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpb.Atteste
|
||||
// ListPoolProposerSlashings retrieves proposer slashings known by the node
|
||||
// but not necessarily incorporated into any block.
|
||||
func (bs *Server) ListPoolProposerSlashings(ctx context.Context, req *emptypb.Empty) (*ethpb.ProposerSlashingPoolResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolProposerSlashings")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolProposerSlashings")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
@@ -219,7 +219,7 @@ func (bs *Server) ListPoolProposerSlashings(ctx context.Context, req *emptypb.Em
|
||||
// SubmitProposerSlashing submits AttesterSlashing object to node's pool and if
|
||||
// passes validation node MUST broadcast it to network.
|
||||
func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpb.ProposerSlashing) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitProposerSlashing")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
@@ -249,7 +249,7 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpb.Propose
|
||||
// ListPoolVoluntaryExits retrieves voluntary exits known by the node but
|
||||
// not necessarily incorporated into any block.
|
||||
func (bs *Server) ListPoolVoluntaryExits(ctx context.Context, req *emptypb.Empty) (*ethpb.VoluntaryExitsPoolResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolVoluntaryExits")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolVoluntaryExits")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
@@ -272,7 +272,7 @@ func (bs *Server) ListPoolVoluntaryExits(ctx context.Context, req *emptypb.Empty
|
||||
// SubmitVoluntaryExit submits SignedVoluntaryExit object to node's pool
|
||||
// and if passes validation node MUST broadcast it to network.
|
||||
func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpb.SignedVoluntaryExit) (*emptypb.Empty, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitVoluntaryExit")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitVoluntaryExit")
|
||||
defer span.End()
|
||||
|
||||
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
|
||||
|
||||
@@ -37,7 +37,7 @@ var (

// GetIdentity retrieves data about the node's network presence.
func (ns *Server) GetIdentity(ctx context.Context, _ *emptypb.Empty) (*ethpb.IdentityResponse, error) {
ctx, span := trace.StartSpan(ctx, "nodeV1.GetIdentity")
ctx, span := trace.StartSpan(ctx, "nodev1V1.GetIdentity")
defer span.End()

peerId := ns.PeerManager.PeerID().Pretty()

@@ -19,6 +19,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
@@ -49,6 +50,7 @@ go_library(
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"//shared/slotutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
|
||||
@@ -11,16 +11,26 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/pagination"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/version"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)

// BlockContainer represents an instance of
// block along with its relevant metadata.
type BlockContainer struct {
Blk block.SignedBeaconBlock
Root [32]byte
IsCanonical bool
}

// ListBlocks retrieves blocks by root, slot, or epoch.
//
// The server may return multiple blocks in the case that a slot or epoch is

@@ -37,166 +47,319 @@ func (bs *Server) ListBlocks(
|
||||
|
||||
switch q := req.QueryFilter.(type) {
|
||||
case *ethpb.ListBlocksRequest_Epoch:
|
||||
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blocks: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
numBlks := len(blks)
|
||||
if numBlks == 0 {
|
||||
return &ethpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: 0,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
|
||||
}
|
||||
|
||||
returnedBlks := blks[start:end]
|
||||
containers := make([]*ethpb.BeaconBlockContainer, len(returnedBlks))
|
||||
for i, b := range returnedBlks {
|
||||
root, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
phBlk, err := b.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get phase 0 block: %v", err)
|
||||
}
|
||||
containers[i] = &ethpb.BeaconBlockContainer{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: canonical,
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ethpb.ListBlocksResponse{
|
||||
BlockContainers: containers,
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Root:
|
||||
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(q.Root))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve block: %v", err)
|
||||
}
|
||||
if blk == nil || blk.IsNil() {
|
||||
return &ethpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: 0,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}, nil
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
phBlk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is phase 0 block: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ethpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: canonical},
|
||||
},
|
||||
TotalSize: 1,
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
|
||||
case *ethpb.ListBlocksRequest_Slot:
|
||||
hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err)
|
||||
return nil, err
|
||||
}
|
||||
if !hasBlocks {
|
||||
return &ethpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: 0,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
numBlks := len(blks)
|
||||
|
||||
start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
|
||||
}
|
||||
|
||||
returnedBlks := blks[start:end]
|
||||
containers := make([]*ethpb.BeaconBlockContainer, len(returnedBlks))
|
||||
for i, b := range returnedBlks {
|
||||
root, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
phBlk, err := b.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is phase 0 block: %v", err)
|
||||
}
|
||||
containers[i] = &ethpb.BeaconBlockContainer{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: canonical,
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ethpb.ListBlocksResponse{
|
||||
BlockContainers: containers,
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Genesis:
|
||||
genBlk, err := bs.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)
|
||||
}
|
||||
if genBlk == nil || genBlk.IsNil() {
|
||||
return nil, status.Error(codes.Internal, "Could not find genesis block")
|
||||
}
|
||||
root, err := genBlk.Block().HashTreeRoot()
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
phBlk, err := genBlk.PbPhase0Block()
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is phase 0 block: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
containers := []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: true,
|
||||
},
|
||||
}
|
||||
|
||||
return &ethpb.ListBlocksResponse{
|
||||
BlockContainers: containers,
|
||||
TotalSize: int32(1),
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, "Must specify a filter criteria for fetching blocks")
|
||||
}
|
||||
|
||||
// ListBlocksAltair retrieves blocks by root, slot, or epoch.
|
||||
//
|
||||
// The server may return multiple blocks in the case that a slot or epoch is
|
||||
// provided as the filter criteria. The server may return an empty list when
|
||||
// no blocks in their database match the filter criteria. This RPC should
|
||||
// not return NOT_FOUND. Only one filter criteria should be used.
|
||||
func (bs *Server) ListBlocksAltair(
|
||||
ctx context.Context, req *ethpb.ListBlocksRequest,
|
||||
) (*ethpb.ListBlocksResponseAltair, error) {
|
||||
if int(req.PageSize) > cmd.Get().MaxRPCPageSize {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Requested page size %d can not be greater than max size %d",
|
||||
req.PageSize, cmd.Get().MaxRPCPageSize)
|
||||
}
|
||||
|
||||
switch q := req.QueryFilter.(type) {
|
||||
case *ethpb.ListBlocksRequest_Epoch:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ethpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Root:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ethpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
|
||||
case *ethpb.ListBlocksRequest_Slot:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ethpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Genesis:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ethpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, "Must specify a filter criteria for fetching blocks")
|
||||
}
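As a usage sketch (not part of the diff), a caller of ListBlocksAltair might look like the helper below. The function name is made up, the server value and context are assumed to exist, the request shape mirrors the tests further down in this change, and the imports (context, fmt, types, ethpb) follow the ones this file already uses.

func printBlocksAtSlot(ctx context.Context, bs *Server, slot types.Slot) error {
	res, err := bs.ListBlocksAltair(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: slot},
		PageSize:    3,
	})
	if err != nil {
		return err
	}
	for _, ctr := range res.BlockContainers {
		// Each container carries the block root and whether the block is canonical.
		fmt.Printf("root=%#x canonical=%v\n", ctr.BlockRoot, ctr.Canonical)
	}
	return nil
}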

func convertFromV1Containers(ctrs []BlockContainer) ([]*ethpb.BeaconBlockContainerAltair, error) {
protoCtrs := make([]*ethpb.BeaconBlockContainerAltair, len(ctrs))
var err error
for i, c := range ctrs {
protoCtrs[i], err = convertToBlockContainer(c.Blk, c.Root, c.IsCanonical)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block container: %v", err)
}
}
return protoCtrs, nil
}

func convertToBlockContainer(blk block.SignedBeaconBlock, root [32]byte, isCanonical bool) (*ethpb.BeaconBlockContainerAltair, error) {
ctr := &ethpb.BeaconBlockContainerAltair{
BlockRoot: root[:],
Canonical: isCanonical,
}

switch blk.Version() {
case version.Phase0:
rBlk, err := blk.PbPhase0Block()
if err != nil {
return nil, err
}
ctr.Block = &ethpb.BeaconBlockContainerAltair_Phase0Block{Phase0Block: rBlk}
case version.Altair:
rBlk, err := blk.PbAltairBlock()
if err != nil {
return nil, err
}
ctr.Block = &ethpb.BeaconBlockContainerAltair_AltairBlock{AltairBlock: rBlk}
}
return ctr, nil
}

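A small hypothetical fragment showing a wrapped phase 0 block fed through convertToBlockContainer, mirroring the call made in the Altair tests below; the testutil and wrapper helpers are the ones those tests already import, and the fragment is assumed to live inside a function that can return an error.

blk := testutil.NewBeaconBlock()
root, err := blk.Block.HashTreeRoot()
if err != nil {
	return err
}
ctr, err := convertToBlockContainer(wrapper.WrappedPhase0SignedBeaconBlock(blk), root, true)
if err != nil {
	return err
}
// For a phase 0 input the container's oneof holds a Phase0Block; an Altair
// input would populate AltairBlock instead.
_ = ctr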
// ListBlocksForEpoch retrieves all blocks for the provided epoch.
func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]BlockContainer, int, string, error) {
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not get blocks: %v", err)
}

numBlks := len(blks)
if len(blks) == 0 {
return []BlockContainer{}, numBlks, strconv.Itoa(0), nil
}

start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
}

returnedBlks := blks[start:end]
containers := make([]BlockContainer, len(returnedBlks))
for i, b := range returnedBlks {
root, err := b.Block().HashTreeRoot()
if err != nil {
return nil, 0, strconv.Itoa(0), err
}
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
}
containers[i] = BlockContainer{
Blk: b,
Root: root,
IsCanonical: canonical,
}
}

return containers, numBlks, nextPageToken, nil
}

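The pagination arithmetic above is easiest to see with concrete numbers. The fragment below is consistent with the pagination test later in this change (page token "1", page size 3, an epoch containing 8 matching blocks); the blks slice is assumed to hold those 8 blocks and the fragment to sit inside an error-returning function.

start, end, nextPageToken, err := pagination.StartAndEndPage("1", 3, 8)
if err != nil {
	return err
}
// start == 3, end == 6, nextPageToken == "2": the second page of size 3
// covers elements [3:6], and a third page remains.
returned := blks[start:end]
_ = returned
_ = nextPageToken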
// ListBlocksForRoot retrieves the block for the provided root.
func (bs *Server) ListBlocksForRoot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]BlockContainer, int, string, error) {
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(q.Root))
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve block: %v", err)
}
if blk == nil || blk.IsNil() {
return []BlockContainer{}, 0, strconv.Itoa(0), nil
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine block root: %v", err)
}
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
}
return []BlockContainer{{
Blk: blk,
Root: root,
IsCanonical: canonical,
}}, 1, strconv.Itoa(0), nil
}

// ListBlocksForSlot retrieves all blocks for the provided slot.
func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]BlockContainer, int, string, error) {
hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err)
}
if !hasBlocks {
return []BlockContainer{}, 0, strconv.Itoa(0), nil
}

numBlks := len(blks)

start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
}

returnedBlks := blks[start:end]
containers := make([]BlockContainer, len(returnedBlks))
for i, b := range returnedBlks {
root, err := b.Block().HashTreeRoot()
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine block root: %v", err)
}
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
}
containers[i] = BlockContainer{
Blk: b,
Root: root,
IsCanonical: canonical,
}
}
return containers, numBlks, nextPageToken, nil
}

// ListBlocksForGenesis retrieves the genesis block.
func (bs *Server) ListBlocksForGenesis(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Genesis) ([]BlockContainer, int, string, error) {
genBlk, err := bs.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)
}
if genBlk == nil || genBlk.IsNil() {
return []BlockContainer{}, 0, strconv.Itoa(0), status.Error(codes.Internal, "Could not find genesis block")
}
root, err := genBlk.Block().HashTreeRoot()
if err != nil {
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine block root: %v", err)
}
return []BlockContainer{{
Blk: genBlk,
Root: root,
IsCanonical: true,
}}, 1, strconv.Itoa(0), nil
}

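For completeness, a hypothetical genesis query against this endpoint, written as a fragment with the same request shape used by TestServer_ListBlocksAltair_Genesis further down; the server value bs and the context are assumed to exist.

res, err := bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
	QueryFilter: &ethpb.ListBlocksRequest_Genesis{Genesis: true},
})
if err != nil {
	return err
}
// On success exactly one container is returned and it is always marked canonical.
_ = res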
func convertToProto(ctrs []BlockContainer) ([]*ethpb.BeaconBlockContainer, error) {
protoCtrs := make([]*ethpb.BeaconBlockContainer, len(ctrs))
for i, c := range ctrs {
phBlk, err := c.Blk.PbPhase0Block()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get phase 0 block: %v", err)
}
copiedRoot := c.Root
protoCtrs[i] = &ethpb.BeaconBlockContainer{
Block: phBlk,
BlockRoot: copiedRoot[:],
Canonical: c.IsCanonical,
}
}
return protoCtrs, nil
}

// GetChainHead retrieves information about the head of the beacon chain from
|
||||
// the view of the beacon chain node.
|
||||
//
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
@@ -217,7 +216,7 @@ func TestServer_ListBlocks_Pagination(t *testing.T) {
|
||||
Slot: 6}}),
|
||||
BlockRoot: blkContainers[6].BlockRoot,
|
||||
Canonical: blkContainers[6].Canonical}},
|
||||
TotalSize: 1}},
|
||||
TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
|
||||
{req: ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: root6[:]}},
|
||||
res: ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{{Block: testutil.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
@@ -225,7 +224,7 @@ func TestServer_ListBlocks_Pagination(t *testing.T) {
|
||||
Slot: 6}}),
|
||||
BlockRoot: blkContainers[6].BlockRoot,
|
||||
Canonical: blkContainers[6].Canonical}},
|
||||
TotalSize: 1}},
|
||||
TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 0},
|
||||
@@ -394,7 +393,7 @@ func TestServer_GetChainHead(t *testing.T) {
|
||||
pjRoot, err := prevJustifiedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 3, Root: pjRoot[:]},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 2, Root: jRoot[:]},
|
||||
@@ -484,7 +483,7 @@ func TestServer_StreamChainHead_OnHeadUpdated(t *testing.T) {
|
||||
pjRoot, err := prevJustifiedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 3, Root: pjRoot[:]},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 2, Root: jRoot[:]},
|
||||
@@ -747,3 +746,300 @@ func TestServer_GetWeakSubjectivityCheckpoint(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, sRoot[:], c.StateRoot)
|
||||
}
|
||||
|
||||
func TestServer_ListBlocksAltair_NoResults(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
wanted := ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainerAltair, 0),
|
||||
TotalSize: int32(0),
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}
|
||||
res, err := bs.ListBlocksAltair(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{
|
||||
Slot: 0,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
res, err = bs.ListBlocksAltair(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{
|
||||
Slot: 0,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
res, err = bs.ListBlocksAltair(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Root{
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBlocksAltair_Genesis(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
// Should throw an error if no genesis block is found.
|
||||
_, err := bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.ErrorContains(t, "Could not find genesis", err)
|
||||
|
||||
// Should return the proper genesis block if it exists.
|
||||
parentRoot := [32]byte{'a'}
|
||||
blk := testutil.NewBeaconBlock()
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
ctr, err := convertToBlockContainer(wrapper.WrappedPhase0SignedBeaconBlock(blk), root, true)
|
||||
assert.NoError(t, err)
|
||||
wanted := ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainerAltair{ctr},
|
||||
NextPageToken: "0",
|
||||
TotalSize: 1,
|
||||
}
|
||||
res, err := bs.ListBlocksAltair(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBlocksAltair_Genesis_MultiBlocks(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
// Should return the proper genesis block if it exists.
|
||||
parentRoot := [32]byte{1, 2, 3}
|
||||
blk := testutil.NewBeaconBlock()
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
count := types.Slot(100)
|
||||
blks := make([]block.SignedBeaconBlock, count)
|
||||
for i := types.Slot(0); i < count; i++ {
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = i
|
||||
require.NoError(t, err)
|
||||
blks[i] = wrapper.WrappedPhase0SignedBeaconBlock(b)
|
||||
}
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
// Should throw an error if more than one blk returned.
|
||||
_, err = bs.ListBlocksAltair(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestServer_ListBlocksAltair_Pagination(t *testing.T) {
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
db := dbTest.SetupDB(t)
|
||||
chain := &chainMock.ChainService{
|
||||
CanonicalRoots: map[[32]byte]bool{},
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
count := types.Slot(100)
|
||||
blks := make([]block.SignedBeaconBlock, count)
|
||||
blkContainers := make([]*ethpb.BeaconBlockContainerAltair, count)
|
||||
for i := types.Slot(0); i < count; i++ {
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = i
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
chain.CanonicalRoots[root] = true
|
||||
blks[i] = wrapper.WrappedPhase0SignedBeaconBlock(b)
|
||||
ctr, err := convertToBlockContainer(blks[i], root, true)
|
||||
require.NoError(t, err)
|
||||
blkContainers[i] = ctr
|
||||
}
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
orphanedBlk := testutil.NewBeaconBlock()
|
||||
orphanedBlk.Block.Slot = 300
|
||||
orphanedBlkRoot, err := orphanedBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(orphanedBlk)))
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
CanonicalFetcher: chain,
|
||||
}
|
||||
|
||||
root6, err := blks[6].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
req *ethpb.ListBlocksRequest
|
||||
res *ethpb.ListBlocksResponseAltair
|
||||
}{
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{Slot: 5},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: ðpb.BeaconBlockContainerAltair_Phase0Block{Phase0Block: testutil.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 5}})},
|
||||
BlockRoot: blkContainers[5].BlockRoot,
|
||||
Canonical: blkContainers[5].Canonical}},
|
||||
NextPageToken: "",
|
||||
TotalSize: 1,
|
||||
},
|
||||
},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Root{Root: root6[:]},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: ðpb.BeaconBlockContainerAltair_Phase0Block{
|
||||
Phase0Block: testutil.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 6}})},
|
||||
BlockRoot: blkContainers[6].BlockRoot,
|
||||
Canonical: blkContainers[6].Canonical}},
|
||||
TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
|
||||
{req: ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: root6[:]}},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: ðpb.BeaconBlockContainerAltair_Phase0Block{
|
||||
Phase0Block: testutil.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 6}})},
|
||||
BlockRoot: blkContainers[6].BlockRoot,
|
||||
Canonical: blkContainers[6].Canonical}},
|
||||
TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 0},
|
||||
PageSize: 100},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: blkContainers[0:params.BeaconConfig().SlotsPerEpoch],
|
||||
NextPageToken: "",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(1),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 5},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: blkContainers[43:46],
|
||||
NextPageToken: "2",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(1),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 11},
|
||||
PageSize: 7},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: blkContainers[95:96],
|
||||
NextPageToken: "",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Epoch{Epoch: 12},
|
||||
PageSize: 4},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: blkContainers[96:100],
|
||||
NextPageToken: "",
|
||||
TotalSize: int32(params.BeaconConfig().SlotsPerEpoch / 2)}},
|
||||
{req: ðpb.ListBlocksRequest{
|
||||
PageToken: strconv.Itoa(0),
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{Slot: 300},
|
||||
PageSize: 3},
|
||||
res: ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: ðpb.BeaconBlockContainerAltair_Phase0Block{
|
||||
Phase0Block: testutil.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 300}})},
|
||||
BlockRoot: orphanedBlkRoot[:],
|
||||
Canonical: false}},
|
||||
NextPageToken: "",
|
||||
TotalSize: 1}},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) {
|
||||
res, err := bs.ListBlocksAltair(ctx, test.req)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, res, test.res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBlocksAltair_Errors(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1)
|
||||
|
||||
wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize)
|
||||
req := ðpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
|
||||
_, err := bs.ListBlocks(ctx, req)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
|
||||
wanted = "Must specify a filter criteria for fetching"
|
||||
req = ðpb.ListBlocksRequest{}
|
||||
_, err = bs.ListBlocksAltair(ctx, req)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Slot{Slot: 0}}
|
||||
res, err := bs.ListBlocksAltair(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Slot{}}
|
||||
res, err = bs.ListBlocksAltair(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
|
||||
res, err = bs.ListBlocksAltair(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
|
||||
req = ðpb.ListBlocksRequest{QueryFilter: ðpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
|
||||
res, err = bs.ListBlocksAltair(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
|
||||
assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
|
||||
}
|
||||
|
||||
@@ -20,9 +20,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Server defines a server implementation of the gRPC Beacon Chain service,
|
||||
@@ -43,14 +40,10 @@ type Server struct {
|
||||
Broadcaster p2p.Broadcaster
|
||||
AttestationsPool attestations.Pool
|
||||
SlashingsPool slashings.PoolManager
|
||||
CanonicalStateChan chan *statepb.BeaconState
|
||||
CanonicalStateChan chan *ethpb.BeaconState
|
||||
ChainStartChan chan time.Time
|
||||
ReceivedAttestationsBuffer chan *ethpb.Attestation
|
||||
CollectedAttestationsBuffer chan []*ethpb.Attestation
|
||||
StateGen stategen.StateManager
|
||||
SyncChecker sync.Checker
|
||||
}
|
||||
|
||||
func (bs *Server) ListBlocksAltair(ctx context.Context, request *ethpb.ListBlocksRequest) (*ethpb.ListBlocksResponseAltair, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/cmd"
|
||||
"github.com/prysmaticlabs/prysm/shared/pagination"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
@@ -521,14 +523,29 @@ func (bs *Server) GetValidatorParticipation(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}

v, b, err := precompute.New(ctx, beaconState)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up pre compute instance: %v", err)
}
_, b, err = precompute.ProcessAttestations(ctx, beaconState, v, b)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
var v []*precompute.Validator
var b *precompute.Balance
switch beaconState.Version() {
case version.Phase0:
v, b, err = precompute.New(ctx, beaconState)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up pre compute instance: %v", err)
}
_, b, err = precompute.ProcessAttestations(ctx, beaconState, v, b)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
case version.Altair:
v, b, err = altair.InitializeEpochValidators(ctx, beaconState)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up altair pre compute instance: %v", err)
}
_, b, err = altair.ProcessEpochParticipation(ctx, beaconState, b, v)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
default:
return nil, status.Errorf(codes.Internal, "Invalid state type retrieved with a version of %d", beaconState.Version())
}

p := &ethpb.ValidatorParticipationResponse{
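The hunk above introduces a dispatch on the state version. The helper below restates that dispatch in isolation as a sketch only: the epochPrecompute name is made up, and the state parameter type is written here as an assumption that it matches the interface the surrounding function already receives.

func epochPrecompute(ctx context.Context, st iface.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
	switch st.Version() {
	case version.Phase0:
		// Phase 0 keeps the existing precompute path.
		v, b, err := precompute.New(ctx, st)
		if err != nil {
			return nil, nil, err
		}
		_, b, err = precompute.ProcessAttestations(ctx, st, v, b)
		return v, b, err
	case version.Altair:
		// Altair states are seeded through the altair package instead.
		v, b, err := altair.InitializeEpochValidators(ctx, st)
		if err != nil {
			return nil, nil, err
		}
		_, b, err = altair.ProcessEpochParticipation(ctx, st, b, v)
		return v, b, err
	default:
		return nil, nil, fmt.Errorf("unsupported state version %d", st.Version())
	}
}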
@@ -662,19 +679,41 @@ func (bs *Server) GetValidatorPerformance(
|
||||
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
|
||||
}
|
||||
}
|
||||
vp, bp, err := precompute.New(ctx, headState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
validatorSummary := []*precompute.Validator{}
|
||||
switch headState.Version() {
|
||||
case version.Phase0:
|
||||
vp, bp, err := precompute.New(ctx, headState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validatorSummary = vp
|
||||
case version.Altair:
|
||||
vp, bp, err := altair.InitializeEpochValidators(ctx, headState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validatorSummary = vp
|
||||
}
|
||||
vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validatorSummary := vp
|
||||
|
||||
responseCap := len(req.Indices) + len(req.PublicKeys)
|
||||
validatorIndices := make([]types.ValidatorIndex, 0, responseCap)
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/cmd"
|
||||
@@ -1308,7 +1307,7 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServer_GetValidatorQueue_PendingActivation(t *testing.T) {
|
||||
headState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
headState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ActivationEpoch: helpers.ActivationExitEpoch(0),
|
||||
@@ -1407,7 +1406,7 @@ func TestServer_GetValidatorQueue_ExitedValidatorLeavesQueue(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServer_GetValidatorQueue_PendingExit(t *testing.T) {
|
||||
headState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
headState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ActivationEpoch: 0,
|
||||
@@ -1541,7 +1540,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
atts := []*statepb.PendingAttestation{{
|
||||
atts := []*ethpb.PendingAttestation{{
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{}),
|
||||
InclusionDelay: 1,
|
||||
AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
|
||||
@@ -1558,8 +1557,8 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
|
||||
b.Block.Slot = 16
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
bRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
|
||||
@@ -1620,7 +1619,7 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
atts := []*statepb.PendingAttestation{{
|
||||
atts := []*ethpb.PendingAttestation{{
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{}),
|
||||
InclusionDelay: 1,
|
||||
AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
|
||||
@@ -1636,8 +1635,8 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
|
||||
b := testutil.NewBeaconBlock()
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
bRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
|
||||
@@ -1700,9 +1699,9 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
|
||||
headState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
|
||||
atts := make([]*statepb.PendingAttestation, 3)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
atts[i] = &statepb.PendingAttestation{
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
@@ -2067,11 +2066,11 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) {
|
||||
require.NoError(t, beaconState.SetBlockRoots(br))
|
||||
att2.Data.Target.Root = rt[:]
|
||||
att2.Data.BeaconBlockRoot = newRt[:]
|
||||
err = beaconState.AppendPreviousEpochAttestations(&statepb.PendingAttestation{
|
||||
err = beaconState.AppendPreviousEpochAttestations(ðpb.PendingAttestation{
|
||||
Data: att1.Data, AggregationBits: bf, InclusionDelay: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{
|
||||
err = beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{
|
||||
Data: att2.Data, AggregationBits: bf, InclusionDelay: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -6,12 +6,15 @@ go_library(
|
||||
"aggregator.go",
|
||||
"assignments.go",
|
||||
"attester.go",
|
||||
"blocks.go",
|
||||
"exit.go",
|
||||
"log.go",
|
||||
"proposer.go",
|
||||
"proposer_attestations.go",
|
||||
"proposer_sync_aggregate.go",
|
||||
"server.go",
|
||||
"status.go",
|
||||
"sync_committee.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/validator",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
@@ -19,6 +22,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
@@ -29,6 +33,7 @@ go_library(
|
||||
"//beacon-chain/core/state/interop:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
@@ -41,28 +46,33 @@ go_library(
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//shared/aggregation:go_default_library",
|
||||
"//shared/aggregation/attestations:go_default_library",
|
||||
"//shared/aggregation/sync_contribution:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/copyutil:go_default_library",
|
||||
"//shared/depositutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/p2putils:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/rand:go_default_library",
|
||||
"//shared/slotutil:go_default_library",
|
||||
"//shared/timeutils:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -76,9 +86,11 @@ go_test(
|
||||
"attester_test.go",
|
||||
"exit_test.go",
|
||||
"proposer_attestations_test.go",
|
||||
"proposer_sync_aggregate_test.go",
|
||||
"proposer_test.go",
|
||||
"server_test.go",
|
||||
"status_test.go",
|
||||
"sync_committee_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
@@ -86,6 +98,7 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
@@ -95,6 +108,7 @@ go_test(
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/powchain/testing:go_default_library",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/rand"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@@ -136,6 +138,10 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute next committee assignments: %v", err)
|
||||
}
|
||||
|
||||
// Post Altair transition when the beacon state is Altair compatible, and requested epoch is
|
||||
// post fork boundary.
|
||||
postAltairTransition := s.Version() == version.Altair && req.Epoch >= params.BeaconConfig().AltairForkEpoch
|
||||
|
||||
validatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))
|
||||
nextValidatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))
|
||||
for _, pubKey := range req.PublicKeys {
|
||||
@@ -178,6 +184,45 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
|
||||
vStatus, _ := vs.validatorStatus(ctx, s, pubKey)
|
||||
assignment.Status = vStatus.Status
|
||||
}
|
||||
|
||||
// Are the validators in current or next epoch sync committee.
|
||||
if postAltairTransition {
|
||||
syncCommPeriod := helpers.SyncCommitteePeriod(req.Epoch)
|
||||
csc, err := s.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get current sync committee: %v", err)
|
||||
}
|
||||
assignment.IsSyncCommittee, err = helpers.IsCurrentPeriodSyncCommittee(s, idx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
|
||||
}
|
||||
if assignment.IsSyncCommittee {
|
||||
assignValidatorToSyncSubnet(req.Epoch, syncCommPeriod, pubKey, csc, assignment.Status)
|
||||
}
|
||||
|
||||
nextSlotEpoch := helpers.SlotToEpoch(s.Slot() + 1)
|
||||
currentEpoch := helpers.CurrentEpoch(s)
|
||||
|
||||
// Next epoch sync committee duty is assigned with next period sync committee only during
|
||||
// sync period epoch boundary (ie. EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1). Else wise
|
||||
// next epoch sync committee duty is the same as current epoch.
|
||||
if helpers.SyncCommitteePeriod(nextSlotEpoch) == helpers.SyncCommitteePeriod(currentEpoch)+1 {
|
||||
nsc, err := s.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get next sync committee: %v", err)
|
||||
}
|
||||
nextAssignment.IsSyncCommittee, err = helpers.IsNextPeriodSyncCommittee(s, idx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
|
||||
}
|
||||
if nextAssignment.IsSyncCommittee {
|
||||
assignValidatorToSyncSubnet(req.Epoch, syncCommPeriod+1, pubKey, nsc, nextAssignment.Status)
|
||||
}
|
||||
} else {
|
||||
nextAssignment.IsSyncCommittee = assignment.IsSyncCommittee
|
||||
}
|
||||
}
|
||||
|
||||
validatorAssignments = append(validatorAssignments, assignment)
|
||||
nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
|
||||
// Assign relevant validator to subnet.
|
||||
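The "sync period epoch boundary" condition in the hunk above is easier to see with numbers. A tiny standalone check, assuming mainnet's 256 epochs per sync committee period: the next-period branch only fires when the epoch of the next slot rolls over into a new period.

package main

import "fmt"

func syncCommitteePeriod(epoch uint64) uint64 { return epoch / 256 }

func main() {
	// Epoch 255 is still period 0; epoch 256 is the first epoch of period 1,
	// so duties computed at the very end of epoch 255 pick up the next committee.
	fmt.Println(syncCommitteePeriod(255), syncCommitteePeriod(256)) // 0 1
}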
@@ -217,3 +262,51 @@ func (vs *Server) AssignValidatorToSubnet(pubkey []byte, status ethpb.ValidatorS
|
||||
totalDuration := epochDuration * time.Duration(assignedDuration)
|
||||
cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
|
||||
}
|
||||
|
||||
// assignValidatorToSyncSubnet checks the status and pubkey of a particular validator
|
||||
// to discern whether persistent subnets need to be registered for them.
|
||||
func assignValidatorToSyncSubnet(currEpoch types.Epoch, syncPeriod uint64, pubkey []byte,
|
||||
syncCommittee *ethpb.SyncCommittee, status ethpb.ValidatorStatus) {
|
||||
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
|
||||
return
|
||||
}
|
||||
startEpoch := types.Epoch(syncPeriod) * params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
currPeriod := helpers.SyncCommitteePeriod(currEpoch)
|
||||
endEpoch := startEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
_, _, ok, expTime := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(pubkey, startEpoch)
|
||||
if ok && expTime.After(timeutils.Now()) {
|
||||
return
|
||||
}
|
||||
firstValidEpoch, err := startEpoch.SafeSub(params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
if err != nil {
|
||||
firstValidEpoch = 0
|
||||
}
|
||||
// If we are processing for a future period, we only
|
||||
// add to the relevant subscription once we are at the valid
|
||||
// bound.
|
||||
if syncPeriod != currPeriod && currEpoch < firstValidEpoch {
|
||||
return
|
||||
}
|
||||
subs := subnetsFromCommittee(pubkey, syncCommittee)
|
||||
// Handle overflow in the event current epoch is less
|
||||
// than end epoch. This is an impossible condition, so
|
||||
// it is a defensive check.
|
||||
epochsToWatch, err := endEpoch.SafeSub(uint64(currEpoch))
|
||||
if err != nil {
|
||||
epochsToWatch = 0
|
||||
}
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
totalDuration := epochDuration * time.Duration(epochsToWatch) * time.Second
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey, startEpoch, subs, totalDuration)
|
||||
}
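For orientation, here is a standalone sketch of the subscription window computed above, under assumed mainnet-style constants (256 epochs per sync committee period, 4 sync subnets, 32 slots of 12 seconds per epoch); the epoch values and variable names are illustrative only and are not taken from this diff.

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		epochsPerPeriod uint64 = 256 // assumed EPOCHS_PER_SYNC_COMMITTEE_PERIOD
		syncSubnetCount uint64 = 4   // assumed SYNC_COMMITTEE_SUBNET_COUNT
		slotsPerEpoch   uint64 = 32
		secondsPerSlot  uint64 = 12
	)
	var currEpoch, syncPeriod uint64 = 250, 1 // an epoch shortly before period 1 begins

	startEpoch := syncPeriod * epochsPerPeriod // 256
	endEpoch := startEpoch + epochsPerPeriod   // 512

	// Subscriptions for a future period only begin a few epochs ahead of it.
	firstValidEpoch := uint64(0)
	if startEpoch > syncSubnetCount {
		firstValidEpoch = startEpoch - syncSubnetCount // 252
	}

	// Floor at zero, mirroring the defensive SafeSub call above.
	epochsToWatch := uint64(0)
	if endEpoch > currEpoch {
		epochsToWatch = endEpoch - currEpoch // 262
	}
	total := time.Duration(epochsToWatch*slotsPerEpoch*secondsPerSlot) * time.Second

	fmt.Println(firstValidEpoch, epochsToWatch, total) // 252 262 27h56m48s
}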
|
||||
|
||||
// subnetsFromCommittee retrieves the relevant subnets for the chosen validator.
|
||||
func subnetsFromCommittee(pubkey []byte, comm *ethpb.SyncCommittee) []uint64 {
|
||||
positions := make([]uint64, 0)
|
||||
for i, pkey := range comm.Pubkeys {
|
||||
if bytes.Equal(pubkey, pkey) {
|
||||
positions = append(positions, uint64(i)/(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount))
|
||||
}
|
||||
}
|
||||
return positions
|
||||
}
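The division above maps a committee position onto one of SYNC_COMMITTEE_SUBNET_COUNT subnets of size SYNC_COMMITTEE_SIZE / SYNC_COMMITTEE_SUBNET_COUNT. A minimal sketch with assumed mainnet values (512 / 4 = 128 positions per subnet); the sample positions are made up for illustration.

package main

import "fmt"

func main() {
	const syncCommitteeSize, subnetCount uint64 = 512, 4
	positionsPerSubnet := syncCommitteeSize / subnetCount // 128

	// A validator may occupy several committee positions; each maps to one subnet.
	for _, pos := range []uint64{0, 127, 128, 511} {
		fmt.Printf("position %d -> subnet %d\n", pos, pos/positionsPerSubnet)
	}
}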
|
||||
|
||||
@@ -10,9 +10,12 @@ import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -94,6 +97,109 @@ func TestGetDuties_OK(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
|
||||
params.UseMainnetConfig()
|
||||
defer params.UseMinimalConfig()
|
||||
|
||||
bc := params.BeaconConfig()
|
||||
bc.AltairForkEpoch = types.Epoch(0)
|
||||
params.OverrideBeaconConfig(bc)
|
||||
|
||||
genesis := testutil.NewBeaconBlock()
|
||||
deposits, _, err := testutil.DeterministicDepositsAndKeys(params.BeaconConfig().SyncCommitteeSize)
|
||||
require.NoError(t, err)
|
||||
eth1Data, err := testutil.DeterministicEth1Data(len(deposits))
|
||||
require.NoError(t, err)
|
||||
bs, err := testutil.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
|
||||
h := ðpb.BeaconBlockHeader{
|
||||
StateRoot: bytesutil.PadTo([]byte{'a'}, 32),
|
||||
ParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte{'c'}, 32),
|
||||
}
|
||||
require.NoError(t, bs.SetLatestBlockHeader(h))
|
||||
require.NoError(t, err, "Could not setup genesis bs")
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
|
||||
syncCommittee, err := altair.NextSyncCommittee(context.Background(), bs)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee))
|
||||
pubKeys := make([][]byte, len(deposits))
|
||||
indices := make([]uint64, len(deposits))
|
||||
for i := 0; i < len(deposits); i++ {
|
||||
pubKeys[i] = deposits[i].Data.PublicKey
|
||||
indices[i] = uint64(i)
|
||||
}
|
||||
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)-1))
|
||||
require.NoError(t, helpers.UpdateSyncCommitteeCache(bs))
|
||||
|
||||
pubkeysAs48ByteType := make([][48]byte, len(pubKeys))
|
||||
for i, pk := range pubKeys {
|
||||
pubkeysAs48ByteType[i] = bytesutil.ToBytes48(pk)
|
||||
}
|
||||
|
||||
slot := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * params.BeaconConfig().SecondsPerSlot
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
|
||||
}
|
||||
vs := &Server{
|
||||
HeadFetcher: chain,
|
||||
TimeFetcher: chain,
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
// Test the first validator in registry.
|
||||
req := ðpb.DutiesRequest{
|
||||
PublicKeys: [][]byte{deposits[0].Data.PublicKey},
|
||||
}
|
||||
res, err := vs.GetDuties(context.Background(), req)
|
||||
require.NoError(t, err, "Could not call epoch committee assignment")
|
||||
if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Assigned slot %d can't be higher than %d",
|
||||
res.CurrentEpochDuties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
// Test the last validator in registry.
|
||||
lastValidatorIndex := params.BeaconConfig().SyncCommitteeSize - 1
|
||||
req = ðpb.DutiesRequest{
|
||||
PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey},
|
||||
}
|
||||
res, err = vs.GetDuties(context.Background(), req)
|
||||
require.NoError(t, err, "Could not call epoch committee assignment")
|
||||
if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Assigned slot %d can't be higher than %d",
|
||||
res.CurrentEpochDuties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
// We request for duties for all validators.
|
||||
req = ðpb.DutiesRequest{
|
||||
PublicKeys: pubKeys,
|
||||
Epoch: 0,
|
||||
}
|
||||
res, err = vs.GetDuties(context.Background(), req)
|
||||
require.NoError(t, err, "Could not call epoch committee assignment")
|
||||
for i := 0; i < len(res.CurrentEpochDuties); i++ {
|
||||
assert.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
|
||||
}
|
||||
for i := 0; i < len(res.CurrentEpochDuties); i++ {
|
||||
assert.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
|
||||
// Current epoch and next epoch duties should be equal before the sync period epoch boundary.
|
||||
assert.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
|
||||
}
|
||||
|
||||
// Current epoch and next epoch duties should not be equal at the sync period epoch boundary.
|
||||
req = ðpb.DutiesRequest{
|
||||
PublicKeys: pubKeys,
|
||||
Epoch: params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1,
|
||||
}
|
||||
res, err = vs.GetDuties(context.Background(), req)
|
||||
require.NoError(t, err, "Could not call epoch committee assignment")
|
||||
for i := 0; i < len(res.CurrentEpochDuties); i++ {
|
||||
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
|
||||
}
|
||||
}
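The boundary behaviour this test checks can be reproduced with plain arithmetic. A sketch assuming a 256-epoch sync committee period; the helper below is a simplification of helpers.SyncCommitteePeriod, not the real implementation.

package main

import "fmt"

const epochsPerSyncCommitteePeriod = 256 // assumed mainnet value

func syncCommitteePeriod(epoch uint64) uint64 { return epoch / epochsPerSyncCommitteePeriod }

func main() {
	for _, epoch := range []uint64{0, 254, 255} {
		// Treat the state as sitting in the last slot of the epoch, so the next
		// slot belongs to epoch+1; duties switch only when that crosses a period.
		nextPeriodDuties := syncCommitteePeriod(epoch+1) == syncCommitteePeriod(epoch)+1
		fmt.Printf("epoch %d: next epoch uses next period committee? %v\n", epoch, nextPeriodDuties)
	}
}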
|
||||
|
||||
func TestGetDuties_SlotOutOfUpperBound(t *testing.T) {
|
||||
chain := &mockChain.ChainService{
|
||||
Genesis: time.Now(),
|
||||
@@ -348,6 +454,28 @@ func TestAssignValidatorToSubnet(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssignValidatorToSyncSubnet(t *testing.T) {
|
||||
k := pubKey(3)
|
||||
committee := make([][]byte, 0)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
committee = append(committee, pubKey(uint64(i)))
|
||||
}
|
||||
sCommittee := ðpb.SyncCommittee{
|
||||
Pubkeys: committee,
|
||||
}
|
||||
assignValidatorToSyncSubnet(0, 0, k, sCommittee, ethpb.ValidatorStatus_ACTIVE)
|
||||
coms, _, ok, exp := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(k, 0)
|
||||
require.Equal(t, true, ok, "No cache entry found for validator")
|
||||
assert.Equal(t, uint64(1), uint64(len(coms)))
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
totalTime := time.Duration(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * epochDuration * time.Second
|
||||
receivedTime := time.Until(exp.Round(time.Second)).Round(time.Second)
|
||||
if receivedTime < totalTime {
|
||||
t.Fatalf("Expiration time of %f was less than expected duration of %f ", receivedTime.Seconds(), totalTime.Seconds())
|
||||
}
|
||||
}
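For reference, the lower bound the test asserts works out to roughly 27 hours under assumed mainnet parameters (256 epochs per period, 32 slots per epoch, 12 seconds per slot); the numbers below are illustrative only.

package main

import (
	"fmt"
	"time"
)

func main() {
	epochDuration := time.Duration(32*12) * time.Second // one epoch of wall-clock time
	total := 256 * epochDuration                        // one full sync committee period
	fmt.Println(total)                                  // 27h18m24s
}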
|
||||
|
||||
func BenchmarkCommitteeAssignment(b *testing.B) {
|
||||
|
||||
genesis := testutil.NewBeaconBlock()
|
||||
|
||||
beacon-chain/rpc/prysm/v1alpha1/validator/blocks.go (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// StreamBlocksAltair streams blocks to clients every time a block is received by the beacon node.
|
||||
func (bs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethpb.BeaconNodeValidator_StreamBlocksAltairServer) error {
|
||||
blocksChannel := make(chan *feed.Event, 1)
|
||||
var blockSub event.Subscription
|
||||
if req.VerifiedOnly {
|
||||
blockSub = bs.StateNotifier.StateFeed().Subscribe(blocksChannel)
|
||||
} else {
|
||||
blockSub = bs.BlockNotifier.BlockFeed().Subscribe(blocksChannel)
|
||||
}
|
||||
defer blockSub.Unsubscribe()
|
||||
|
||||
for {
|
||||
select {
|
||||
case blockEvent := <-blocksChannel:
|
||||
if req.VerifiedOnly {
|
||||
if blockEvent.Type == statefeed.BlockProcessed {
|
||||
data, ok := blockEvent.Data.(*statefeed.BlockProcessedData)
|
||||
if !ok || data == nil {
|
||||
continue
|
||||
}
|
||||
b := ðpb.StreamBlocksResponse{}
|
||||
switch data.SignedBlock.Version() {
|
||||
case version.Phase0:
|
||||
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
|
||||
continue
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
|
||||
case version.Altair:
|
||||
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
|
||||
continue
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
|
||||
}
|
||||
|
||||
if err := stream.Send(b); err != nil {
|
||||
return status.Errorf(codes.Unavailable, "Could not send over stream: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if blockEvent.Type == blockfeed.ReceivedBlock {
|
||||
data, ok := blockEvent.Data.(*blockfeed.ReceivedBlockData)
|
||||
if !ok {
|
||||
// Got bad data over the stream.
|
||||
continue
|
||||
}
|
||||
if data.SignedBlock == nil {
|
||||
// One nil block shouldn't stop the stream.
|
||||
continue
|
||||
}
|
||||
headState, err := bs.HeadFetcher.HeadState(bs.Ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block().Slot()).Error("Could not get head state")
|
||||
continue
|
||||
}
|
||||
signed := data.SignedBlock
|
||||
if err := blocks.VerifyBlockSignature(headState, signed.Block().ProposerIndex(), signed.Signature(), signed.Block().HashTreeRoot); err != nil {
|
||||
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block().Slot()).Error("Could not verify block signature")
|
||||
continue
|
||||
}
|
||||
b := ðpb.StreamBlocksResponse{}
|
||||
switch data.SignedBlock.Version() {
|
||||
case version.Phase0:
|
||||
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
|
||||
continue
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
|
||||
case version.Altair:
|
||||
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
|
||||
continue
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
|
||||
}
|
||||
if err := stream.Send(b); err != nil {
|
||||
return status.Errorf(codes.Unavailable, "Could not send over stream: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-blockSub.Err():
|
||||
return status.Error(codes.Aborted, "Subscriber closed, exiting goroutine")
|
||||
case <-bs.Ctx.Done():
|
||||
return status.Error(codes.Canceled, "Context canceled")
|
||||
case <-stream.Context().Done():
|
||||
return status.Error(codes.Canceled, "Context canceled")
|
||||
}
|
||||
}
|
||||
}
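A hedged client-side sketch of consuming this stream. It assumes the standard protoc-generated gRPC client for the BeaconNodeValidator service (NewBeaconNodeValidatorClient, Recv, and the oneof accessors GetPhase0Block/GetAltairBlock) and an illustrative endpoint address; none of these specifics are asserted by the diff itself.

package main

import (
	"context"
	"log"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical local beacon node gRPC endpoint.
	conn, err := grpc.Dial("127.0.0.1:4000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := ethpb.NewBeaconNodeValidatorClient(conn)
	stream, err := client.StreamBlocksAltair(context.Background(), &ethpb.StreamBlocksRequest{VerifiedOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	for {
		res, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		// The response carries either a phase0 or an Altair block depending on the fork.
		if blk := res.GetAltairBlock(); blk != nil {
			log.Printf("altair block at slot %d", blk.Block.Slot)
			continue
		}
		if blk := res.GetPhase0Block(); blk != nil {
			log.Printf("phase0 block at slot %d", blk.Block.Slot)
		}
	}
}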
|
||||
@@ -24,6 +24,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
attaggregation "github.com/prysmaticlabs/prysm/shared/aggregation/attestations"
|
||||
"github.com/prysmaticlabs/prysm/shared/aggregation/sync_contribution"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
@@ -31,6 +33,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/rand"
|
||||
"github.com/prysmaticlabs/prysm/shared/trieutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
bytesutil2 "github.com/wealdtech/go-bytesutil"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -41,16 +44,31 @@ var eth1DataNotification bool
|
||||
|
||||
const eth1dataTimeout = 2 * time.Second
|
||||
|
||||
// skipcq: SCC-U1000
|
||||
type eth1DataSingleVote struct {
|
||||
eth1Data *ethpb.Eth1Data
|
||||
blockHeight *big.Int
|
||||
}
|
||||
|
||||
// skipcq: SCC-U1000
|
||||
type eth1DataAggregatedVote struct {
|
||||
data eth1DataSingleVote
|
||||
votes int
|
||||
}
|
||||
|
||||
// BlockData required to create a beacon block.
|
||||
type BlockData struct {
|
||||
ParentRoot []byte
|
||||
Graffiti [32]byte
|
||||
ProposerIdx types.ValidatorIndex
|
||||
Eth1Data *ethpb.Eth1Data
|
||||
Deposits []*ethpb.Deposit
|
||||
Attestations []*ethpb.Attestation
|
||||
ProposerSlashings []*ethpb.ProposerSlashing
|
||||
AttesterSlashings []*ethpb.AttesterSlashing
|
||||
VoluntaryExits []*ethpb.SignedVoluntaryExit
|
||||
}
|
||||
|
||||
// GetBlock is called by a proposer during its assigned slot to request a block to sign
|
||||
// by passing in the slot and the signed randao reveal of the slot.
|
||||
func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlock, error) {
|
||||
@@ -58,6 +76,51 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))
|
||||
|
||||
blkData, err := vs.BuildBlockData(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use zero hash as stub for state root to compute later.
|
||||
stateRoot := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
blk := ðpb.BeaconBlock{
|
||||
Slot: req.Slot,
|
||||
ParentRoot: blkData.ParentRoot,
|
||||
StateRoot: stateRoot,
|
||||
ProposerIndex: blkData.ProposerIdx,
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
Eth1Data: blkData.Eth1Data,
|
||||
Deposits: blkData.Deposits,
|
||||
Attestations: blkData.Attestations,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
ProposerSlashings: blkData.ProposerSlashings,
|
||||
AttesterSlashings: blkData.AttesterSlashings,
|
||||
VoluntaryExits: blkData.VoluntaryExits,
|
||||
Graffiti: blkData.Graffiti[:],
|
||||
},
|
||||
}
|
||||
|
||||
// Compute state root with the newly constructed block.
|
||||
stateRoot, err = vs.ComputeStateRoot(
|
||||
ctx, wrapper.WrappedPhase0SignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
interop.WriteBlockToDisk(wrapper.WrappedPhase0SignedBeaconBlock(ðpb.SignedBeaconBlock{Block: blk}), true /*failed*/)
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
|
||||
}
|
||||
blk.StateRoot = stateRoot
|
||||
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// BuildBlockData for creating a new beacon block, so that this method can be shared across forks.
|
||||
func (vs *Server) BuildBlockData(ctx context.Context, req *ethpb.BlockRequest) (*BlockData, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.BuildBlockData")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
@@ -90,21 +153,18 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
|
||||
return nil, status.Errorf(codes.Internal, "Could not get ETH1 data: %v", err)
|
||||
}
|
||||
|
||||
// Pack ETH1 deposits which have not been included in the beacon chain.
|
||||
// Pack ETH1 Deposits which have not been included in the beacon chain.
|
||||
deposits, err := vs.deposits(ctx, head, eth1Data)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "Could not get ETH1 Deposits: %v", err)
|
||||
}
|
||||
|
||||
// Pack aggregated attestations which have not been included in the beacon chain.
|
||||
// Pack aggregated Attestations which have not been included in the beacon chain.
|
||||
atts, err := vs.packAttestations(ctx, head)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get attestations to pack into block: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "Could not get Attestations to pack into block: %v", err)
|
||||
}
|
||||
|
||||
// Use zero hash as stub for state root to compute later.
|
||||
stateRoot := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
graffiti := bytesutil.ToBytes32(req.Graffiti)
|
||||
|
||||
// Calculate new proposer index.
|
||||
@@ -113,38 +173,30 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
|
||||
return nil, status.Errorf(codes.Internal, "Could not calculate proposer index %v", err)
|
||||
}
|
||||
|
||||
blk := ðpb.BeaconBlock{
|
||||
Slot: req.Slot,
|
||||
ParentRoot: parentRoot,
|
||||
StateRoot: stateRoot,
|
||||
ProposerIndex: idx,
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
Eth1Data: eth1Data,
|
||||
Deposits: deposits,
|
||||
Attestations: atts,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/),
|
||||
AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/),
|
||||
VoluntaryExits: vs.ExitPool.PendingExits(head, req.Slot, false /*noLimit*/),
|
||||
Graffiti: graffiti[:],
|
||||
},
|
||||
}
|
||||
|
||||
// Compute state root with the newly constructed block.
|
||||
stateRoot, err = vs.computeStateRoot(ctx, wrapper.WrappedPhase0SignedBeaconBlock(ðpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)}))
|
||||
if err != nil {
|
||||
interop.WriteBlockToDisk(wrapper.WrappedPhase0SignedBeaconBlock(ðpb.SignedBeaconBlock{Block: blk}), true /*failed*/)
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
|
||||
}
|
||||
blk.StateRoot = stateRoot
|
||||
|
||||
return blk, nil
|
||||
return &BlockData{
|
||||
ParentRoot: parentRoot,
|
||||
Graffiti: graffiti,
|
||||
ProposerIdx: idx,
|
||||
Eth1Data: eth1Data,
|
||||
Deposits: deposits,
|
||||
Attestations: atts,
|
||||
ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/),
|
||||
AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/),
|
||||
VoluntaryExits: vs.ExitPool.PendingExits(head, req.Slot, false /*noLimit*/),
|
||||
}, nil
|
||||
}
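Splitting block construction into BuildBlockData lets the phase0 and Altair paths share everything except the body type. Below is a hypothetical fork-aware wrapper, sketched as if it lived alongside the functions in this file and relying on its existing imports; getBlockForSlot is a made-up name and the fork comparison is an assumption about how a caller might dispatch, not code from this change.

// Hypothetical dispatcher (illustration only): both GetBlock and GetBlockAltair
// consume the same BuildBlockData result and differ only in the block body.
func (vs *Server) getBlockForSlot(ctx context.Context, req *ethpb.BlockRequest) (interface{}, error) {
	if helpers.SlotToEpoch(req.Slot) >= params.BeaconConfig().AltairForkEpoch {
		return vs.GetBlockAltair(ctx, req) // Altair body carries the SyncAggregate
	}
	return vs.GetBlock(ctx, req) // phase0 body has no sync aggregate
}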
|
||||
|
||||
// ProposeBlock is called by a proposer during its assigned slot to create a block in an attempt
|
||||
// to get it processed by the beacon node as the canonical head.
|
||||
func (vs *Server) ProposeBlock(ctx context.Context, rBlk *ethpb.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
blk := wrapper.WrappedPhase0SignedBeaconBlock(rBlk)
|
||||
return vs.ProposeBlockGeneric(ctx, blk)
|
||||
}
|
||||
|
||||
// ProposeBlockGeneric performs the core post-block creation actions once a block proposal is received.
|
||||
func (vs *Server) ProposeBlockGeneric(ctx context.Context, blk block.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeBlock")
|
||||
defer span.End()
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not tree hash block: %v", err)
|
||||
@@ -177,6 +229,137 @@ func (vs *Server) ProposeBlock(ctx context.Context, rBlk *ethpb.SignedBeaconBloc
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBlockAltair is called by a proposer during its assigned slot to request a block to sign
|
||||
// by passing in the slot and the signed randao reveal of the slot. This is used by a validator
|
||||
// after the altair fork epoch has been encountered.
|
||||
func (vs *Server) GetBlockAltair(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.GetBlock")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))
|
||||
|
||||
blkData, err := vs.BuildBlockData(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use zero hash as stub for state root to compute later.
|
||||
stateRoot := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
syncAggregate, err := vs.getSyncAggregate(ctx, req.Slot-1, bytesutil.ToBytes32(blkData.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blk := ðpb.BeaconBlockAltair{
|
||||
Slot: req.Slot,
|
||||
ParentRoot: blkData.ParentRoot,
|
||||
StateRoot: stateRoot,
|
||||
ProposerIndex: blkData.ProposerIdx,
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Eth1Data: blkData.Eth1Data,
|
||||
Deposits: blkData.Deposits,
|
||||
Attestations: blkData.Attestations,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
ProposerSlashings: blkData.ProposerSlashings,
|
||||
AttesterSlashings: blkData.AttesterSlashings,
|
||||
VoluntaryExits: blkData.VoluntaryExits,
|
||||
Graffiti: blkData.Graffiti[:],
|
||||
SyncAggregate: syncAggregate,
|
||||
},
|
||||
}
|
||||
// Compute state root with the newly constructed block.
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlockAltair{Block: blk, Signature: make([]byte, 96)},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateRoot, err = vs.ComputeStateRoot(
|
||||
ctx,
|
||||
wsb,
|
||||
)
|
||||
if err != nil {
|
||||
interop.WriteBlockToDisk(wsb, true /*failed*/)
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
|
||||
}
|
||||
blk.StateRoot = stateRoot
|
||||
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// ProposeBlockAltair is called by a proposer during its assigned slot to create a block in an attempt
|
||||
// to get it processed by the beacon node as the canonical head.
|
||||
func (vs *Server) ProposeBlockAltair(ctx context.Context, rBlk *ethpb.SignedBeaconBlockAltair) (*ethpb.ProposeResponse, error) {
|
||||
blk, err := wrapper.WrappedAltairSignedBeaconBlock(rBlk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return vs.ProposeBlockGeneric(ctx, blk)
|
||||
}
|
||||
|
||||
// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
|
||||
// The contributions are filtered by matching the input root and slot, then by profitability.
|
||||
func (vs *Server) getSyncAggregate(ctx context.Context, slot types.Slot, root [32]byte) (*ethpb.SyncAggregate, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.GetSyncAggregate")
|
||||
defer span.End()
|
||||
|
||||
// Contributions have to match the input root.
|
||||
contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerContributions := proposerSyncContributions(contributions).filterByBlockRoot(root)
|
||||
|
||||
// Each sync subcommittee is 128 bits and the sync committee is 512 bits (mainnet).
|
||||
bitsHolder := [][]byte{}
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
|
||||
bitsHolder = append(bitsHolder, ethpb.NewSyncCommitteeAggregationBits())
|
||||
}
|
||||
sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
|
||||
cs := proposerContributions.filterBySubIndex(i)
|
||||
aggregates, err := sync_contribution.Aggregate(cs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Retrieve the most profitable contribution
|
||||
deduped, err := proposerSyncContributions(aggregates).dedup()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := deduped.mostProfitable()
|
||||
if c == nil {
|
||||
continue
|
||||
}
|
||||
bitsHolder[i] = c.AggregationBits
|
||||
sig, err := bls.SignatureFromBytes(c.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sigsHolder = append(sigsHolder, sig)
|
||||
}
|
||||
|
||||
// Aggregate all the contribution bits and signatures.
|
||||
var syncBits []byte
|
||||
for _, b := range bitsHolder {
|
||||
syncBits = append(syncBits, b...)
|
||||
}
|
||||
syncSig := bls.AggregateSignatures(sigsHolder)
|
||||
var syncSigBytes [96]byte
|
||||
if syncSig == nil {
|
||||
syncSigBytes = [96]byte{0xC0} // Infinity signature when the aggregate signature is nil.
|
||||
} else {
|
||||
syncSigBytes = bytesutil2.ToBytes96(syncSig.Marshal())
|
||||
}
|
||||
|
||||
return ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: syncBits,
|
||||
SyncCommitteeSignature: syncSigBytes[:],
|
||||
}, nil
|
||||
}
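A small standalone sketch of the aggregation step above: four 128-bit subcommittee bitvectors concatenate into the 512-bit SyncCommitteeBits field, and an empty aggregate falls back to the infinity signature. It uses the go-bitfield package already imported by this code; the single set bit and subnet count are assumed mainnet-style values for illustration.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	const subnetCount = 4 // assumed mainnet: 4 subcommittees of 128 bits each

	// Start from empty per-subcommittee bitvectors, as the code above does.
	holders := make([]bitfield.Bitvector128, subnetCount)
	for i := range holders {
		holders[i] = bitfield.NewBitvector128()
	}
	holders[2].SetBitAt(5, true) // pretend one contribution was found for subcommittee 2

	// Concatenation yields the block's 512-bit SyncCommitteeBits field.
	var syncBits []byte
	for _, h := range holders {
		syncBits = append(syncBits, h...)
	}
	fmt.Println(len(syncBits)*8, "bits") // 512 bits

	// With no signatures to aggregate, the signature field is the infinity point.
	infinity := [96]byte{0xC0}
	fmt.Printf("%x...\n", infinity[:4]) // c0000000...
}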
|
||||
|
||||
// eth1DataMajorityVote determines the appropriate eth1data for a block proposal using
|
||||
// an algorithm called Voting with the Majority. The algorithm works as follows:
|
||||
// - Determine the timestamp for the start slot for the eth1 voting period.
|
||||
@@ -253,6 +436,7 @@ func (vs *Server) slotStartTime(slot types.Slot) uint64 {
|
||||
return helpers.VotingPeriodStartTime(startTime, slot)
|
||||
}
|
||||
|
||||
// skipcq: SCC-U1000
|
||||
func (vs *Server) inRangeVotes(ctx context.Context,
|
||||
beaconState state.ReadOnlyBeaconState,
|
||||
firstValidBlockNumber, lastValidBlockNumber *big.Int) ([]eth1DataSingleVote, error) {
|
||||
@@ -280,6 +464,7 @@ func (vs *Server) inRangeVotes(ctx context.Context,
|
||||
return inRangeVotes, nil
|
||||
}
|
||||
|
||||
// skipcq: SCC-U1000
|
||||
func chosenEth1DataMajorityVote(votes []eth1DataSingleVote) eth1DataAggregatedVote {
|
||||
var voteCount []eth1DataAggregatedVote
|
||||
for _, singleVote := range votes {
|
||||
@@ -365,9 +550,9 @@ func (vs *Server) randomETH1DataVote(ctx context.Context) (*ethpb.Eth1Data, erro
|
||||
}, nil
|
||||
}
|
||||
|
||||
// computeStateRoot computes the state root after a block has been processed through a state transition and
|
||||
// ComputeStateRoot computes the state root after a block has been processed through a state transition and
|
||||
// returns it to the validator client.
|
||||
func (vs *Server) computeStateRoot(ctx context.Context, block block.SignedBeaconBlock) ([]byte, error) {
|
||||
func (vs *Server) ComputeStateRoot(ctx context.Context, block block.SignedBeaconBlock) ([]byte, error) {
|
||||
beaconState, err := vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(block.Block().ParentRoot()))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve beacon state")
|
||||
@@ -395,14 +580,14 @@ func (vs *Server) deposits(
|
||||
beaconState state.BeaconState,
|
||||
currentVote *ethpb.Eth1Data,
|
||||
) ([]*ethpb.Deposit, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.Deposits")
|
||||
defer span.End()
|
||||
|
||||
if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
|
||||
return []*ethpb.Deposit{}, nil
|
||||
}
|
||||
// Need to fetch if the deposits up to the state's latest eth 1 data matches
|
||||
// the number of all deposits in this RPC call. If not, then we return nil.
|
||||
// Need to fetch if the Deposits up to the state's latest eth 1 data matches
|
||||
// the number of all Deposits in this RPC call. If not, then we return nil.
|
||||
canonicalEth1Data, canonicalEth1DataHeight, err := vs.canonicalEth1Data(ctx, beaconState, currentVote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -413,7 +598,7 @@ func (vs *Server) deposits(
|
||||
return []*ethpb.Deposit{}, nil
|
||||
}
|
||||
|
||||
// If there are no pending deposits, exit early.
|
||||
// If there are no pending Deposits, exit early.
|
||||
allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
|
||||
if len(allPendingContainers) == 0 {
|
||||
return []*ethpb.Deposit{}, nil
|
||||
@@ -425,7 +610,7 @@ func (vs *Server) deposits(
|
||||
}
|
||||
|
||||
// Deposits need to be received in order of merkle index root, so this has to make sure
|
||||
// deposits are sorted from lowest to highest.
|
||||
// Deposits are sorted from lowest to highest.
|
||||
var pendingDeps []*dbpb.DepositContainer
|
||||
for _, dep := range allPendingContainers {
|
||||
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
|
||||
@@ -434,7 +619,7 @@ func (vs *Server) deposits(
|
||||
}
|
||||
|
||||
for i := range pendingDeps {
|
||||
// Don't construct merkle proof if the number of deposits is more than max allowed in block.
|
||||
// Don't construct merkle proof if the number of Deposits is more than max allowed in block.
|
||||
if uint64(i) == params.BeaconConfig().MaxDeposits {
|
||||
break
|
||||
}
|
||||
@@ -443,7 +628,7 @@ func (vs *Server) deposits(
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Limit the return of pending deposits to not be more than max deposits allowed in block.
|
||||
// Limit the return of pending Deposits to not be more than max Deposits allowed in block.
|
||||
var pendingDeposits []*ethpb.Deposit
|
||||
for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
|
||||
pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
|
||||
@@ -565,7 +750,7 @@ func (vs *Server) defaultEth1DataResponse(ctx context.Context, currentHeight *bi
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not fetch ETH1_FOLLOW_DISTANCE ancestor")
|
||||
}
|
||||
// Fetch all historical deposits up to an ancestor height.
|
||||
// Fetch all historical Deposits up to an ancestor height.
|
||||
depositsTillHeight, depositRoot := vs.DepositFetcher.DepositsNumberAndRootAtHeight(ctx, ancestorHeight)
|
||||
if depositsTillHeight == 0 {
|
||||
return vs.ChainStartFetcher.ChainStartEth1Data(), nil
|
||||
@@ -582,7 +767,7 @@ func (vs *Server) defaultEth1DataResponse(ctx context.Context, currentHeight *bi
|
||||
}, nil
|
||||
}
|
||||
|
||||
// This filters the input attestations to return a list of valid attestations to be packaged inside a beacon block.
|
||||
// This filters the input Attestations to return a list of valid Attestations to be packaged inside a beacon block.
|
||||
func (vs *Server) filterAttestationsForBlockInclusion(ctx context.Context, st state.BeaconState, atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.filterAttestationsForBlockInclusion")
|
||||
defer span.End()
|
||||
@@ -602,7 +787,7 @@ func (vs *Server) filterAttestationsForBlockInclusion(ctx context.Context, st st
|
||||
return sorted.limitToMaxAttestations(), nil
|
||||
}
|
||||
|
||||
// The input attestations are processed and seen by the node, this deletes them from pool
|
||||
// The input Attestations are processed and seen by the node, this deletes them from pool
|
||||
// so proposers don't include them in a block for the future.
|
||||
func (vs *Server) deleteAttsInPool(ctx context.Context, atts []*ethpb.Attestation) error {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.deleteAttsInPool")
|
||||
@@ -631,7 +816,7 @@ func constructMerkleProof(trie *trieutil.SparseMerkleTrie, index int, deposit *e
|
||||
return nil, errors.Wrapf(err, "could not generate merkle proof for deposit at index %d", index)
|
||||
}
|
||||
// For every deposit, we construct a Merkle proof using the powchain service's
|
||||
// in-memory deposits trie, which is updated only once the state's LatestETH1Data
|
||||
// in-memory Deposits trie, which is updated only once the state's LatestETH1Data
|
||||
// property changes during a state transition after a voting period.
|
||||
deposit.Proof = proof
|
||||
return deposit, nil
|
||||
@@ -644,19 +829,19 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
|
||||
atts := vs.AttPool.AggregatedAttestations()
|
||||
atts, err := vs.filterAttestationsForBlockInclusion(ctx, latestState, atts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not filter attestations")
|
||||
return nil, errors.Wrap(err, "could not filter Attestations")
|
||||
}
|
||||
|
||||
// If there is any room left in the block, consider unaggregated attestations as well.
|
||||
// If there is any room left in the block, consider unaggregated Attestations as well.
|
||||
numAtts := uint64(len(atts))
|
||||
if numAtts < params.BeaconConfig().MaxAttestations {
|
||||
uAtts, err := vs.AttPool.UnaggregatedAttestations()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get unaggregated attestations")
|
||||
return nil, errors.Wrap(err, "could not get unaggregated Attestations")
|
||||
}
|
||||
uAtts, err = vs.filterAttestationsForBlockInclusion(ctx, latestState, uAtts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not filter attestations")
|
||||
return nil, errors.Wrap(err, "could not filter Attestations")
|
||||
}
|
||||
atts = append(atts, uAtts...)
|
||||
|
||||
|
||||
@@ -6,12 +6,14 @@ import (
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/aggregation"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
)
|
||||
|
||||
type proposerAtts []*ethpb.Attestation
|
||||
@@ -19,11 +21,24 @@ type proposerAtts []*ethpb.Attestation
|
||||
// filter separates attestation list into two groups: valid and invalid attestations.
|
||||
// The first group passes all the required checks for an attestation to be considered for proposing.
|
||||
// Attestations from the second group should be deleted.
|
||||
func (a proposerAtts) filter(ctx context.Context, state state.BeaconState) (proposerAtts, proposerAtts) {
|
||||
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts) {
|
||||
validAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
|
||||
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
|
||||
switch st.Version() {
|
||||
case version.Phase0:
|
||||
attestationProcessor = blocks.ProcessAttestationNoVerifySignature
|
||||
case version.Altair:
|
||||
// Use a wrapper here, as Go needs strong typing for the function signature.
|
||||
attestationProcessor = func(ctx context.Context, st state.BeaconState, attestation *ethpb.Attestation) (state.BeaconState, error) {
|
||||
return altair.ProcessAttestationNoVerifySignature(ctx, st, attestation)
|
||||
}
|
||||
default:
|
||||
// Exit early if there is an unknown state type.
|
||||
return validAtts, invalidAtts
|
||||
}
|
||||
for _, att := range a {
|
||||
if _, err := blocks.ProcessAttestationNoVerifySignature(ctx, state, att); err == nil {
|
||||
if _, err := attestationProcessor(ctx, st, att); err == nil {
|
||||
validAtts = append(validAtts, att)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -0,0 +1,91 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
type proposerSyncContributions []*eth.SyncCommitteeContribution
|
||||
|
||||
// filterByBlockRoot filters the sync contribution list down to a valid group.
|
||||
// The valid group contains only contributions that match the input block root.
|
||||
func (cs proposerSyncContributions) filterByBlockRoot(r [32]byte) proposerSyncContributions {
|
||||
matchedSyncContributions := make([]*eth.SyncCommitteeContribution, 0, len(cs))
|
||||
for _, c := range cs {
|
||||
if bytes.Equal(c.BlockRoot, r[:]) {
|
||||
matchedSyncContributions = append(matchedSyncContributions, c)
|
||||
}
|
||||
}
|
||||
return matchedSyncContributions
|
||||
}
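A minimal usage sketch of root-based filtering in isolation, assuming the v1alpha1 SyncCommitteeContribution message used throughout this file; the roots and slots are arbitrary placeholders.

package main

import (
	"bytes"
	"fmt"

	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

func main() {
	rootA, rootB := [32]byte{'a'}, [32]byte{'b'}
	contributions := []*eth.SyncCommitteeContribution{
		{BlockRoot: rootA[:], Slot: 0},
		{BlockRoot: rootB[:], Slot: 1},
		{BlockRoot: rootA[:], Slot: 2},
	}

	// Keep only contributions voting for rootA, mirroring filterByBlockRoot above.
	var kept []*eth.SyncCommitteeContribution
	for _, c := range contributions {
		if bytes.Equal(c.BlockRoot, rootA[:]) {
			kept = append(kept, c)
		}
	}
	fmt.Println(len(kept), "of", len(contributions), "contributions match the head root")
}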
|
||||
|
||||
// filterBySubIndex filters the sync contribution list down to a valid group.
|
||||
// The valid group contains only contributions with the matching subcommittee index.
|
||||
func (cs proposerSyncContributions) filterBySubIndex(i uint64) proposerSyncContributions {
|
||||
matchedSyncContributions := make([]*eth.SyncCommitteeContribution, 0, len(cs))
|
||||
for _, c := range cs {
|
||||
if c.SubcommitteeIndex == i {
|
||||
matchedSyncContributions = append(matchedSyncContributions, c)
|
||||
}
|
||||
}
|
||||
return matchedSyncContributions
|
||||
}
|
||||
|
||||
// dedup removes duplicate sync contributions (ones with the same bits set on).
|
||||
// Important: not only exact duplicates are removed, but proper subsets are removed too
|
||||
// (their known bits are redundant and are already contained in their supersets).
|
||||
func (cs proposerSyncContributions) dedup() (proposerSyncContributions, error) {
|
||||
if len(cs) < 2 {
|
||||
return cs, nil
|
||||
}
|
||||
contributionsBySubIdx := make(map[uint64][]*eth.SyncCommitteeContribution, len(cs))
|
||||
for _, c := range cs {
|
||||
contributionsBySubIdx[c.SubcommitteeIndex] = append(contributionsBySubIdx[c.SubcommitteeIndex], c)
|
||||
}
|
||||
|
||||
uniqContributions := make([]*eth.SyncCommitteeContribution, 0, len(cs))
|
||||
for _, cs := range contributionsBySubIdx {
|
||||
for i := 0; i < len(cs); i++ {
|
||||
a := cs[i]
|
||||
for j := i + 1; j < len(cs); j++ {
|
||||
b := cs[j]
|
||||
if c, err := a.AggregationBits.Contains(b.AggregationBits); err != nil {
|
||||
return nil, err
|
||||
} else if c {
|
||||
// a contains b, b is redundant.
|
||||
cs[j] = cs[len(cs)-1]
|
||||
cs[len(cs)-1] = nil
|
||||
cs = cs[:len(cs)-1]
|
||||
j--
|
||||
} else if c, err := b.AggregationBits.Contains(a.GetAggregationBits()); err != nil {
|
||||
return nil, err
|
||||
} else if c {
|
||||
// b contains a, a is redundant.
|
||||
cs[i] = cs[len(cs)-1]
|
||||
cs[len(cs)-1] = nil
|
||||
cs = cs[:len(cs)-1]
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
uniqContributions = append(uniqContributions, cs...)
|
||||
}
|
||||
return uniqContributions, nil
|
||||
}
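To make the subset rule concrete: a contribution is redundant when every bit it sets is already set in another contribution for the same subcommittee. A tiny standalone sketch of that byte-level check, equivalent in spirit to the Contains call above (assumed semantics); the bit patterns are arbitrary and the helper assumes equal-length bitfields.

package main

import "fmt"

// contains reports whether every bit set in b is also set in a.
func contains(a, b []byte) bool {
	for i := range b {
		if a[i]&b[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	superset := []byte{0b11001111, 0b1} // an aggregate with more bits set
	subset := []byte{0b00001111, 0b1}   // a smaller aggregate covered entirely by it
	other := []byte{0b00110000, 0b1}    // sets bits the first one does not

	fmt.Println(contains(superset, subset)) // true  -> subset is dropped by dedup
	fmt.Println(contains(superset, other))  // false -> other is kept
}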
|
||||
|
||||
// mostProfitable returns the most profitable sync contribution, the one with the most
|
||||
// votes (i.e. the aggregation bit count).
|
||||
func (cs proposerSyncContributions) mostProfitable() *eth.SyncCommitteeContribution {
|
||||
if len(cs) == 0 {
|
||||
return nil
|
||||
}
|
||||
mostProfitable := cs[0]
|
||||
for _, c := range cs[1:] {
|
||||
if c.AggregationBits.Count() > mostProfitable.AggregationBits.Count() {
|
||||
mostProfitable = c
|
||||
}
|
||||
}
|
||||
return mostProfitable
|
||||
}
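And a quick demonstration of the profitability rule: the contribution with the highest set-bit count wins. This reuses the go-bitfield Count method already exercised by the code above; the example values are arbitrary.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	candidates := []bitfield.Bitvector128{
		{0b0101}, // 2 participants
		{0b0111}, // 3 participants
		{0b0001}, // 1 participant
	}

	// Pick the candidate with the most bits set, as mostProfitable does.
	best := candidates[0]
	for _, c := range candidates[1:] {
		if c.Count() > best.Count() {
			best = c
		}
	}
	fmt.Println(best.Count()) // 3
}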
|
||||
@@ -0,0 +1,392 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
v2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
)
|
||||
|
||||
func TestProposerSyncContributions_FilterByBlockRoot(t *testing.T) {
|
||||
rootA := [32]byte{'a'}
|
||||
rootB := [32]byte{'b'}
|
||||
tests := []struct {
|
||||
name string
|
||||
cs proposerSyncContributions
|
||||
want proposerSyncContributions
|
||||
}{
|
||||
{
|
||||
name: "empty list",
|
||||
cs: proposerSyncContributions{},
|
||||
want: proposerSyncContributions{},
|
||||
},
|
||||
{
|
||||
name: "single item, not found",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.NewBitvector128()},
|
||||
},
|
||||
want: proposerSyncContributions{},
|
||||
},
|
||||
{
|
||||
name: "single item with filter, found",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], Slot: 0},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], Slot: 1},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:]},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple items with filter, found",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], Slot: 0},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], Slot: 1},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], Slot: 2},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], Slot: 3},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], Slot: 0},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], Slot: 2},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cs := tt.cs.filterByBlockRoot(rootA)
|
||||
assert.DeepEqual(t, tt.want, cs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposerSyncContributions_FilterBySubcommitteeID(t *testing.T) {
|
||||
rootA := [32]byte{'a'}
|
||||
rootB := [32]byte{'b'}
|
||||
tests := []struct {
|
||||
name string
|
||||
cs proposerSyncContributions
|
||||
want proposerSyncContributions
|
||||
}{
|
||||
{
|
||||
name: "empty list",
|
||||
cs: proposerSyncContributions{},
|
||||
want: proposerSyncContributions{},
|
||||
},
|
||||
{
|
||||
name: "single item, not found",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.NewBitvector128(), SubcommitteeIndex: 1},
|
||||
},
|
||||
want: proposerSyncContributions{},
|
||||
},
|
||||
{
|
||||
name: "single item with filter",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], SubcommitteeIndex: 0},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], SubcommitteeIndex: 1},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:]},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple items with filter",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], SubcommitteeIndex: 0},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], SubcommitteeIndex: 1},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], SubcommitteeIndex: 0},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], SubcommitteeIndex: 2},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootA[:], SubcommitteeIndex: 0},
|
||||
&v2.SyncCommitteeContribution{BlockRoot: rootB[:], SubcommitteeIndex: 0},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cs := tt.cs.filterBySubIndex(0)
|
||||
assert.DeepEqual(t, tt.want, cs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposerSyncContributions_Dedup(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cs proposerSyncContributions
|
||||
want proposerSyncContributions
|
||||
}{
|
||||
{
|
||||
name: "nil list",
|
||||
cs: nil,
|
||||
want: proposerSyncContributions(nil),
|
||||
},
|
||||
{
|
||||
name: "empty list",
|
||||
cs: proposerSyncContributions{},
|
||||
want: proposerSyncContributions{},
|
||||
},
|
||||
{
|
||||
name: "single item",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.NewBitvector128()},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.NewBitvector128()},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "two items no duplicates",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10111110, 0x01}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01111111, 0x01}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01111111, 0x01}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10111110, 0x01}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "two items with duplicates",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0xba, 0x01}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0xba, 0x01}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0xba, 0x01}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sorted no duplicates",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00101011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10100000, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00010000, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00101011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10100000, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00010000, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sorted with duplicates",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000001, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all equal",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unsorted no duplicates",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00100010, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10100101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00010000, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10100101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00100010, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00010000, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unsorted with duplicates",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10100101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10100101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000001, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10100101, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no proper subset (same root)",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00011001, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00011001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10000001, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "proper subset (same root)",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no proper subset (different index)",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000101, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b10000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b00011001, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b00011001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b10000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000101, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "proper subset (different index 1)",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b00000011, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00000001, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01101101, 0b1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "proper subset (different index 2)",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b00001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
},
|
||||
want: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{SubcommitteeIndex: 1, AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b11001111, 0b1}},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cs, err := tt.cs.dedup()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
sort.Slice(cs, func(i, j int) bool {
|
||||
if cs[i].AggregationBits.Count() == cs[j].AggregationBits.Count() {
|
||||
if cs[i].SubcommitteeIndex == cs[j].SubcommitteeIndex {
|
||||
return bytes.Compare(cs[i].AggregationBits, cs[j].AggregationBits) <= 0
|
||||
}
|
||||
return cs[i].SubcommitteeIndex > cs[j].SubcommitteeIndex
|
||||
}
|
||||
return cs[i].AggregationBits.Count() > cs[j].AggregationBits.Count()
|
||||
})
|
||||
assert.DeepEqual(t, tt.want, cs)
|
||||
})
|
||||
}
|
||||
}
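Taken together, these cases pin down the dedup contract: within a single subcommittee index, a contribution whose aggregation bits are fully covered by another contribution is dropped, and exact duplicates collapse to the first copy. The following is a minimal, self-contained sketch of that rule only; the contribution struct, helper names, and quadratic scan are illustrative assumptions, not the repository's dedup implementation.

package contributions // hypothetical package for this sketch

import "github.com/prysmaticlabs/go-bitfield"

// contribution is a stand-in for the two fields dedup cares about (hypothetical type).
type contribution struct {
	subcommitteeIndex uint64
	bits              bitfield.Bitvector128
}

// covers reports whether a has every bit set that b has set (byte-wise subset check).
func covers(a, b bitfield.Bitvector128) bool {
	for i := range b {
		var ab byte
		if i < len(a) {
			ab = a[i]
		}
		if b[i]&^ab != 0 {
			return false
		}
	}
	return true
}

// dedupSketch drops contributions whose bits are covered by another contribution with
// the same subcommittee index, keeping the first copy of exact duplicates.
func dedupSketch(cs []contribution) []contribution {
	out := make([]contribution, 0, len(cs))
	for i, c := range cs {
		redundant := false
		for j, o := range cs {
			if i == j || c.subcommitteeIndex != o.subcommitteeIndex {
				continue
			}
			strictlyBigger := o.bits.Count() > c.bits.Count()
			sameButEarlier := o.bits.Count() == c.bits.Count() && j < i
			if covers(o.bits, c.bits) && (strictlyBigger || sameButEarlier) {
				redundant = true
				break
			}
		}
		if !redundant {
			out = append(out, c)
		}
	}
	return out
}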
|
||||
|
||||
func TestProposerSyncContributions_MostProfitable(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cs proposerSyncContributions
|
||||
want *v2.SyncCommitteeContribution
|
||||
}{
|
||||
{
|
||||
name: "Same item",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01}},
|
||||
},
|
||||
want: &v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01}},
|
||||
},
|
||||
{
|
||||
name: "Same item again",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b10}},
|
||||
},
|
||||
want: &v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b01}},
|
||||
},
|
||||
{
|
||||
name: "most profitable at the start",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b0101}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b0100}},
|
||||
},
|
||||
want: &v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b0101}},
|
||||
},
|
||||
{
|
||||
name: "most profitable at the end",
|
||||
cs: proposerSyncContributions{
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b0101}},
|
||||
&v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b0111}},
|
||||
},
|
||||
want: &v2.SyncCommitteeContribution{AggregationBits: bitfield.Bitvector128{0b0111}},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cs := tt.cs.mostProfitable()
|
||||
assert.DeepEqual(t, tt.want, cs)
|
||||
})
|
||||
}
|
||||
}
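The selection rule exercised here is simpler: among the candidates, take the one with the most aggregation bits set, keeping the earliest on ties (the "Same item again" case). A tiny sketch, reusing the hypothetical contribution type from the dedup sketch above; it is not the repository's mostProfitable implementation.

// mostProfitableSketch picks the contribution with the most aggregation bits set,
// preferring the earliest entry on ties.
func mostProfitableSketch(cs []contribution) *contribution {
	if len(cs) == 0 {
		return nil
	}
	best := 0
	for i := 1; i < len(cs); i++ {
		if cs[i].bits.Count() > cs[best].bits.Count() {
			best = i
		}
	}
	return &cs[best]
}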
|
||||
@@ -9,11 +9,13 @@ import (
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
@@ -22,7 +24,6 @@ import (
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
attaggregation "github.com/prysmaticlabs/prysm/shared/aggregation/attestations"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
@@ -110,7 +111,7 @@ func TestProposer_GetBlock_OK(t *testing.T) {
|
||||
assert.Equal(t, req.Slot, block.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], block.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, block.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct graffiti")
|
||||
assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(block.Body.ProposerSlashings)))
|
||||
assert.DeepEqual(t, proposerSlashings, block.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(block.Body.AttesterSlashings)))
|
||||
@@ -150,7 +151,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
|
||||
StateGen: stategen.New(db),
|
||||
}
|
||||
|
||||
// Generate a bunch of random attestations at slot. These would be considered double votes, but
|
||||
// Generate a bunch of random Attestations at slot. These would be considered double votes, but
|
||||
// we don't care for the purpose of this test.
|
||||
var atts []*ethpb.Attestation
|
||||
for i := uint64(0); len(atts) < int(params.BeaconConfig().MaxAttestations); i++ {
|
||||
@@ -158,12 +159,12 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
atts = append(atts, a...)
|
||||
}
|
||||
// Max attestations minus one so we can almost fill the block and then include 1 unaggregated
|
||||
// Max Attestations minus one so we can almost fill the block and then include 1 unaggregated
|
||||
// att to maximize inclusion.
|
||||
atts = atts[:params.BeaconConfig().MaxAttestations-1]
|
||||
require.NoError(t, proposerServer.AttPool.SaveAggregatedAttestations(atts))
|
||||
|
||||
// Generate some more random attestations with a larger spread so that we can capture at least
|
||||
// Generate some more random Attestations with a larger spread so that we can capture at least
|
||||
// one unaggregated attestation.
|
||||
atts, err = testutil.GenerateAttestations(beaconState, privKeys, 300, 1, true)
|
||||
require.NoError(t, err)
|
||||
@@ -174,7 +175,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
|
||||
require.NoError(t, proposerServer.AttPool.SaveUnaggregatedAttestation(a))
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "No unaggregated attestations were generated")
|
||||
require.Equal(t, true, found, "No unaggregated Attestations were generated")
|
||||
|
||||
randaoReveal, err := testutil.RandaoReveal(beaconState, 0, privKeys)
|
||||
assert.NoError(t, err)
|
||||
@@ -191,7 +192,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
|
||||
assert.Equal(t, req.Slot, block.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], block.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, block.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct graffiti")
|
||||
assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttestations, uint64(len(block.Body.Attestations)), "Expected block atts to be aggregated down to 1")
|
||||
hasUnaggregatedAtt := false
|
||||
for _, a := range block.Body.Attestations {
|
||||
@@ -278,7 +279,7 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
|
||||
req.Signature, err = helpers.ComputeDomainAndSign(beaconState, currentEpoch, req.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = proposerServer.computeStateRoot(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(req))
|
||||
_, err = proposerServer.ComputeStateRoot(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(req))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -333,7 +334,7 @@ func TestProposer_PendingDeposits_Eth1DataVoteOK(t *testing.T) {
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: blkRoot[:]},
|
||||
}
|
||||
|
||||
// It should also return the recent deposits after their follow window.
|
||||
// It should also return the recent Deposits after their follow window.
|
||||
p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
|
||||
_, eth1Height, err := bs.canonicalEth1Data(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
@@ -373,7 +374,7 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
BlockHash: bytesutil.PadTo([]byte("0x0"), 32),
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -466,14 +467,14 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {
|
||||
|
||||
deposits, err := bs.deposits(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected list of deposits")
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected list of Deposits")
|
||||
|
||||
// It should not return the recent deposits after their follow window.
|
||||
// as latest block number makes no difference in retrieval of deposits
|
||||
// It should not return the recent Deposits after their follow window.
|
||||
// as latest block number makes no difference in retrieval of Deposits
|
||||
p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
|
||||
deposits, err = bs.deposits(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected number of pending deposits")
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected number of pending Deposits")
|
||||
}
|
||||
|
||||
func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
|
||||
@@ -501,7 +502,7 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
|
||||
votes = append(votes, vote)
|
||||
}
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
BlockHash: []byte("0x0"),
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -595,15 +596,15 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
|
||||
|
||||
deposits, err := bs.deposits(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected list of deposits")
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected list of Deposits")
|
||||
|
||||
// It should also return the recent deposits after their follow window.
|
||||
// It should also return the recent Deposits after their follow window.
|
||||
p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
|
||||
// we should get our pending deposits once this vote pushes the vote tally to include
|
||||
// we should get our pending Deposits once this vote pushes the vote tally to include
|
||||
// the updated eth1 data.
|
||||
deposits, err = bs.deposits(ctx, beaconState, vote)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(recentDeposits), len(deposits), "Received unexpected number of pending deposits")
|
||||
assert.Equal(t, len(recentDeposits), len(deposits), "Received unexpected number of pending Deposits")
|
||||
}
|
||||
|
||||
func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testing.T) {
|
||||
@@ -692,13 +693,13 @@ func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testin
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: blkRoot[:]},
|
||||
}
|
||||
|
||||
// It should also return the recent deposits after their follow window.
|
||||
// It should also return the recent Deposits after their follow window.
|
||||
p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
|
||||
deposits, err := bs.deposits(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedDeposits := 6
|
||||
assert.Equal(t, expectedDeposits, len(deposits), "Received unexpected number of pending deposits")
|
||||
assert.Equal(t, expectedDeposits, len(deposits), "Received unexpected number of pending Deposits")
|
||||
}
|
||||
|
||||
func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
|
||||
@@ -712,7 +713,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
BlockHash: bytesutil.PadTo([]byte("0x0"), 32),
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -788,11 +789,11 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: blkRoot[:]},
|
||||
}
|
||||
|
||||
// It should also return the recent deposits after their follow window.
|
||||
// It should also return the recent Deposits after their follow window.
|
||||
p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
|
||||
deposits, err := bs.deposits(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, params.BeaconConfig().MaxDeposits, uint64(len(deposits)), "Received unexpected number of pending deposits")
|
||||
assert.Equal(t, params.BeaconConfig().MaxDeposits, uint64(len(deposits)), "Received unexpected number of pending Deposits")
|
||||
}
|
||||
|
||||
func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
|
||||
@@ -806,7 +807,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
BlockHash: bytesutil.PadTo([]byte("0x0"), 32),
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -882,11 +883,11 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
|
||||
PendingDepositsFetcher: depositCache,
|
||||
}
|
||||
|
||||
// It should also return the recent deposits after their follow window.
|
||||
// It should also return the recent Deposits after their follow window.
|
||||
p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
|
||||
deposits, err := bs.deposits(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, len(deposits), "Received unexpected number of pending deposits")
|
||||
assert.Equal(t, 3, len(deposits), "Received unexpected number of pending Deposits")
|
||||
}
|
||||
|
||||
func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
|
||||
@@ -900,7 +901,7 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
BlockHash: bytesutil.PadTo([]byte("0x0"), 32),
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -1010,7 +1011,7 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
BlockHash: bytesutil.PadTo([]byte("0x0"), 32),
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -1291,7 +1292,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(52, earliestValidTime+2, []byte("second")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("first"), DepositCount: 1},
|
||||
@@ -1327,7 +1328,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(52, earliestValidTime+2, []byte("second")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("earliest"), DepositCount: 1},
|
||||
@@ -1363,7 +1364,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(51, earliestValidTime+1, []byte("first")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("first"), DepositCount: 1},
|
||||
@@ -1400,7 +1401,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(51, earliestValidTime+1, []byte("first")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("before_range"), DepositCount: 1},
|
||||
@@ -1437,7 +1438,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(100, latestValidTime, []byte("latest")).
|
||||
InsertBlock(101, latestValidTime+1, []byte("after_range"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("first"), DepositCount: 1},
|
||||
@@ -1474,7 +1475,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(52, earliestValidTime+2, []byte("second")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("unknown"), DepositCount: 1},
|
||||
@@ -1508,7 +1509,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(49, earliestValidTime-1, []byte("before_range")).
|
||||
InsertBlock(101, latestValidTime+1, []byte("after_range"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -1540,7 +1541,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(52, earliestValidTime+2, []byte("second")).
|
||||
InsertBlock(101, latestValidTime+1, []byte("after_range"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("before_range"), DepositCount: 1},
|
||||
@@ -1574,7 +1575,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(50, earliestValidTime, []byte("earliest")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{}})
|
||||
require.NoError(t, err)
|
||||
@@ -1599,12 +1600,12 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
assert.DeepEqual(t, expectedHash, hash)
|
||||
})
|
||||
|
||||
t.Run("no votes and more recent block has less deposits - choose current eth1data", func(t *testing.T) {
|
||||
t.Run("no votes and more recent block has less Deposits - choose current eth1data", func(t *testing.T) {
|
||||
p := mockPOW.NewPOWChain().
|
||||
InsertBlock(50, earliestValidTime, []byte("earliest")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -1638,7 +1639,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(52, earliestValidTime+2, []byte("second")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("first"), DepositCount: 1},
|
||||
@@ -1666,7 +1667,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
assert.DeepEqual(t, expectedHash, hash)
|
||||
})
|
||||
|
||||
t.Run("highest count on block with less deposits - choose another block", func(t *testing.T) {
|
||||
t.Run("highest count on block with less Deposits - choose another block", func(t *testing.T) {
|
||||
t.Skip()
|
||||
p := mockPOW.NewPOWChain().
|
||||
InsertBlock(50, earliestValidTime, []byte("earliest")).
|
||||
@@ -1674,7 +1675,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
InsertBlock(52, earliestValidTime+2, []byte("second")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("no_new_deposits"), DepositCount: 0},
|
||||
@@ -1707,7 +1708,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
t.Skip()
|
||||
p := mockPOW.NewPOWChain().InsertBlock(50, earliestValidTime, []byte("earliest"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("earliest"), DepositCount: 1},
|
||||
@@ -1741,7 +1742,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
// because of earliest block increment in the algorithm.
|
||||
InsertBlock(50, earliestValidTime+1, []byte("first"))
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("before_range"), DepositCount: 1},
|
||||
@@ -1769,7 +1770,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
assert.DeepEqual(t, expectedHash, hash)
|
||||
})
|
||||
|
||||
t.Run("no deposits - choose chain start eth1data", func(t *testing.T) {
|
||||
t.Run("no Deposits - choose chain start eth1data", func(t *testing.T) {
|
||||
p := mockPOW.NewPOWChain().
|
||||
InsertBlock(50, earliestValidTime, []byte("earliest")).
|
||||
InsertBlock(100, latestValidTime, []byte("latest"))
|
||||
@@ -1780,7 +1781,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: slot,
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{
|
||||
{BlockHash: []byte("earliest"), DepositCount: 1},
|
||||
@@ -1828,7 +1829,7 @@ func TestProposer_FilterAttestation(t *testing.T) {
|
||||
expectedAtts func(inputAtts []*ethpb.Attestation) []*ethpb.Attestation
|
||||
}{
|
||||
{
|
||||
name: "nil attestations",
|
||||
name: "nil Attestations",
|
||||
inputAtts: func() []*ethpb.Attestation {
|
||||
return nil
|
||||
},
|
||||
@@ -1837,7 +1838,7 @@ func TestProposer_FilterAttestation(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid attestations",
|
||||
name: "invalid Attestations",
|
||||
inputAtts: func() []*ethpb.Attestation {
|
||||
atts := make([]*ethpb.Attestation, 10)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
@@ -1923,7 +1924,7 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
|
||||
GenesisEth1Block: height,
|
||||
}
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
BlockHash: bytesutil.PadTo([]byte("0x0"), 32),
|
||||
DepositRoot: make([]byte, 32),
|
||||
@@ -1999,11 +2000,11 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
|
||||
PendingDepositsFetcher: depositCache,
|
||||
}
|
||||
|
||||
// It should also return the recent deposits after their follow window.
|
||||
// It should also return the recent Deposits after their follow window.
|
||||
p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
|
||||
deposits, err := bs.deposits(ctx, beaconState, ðpb.Eth1Data{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected number of pending deposits")
|
||||
assert.Equal(t, 0, len(deposits), "Received unexpected number of pending Deposits")
|
||||
}
|
||||
|
||||
func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
|
||||
@@ -2032,6 +2033,200 @@ func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
|
||||
assert.Equal(t, 0, len(atts), "Did not delete unaggregated attestation")
|
||||
}
|
||||
|
||||
func TestProposer_ProposeBlockAltair_OK(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
|
||||
genesis := testutil.NewBeaconBlockAltair()
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(
|
||||
t,
|
||||
db.SaveBlock(
|
||||
ctx,
|
||||
wsb,
|
||||
),
|
||||
"Could not save genesis block",
|
||||
)
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
Eth1BlockFetcher: &mockPOW.POWChain{},
|
||||
BlockReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
P2P: mockp2p.NewTestP2P(t),
|
||||
}
|
||||
req := testutil.NewBeaconBlockAltair()
|
||||
req.Block.Slot = 5
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(req)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
_, err = proposerServer.ProposeBlockAltair(context.Background(), req)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
}
|
||||
|
||||
func TestProposer_GetBlockAltair_OK(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
genAltair := ðpb.SignedBeaconBlockAltair{
|
||||
Block: ðpb.BeaconBlockAltair{
|
||||
Slot: genesis.Block.Slot,
|
||||
ParentRoot: genesis.Block.ParentRoot,
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: ðpb.SyncAggregate{SyncCommitteeBits: bitfield.NewBitvector512(), SyncCommitteeSignature: make([]byte, 96)},
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(genAltair)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genAltair.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")
|
||||
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mock.ChainService{},
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
Eth1BlockFetcher: &mockPOW.POWChain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
randaoReveal, err := testutil.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
req := ðpb.BlockRequest{
|
||||
Slot: 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := testutil.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i, /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := testutil.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
block, err := proposerServer.GetBlockAltair(ctx, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, req.Slot, block.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], block.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, block.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(block.Body.ProposerSlashings)))
|
||||
assert.DeepEqual(t, proposerSlashings, block.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(block.Body.AttesterSlashings)))
|
||||
assert.DeepEqual(t, attSlashings, block.Body.AttesterSlashings)
|
||||
}
|
||||
|
||||
func TestProposer_GetSyncAggregate_OK(t *testing.T) {
|
||||
proposerServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
r := params.BeaconConfig().ZeroHash
|
||||
conts := []*ethpb.SyncCommitteeContribution{
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
}
|
||||
|
||||
for _, cont := range conts {
|
||||
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
|
||||
}
|
||||
|
||||
aggregate, err := proposerServer.getSyncAggregate(context.Background(), 1, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.Bitvector512{0xf, 0xf, 0xf, 0xf}, aggregate.SyncCommitteeBits)
|
||||
|
||||
aggregate, err = proposerServer.getSyncAggregate(context.Background(), 2, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.Bitvector512{0xaa, 0xaa, 0xaa, 0xaa}, aggregate.SyncCommitteeBits)
|
||||
|
||||
aggregate, err = proposerServer.getSyncAggregate(context.Background(), 3, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.NewBitvector512(), aggregate.SyncCommitteeBits)
|
||||
}
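The expected bitvectors above follow from OR-ing the stored contributions for each subcommittee into that subcommittee's slice of the committee-wide bitfield: for slot 1, 0b0001 | 0b1001 | 0b1110 = 0b1111 = 0xf per subnet, and for slot 2 each subnet contributes 0xaa. Below is a minimal sketch of that merge; the helper name and the fixed sizes (4 subcommittees of 8 bits, matching these fixtures) are illustrative assumptions, not getSyncAggregate itself.

// mergeSketch ORs each subcommittee's aggregation bits into one committee-wide bitfield,
// with subcommittee idx occupying bits [idx*subnetBits, (idx+1)*subnetBits).
func mergeSketch(perSubnet map[uint64][]byte) []byte {
	const subnetCount, subnetBits = 4, 8
	out := make([]byte, subnetCount*subnetBits/8)
	for idx, bits := range perSubnet {
		for i := uint64(0); i < subnetBits; i++ {
			if bits[i/8]&(1<<(i%8)) == 0 {
				continue
			}
			pos := idx*subnetBits + i
			out[pos/8] |= 1 << (pos % 8)
		}
	}
	return out
}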
|
||||
|
||||
func majorityVoteBoundaryTime(slot types.Slot) (uint64, uint64) {
|
||||
slots := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod))
|
||||
slotStartTime := uint64(mockPOW.GenesisTime) + uint64((slot - (slot % (slots))).Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
@@ -19,14 +18,15 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/p2putils"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -44,7 +44,7 @@ type Server struct {
|
||||
ForkFetcher blockchain.ForkFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
CanonicalStateChan chan *statepb.BeaconState
|
||||
CanonicalStateChan chan *ethpb.BeaconState
|
||||
BlockFetcher powchain.POWBlockFetcher
|
||||
DepositFetcher depositcache.DepositFetcher
|
||||
ChainStartFetcher powchain.ChainStartFetcher
|
||||
@@ -56,6 +56,7 @@ type Server struct {
|
||||
AttPool attestations.Pool
|
||||
SlashingsPool slashings.PoolManager
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher powchain.POWBlockFetcher
|
||||
@@ -64,38 +65,6 @@ type Server struct {
|
||||
StateGen stategen.StateManager
|
||||
}
|
||||
|
||||
func (vs *Server) GetBlockAltair(ctx context.Context, request *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
func (vs *Server) ProposeBlockAltair(ctx context.Context, altair *ethpb.SignedBeaconBlockAltair) (*ethpb.ProposeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
func (vs *Server) GetSyncMessageBlockRoot(ctx context.Context, empty *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
func (vs *Server) SubmitSyncMessage(ctx context.Context, message *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
func (vs *Server) GetSyncSubcommitteeIndex(ctx context.Context, request *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
func (vs *Server) GetSyncCommitteeContribution(ctx context.Context, request *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
func (vs *Server) SubmitSignedContributionAndProof(ctx context.Context, proof *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
func (vs *Server) StreamBlocksAltair(request *ethpb.StreamBlocksRequest, server ethpb.BeaconNodeValidator_StreamBlocksAltairServer) error {
|
||||
return status.Error(codes.Unimplemented, "Unimplemented")
|
||||
}
|
||||
|
||||
// WaitForActivation checks if a validator public key exists in the active validator registry of the current
|
||||
// beacon state, if not, then it creates a stream which listens for canonical states which contain
|
||||
// the validator with the public key as an active validator record.
|
||||
@@ -155,7 +124,10 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
|
||||
|
||||
// DomainData fetches the current domain version information from the beacon state.
|
||||
func (vs *Server) DomainData(_ context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
|
||||
fork := vs.ForkFetcher.CurrentFork()
|
||||
fork, err := p2putils.Fork(request.Epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headGenesisValidatorRoot := vs.HeadFetcher.HeadGenesisValidatorRoot()
|
||||
dv, err := helpers.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), headGenesisValidatorRoot[:])
|
||||
if err != nil {
|
||||
|
||||
beacon-chain/rpc/prysm/v1alpha1/validator/sync_committee.go (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// GetSyncMessageBlockRoot retrieves the head block root to be used in sync committee messages.
|
||||
func (vs *Server) GetSyncMessageBlockRoot(
|
||||
ctx context.Context, _ *emptypb.Empty,
|
||||
) (*ethpb.SyncMessageBlockRootResponse, error) {
|
||||
r, err := vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
|
||||
}
|
||||
|
||||
return ðpb.SyncMessageBlockRootResponse{
|
||||
Root: r,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SubmitSyncMessage submits the sync committee message to the network.
|
||||
// It also saves the sync committee message into the pending pool for block inclusion.
|
||||
func (vs *Server) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) (*emptypb.Empty, error) {
|
||||
errs, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
idxResp, err := vs.syncSubcommitteeIndex(ctx, msg.ValidatorIndex, msg.Slot)
|
||||
if err != nil {
|
||||
return &emptypb.Empty{}, err
|
||||
}
|
||||
// Broadcast the message and save it into the pool in parallel; a failure in one should not affect the other.
|
||||
// This broadcasts for all subnets.
|
||||
for _, id := range idxResp.Indices {
|
||||
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
subnet := uint64(id) / subCommitteeSize
|
||||
errs.Go(func() error {
|
||||
return vs.P2P.BroadcastSyncCommitteeMessage(ctx, subnet, msg)
|
||||
})
|
||||
}
|
||||
|
||||
if err := vs.SyncCommitteePool.SaveSyncCommitteeMessage(msg); err != nil {
|
||||
return &emptypb.Empty{}, err
|
||||
}
|
||||
|
||||
// Wait for p2p broadcast to complete and return the first error (if any)
|
||||
err = errs.Wait()
|
||||
return &emptypb.Empty{}, err
|
||||
}
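As a concrete illustration of the subnet derivation used above, assuming mainnet parameters (SyncCommitteeSize = 512, SyncCommitteeSubnetCount = 4, so 128 sync committee positions per subnet), this throwaway snippet maps a few committee indices to their subnets:

package main

import "fmt"

func main() {
	// Mainnet values assumed: 512 sync committee positions split across 4 subnets.
	const subCommitteeSize = 512 / 4 // 128
	for _, id := range []uint64{5, 130, 300, 511} {
		fmt.Printf("committee index %d -> subnet %d\n", id, id/subCommitteeSize) // 0, 1, 2, 3
	}
}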
|
||||
|
||||
// GetSyncSubcommitteeIndex is called by a sync committee participant to get
|
||||
// its subcommittee index for sync message aggregation duty.
|
||||
func (vs *Server) GetSyncSubcommitteeIndex(
|
||||
ctx context.Context, req *ethpb.SyncSubcommitteeIndexRequest,
|
||||
) (*ethpb.SyncSubcommitteeIndexResponse, error) {
|
||||
index, exists := vs.HeadFetcher.HeadPublicKeyToValidatorIndex(ctx, bytesutil.ToBytes48(req.PublicKey))
|
||||
if !exists {
|
||||
return nil, errors.New("public key does not exist in state")
|
||||
}
|
||||
indices, err := vs.syncSubcommitteeIndex(ctx, index, req.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee index: %v", err)
|
||||
}
|
||||
return indices, nil
|
||||
}
|
||||
|
||||
// syncSubcommitteeIndex returns the list of subcommittee indices for the given validator and slot, used for sync message aggregation duty.
|
||||
func (vs *Server) syncSubcommitteeIndex(
|
||||
ctx context.Context, index types.ValidatorIndex, slot types.Slot,
|
||||
) (*ethpb.SyncSubcommitteeIndexResponse, error) {
|
||||
|
||||
nextSlotEpoch := helpers.SlotToEpoch(slot + 1)
|
||||
currentEpoch := helpers.SlotToEpoch(slot)
|
||||
|
||||
switch {
|
||||
case helpers.SyncCommitteePeriod(nextSlotEpoch) == helpers.SyncCommitteePeriod(currentEpoch):
|
||||
indices, err := vs.HeadFetcher.HeadCurrentSyncCommitteeIndices(ctx, index, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.SyncSubcommitteeIndexResponse{
|
||||
Indices: indices,
|
||||
}, nil
|
||||
// At the sync committee period boundary, the validator should sample the next period's sync committee.
|
||||
case helpers.SyncCommitteePeriod(nextSlotEpoch) == helpers.SyncCommitteePeriod(currentEpoch)+1:
|
||||
indices, err := vs.HeadFetcher.HeadNextSyncCommitteeIndices(ctx, index, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.SyncSubcommitteeIndexResponse{
|
||||
Indices: indices,
|
||||
}, nil
|
||||
default:
|
||||
// Impossible condition.
|
||||
return nil, errors.New("could get calculate sync subcommittee based on the period")
|
||||
}
|
||||
}
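The period-boundary branch above is the one exercised by the GetSyncSubcommitteeIndex test later in this diff: on mainnet (32 slots per epoch, 256 epochs per sync committee period, values assumed here rather than read from params) the last slot of a period falls in the current period while the next slot falls in the following one, so the next committee's indices are returned. A rough arithmetic sketch:

package main

import "fmt"

func main() {
	// Mainnet constants assumed: 32 slots/epoch, 256 epochs per sync committee period.
	const slotsPerEpoch, epochsPerPeriod = uint64(32), uint64(256)
	slot := epochsPerPeriod*slotsPerEpoch - 1                    // 8191, last slot of the first period
	curPeriod := (slot / slotsPerEpoch) / epochsPerPeriod        // epoch 255 -> period 0
	nextPeriod := ((slot + 1) / slotsPerEpoch) / epochsPerPeriod // epoch 256 -> period 1
	fmt.Println(curPeriod, nextPeriod)                           // 0 1: take the next-committee branch
}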
|
||||
|
||||
// GetSyncCommitteeContribution is called by a sync committee aggregator
|
||||
// to retrieve a sync committee contribution object.
|
||||
func (vs *Server) GetSyncCommitteeContribution(
|
||||
ctx context.Context, req *ethpb.SyncCommitteeContributionRequest,
|
||||
) (*ethpb.SyncCommitteeContribution, error) {
|
||||
msgs, err := vs.SyncCommitteePool.SyncCommitteeMessages(req.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee messages: %v", err)
|
||||
}
|
||||
headRoot, err := vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head root: %v", err)
|
||||
}
|
||||
|
||||
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
sigs := make([]bls.Signature, 0, subCommitteeSize)
|
||||
bits := ethpb.NewSyncCommitteeAggregationBits()
|
||||
for _, msg := range msgs {
|
||||
if bytes.Equal(headRoot, msg.BlockRoot) {
|
||||
idxResp, err := vs.syncSubcommitteeIndex(ctx, msg.ValidatorIndex, req.Slot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee index: %v", err)
|
||||
}
|
||||
for _, index := range idxResp.Indices {
|
||||
i := uint64(index)
|
||||
subnetIndex := i / subCommitteeSize
|
||||
if subnetIndex == req.SubnetId {
|
||||
bits.SetBitAt(i%subCommitteeSize, true)
|
||||
sig, err := bls.SignatureFromBytes(msg.Signature)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(
|
||||
codes.Internal,
|
||||
"Could not get bls signature from bytes: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
sigs = append(sigs, sig)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
aggregatedSig := make([]byte, 96)
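// 0xC0 followed by zero bytes is the compressed encoding of the BLS point at infinity;
// it acts as a placeholder signature when no matching sync committee messages were found.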
|
||||
aggregatedSig[0] = 0xC0
|
||||
if len(sigs) != 0 {
|
||||
aggregatedSig = bls.AggregateSignatures(sigs).Marshal()
|
||||
}
|
||||
contribution := ðpb.SyncCommitteeContribution{
|
||||
Slot: req.Slot,
|
||||
BlockRoot: headRoot,
|
||||
SubcommitteeIndex: req.SubnetId,
|
||||
AggregationBits: bits,
|
||||
Signature: aggregatedSig,
|
||||
}
|
||||
|
||||
return contribution, nil
|
||||
}
|
||||
|
||||
// SubmitSignedContributionAndProof is called by a sync committee aggregator
|
||||
// to submit a signed contribution and proof object.
|
||||
func (vs *Server) SubmitSignedContributionAndProof(
|
||||
ctx context.Context, s *ethpb.SignedContributionAndProof,
|
||||
) (*emptypb.Empty, error) {
|
||||
errs, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
// Broadcast the contribution and save it into the pool in parallel; a failure in one should not affect the other.
|
||||
errs.Go(func() error {
|
||||
return vs.P2P.Broadcast(ctx, s)
|
||||
})
|
||||
|
||||
if err := vs.SyncCommitteePool.SaveSyncCommitteeContribution(s.Message.Contribution); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wait for p2p broadcast to complete and return the first error (if any)
|
||||
err := errs.Wait()
|
||||
return &emptypb.Empty{}, err
|
||||
}
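For context, these two RPCs together support the aggregator flow: fetch the contribution for a subnet, wrap it in a ContributionAndProof, and submit it back signed. A rough client-side sketch follows; it assumes the stock gRPC-generated BeaconNodeValidatorClient mirrors the server methods above and that the AggregatorIndex and SelectionProof field names follow the spec's ContributionAndProof, with signing elided — it is not the validator client's actual implementation.

package sketch // hypothetical package for this sketch

import (
	"context"

	types "github.com/prysmaticlabs/eth2-types"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// aggregateAndSubmit sketches the aggregator flow built on the RPCs above.
func aggregateAndSubmit(
	ctx context.Context,
	client ethpb.BeaconNodeValidatorClient,
	slot types.Slot,
	subnet uint64,
	aggregatorIndex types.ValidatorIndex,
	selectionProof, signature []byte,
) error {
	contrib, err := client.GetSyncCommitteeContribution(ctx, &ethpb.SyncCommitteeContributionRequest{
		Slot:     slot,
		SubnetId: subnet,
	})
	if err != nil {
		return err
	}
	_, err = client.SubmitSignedContributionAndProof(ctx, &ethpb.SignedContributionAndProof{
		Message: &ethpb.ContributionAndProof{
			AggregatorIndex: aggregatorIndex,
			Contribution:    contrib,
			SelectionProof:  selectionProof,
		},
		Signature: signature,
	})
	return err
}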
|
||||
@@ -0,0 +1,96 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
func TestGetSyncMessageBlockRoot_OK(t *testing.T) {
|
||||
r := []byte{'a'}
|
||||
server := &Server{
|
||||
HeadFetcher: &mock.ChainService{Root: r},
|
||||
}
|
||||
res, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, r, res.Root)
|
||||
}
|
||||
|
||||
func TestSubmitSyncMessage_OK(t *testing.T) {
|
||||
st, _ := testutil.DeterministicGenesisStateAltair(t, 10)
|
||||
server := &Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
HeadFetcher: &mock.ChainService{
|
||||
State: st,
|
||||
},
|
||||
}
|
||||
msg := ðpb.SyncCommitteeMessage{
|
||||
Slot: 1,
|
||||
ValidatorIndex: 2,
|
||||
}
|
||||
_, err := server.SubmitSyncMessage(context.Background(), msg)
|
||||
require.NoError(t, err)
|
||||
savedMsgs, err := server.SyncCommitteePool.SyncCommitteeMessages(1)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []*ethpb.SyncCommitteeMessage{msg}, savedMsgs)
|
||||
}
|
||||
|
||||
func TestGetSyncSubcommitteeIndex_Ok(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
state.SkipSlotCache.Disable()
|
||||
defer state.SkipSlotCache.Enable()
|
||||
|
||||
server := &Server{
|
||||
HeadFetcher: &mock.ChainService{
|
||||
CurrentSyncCommitteeIndices: []types.CommitteeIndex{0},
|
||||
NextSyncCommitteeIndices: []types.CommitteeIndex{1},
|
||||
},
|
||||
}
|
||||
pubKey := [48]byte{}
|
||||
// Request slot 0, should get the index 0 for validator 0.
|
||||
res, err := server.GetSyncSubcommitteeIndex(context.Background(), ðpb.SyncSubcommitteeIndexRequest{
|
||||
PublicKey: pubKey[:], Slot: types.Slot(0),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex{0}, res.Indices)
|
||||
|
||||
// Request at period boundary, should get index 1 for validator 0.
|
||||
periodBoundary := types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1
|
||||
res, err = server.GetSyncSubcommitteeIndex(context.Background(), ðpb.SyncSubcommitteeIndexRequest{
|
||||
PublicKey: pubKey[:], Slot: periodBoundary,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []types.CommitteeIndex{1}, res.Indices)
|
||||
}
|
||||
|
||||
func TestSubmitSignedContributionAndProof_OK(t *testing.T) {
|
||||
server := &Server{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
contribution := ðpb.SignedContributionAndProof{
|
||||
Message: ðpb.ContributionAndProof{
|
||||
Contribution: ðpb.SyncCommitteeContribution{
|
||||
Slot: 1,
|
||||
SubcommitteeIndex: 2,
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := server.SubmitSignedContributionAndProof(context.Background(), contribution)
|
||||
require.NoError(t, err)
|
||||
savedMsgs, err := server.SyncCommitteePool.SyncCommitteeContributions(1)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []*ethpb.SyncCommitteeContribution{contribution.Message.Contribution}, savedMsgs)
|
||||
}
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
@@ -38,9 +39,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
chainSync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
|
||||
ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service"
|
||||
ethpbv1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
prysmv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/logutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -62,8 +61,8 @@ type Service struct {
|
||||
cancel context.CancelFunc
|
||||
listener net.Listener
|
||||
grpcServer *grpc.Server
|
||||
canonicalStateChan chan *statepb.BeaconState
|
||||
incomingAttestation chan *ethpbv1alpha1.Attestation
|
||||
canonicalStateChan chan *ethpb.BeaconState
|
||||
incomingAttestation chan *ethpb.Attestation
|
||||
credentialError error
|
||||
connectedRPCClients map[net.Addr]bool
|
||||
clientConnectionLock sync.Mutex
|
||||
@@ -94,6 +93,7 @@ type Config struct {
|
||||
AttestationsPool attestations.Pool
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingsPool slashings.PoolManager
|
||||
SyncCommitteeObjectPool synccommittee.Pool
|
||||
SyncService chainSync.Checker
|
||||
Broadcaster p2p.Broadcaster
|
||||
PeersFetcher p2p.PeersProvider
|
||||
@@ -116,8 +116,8 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
canonicalStateChan: make(chan *statepb.BeaconState, params.BeaconConfig().DefaultBufferSize),
|
||||
incomingAttestation: make(chan *ethpbv1alpha1.Attestation, params.BeaconConfig().DefaultBufferSize),
|
||||
canonicalStateChan: make(chan *ethpb.BeaconState, params.BeaconConfig().DefaultBufferSize),
|
||||
incomingAttestation: make(chan *ethpb.Attestation, params.BeaconConfig().DefaultBufferSize),
|
||||
connectedRPCClients: make(map[net.Addr]bool),
|
||||
}
|
||||
}
|
||||
@@ -191,7 +191,9 @@ func (s *Service) Start() {
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
SlashingsPool: s.cfg.SlashingsPool,
|
||||
StateGen: s.cfg.StateGen,
|
||||
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
|
||||
}
|
||||
|
||||
validatorServerV1 := &validator.Server{
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
TimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
@@ -245,8 +247,8 @@ func (s *Service) Start() {
|
||||
Broadcaster: s.cfg.Broadcaster,
|
||||
StateGen: s.cfg.StateGen,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
|
||||
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
|
||||
ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, attestationBufferSize),
|
||||
CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, attestationBufferSize),
|
||||
}
|
||||
beaconChainServerV1 := &beacon.Server{
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
@@ -268,10 +270,10 @@ func (s *Service) Start() {
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
VoluntaryExitsPool: s.cfg.ExitPool,
|
||||
}
|
||||
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
|
||||
ethpb.RegisterNodeServer(s.grpcServer, nodeServer)
|
||||
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
|
||||
prysmv2.RegisterHealthServer(s.grpcServer, nodeServer)
|
||||
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
|
||||
ethpb.RegisterHealthServer(s.grpcServer, nodeServer)
|
||||
ethpb.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
|
||||
ethpbservice.RegisterBeaconChainServer(s.grpcServer, beaconChainServerV1)
|
||||
ethpbservice.RegisterEventsServer(s.grpcServer, &events.Server{
|
||||
Ctx: s.ctx,
|
||||
@@ -281,6 +283,7 @@ func (s *Service) Start() {
|
||||
})
|
||||
if s.cfg.EnableDebugRPCEndpoints {
|
||||
log.Info("Enabled debug gRPC endpoints")
|
||||
|
||||
debugServer := &debugv1alpha1.Server{
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
@@ -299,10 +302,11 @@ func (s *Service) Start() {
|
||||
StateGenService: s.cfg.StateGen,
|
||||
},
|
||||
}
|
||||
prysmv2.RegisterDebugServer(s.grpcServer, debugServer)
|
||||
ethpb.RegisterDebugServer(s.grpcServer, debugServer)
|
||||
ethpbservice.RegisterBeaconDebugServer(s.grpcServer, debugServerV1)
|
||||
}
|
||||
ethpbv1alpha1.RegisterBeaconNodeValidatorServer(s.grpcServer, validatorServer)
|
||||
|
||||
ethpb.RegisterBeaconNodeValidatorServer(s.grpcServer, validatorServer)
|
||||
ethpbservice.RegisterBeaconValidatorServer(s.grpcServer, validatorServerV1)
|
||||
// Register reflection service on gRPC server.
|
||||
reflection.Register(s.grpcServer)
|
||||
|
||||
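The hunk above collapses the prysmv2/ethpbv1alpha1 aliases into a single ethpb alias when registering the v1alpha1 gRPC services. A minimal, self-contained sketch of the same register-then-serve shape, with the Prysm-specific Register* calls reduced to a comment; only grpc-go and its reflection package are assumed here, not the actual wiring of the service:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:4000")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	grpcServer := grpc.NewServer()
	// In the service above, ethpb.RegisterNodeServer, ethpb.RegisterBeaconChainServer,
	// ethpbservice.RegisterBeaconValidatorServer, etc. are called at this point with
	// their concrete server implementations.
	// Registering reflection last lets tools such as grpcurl list the services.
	reflection.Register(grpcServer)
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}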
@@ -12,7 +12,7 @@ import (
|
||||
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -25,7 +25,7 @@ func TestGetState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
headSlot := types.Slot(123)
|
||||
fillSlot := func(state *eth.BeaconState) error {
|
||||
fillSlot := func(state *ethpb.BeaconState) error {
|
||||
state.Slot = headSlot
|
||||
return nil
|
||||
}
|
||||
@@ -59,7 +59,7 @@ func TestGetState(t *testing.T) {
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
state, err := testutil.NewBeaconState(func(state *eth.BeaconState) error {
|
||||
state, err := testutil.NewBeaconState(func(state *ethpb.BeaconState) error {
|
||||
state.BlockRoots[0] = r[:]
|
||||
return nil
|
||||
})
|
||||
@@ -67,7 +67,7 @@ func TestGetState(t *testing.T) {
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ð.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, r))
|
||||
require.NoError(t, db.SaveState(ctx, state, r))
|
||||
|
||||
@@ -88,7 +88,7 @@ func TestGetState(t *testing.T) {
|
||||
|
||||
p := StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
FinalizedCheckPoint: ð.Checkpoint{
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Root: stateRoot[:],
|
||||
},
|
||||
},
|
||||
@@ -108,7 +108,7 @@ func TestGetState(t *testing.T) {
|
||||
|
||||
p := StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{
|
||||
CurrentJustifiedCheckPoint: ð.Checkpoint{
|
||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{
|
||||
Root: stateRoot[:],
|
||||
},
|
||||
},
|
||||
@@ -187,7 +187,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
headSlot := types.Slot(123)
|
||||
fillSlot := func(state *eth.BeaconState) error {
|
||||
fillSlot := func(state *ethpb.BeaconState) error {
|
||||
state.Slot = headSlot
|
||||
return nil
|
||||
}
|
||||
@@ -218,13 +218,13 @@ func TestGetStateRoot(t *testing.T) {
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
state, err := testutil.NewBeaconState(func(state *eth.BeaconState) error {
|
||||
state, err := testutil.NewBeaconState(func(state *ethpb.BeaconState) error {
|
||||
state.BlockRoots[0] = r[:]
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ð.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, r))
|
||||
require.NoError(t, db.SaveState(ctx, state, r))
|
||||
|
||||
@@ -248,7 +248,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
blk.Block.Slot = 40
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ð.Checkpoint{
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 5,
|
||||
Root: root[:],
|
||||
}
|
||||
@@ -279,7 +279,7 @@ func TestGetStateRoot(t *testing.T) {
|
||||
blk.Block.Slot = 40
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ð.Checkpoint{
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 5,
|
||||
Root: root[:],
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ var (
|
||||
host = flag.String("host", "127.0.0.1", "Host to serve on")
|
||||
debug = flag.Bool("debug", false, "Enable debug logging")
|
||||
allowedOrigins = flag.String("corsdomain", "localhost:4242", "A comma separated list of CORS domains to allow")
|
||||
enableDebugRPCEndpoints = flag.Bool("enable-debug-rpc-endpoints", false, "Enable debug rpc endpoints such as /eth/v1alpha1/beacon/state")
|
||||
enableDebugRPCEndpoints = flag.Bool("enable-debug-rpc-endpoints", false, "Enable debug rpc endpoints such as /prysm/v1alpha1/beacon/state")
|
||||
grpcMaxMsgSize = flag.Int("grpc-max-msg-size", 1<<22, "Integer to define max receive message call size")
|
||||
)
|
||||
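The grpc-max-msg-size flag above defaults to 1<<22 bytes (4 MiB). A sketch of how such a limit is commonly applied when dialing a gRPC endpoint with grpc-go; that the gateway wires the flag exactly this way is an assumption for illustration:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	maxMsgSize := 1 << 22 // 4 MiB, matching the flag's default
	conn, err := grpc.Dial(
		"127.0.0.1:4000",
		grpc.WithInsecure(),
		// Raise the per-call receive limit for every RPC made on this connection.
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)),
	)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close()
}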
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
"//shared/benchutil:__pkg__",
|
||||
"//shared/depositutil:__subpackages__",
|
||||
"//shared/testutil:__pkg__",
|
||||
"//shared/testutil/altair:__pkg__",
|
||||
"//slasher/rpc:__subpackages__",
|
||||
"//spectest:__subpackages__",
|
||||
"//tools/benchmark-files-gen:__pkg__",
|
||||
|
||||
@@ -21,6 +21,7 @@ go_library(
|
||||
"//fuzz:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -30,6 +31,7 @@ go_library(
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
@@ -68,6 +70,7 @@ go_test(
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
)
|
||||
|
||||
@@ -86,7 +86,7 @@ func (m *MockStateManager) StateBySlot(ctx context.Context, slot types.Slot) (st
|
||||
func (m *MockStateManager) RecoverStateSummary(
|
||||
ctx context.Context,
|
||||
blockRoot [32]byte,
|
||||
) (*statepb.StateSummary, error) {
|
||||
) (*ethpb.StateSummary, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
|
||||
@@ -6,11 +6,15 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
transition "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -23,7 +27,6 @@ func (s *State) ReplayBlocks(
|
||||
) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "stateGen.ReplayBlocks")
|
||||
defer span.End()
|
||||
|
||||
var err error
|
||||
// The input block list is sorted in decreasing slots order.
|
||||
if len(signed) > 0 {
|
||||
@@ -146,6 +149,16 @@ func executeStateTransitionStateGen(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
if signed.Version() == version.Altair {
|
||||
sa, err := signed.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = altair.ProcessSyncAggregate(state, sa)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
@@ -176,14 +189,30 @@ func processSlotsStateGen(ctx context.Context, state state.BeaconState, slot typ
|
||||
return nil, errors.Wrap(err, "could not process slot")
|
||||
}
|
||||
if transition.CanProcessEpoch(state) {
|
||||
state, err = transition.ProcessEpochPrecompute(ctx, state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimizations")
|
||||
switch state.Version() {
|
||||
case version.Phase0:
|
||||
state, err = transition.ProcessEpochPrecompute(ctx, state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimizations")
|
||||
}
|
||||
case version.Altair:
|
||||
state, err = altair.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimization")
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("beacon state should have a version")
|
||||
}
|
||||
}
|
||||
if err := state.SetSlot(state.Slot() + 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if helpers.IsEpochStart(state.Slot()) && helpers.SlotToEpoch(state.Slot()) == params.BeaconConfig().AltairForkEpoch {
|
||||
state, err = altair.UpgradeToAltair(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return state, nil
|
||||
|
||||
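The processSlotsStateGen change above dispatches epoch processing on the state version and upgrades the state to Altair when the slot crosses the fork boundary. A small self-contained sketch of the boundary condition itself; slotsPerEpoch and altairForkEpoch are hard-coded stand-ins for params.BeaconConfig() values, chosen purely for illustration:

package main

import "fmt"

const (
	slotsPerEpoch   = 32 // assumption for illustration
	altairForkEpoch = 1  // assumption for illustration
)

func isEpochStart(slot uint64) bool { return slot%slotsPerEpoch == 0 }

func slotToEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func main() {
	for slot := uint64(30); slot <= 34; slot++ {
		upgrade := isEpochStart(slot) && slotToEpoch(slot) == altairForkEpoch
		fmt.Printf("slot %d: upgrade to Altair = %v\n", slot, upgrade)
	}
	// Only slot 32, the first slot of epoch 1, triggers the upgrade.
}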
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
@@ -17,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -40,7 +40,7 @@ func TestReplayBlocks_AllSkipSlots(t *testing.T) {
|
||||
copy(mockRoot[:], "hello-world")
|
||||
cp.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{}))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{}))
|
||||
|
||||
service := New(beaconDB)
|
||||
targetSlot := params.BeaconConfig().SlotsPerEpoch - 1
|
||||
@@ -69,7 +69,7 @@ func TestReplayBlocks_SameSlot(t *testing.T) {
|
||||
copy(mockRoot[:], "hello-world")
|
||||
cp.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{}))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{}))
|
||||
|
||||
service := New(beaconDB)
|
||||
targetSlot := beaconState.Slot()
|
||||
@@ -99,7 +99,7 @@ func TestReplayBlocks_LowerSlotBlock(t *testing.T) {
|
||||
copy(mockRoot[:], "hello-world")
|
||||
cp.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{}))
|
||||
require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{}))
|
||||
|
||||
service := New(beaconDB)
|
||||
targetSlot := beaconState.Slot()
|
||||
@@ -110,6 +110,34 @@ func TestReplayBlocks_LowerSlotBlock(t *testing.T) {
|
||||
assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots")
|
||||
}
|
||||
|
||||
func TestReplayBlocks_ThroughForkBoundary(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig()
|
||||
bCfg.AltairForkEpoch = 1
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = 1
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
genesisBlock := blocks.NewGenesisBlock([]byte{})
|
||||
bodyRoot, err := genesisBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = beaconState.SetLatestBlockHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: genesisBlock.Block.Slot,
|
||||
ParentRoot: genesisBlock.Block.ParentRoot,
|
||||
StateRoot: params.BeaconConfig().ZeroHash[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
service := New(testDB.SetupDB(t))
|
||||
targetSlot := params.BeaconConfig().SlotsPerEpoch
|
||||
newState, err := service.ReplayBlocks(context.Background(), beaconState, []block.SignedBeaconBlock{}, targetSlot)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify state is version Altair.
|
||||
assert.Equal(t, version.Altair, newState.Version())
|
||||
}
|
||||
|
||||
func TestLoadBlocks_FirstBranch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
@@ -740,7 +768,7 @@ func TestLoadFinalizedBlocks(t *testing.T) {
|
||||
filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 8)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(filteredBlocks))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: roots[8][:]}))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: roots[8][:]}))
|
||||
|
||||
require.NoError(t, s.beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: roots[8][:]}))
|
||||
filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 8)
|
||||
|
||||
@@ -19,7 +19,17 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v2",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//fuzz:__pkg__",
|
||||
"//proto/testing:__subpackages__",
|
||||
"//shared/aggregation:__subpackages__",
|
||||
"//shared/benchutil:__pkg__",
|
||||
"//shared/depositutil:__subpackages__",
|
||||
"//shared/interop:__subpackages__",
|
||||
"//shared/testutil:__pkg__",
|
||||
"//slasher/rpc:__subpackages__",
|
||||
"//spectest:__subpackages__",
|
||||
"//tools/benchmark-files-gen:__pkg__",
|
||||
"//tools/pcli:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
@@ -52,15 +62,19 @@ go_test(
|
||||
"deprecated_getters_test.go",
|
||||
"deprecated_setters_test.go",
|
||||
"getters_test.go",
|
||||
"state_trie_block_box_test.go",
|
||||
"state_trie_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/state/v2/state_trie_block_box_test.go (new file, 275 lines)
@@ -0,0 +1,275 @@
|
||||
package v2_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestInitializeFromProto(t *testing.T) {
|
||||
testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
type test struct {
|
||||
name string
|
||||
state *ethpb.BeaconStateAltair
|
||||
error string
|
||||
}
|
||||
initTests := []test{
|
||||
{
|
||||
name: "nil state",
|
||||
state: nil,
|
||||
error: "received nil state",
|
||||
},
|
||||
{
|
||||
name: "nil validators",
|
||||
state: ðpb.BeaconStateAltair{
|
||||
Slot: 4,
|
||||
Validators: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty state",
|
||||
state: ðpb.BeaconStateAltair{},
|
||||
},
|
||||
{
|
||||
name: "full state",
|
||||
state: pbState,
|
||||
},
|
||||
}
|
||||
for _, tt := range initTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := stateAltair.InitializeFromProto(tt.state)
|
||||
if tt.error != "" {
|
||||
require.ErrorContains(t, tt.error, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitializeFromProtoUnsafe(t *testing.T) {
|
||||
testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
type test struct {
|
||||
name string
|
||||
state *ethpb.BeaconStateAltair
|
||||
error string
|
||||
}
|
||||
initTests := []test{
|
||||
{
|
||||
name: "nil state",
|
||||
state: nil,
|
||||
error: "received nil state",
|
||||
},
|
||||
{
|
||||
name: "nil validators",
|
||||
state: ðpb.BeaconStateAltair{
|
||||
Slot: 4,
|
||||
Validators: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty state",
|
||||
state: ðpb.BeaconStateAltair{},
|
||||
},
|
||||
{
|
||||
name: "full state",
|
||||
state: pbState,
|
||||
},
|
||||
}
|
||||
for _, tt := range initTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := stateAltair.InitializeFromProtoUnsafe(tt.state)
|
||||
if tt.error != "" {
|
||||
assert.ErrorContains(t, tt.error, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconState_HashTreeRoot(t *testing.T) {
|
||||
t.Skip("TODO: Fix FSSZ HTR for sync committee and participation roots")
|
||||
testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
type test struct {
|
||||
name string
|
||||
stateModify func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error)
|
||||
error string
|
||||
}
|
||||
initTests := []test{
|
||||
{
|
||||
name: "unchanged state",
|
||||
stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
return beaconState, nil
|
||||
},
|
||||
error: "",
|
||||
},
|
||||
{
|
||||
name: "different slot",
|
||||
stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
if err := beaconState.SetSlot(5); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
},
|
||||
error: "",
|
||||
},
|
||||
{
|
||||
name: "different validator balance",
|
||||
stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
val, err := beaconState.ValidatorAtIndex(5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
val.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement
|
||||
if err := beaconState.UpdateValidatorAtIndex(5, val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
},
|
||||
error: "",
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
var oldHTR []byte
|
||||
for _, tt := range initTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
testState, err = tt.stateModify(testState)
|
||||
assert.NoError(t, err)
|
||||
root, err := testState.HashTreeRoot(context.Background())
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
}
|
||||
pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
genericHTR, err := pbState.HashTreeRoot()
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
}
|
||||
assert.DeepNotEqual(t, []byte{}, root[:], "Received empty hash tree root")
|
||||
assert.DeepEqual(t, genericHTR[:], root[:], "Expected hash tree root to match generic")
|
||||
if len(oldHTR) != 0 && bytes.Equal(root[:], oldHTR) {
|
||||
t.Errorf("Expected HTR to change, received %#x == old %#x", root, oldHTR)
|
||||
}
|
||||
oldHTR = root[:]
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconState_HashTreeRoot_FieldTrie(t *testing.T) {
|
||||
t.Skip("TODO: Fix FSSZ HTR for sync committee and participation roots")
|
||||
testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
|
||||
type test struct {
|
||||
name string
|
||||
stateModify func(state.BeaconStateAltair) (state.BeaconStateAltair, error)
|
||||
error string
|
||||
}
|
||||
initTests := []test{
|
||||
{
|
||||
name: "unchanged state",
|
||||
stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
return beaconState, nil
|
||||
},
|
||||
error: "",
|
||||
},
|
||||
{
|
||||
name: "different slot",
|
||||
stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
if err := beaconState.SetSlot(5); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
},
|
||||
error: "",
|
||||
},
|
||||
{
|
||||
name: "different validator balance",
|
||||
stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
val, err := beaconState.ValidatorAtIndex(5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
val.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement
|
||||
if err := beaconState.UpdateValidatorAtIndex(5, val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
},
|
||||
error: "",
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
var oldHTR []byte
|
||||
for _, tt := range initTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
testState, err = tt.stateModify(testState)
|
||||
assert.NoError(t, err)
|
||||
root, err := testState.HashTreeRoot(context.Background())
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
}
|
||||
pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
genericHTR, err := pbState.HashTreeRoot()
|
||||
if err == nil && tt.error != "" {
|
||||
t.Errorf("Expected error, expected %v, recevied %v", tt.error, err)
|
||||
}
|
||||
assert.DeepNotEqual(t, []byte{}, root[:], "Received empty hash tree root")
|
||||
assert.DeepEqual(t, genericHTR[:], root[:], "Expected hash tree root to match generic")
|
||||
if len(oldHTR) != 0 && bytes.Equal(root[:], oldHTR) {
|
||||
t.Errorf("Expected HTR to change, received %#x == old %#x", root, oldHTR)
|
||||
}
|
||||
oldHTR = root[:]
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconStateAltair_ProtoBeaconStateCompatibility(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, 6)
|
||||
inner := s.InnerStateUnsafe()
|
||||
genesis, err := stateAltair.ProtobufBeaconState(inner)
|
||||
require.NoError(t, err)
|
||||
customState, err := stateAltair.InitializeFromProto(genesis)
|
||||
require.NoError(t, err)
|
||||
cloned, ok := proto.Clone(genesis).(*ethpb.BeaconStateAltair)
|
||||
assert.Equal(t, true, ok, "Object is not of type *ethpb.BeaconStateAltair")
|
||||
custom := customState.CloneInnerState()
|
||||
assert.DeepSSZEqual(t, cloned, custom)
|
||||
r1, err := customState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
beaconState, err := stateAltair.InitializeFromProto(genesis)
|
||||
require.NoError(t, err)
|
||||
r2, err := beaconState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, r1, r2, "Mismatched roots")
|
||||
|
||||
// We then write to the state and compare hash tree roots again.
|
||||
balances := genesis.Balances
|
||||
balances[0] = 3823
|
||||
require.NoError(t, customState.SetBalances(balances))
|
||||
r1, err = customState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis.Balances = balances
|
||||
beaconState, err = stateAltair.InitializeFromProto(genesis)
|
||||
require.NoError(t, err)
|
||||
r2, err = beaconState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, r1, r2, "Mismatched roots")
|
||||
}
|
||||
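The new test file above leans on table-driven subtests: each case carries an input state and an expected error substring, and runs under t.Run. A generic sketch of that pattern with a trivial stand-in validator; the validate helper and its "received nil state" message only mimic the error path the real InitializeFromProto tests exercise:

package v2_test

import (
	"errors"
	"strings"
	"testing"
)

// validate is a hypothetical stand-in for stateAltair.InitializeFromProto:
// it only reproduces the nil-input error path used in the cases above.
func validate(s *struct{}) error {
	if s == nil {
		return errors.New("received nil state")
	}
	return nil
}

func TestValidate_TableDriven(t *testing.T) {
	cases := []struct {
		name  string
		input *struct{}
		err   string
	}{
		{name: "nil state", input: nil, err: "received nil state"},
		{name: "empty state", input: &struct{}{}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := validate(tc.input)
			if tc.err == "" && err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if tc.err != "" && (err == nil || !strings.Contains(err.Error(), tc.err)) {
				t.Fatalf("expected error containing %q, got %v", tc.err, err)
			}
		})
	}
}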
@@ -8,6 +8,7 @@ go_library(
|
||||
"decode_pubsub.go",
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
@@ -29,12 +30,17 @@ go_library(
|
||||
"subscriber_beacon_attestation.go",
|
||||
"subscriber_beacon_blocks.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
"subscription_topic_handler.go",
|
||||
"utils.go",
|
||||
"validate_aggregate_proof.go",
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
"validate_beacon_blocks.go",
|
||||
"validate_proposer_slashing.go",
|
||||
"validate_sync_committee_message.go",
|
||||
"validate_sync_contrbution_proof.go",
|
||||
"validate_voluntary_exit.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/sync",
|
||||
@@ -45,6 +51,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
@@ -57,6 +64,7 @@ go_library(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
@@ -84,6 +92,8 @@ go_library(
|
||||
"//shared/sszutil:go_default_library",
|
||||
"//shared/timeutils:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_kevinms_leakybucket_go//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//:go_default_library",
|
||||
@@ -97,6 +107,7 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_trailofbits_go_mutexasserts//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -111,11 +122,13 @@ go_test(
|
||||
"context_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
"pending_attestations_queue_test.go",
|
||||
"pending_blocks_queue_test.go",
|
||||
"rate_limiter_test.go",
|
||||
"rpc_beacon_blocks_by_range_test.go",
|
||||
"rpc_beacon_blocks_by_root_test.go",
|
||||
"rpc_chunked_response_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_metadata_test.go",
|
||||
"rpc_ping_test.go",
|
||||
@@ -126,6 +139,7 @@ go_test(
|
||||
"subscriber_beacon_aggregate_proof_test.go",
|
||||
"subscriber_beacon_blocks_test.go",
|
||||
"subscriber_test.go",
|
||||
"subscription_topic_handler_test.go",
|
||||
"sync_test.go",
|
||||
"utils_test.go",
|
||||
"validate_aggregate_proof_test.go",
|
||||
@@ -133,13 +147,17 @@ go_test(
|
||||
"validate_beacon_attestation_test.go",
|
||||
"validate_beacon_blocks_test.go",
|
||||
"validate_proposer_slashing_test.go",
|
||||
"validate_sync_committee_message_test.go",
|
||||
"validate_sync_contrbution_proof_test.go",
|
||||
"validate_voluntary_exit_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 4,
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -161,12 +179,14 @@ go_test(
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//shared/abool:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/copyutil:go_default_library",
|
||||
"//shared/p2putils:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/rand:go_default_library",
|
||||
"//shared/sszutil:go_default_library",
|
||||
@@ -176,6 +196,7 @@ go_test(
|
||||
"//shared/timeutils:go_default_library",
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_kevinms_leakybucket_go//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//:go_default_library",
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
)
|
||||
|
||||
// Specifies the fixed size context length.
|
||||
const digestLength = 4
|
||||
|
||||
// writes peer's current context for the expected payload to the stream.
|
||||
func writeContextToStream(stream network.Stream, chain blockchain.ChainInfoFetcher) error {
|
||||
func writeContextToStream(objCtx []byte, stream network.Stream, chain blockchain.ChainInfoFetcher) error {
|
||||
rpcCtx, err := rpcContext(stream, chain)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -18,6 +21,10 @@ func writeContextToStream(stream network.Stream, chain blockchain.ChainInfoFetch
|
||||
if len(rpcCtx) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Always choose the object's context when writing to the stream.
|
||||
if objCtx != nil {
|
||||
rpcCtx = objCtx
|
||||
}
|
||||
_, err = stream.Write(rpcCtx)
|
||||
return err
|
||||
}
|
||||
@@ -32,7 +39,7 @@ func readContextFromStream(stream network.Stream, chain blockchain.ChainInfoFetc
|
||||
return []byte{}, nil
|
||||
}
|
||||
// Read context (fork-digest) from stream
|
||||
b := make([]byte, 4)
|
||||
b := make([]byte, digestLength)
|
||||
if _, err := stream.Read(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -49,7 +56,27 @@ func rpcContext(stream network.Stream, chain blockchain.ChainInfoFetcher) ([]byt
|
||||
case p2p.SchemaVersionV1:
|
||||
// Return empty context for a v1 method.
|
||||
return []byte{}, nil
|
||||
case p2p.SchemaVersionV2:
|
||||
currFork := chain.CurrentFork()
|
||||
genVersion := chain.GenesisValidatorRoot()
|
||||
digest, err := helpers.ComputeForkDigest(currFork.CurrentVersion, genVersion[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return digest[:], nil
|
||||
default:
|
||||
return nil, errors.New("invalid version of %s registered for topic: %s")
|
||||
}
|
||||
}
|
||||
|
||||
// Validates that the rpc topic matches the provided version.
|
||||
func validateVersion(version string, stream network.Stream) error {
|
||||
_, _, streamVersion, err := p2p.TopicDeconstructor(string(stream.Protocol()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if streamVersion != version {
|
||||
return errors.Errorf("stream version of %s doesn't match provided version %s", streamVersion, version)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
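The context.go change above makes writeContextToStream prefer an explicit per-object context over the chain's current fork digest, and fixes the context size to digestLength bytes. A minimal sketch of that selection logic, with the libp2p network.Stream modelled as a plain io.Writer and the digest values invented for illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// digestLength mirrors the fixed 4-byte context written on v2 RPC topics.
const digestLength = 4

// writeContext prefers the object's own context bytes when provided and
// falls back to the chain's current fork digest otherwise.
func writeContext(objCtx, chainDigest []byte, w io.Writer) error {
	ctx := chainDigest
	if objCtx != nil {
		ctx = objCtx
	}
	if len(ctx) != digestLength {
		return fmt.Errorf("context must be %d bytes, got %d", digestLength, len(ctx))
	}
	_, err := w.Write(ctx)
	return err
}

func main() {
	var buf bytes.Buffer
	chainDigest := []byte{0xaf, 0xca, 0xba, 0x0e} // hypothetical current fork digest
	blockDigest := []byte{0x01, 0x02, 0x03, 0x04} // hypothetical per-object context
	if err := writeContext(blockDigest, chainDigest, &buf); err != nil {
		panic(err)
	}
	fmt.Printf("wrote context %#x\n", buf.Bytes()) // 0x01020304 -- the object's context wins
}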
@@ -30,7 +30,7 @@ func TestContextWrite_NoWrites(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Nothing will be written to the stream
|
||||
assert.NoError(t, writeContextToStream(strm, nil))
|
||||
assert.NoError(t, writeContextToStream(nil, strm, nil))
|
||||
if testutil.WaitTimeout(wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
|
||||
@@ -1,32 +1,60 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
ssz "github.com/ferranbt/fastssz"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var errNilPubsubMessage = errors.New("nil pubsub message")
|
||||
var errInvalidTopic = errors.New("invalid topic format")
|
||||
|
||||
func (s *Service) decodePubsubMessage(msg *pubsub.Message) (proto.Message, error) {
|
||||
func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, error) {
|
||||
if msg == nil || msg.Topic == nil || *msg.Topic == "" {
|
||||
return nil, errNilPubsubMessage
|
||||
}
|
||||
topic := *msg.Topic
|
||||
fDigest, err := p2p.ExtractGossipDigest(topic)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "extraction failed for topic: %s", topic)
|
||||
}
|
||||
topic = strings.TrimSuffix(topic, s.cfg.P2P.Encoding().ProtocolSuffix())
|
||||
topic, err := s.replaceForkDigest(topic)
|
||||
topic, err = s.replaceForkDigest(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
base, ok := p2p.GossipTopicMappings[topic]
|
||||
if !ok {
|
||||
// Specially handle subnet messages.
|
||||
switch {
|
||||
case strings.Contains(topic, p2p.GossipAttestationMessage):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ð.Attestation{})]
|
||||
// Given that both sync message related subnets have the same message name, we have to
|
||||
// differentiate them below.
|
||||
case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncContributionAndProofSubnetTopicFormat):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||
}
|
||||
|
||||
base := p2p.GossipTopicMappings(topic, 0)
|
||||
if base == nil {
|
||||
return nil, p2p.ErrMessageNotMapped
|
||||
}
|
||||
m := proto.Clone(base)
|
||||
m, ok := proto.Clone(base).(ssz.Unmarshaler)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("message of %T does not support marshaller interface", base)
|
||||
}
|
||||
// Handle different message types across forks.
|
||||
if topic == p2p.BlockSubnetTopicFormat {
|
||||
m, err = extractBlockDataType(fDigest[:], s.cfg.Chain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := s.cfg.P2P.Encoding().DecodeGossip(msg.Data, m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
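decodePubsubMessage now clones the mapped base message and asserts the clone to ssz.Unmarshaler before decoding the gossip payload into it. A minimal sketch of the clone-and-assert step using a well-known protobuf wrapper type in place of the Prysm messages; the ssz.Unmarshaler assertion in the real code is analogous to the concrete-type assertion shown here:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	orig := wrapperspb.String("beacon_block")
	// proto.Clone returns proto.Message, so the caller must assert the clone
	// back to a usable type before decoding into it.
	cloned, ok := proto.Clone(orig).(*wrapperspb.StringValue)
	if !ok {
		panic("unexpected concrete type from proto.Clone")
	}
	fmt.Println(proto.Equal(orig, cloned)) // true: the clone is an independent copy
}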
@@ -2,25 +2,34 @@ package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/d4l3k/messagediff"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
p2ptesting "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestService_decodePubsubMessage(t *testing.T) {
|
||||
digest, err := helpers.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, make([]byte, 32))
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
topic string
|
||||
input *pubsub.Message
|
||||
want proto.Message
|
||||
want interface{}
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
@@ -44,12 +53,12 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "topic not mapped to any message type",
|
||||
topic: "/eth2/abcdef/foo",
|
||||
topic: "/eth2/abababab/foo/ssz_snappy",
|
||||
wantErr: p2p.ErrMessageNotMapped,
|
||||
},
|
||||
{
|
||||
name: "valid message -- beacon block",
|
||||
topic: p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlock{})],
|
||||
topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlock{})], digest),
|
||||
input: &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: func() []byte {
|
||||
@@ -62,13 +71,13 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
},
|
||||
},
|
||||
wantErr: nil,
|
||||
want: testutil.NewBeaconBlock(),
|
||||
want: wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := &Service{
|
||||
cfg: &Config{P2P: p2ptesting.NewTestP2P(t)},
|
||||
cfg: &Config{P2P: p2ptesting.NewTestP2P(t), Chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}},
|
||||
}
|
||||
if tt.topic != "" {
|
||||
if tt.input == nil {
|
||||
@@ -79,7 +88,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
tt.input.Message.Topic = &tt.topic
|
||||
}
|
||||
got, err := s.decodePubsubMessage(tt.input)
|
||||
if err != tt.wantErr {
|
||||
if err != tt.wantErr && !strings.Contains(err.Error(), tt.wantErr.Error()) {
|
||||
t.Errorf("decodePubsubMessage() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.