Compare commits

...

294 Commits

Author SHA1 Message Date
nisdas
765122dc05 fix one more test 2021-08-04 22:27:26 +08:00
nisdas
a283fa58fb fixes 2021-08-04 21:25:23 +08:00
nisdas
12fe2d91ed fix tests 2021-08-04 19:47:35 +08:00
nisdas
d44905329c Merge branch 'hf1' of https://github.com/prysmaticlabs/geth-sharding into interopFixes 2021-08-04 16:22:21 +08:00
Nishant Das
e2238bd6d1 Fix Spec Edge Case For Altair (#9295)
* fix edge case

* Update validator/client/sync_committee.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-08-04 16:22:01 +08:00
nisdas
c0310ad534 remove tracer 2021-08-04 15:25:59 +08:00
nisdas
e10ac0af02 Merge branch 'hf1' of https://github.com/prysmaticlabs/geth-sharding into interopFixes 2021-08-04 15:22:11 +08:00
nisdas
6e358da5ed checkpoint fixes from the last few days 2021-08-04 15:19:45 +08:00
terence tsao
df291e2ffb Update BUILD.bazel 2021-07-30 10:49:26 -07:00
terence tsao
5ba5b303d3 Fix spec test 2021-07-30 10:49:16 -07:00
terence tsao
f2ce4dcab3 Fix validator build 2021-07-30 10:19:10 -07:00
terence tsao
8765c3ac42 Add contexts to sync committee functions 2021-07-30 09:53:45 -07:00
terence tsao
57fff2d88e Update BUILD.bazel 2021-07-30 09:31:47 -07:00
terence tsao
c010a972e7 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-30 09:31:24 -07:00
terence tsao
c02ed805b0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-29 10:32:53 -07:00
Raul Jordan
93adf4980a Merge branch 'develop' into hf1 2021-07-29 09:26:19 -05:00
nisdas
3fe969992a fix bad merge 2021-07-29 19:25:34 +08:00
nisdas
2135108830 deepsource 2021-07-29 18:35:07 +08:00
nisdas
4c146dc896 gaz 2021-07-29 18:31:56 +08:00
nisdas
042a3cda02 fix again 2021-07-29 18:31:31 +08:00
nisdas
b8676480f0 fix again 2021-07-29 16:56:42 +08:00
nisdas
711022d34e fix p2p 2021-07-29 16:47:25 +08:00
nisdas
eec93be4ed fix 2021-07-29 16:33:20 +08:00
nisdas
21d096622f fix tests 2021-07-29 15:51:17 +08:00
nisdas
62846d61b8 fix spec tests 2021-07-29 15:45:12 +08:00
nisdas
a228a407be fix tests so far 2021-07-29 15:35:11 +08:00
Raul Jordan
f527b676da val tests pass 2021-07-28 22:43:44 -05:00
Raul Jordan
5bd4e10dd6 fix build 2021-07-28 22:04:54 -05:00
Raul Jordan
d19e13352b beacon builds 2021-07-28 21:56:10 -05:00
rauljordan
6bda9a0bf2 fix conflict in proto file 2021-07-28 20:19:46 -05:00
rauljordan
2da6b7bb97 attempt fix merge confs 2021-07-28 20:19:00 -05:00
Raul Jordan
7faed861c4 Merge branch 'develop' into hf1 2021-07-28 11:30:41 -05:00
nisdas
8b9129d84e merge fixes 2021-07-28 23:11:49 +08:00
nisdas
8b219b14da Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-07-28 20:32:15 +08:00
Raul Jordan
5bf9bd3d73 fix up conflicts with develop 2021-07-27 12:27:24 -05:00
Raul Jordan
59f12c8ac1 sync develop 2021-07-27 12:02:49 -05:00
terence tsao
1094ca0838 Update field_trie.go 2021-07-27 08:52:51 -07:00
terence tsao
ebe4b309c0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-27 08:52:42 -07:00
rauljordan
ea94f0e70d add changes from develop 2021-07-26 21:16:03 -05:00
rauljordan
47443e130d merge slasher proto 2021-07-26 21:13:44 -05:00
rauljordan
4dfa5c2757 rem slashing proto 2021-07-26 21:02:09 -05:00
terence tsao
1851d40f74 Go fmt 2021-07-26 17:31:59 -07:00
terence tsao
eee1d47655 Fix conflicts 2021-07-26 17:14:57 -07:00
terence tsao
7ce76652fb Merge 2021-07-26 16:57:49 -07:00
prestonvanloon
19e6f0c19a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-23 11:46:41 -05:00
Raul Jordan
6470e2718a Merge branch 'develop' into hf1 2021-07-23 11:01:22 -05:00
Raul Jordan
30cd5c076e fix confs 2021-07-23 11:00:47 -05:00
terence tsao
03d8af5cda Core/altair: sync committee tests clean up 2021-07-23 08:27:32 -07:00
Raul Jordan
194f0cb76d merge with develop 2021-07-23 10:04:22 -05:00
terence tsao
2a0e8510d4 Update skip_slot_cache_test.go 2021-07-23 07:32:36 -07:00
Nishant Das
5e35f778b9 unregister rpc topics (#9241) 2021-07-23 13:00:44 +08:00
terence tsao
972ae7f169 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-07-22 08:12:58 -07:00
terence tsao
80fafaddff Move TestSyncCommitteePeriod 2021-07-22 08:12:35 -07:00
rauljordan
e6ecdfde0d regen protos 2021-07-21 17:38:01 -05:00
rauljordan
1daf51788d merge with develop 2021-07-21 17:34:53 -05:00
terence tsao
35055539a7 Revamp head state + sync committee operations and the bug fix (#9200) 2021-07-21 08:04:18 -07:00
Nishant Das
81ab3ca46c Enable Sync Evaluator For E2E (#9240) 2021-07-21 06:56:26 -07:00
terence tsao
5895b10678 Fix next epoch sync duty calculation (#9233)
* Fix next epoch duty assignment

* Regression test

* Update comment

* Go fmt

* Fix sync committee disable namings

* Fix test comment
2021-07-21 10:08:59 +08:00
prestonvanloon
7ea645ed37 Merge commit '8d1c9fe1e22630d9fd03c4be861a1d33f35b7a11' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 09:46:46 -05:00
prestonvanloon
a900792160 Merge commit '412ddbb29ef73466b880e7ba4101252ac88cf078' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 09:44:45 -05:00
prestonvanloon
cd87bfd8ab Merge commit '8a7010f5aaeb95abd1f7036fcd356817a2b339fe' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 09:38:53 -05:00
terence tsao
98477a0286 Merge commit '1beb0071b5730f0ec0fb1beb9056c06d55ef562b' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 07:26:55 -07:00
prestonvanloon
2d1a63d9f4 Merge commit '2a0c4e0d5fe8e8a422e3bb47a7a692db56ec55c9' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 08:58:58 -05:00
terence tsao
52be270f0a Update TargetAggregatorsPerSyncSubcommittee to 16 (#9229) 2021-07-20 10:56:06 +08:00
prestonvanloon
895a86fd53 Merge commit '15bfcf8ff6f43db6029c5d8eee0b71a7bead86b0' of github.com:prysmaticlabs/prysm into hf1 2021-07-19 21:52:37 -05:00
terence tsao
af6246a5f9 Merge commit '15704053e112b8bb89472c75cba77e67e26e2c49' of github.com:prysmaticlabs/prysm into hf1 2021-07-19 18:29:13 -07:00
terence tsao
5e80ceeff9 Remove unused sync committee feed type 2021-07-19 14:01:00 -07:00
Nishant Das
ee661971f0 Add Fixes Observed From Devnet (#9205)
* add changes and test cases

* gaz

* add one more test

* comment

* deep source

* fix
2021-07-18 12:32:32 +08:00
nisdas
cc7e36776d gaz 2021-07-17 18:34:12 +08:00
nisdas
14a9d9a1ad Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-07-17 18:33:45 +08:00
terence tsao
2b9fb29ed2 Sync with develop 2021-07-16 15:20:45 -07:00
Nishant Das
9300d1026f Add E2E Support For Altair (#9173) 2021-07-16 08:06:54 -07:00
Nishant Das
48345eb68e Fix Gossip Message ID Versioning (#9206) 2021-07-16 08:03:35 -07:00
Nishant Das
60d14f1806 Allow For Proper Registration and Deregistration of Topics (#9194)
* add fixes

* gaz

* fix

* fix

* Update beacon-chain/sync/fork_watcher.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* preston's review

* add unit test

* gaz

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-07-16 10:50:40 +08:00
terence tsao
d80d4d01a6 Update spec test (#9198) 2021-07-15 16:13:59 -07:00
terence tsao
275192680f Fix build 2021-07-15 07:10:19 -07:00
terence tsao
9ca958064e Fix build 2021-07-15 06:06:03 -07:00
Nishant Das
604958da6c Add List Blocks RPC For Altair (#9186)
* add new method in

* fix

* preston's review

* gaz
2021-07-15 13:22:40 +08:00
terence tsao
ade94444f2 Add active balance to committee cache (#9187) 2021-07-14 18:32:17 -07:00
terence tsao
4df2f4c790 Fix signSlotWithSelectionProof signature 2021-07-14 15:56:50 -07:00
terence tsao
3f8f5edb3f Clean up deposits 2021-07-14 13:25:28 -07:00
terence tsao
0ad4e433a5 Sync with develop 2021-07-14 10:08:08 -07:00
terence tsao
1be2503e82 Disallow sync committee cache update if state root is 0s (#9190) 2021-07-14 08:00:23 -07:00
terence tsao
0dd228bb94 Sync committee pool can retrieve without pop (#9179)
* `RetrieveByKey` for priority queue

* Sync committee pool to use `RetrieveByKey`

* Update message_test.go

* remove err

* fix test

Co-authored-by: nisdas <nishdas93@gmail.com>
2021-07-13 16:13:21 +08:00
terence tsao
78450ea557 Remove validator pubkey to index map (#9177) 2021-07-12 13:44:09 -07:00
terence tsao
f0e6d4a0bd Sync committee cache: fill on miss (#9176) 2021-07-12 10:49:49 -07:00
terence tsao
97901c90a5 Update sync committee cache using block root at boundary slot - 1 (#9171) 2021-07-12 08:06:47 -07:00
nisdas
1379dbfc23 Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-07-12 14:01:52 +08:00
terence tsao
19dbc7e249 Revamp head state + sync committee operations (#9167) 2021-07-09 15:02:03 -07:00
terence tsao
76a70065f2 Add more metrics (#9168) 2021-07-09 08:01:15 -05:00
Nishant Das
51f513b246 Remove Non Spec Config Variables (#9170)
* only one way

* cleanup

* extra

* fix

* fix test
2021-07-09 07:57:26 -05:00
Nishant Das
2b349a1b06 Interop Fixes (#9163) 2021-07-08 20:33:44 -07:00
terence tsao
a819caca16 State gen's reply can upgrade state (#9164) 2021-07-08 15:38:30 -07:00
Nishant Das
e7116d4ea8 Fix Goimports and DeepSource (#9148)
* add goimports

* fix again

* imports

* deep source

* assignment

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-07-08 13:31:16 -05:00
prestonvanloon
f8cd989161 Fix build 2021-07-08 10:42:50 -05:00
prestonvanloon
4c19265ac5 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-08 10:33:11 -05:00
Nishant Das
f361bf781f Altair Networking Support (#8994)
* add method and test

* fix message type across forks

* invert fork version schedule

* add support for forks in metadata

* start fixing discovery

* fix fork version stuff

* fix up fork digest usage

* add fork transition mechanism

* gaz

* add fixes to mappings

* checking in current progress

* fix aggregates

* fix scheduling

* fix

* fix copy

* fix rpc stuff

* fixes

* add new topics and files

* gaz

* add pool

* finish pipeline for sync aggregate proofs

* gaz

* Update beacon-chain/cache/sync_subnet_ids.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* add support for sync committee pipeline

* fix

* check better

* fix

* fix again

* remove redundant code

* remove older code

* checkpoint

* get it working

* gaz

* regen

* checkpoint

* gaz

* build

* fix edge case

* fix all errors

* sync with hf1

* fix issues

* gaz

* fix bad rpc respones

* fix it all

* hash

* save everything

* fix all remaining tests

* comments

* fix build

* terence's review

* preston's review

* build

* gaz

* gaz

* add signature test

* add in more tests

* gaz

* add more tests

* fix goimports

* Revert "fix goimports"

This reverts commit 41bf7b4a5c.

* fix tests

* fix all tests

* fix all tests

* fix everything

* fix last test

* fix rpc registration

* fix build

* fix build

* remove outdated method

* add recovery

* Update subscriber.go

* terence's review

* fix bad merge

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-07-08 11:25:41 +08:00
terence tsao
a458e556e0 Align sync committee pipelines to alpha8 (#9160) 2021-07-07 10:54:48 -07:00
Nishant Das
773b259cd5 add in (#9158)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-07-07 11:03:16 -05:00
terence tsao
2bb3da1ba3 Clean up sync committee p2p pipelines (#9153) 2021-07-07 07:35:01 -07:00
Raul Jordan
47367d98b4 Resolve Deep Source Warnings (#9154) 2021-07-07 06:33:40 -07:00
terence tsao
1ff18c07a4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-06 20:17:17 -07:00
terence tsao
279a95deba Fix spec tests 2021-07-06 13:26:17 -07:00
terence tsao
c0bfa6ef79 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1
# Conflicts:
#	beacon-chain/core/state/BUILD.bazel
#	beacon-chain/core/state/transition.go
#	beacon-chain/core/state/transition_no_verify_sig.go
#	beacon-chain/db/kv/BUILD.bazel
#	beacon-chain/db/kv/blocks.go
#	beacon-chain/db/kv/blocks_test.go
#	beacon-chain/rpc/eth/v1/beacon/BUILD.bazel
#	beacon-chain/rpc/prysm/v1alpha1/beacon/BUILD.bazel
#	beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel
#	beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go
#	beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go
#	beacon-chain/rpc/service.go
#	beacon-chain/state/stategen/BUILD.bazel
#	beacon-chain/state/stategen/replay.go
#	beacon-chain/sync/BUILD.bazel
#	beacon-chain/sync/rpc_chunked_response.go
#	proto/eth/v1alpha1/wrapper/beacon_block.go
#	shared/interfaces/BUILD.bazel
#	shared/testutil/BUILD.bazel
#	shared/testutil/block_test.go
2021-07-06 13:15:35 -07:00
terence tsao
7e961c2be9 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-07-06 11:09:43 -07:00
terence tsao
c7c7f9bf1b Fix missing 3rd arguments for gomock's Do 2021-07-06 11:09:32 -07:00
Nishant Das
7ce85cac31 Fix Stategen For Altair (#9152) 2021-07-06 10:43:36 -07:00
terence tsao
2d836f485d Move test only functions to testutil (#9137) 2021-07-06 12:08:52 -05:00
terence tsao
780253b786 Go fmt 2021-07-05 10:10:08 -07:00
terence tsao
710bb98575 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-05 09:38:49 -07:00
Nishant Das
d5387851d0 Validator Fixes From Interop (#9110)
* validator fixes

* gaz

* fix validator

* pull in fixes

* add in test

* terence's review

* fix references

* fix all tests
2021-07-02 13:50:16 +08:00
terence tsao
ab8dd3788f Altair: Green minimal spec tests (#9078) 2021-06-30 19:28:44 -07:00
terence tsao
bf1b550b7d Green Altair e2e tests (#9131) 2021-06-30 16:33:08 -07:00
prestonvanloon
705564108c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-30 14:27:43 -05:00
terence tsao
3df82e7540 Use sync committee start period root as cache key (#9128) 2021-06-30 08:54:31 -07:00
Raul Jordan
d0a749ce4b ensure build 2021-06-30 10:49:54 -05:00
Raul Jordan
081c80998c fix conflicts and merge with develop 2021-06-30 10:44:18 -05:00
terence tsao
8c62f10b74 Altair: proposer pack sync aggregate (#9124)
* Pack sync aggregate into beacon block

* Build.bazel
2021-06-29 15:22:16 -05:00
terence tsao
e232b3ce30 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 12:14:37 -07:00
terence tsao
17153bb4e9 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 10:26:38 -07:00
Raul Jordan
329a45c06a fix confs 2021-06-29 12:24:47 -05:00
Raul Jordan
1c82394a69 sync changes 2021-06-29 12:21:09 -05:00
terence tsao
856081c80c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 09:17:56 -07:00
terence tsao
6fff327864 Gazelle 2021-06-29 09:12:21 -07:00
terence tsao
e2879f8352 Resolve conflict 2021-06-29 09:12:14 -07:00
terence tsao
523fe58f61 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 09:10:07 -07:00
terence tsao
04a303c8d2 Add logging (#9115) 2021-06-29 08:27:41 +08:00
Raul Jordan
0844bd62ea sync changes 2021-06-28 13:47:42 -05:00
Nishant Das
816dc47b17 Add New V2 Streaming Blocks Method (#9094)
* add latest changes in

* gaz

* regen mocks

* build file

* gazelle

* import

* import

* add iface check

* add one more case

* add one more case

* add proper type

* fix wierd imports

* fix mocks

* go imports

* fix build

* fix go mocks

* pass in non nil objects

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-28 16:06:53 +08:00
terence tsao
caeec851d4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-27 10:58:31 -07:00
terence tsao
062933af35 Update spec test to alpha8 (#9103) 2021-06-25 13:21:58 -07:00
terence tsao
169573c32e Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-25 12:27:56 -07:00
Raul Jordan
e7a7b2861e Merge branch 'develop' into hf1 2021-06-24 18:12:57 -06:00
Nishant Das
fe6c80fe95 Add Sync Subnet Id Cache (#9089)
* add stuff

* remove log

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-24 11:56:02 -05:00
Nishant Das
2f52dfe96e Add Updated Fork Utils (#9090)
* add fork utils

* fix it

* more unit tests

* fix test

* fix test

* fix tests

* fix tests

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-24 11:00:27 -05:00
Raul Jordan
93a7b96f16 skip test for validator at current release 2021-06-24 09:43:00 -06:00
Raul Jordan
f078b62c3e Merge branch 'develop' into hf1 2021-06-24 09:42:12 -06:00
Preston Van Loon
f476c39708 Use same version of spectests and spec in WORKSPACE (#9084)
* Use same version of spectests and spec in WORKSPACE

* Fix test loading eth2 spec files
2021-06-23 14:59:32 -05:00
terence tsao
c02e507422 Use correct fork version object for domain data (#9081)
* Use correct fork version object

* Use p2putils

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-23 13:40:03 -05:00
Raul Jordan
ece07e5fbb Implement Beacon Validator RPC V2 In Proper Package (#9083)
* implement v2 endpoints in own folder

* passing tests

* naming
2021-06-23 13:15:23 -05:00
prestonvanloon
6bbe3dbd10 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-23 12:48:22 -05:00
Raul Jordan
18bfc2a34e move rpc packages (#9082) 2021-06-23 12:27:19 -05:00
Raul Jordan
b774af9535 Altair: Use Validator Index for Sync Committee Caching (#9074)
* begin using validator indices for sync committee caching

* amend to use validator indices in rpc

* cache tests pass

* refactor for deduplication

* broken file

* amend core helper tests

* fix tests

* comment

* fix fuzzer iface

* imports

* fix up tests
2021-06-23 12:09:38 -05:00
Raul Jordan
719a5fca02 V2 Prysm Proto Definitions (#9062)
* begin v2 prysm protos

* define

* v2 protos build

* add in generated files

* imports

* do not modify v1alpha1

* revert imports

* use alias pattern

* attempt alias

* attempt new fastssz

* edit

* versioning all works

* edit protos

* edit terms

* edit to reflect proto versioning

* fix more build issues

* beacon chain builds

* beacon chain and shared packages build

* begin helping validator client build

* define protos

* gaz

* build

* tidy check

* gazelle

* revert
2021-06-23 11:27:06 -05:00
Nishant Das
b4a0e4375a add new changes (#9079) 2021-06-23 22:35:35 +08:00
terence tsao
4d276d2fdf Add sync committee head state cache (#9077) 2021-06-23 07:57:11 +08:00
terence tsao
8797179cfb Altair: Update sync committee cache (#9043)
* Update sync committee cache

* Remove debug log

* Gazelle
2021-06-23 07:39:22 +08:00
terence tsao
7cc38108aa Remove copy when push to priority queue (#9076)
* No reflect copy and clean up `SaveSyncCommitteeMessage`

* Update BUILD.bazel

* Go mod tidy
2021-06-23 07:38:30 +08:00
Raul Jordan
365ced285e Merge branch 'develop' into hf1 2021-06-22 14:23:33 -06:00
terence tsao
97e5730fd9 Altair: validator submit SignedContributionAndProof (#9064)
* Contribution and proof protos

* Completed `GetSyncCommitteeContribution` and `SubmitSignedContributionAndProof`

* Update beacon_validator_client_mock.go

* sync committee tests

* Update deposit_test.go
2021-06-22 18:58:56 +08:00
terence tsao
3919b49000 Fixed failing tests from resolving conflict 2021-06-21 21:56:13 -06:00
terence tsao
7f13396e44 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-21 08:10:13 -07:00
terence tsao
ae3e5718e6 Altair: check validator is an aggregator for sync committee (#9058) 2021-06-18 11:09:26 -07:00
Nishant Das
97a49240ba Add Block Proposals In Altair (#9056)
* add protobufs

* add tests

* mockgen

* fix mocks

* terence's review

* add ugly hack

* hack

* make ineffassign happy

* add back
2021-06-18 15:42:07 +08:00
terence tsao
232d519445 Fix sync committee duty off by 1 (#9057)
* Fix sync committee duty off by 1

* Update sync_committee_test.go
2021-06-18 11:57:55 +08:00
terence tsao
afd815bb5d Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-17 11:00:57 -07:00
terence tsao
56d383a354 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-06-17 10:44:25 -07:00
terence tsao
733023df03 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-17 10:09:04 -07:00
Nishant Das
9d9ce13753 Use Proper Test Util Methods For Altair (#9054)
* fix

* fix block

* terence's review

* fix
2021-06-17 14:22:12 +08:00
terence tsao
975e7a76bf Altair: Validator can submit sync committee msg (#9048)
* Grpc for submitting sync committee message

* RPC server implementation

* RPC client validator implementation

* Tests

* Use slot to get block root

* Gazelle

* Update tests

* Update tests

* Underflow check
2021-06-17 12:21:33 +08:00
Nishant Das
20ae23bd42 Add Sync Committee Pipelines (#9016)
* add sync pipelines

* gaz

* fix iface

* finally have 1 test working

* add all test cases

* gaz

* fix visibility

* ugly hack

* add all new test cases

* add more stuff

* all tests passing

* gaz

* fix lint
2021-06-17 10:42:34 +08:00
terence tsao
63cf429fa0 Add fallback check to sync committee helper (#9051)
* Add fallback to helper for sync committee check

* Gazelle

* Satisfy fuzz test

* Errors in its own file

* Update BUILD.bazel

* Gazelle
2021-06-17 10:28:21 +08:00
terence tsao
2aab4e2efe Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-16 10:44:45 -07:00
terence tsao
e2156f25e0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-14 20:30:50 -07:00
terence tsao
a537833f75 Altair: add sync committee to duty rpc (#9037)
* Helpers to update sync committee cache

* Update grpc

* Update server implementation

* Update client implementation

* Fix test setup & deepsrc complains
2021-06-14 21:48:31 +00:00
terence tsao
61bf95e4e2 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-14 10:54:00 -07:00
terence tsao
357d3f3b6a Altair: Reuse validator wrapper (#9027) 2021-06-11 13:44:43 -07:00
terence tsao
b0bbfcab7f Run go mod tidy 2021-06-11 13:06:52 -07:00
terence tsao
06801a5230 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-11 12:01:27 -07:00
terence tsao
278857d576 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-10 20:29:44 -07:00
terence tsao
9c5c70fb32 Altair: sign block can handle versioning (#9013) 2021-06-10 15:44:49 -07:00
terence tsao
54326af141 Sync committee to use priority queue (#9018) 2021-06-10 15:44:28 -07:00
terence tsao
6020682ad1 Altair: beacon block operation in DB (#9012) 2021-06-10 15:16:32 -07:00
terence tsao
3f45d54986 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-09 14:27:24 -07:00
terence tsao
ecb51dc55d Altair: save state in DB (#9004)
* Altair: save state in DB

* Simplify return for `hasAltairKey`

* Go fmt
2021-06-09 11:34:42 +08:00
terence tsao
cbf4aeb859 Align to alpha7 (#9005) 2021-06-09 09:17:31 +08:00
terence tsao
b107bd2a5a Copy sync aggregate (#9003) 2021-06-09 09:17:12 +08:00
terence tsao
a5e2c3f551 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-08 08:06:20 -07:00
terence tsao
efffaeb359 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-06-08 08:05:12 -07:00
terence tsao
144576cf36 Altair Add back minimal spec tests (#8993) 2021-06-08 07:22:56 -07:00
Victor Farazdagi
fcf2be08d8 Altair: ssz_static tests (#8847) 2021-06-07 20:57:05 -07:00
terence tsao
2b5cd139f0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-07 10:53:12 -07:00
terence tsao
77a4fdb509 Altair: fork transition and spec tests (#8919) 2021-06-04 15:43:36 -07:00
Nishant Das
a14d37b0ad Handle Multiple Block Types in Req/Resp (#8905)
* add in fork version

* fix

* add in for minimal

* params added in

* fix up

* checkpoint changes

* clean up

* add new stuff

* add new stuff

* fix rpc context

* fix context

* trigger gh hook

* trigger gh hook

* fix all tests

* fix perms

* support fast ssz everywhere

* fix tests

* fix

* map checks

* add initializer

* terence's review

* add metadata test

* add tests

* fix imports

* fix build

* remove junk
2021-06-04 11:30:53 +08:00
terence tsao
38e28af51e Gazelle 2021-06-03 19:34:27 -07:00
terence tsao
6dbe6cfd8c Add caches for sync committee signature and contribution (#8982) 2021-06-03 17:16:00 -07:00
rauljordan
c156c1fb91 rewrite imports and use builtin ethereumapis 2021-06-03 14:50:27 -05:00
rauljordan
393a744091 merge in develop 2021-06-03 14:33:43 -05:00
terence tsao
f83993b211 Altair: Utilize block interface for clean up (#8969) 2021-06-02 09:31:18 -07:00
nisdas
41433f8b2e Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-06-02 14:02:51 +08:00
terence tsao
95f62de465 Add sync committee test (#8967) 2021-06-01 12:32:12 -07:00
terence tsao
fbb140eff7 Gazelle 2021-06-01 11:22:25 -07:00
terence tsao
22483a285a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-01 11:10:21 -07:00
terence tsao
1d835d9859 Altair: Add Altair block wrapper (#8949)
* Add altair block wrapper

* Update comment

* fix test

Co-authored-by: nisdas <nishdas93@gmail.com>
2021-05-28 17:25:51 +08:00
terence tsao
98f8ab331a Altair: More spec tests (#8941) 2021-05-26 14:10:01 -07:00
terence tsao
0e88418b12 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-26 11:34:31 -07:00
terence tsao
ef3ff6f1d5 Sync with develop 2021-05-26 10:44:10 -07:00
terence tsao
7c22496c65 Fix sha256 2021-05-24 11:00:17 -07:00
terence tsao
c5256d09e0 Altair: update spec tests to alpha.6 (#8931) 2021-05-24 10:46:20 -07:00
terence tsao
d3d1eb833e Add fork spec tests (#8917) 2021-05-21 14:35:22 -05:00
terence tsao
2d9fd4ea29 Update to alpha.5 (#8915) 2021-05-20 13:32:51 -07:00
terence tsao
be168e4034 Altair: Add upgrade logic and tests (#8912) 2021-05-20 12:22:33 -07:00
terence tsao
f7c2b9c197 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-20 11:00:10 -07:00
Nishant Das
89b7cf9be3 Add In Altair Fork Data (#8903)
* add in fork version

* fix

* add in for minimal

* params added in

* fix up

* radek's review
2021-05-20 16:56:19 +08:00
Preston Van Loon
3591f85a66 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-19 12:34:58 -05:00
terence tsao
6ba5ad0325 Altair: align to alpha.4 release (#8900) 2021-05-18 11:14:56 -07:00
Preston Van Loon
76b16a8989 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-18 12:41:43 -05:00
terence tsao
74a19741b4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-18 09:16:24 -07:00
nisdas
fdb68c482e Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-05-18 13:23:49 +08:00
terence tsao
b51729bd2f Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-05-17 14:08:26 -07:00
terence tsao
aef1269223 Sync with develop, remove gogo 2021-05-17 14:08:18 -07:00
terence tsao
9fc1683ec7 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-17 12:01:34 -07:00
terence tsao
3790c5edb2 Altair: Add validator helpers for sync committee (#8891)
* Add validator helpers for sync committee

* Fix `TestAssignedToSyncCommittee`

* Update beacon-chain/core/altair/sync_committee.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Better error message

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-05-17 13:53:38 -05:00
terence tsao
c6c7f8234d Altair: sync committee cache (#8884)
* Starting sync committee cache

* Update index position in committee

* Better comments and tests

* Go fmt

* Manually update BUILD.bazel

* Missing a comma with gazelle

* Unhandle error

* Add cache disabled for fuzzing

* Better place for to lock
2021-05-17 18:23:52 +00:00
terence tsao
c66ea88da8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-17 10:33:04 -07:00
terence tsao
9a8facd76b Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-13 10:54:26 -07:00
terence tsao
ca7e0e4807 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-12 16:10:44 -07:00
terence tsao
6acedb7dfd Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-10 09:41:15 -07:00
terence tsao
a3183bc33e Add SyncCommitteeSigningData Protobuf (#8870) 2021-05-07 17:35:39 +00:00
terence tsao
f6caf627e1 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-06 22:25:41 -07:00
Victor Farazdagi
fa696a883d Altair: "sanity" spec tests (#8831)
* Altair: "sanity" spec tests

* Apply suggestions from code review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* fix imports

* update cache disable

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-05-03 22:19:22 +03:00
terence tsao
cbbf188637 Fix an epoch offset for sync committee rotation (#8857)
* Fix an epoch offset for sync committee rotation

* Regression condition
2021-05-03 21:53:59 +03:00
terence tsao
4d3e65bdcd Altair: Fix field roots computations (#8856)
* Fix sync committee and participation roots

* Skip HTR test because generic fssz impelemntation is wrong

* Gazelle

* Update beacon-chain/state/state-altair/field_root_sync_committee.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2021-05-03 11:23:01 +03:00
terence tsao
0be2bde4cc Altair: Add Eth2FastAggregateVerify (#8836)
* Add

* Add test

* Add stub

* Add stub
2021-04-30 08:35:51 +02:00
terence tsao
d5662556bc Use process inactivity updates (#8841) 2021-04-30 08:28:46 +02:00
terence tsao
26a10ca56e Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-04-28 07:20:52 -07:00
terence tsao
bda70352ca Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-28 07:20:33 -07:00
Victor Farazdagi
fbd45dbf50 Altair: "epoch_processing" spec tests (#8806)
* effective_balance_updates tests

* eth1_data_reset tests

* historical_roots_update tests

* justification_and_finalization tests

* randao_mixes_reset tests

* registry -> registry_updates

* registry_updates tests

* rewards_and_penalties tests

* slashings and slashings_reset tests
2021-04-28 17:15:21 +03:00
Victor Farazdagi
119ef0f8fa Merge branch 'develop' into hf1 2021-04-27 22:20:04 +03:00
Victor Farazdagi
04f38324ba Altair: "rewards" spec tests (#8818)
* Altair: "rewards" spec tests

* better listing of tests

* update RunPrecomputeRewardsAndPenaltiesTests

* remove redundant code
2021-04-27 22:19:25 +03:00
terence tsao
fa27b6e24c Apply inactivity when validator is slashed (#8820)
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2021-04-27 09:22:19 +03:00
terence tsao
fe647e99fc Move ProcessInactivityScores outside of ProcessRewards (#8813)
* Move ProcessInactivityScores out of rewards

* Fix tests
2021-04-27 07:57:25 +03:00
terence tsao
bbcaa7eaf2 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-25 08:44:34 -07:00
terence tsao
1c1b2eb811 Fix precision (#8808) 2021-04-23 13:08:28 -07:00
Victor Farazdagi
427e792073 Altair: "operations" spec tests (#8795)
* altair/operations/attestation test

* altair/operations/proposer_slashing tests

* altair/operations/block_header tests

* altair/operations/deposit tests

* altair/operations/proposer_slashing tests

* altair/operations/voluntary_exit tests

* ignoring minimal/altair for time being
2021-04-23 17:40:15 +03:00
Victor Farazdagi
463481febe Merge branch 'develop' into hf1 2021-04-23 15:14:15 +03:00
Victor Farazdagi
6e41923388 Merge branch 'develop' into hf1 2021-04-22 20:20:16 +03:00
Victor Farazdagi
17798f878a Merge branch 'develop' into hf1 2021-04-22 18:21:27 +03:00
terence tsao
d502f0825a Altair: ExecuteStateTransitionNoVerifyAnySig for state transition (#8796) 2021-04-21 14:29:00 -07:00
terence tsao
96fe2b76bf Altair: base reward implementation (#8797)
* Add altair basereward

* Test
2021-04-21 23:05:21 +03:00
terence tsao
a51a4ca9eb Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-04-21 09:34:36 -07:00
terence tsao
9dd8a1737c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-21 07:50:14 -07:00
Victor Farazdagi
c97f74ccef Merge branch 'develop' into hf1 2021-04-21 14:24:26 +03:00
terence tsao
806a923974 Altair: Add ProcessBlockNoVerifyAnySig (#8784) 2021-04-19 12:02:54 -07:00
terence tsao
4b4c2b97b7 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-16 14:49:33 -07:00
terence tsao
9d22ea840e Altair: Implement ProcessSlots and tests (#8774) 2021-04-16 09:09:22 -07:00
terence tsao
8a507d749a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-15 14:19:24 -07:00
terence tsao
2850581611 Altair: Implement process epoch (#8772) 2021-04-15 09:06:39 -07:00
terence tsao
59bc0c679c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-14 09:38:46 -07:00
terence tsao
969dec8ad2 Altair: Add SetInactivityScores and test (#8762)
* Starting epoch precompute Altair

* Add ProcessRewardsAndPenaltiesPrecompute and tests

* Apply suggestions from code review

* remove redundant err checking

* combine params

* gazelle

* Update beacon-chain/core/altair/epoch_precompute.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/core/altair/epoch_precompute.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Named returns

* Combine function parameters

* Fix ineffectual asssignment

* Add process_inactivity_updates and test

* Fix ordering

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-04-13 13:19:33 -05:00
terence tsao
91fb8eea8c Altair: add modified process rewards and penalties (#8753) 2021-04-13 10:37:32 -07:00
terence tsao
e7ebdb11be Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-13 09:55:28 -07:00
terence tsao
ff3bb0aa8a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-12 12:31:07 -07:00
terence tsao
5945849cb4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-11 14:35:25 -07:00
terence tsao
3435a61413 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-09 17:00:24 -07:00
terence tsao
a3b69600ef Altair: Slashing for block and epoch (#8732) 2021-04-09 16:41:08 -07:00
terence tsao
01841434ec Add ProcessParticipationFlagUpdates (#8731) 2021-04-09 13:44:23 -05:00
terence tsao
f60edb055c Altair: process attestation for block (#8729) 2021-04-08 20:53:16 -07:00
terence tsao
ee3d106a36 Altair: Process sync committee updates for epoch transition (#8728) 2021-04-08 12:53:07 -07:00
terence tsao
9b41a069eb Altair: Process sync committee (#8721) 2021-04-08 06:37:02 -07:00
terence tsao
dc1d5b778b Altair: Sync committee helpers (#8717)
* Add sync commitee helpers

* Remove extra i++
2021-04-07 16:44:24 +03:00
terence tsao
224b92781f Add Altair state trie black box tests (#8716) 2021-04-06 17:22:08 -07:00
terence tsao
6f54a9d057 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-06 09:49:59 -07:00
terence tsao
7906e571a8 Altair: genesis state (#8713) 2021-04-06 07:54:09 -07:00
terence tsao
458817d5ad Altair: Process deposits (#8712) 2021-04-05 13:57:20 -07:00
terence tsao
06290c6805 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-05 09:57:05 -07:00
terence tsao
1adf1f1bef Merge commit '9aa2dd1ae65a1c31bdc61413282871d7df8b1e8f' into hf1 2021-04-01 11:38:18 -07:00
terence tsao
af57cf5e96 Merge commit 'fe4a852e786f01eed02293f4578cd7a4f5e8bc4d' into hf1 2021-04-01 11:38:06 -07:00
terence tsao
d59ba818f0 Merge commit '3d405910e7d16c1af315097a74f0b8682a9a1eeb' into hf1 2021-04-01 11:33:30 -07:00
terence tsao
9aa2dd1ae6 Part of 3 of beacon state Altair pkg - copy and hash tree root (#8690) 2021-03-31 09:39:02 -07:00
terence tsao
f3abe70838 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-30 16:46:22 -07:00
terence tsao
fe4a852e78 Part of 2 of beacon state Altair pkg - getters and setters (#8658) 2021-03-30 16:35:54 -07:00
terence tsao
6af0f619c9 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-29 06:55:46 -07:00
terence tsao
3d405910e7 Part of 1 of beacon state Altair pkg (#8670) 2021-03-25 10:49:39 -07:00
terence tsao
2779daee32 Sync with develop 2021-03-25 09:57:35 -07:00
terence tsao
a0ba4a8563 Sync with develop 2021-03-25 09:50:02 -07:00
terence tsao
926b3725a1 Part of 1 of beacon state Altair pkg (#8652)
* Part 1 of stateAltair pkg- trie and partial field root

* Gazelle

* Sync with develop

* Update visibility
2021-03-24 13:25:38 +00:00
terence tsao
5cc9f4df0b Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-23 15:03:05 -07:00
terence tsao
fd297999b8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-23 14:01:16 -07:00
terence tsao
0d45eeac56 Add altair config (#8642)
* Add altair config

* Add domain config and fix test

* Remove old configs

* Use Altair
2021-03-22 20:50:30 -07:00
terence tsao
e2fcd25039 Rename v1 to altair. Add inactivity_scores field (#8641) 2021-03-22 20:50:19 -07:00
terence tsao
2436d84370 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-22 09:47:23 -07:00
terence tsao
5418d8c367 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-19 13:21:40 -07:00
terence tsao
55e5dee7ab Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-18 21:02:40 -07:00
terence tsao
6a06a4bf98 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-18 07:08:04 -07:00
terence tsao
a9d981dce1 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-17 07:57:24 -07:00
terence tsao
a69947ba51 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-16 11:22:08 -07:00
terence tsao
6a32b18ca9 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-15 07:47:52 -07:00
terence tsao
9ebf8651b4 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-03-15 07:47:45 -07:00
terence tsao
8467485aec Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-12 13:22:52 -08:00
terence tsao
fdb6cf9b57 HF1: Beacon state protobuf definition (#8581) 2021-03-11 15:51:33 -08:00
terence tsao
3da55ad7a4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-11 06:17:53 -08:00
terence tsao
773d561361 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-09 10:13:12 -08:00
terence tsao
7f6d3ccb36 HF1: Add penalty config values (#8522)
* Add new configs

* More tests

* Fix message name
2021-02-25 14:59:47 -06:00
386 changed files with 20225 additions and 2641 deletions

.bazelignore (new file)

@@ -197,6 +197,8 @@ filegroup(
     url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )
 
+eth2_spec_version = "v1.1.0-beta.1"
+
 http_archive(
     name = "eth2_spec_tests_general",
     build_file_content = """
@@ -209,8 +211,8 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "deacc076365c727d653ac064894ecf0d1b0a675d86704dc8de271259f6a7314b",
-    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/general.tar.gz",
+    sha256 = "e9b4cc60a3e676c6b4a9348424e44cff1ebada603ffb31b0df600dbd70e7fbf6",
+    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/general.tar.gz" % eth2_spec_version,
 )
 
 http_archive(
@@ -225,8 +227,8 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "6e9886af3d2f024e563249d70388129e28e3e92f742f289238ed9b7ec7a7f930",
-    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/minimal.tar.gz",
+    sha256 = "cf82dc729ffe7b924f852e57d1973e1a6377c5b52acc903c953277fa9b4e6de8",
+    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/minimal.tar.gz" % eth2_spec_version,
 )
 
 http_archive(
@@ -241,8 +243,24 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "a7b3d0ffc02a567250f424d69b2474fdc9477cd56eada60af7474560b46a8527",
-    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.1.0-alpha.3/mainnet.tar.gz",
+    sha256 = "6c6792375b81858037014e282d28a64b0cf12e12daf16054265c85403b8b329f",
+    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/mainnet.tar.gz" % eth2_spec_version,
+)
+
+http_archive(
+    name = "eth2_spec",
+    build_file_content = """
+filegroup(
+    name = "spec_data",
+    srcs = glob([
+        "**/*.yaml",
+    ]),
+    visibility = ["//visibility:public"],
+)
+""",
+    sha256 = "16094dad1bab4e8ab3adb60c10e311cd1e294cd7bbf5a89505f24bebd3d0e513",
+    strip_prefix = "eth2.0-specs-" + eth2_spec_version[1:],
+    url = "https://github.com/ethereum/eth2.0-specs/archive/refs/tags/%s.tar.gz" % eth2_spec_version,
 )
 
 http_archive(

@@ -5,6 +5,7 @@ go_library(
     srcs = [
         "chain_info.go",
         "head.go",
+        "head_sync_committee_info.go",
         "info.go",
         "init_sync_process_block.go",
         "log.go",
@@ -26,6 +27,7 @@ go_library(
     deps = [
         "//beacon-chain/cache:go_default_library",
         "//beacon-chain/cache/depositcache:go_default_library",
+        "//beacon-chain/core/altair:go_default_library",
         "//beacon-chain/core/epoch/precompute:go_default_library",
         "//beacon-chain/core/feed:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
@@ -42,6 +44,7 @@ go_library(
         "//beacon-chain/powchain:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/stategen:go_default_library",
+        "//beacon-chain/state/v2:go_default_library",
         "//cmd/beacon-chain/flags:go_default_library",
         "//proto/eth/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
@@ -56,7 +59,9 @@ go_library(
         "//shared/slotutil:go_default_library",
         "//shared/timeutils:go_default_library",
         "//shared/traceutil:go_default_library",
+        "//shared/version:go_default_library",
         "@com_github_emicklei_dot//:go_default_library",
+        "@com_github_hashicorp_golang_lru//:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prometheus_client_golang//prometheus:go_default_library",
         "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",

@@ -24,6 +24,8 @@ type ChainInfoFetcher interface {
     GenesisFetcher
     CanonicalFetcher
     ForkFetcher
+    TimeFetcher
+    HeadDomainFetcher
 }
 
 // TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -48,8 +50,12 @@ type HeadFetcher interface {
     HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
     HeadGenesisValidatorRoot() [32]byte
     HeadETH1Data() *ethpb.Eth1Data
+    HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
+    HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
     ProtoArrayStore() *protoarray.Store
     ChainHeads() ([][32]byte, []types.Slot)
+    HeadSyncCommitteeFetcher
+    HeadDomainFetcher
 }
 
 // ForkFetcher retrieves the current fork information of the Ethereum beacon chain.

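To make the intent of the newly embedded fetchers concrete, here is a minimal, hypothetical Go sketch of a caller checking a validator's sync-committee duty through methods shaped like the ones added above. The local Slot/ValidatorIndex/CommitteeIndex types, the lowercase interfaces, and the hasSyncDuty helper are stand-ins for illustration only, not part of the diff.

package sketch

import "context"

// Local stand-ins for the eth2-types definitions used by the real interfaces.
type (
    Slot           uint64
    ValidatorIndex uint64
    CommitteeIndex uint64
)

// headSyncCommitteeFetcher mirrors the HeadSyncCommitteeFetcher method shapes added in the diff.
type headSyncCommitteeFetcher interface {
    HeadCurrentSyncCommitteeIndices(ctx context.Context, index ValidatorIndex, slot Slot) ([]CommitteeIndex, error)
}

// headDomainFetcher mirrors the HeadDomainFetcher method shapes added in the diff.
type headDomainFetcher interface {
    HeadSyncCommitteeDomain(ctx context.Context, slot Slot) ([]byte, error)
}

// hasSyncDuty reports whether the validator sits in the current sync committee at
// the given slot and, if so, returns the signing domain it would use.
func hasSyncDuty(ctx context.Context, c headSyncCommitteeFetcher, d headDomainFetcher,
    idx ValidatorIndex, slot Slot) (bool, []byte, error) {
    positions, err := c.HeadCurrentSyncCommitteeIndices(ctx, idx, slot)
    if err != nil {
        return false, nil, err
    }
    if len(positions) == 0 {
        return false, nil, nil // not part of the committee for this period
    }
    domain, err := d.HeadSyncCommitteeDomain(ctx, slot)
    if err != nil {
        return false, nil, err
    }
    return true, domain, nil
}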
@@ -0,0 +1,188 @@
package blockchain
import (
"context"
"sync"
lru "github.com/hashicorp/golang-lru"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
)
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return the caller's sync committee indices and public keys with respect to the current head state.
type HeadSyncCommitteeFetcher interface {
HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
}
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
// The head sync committee domain functions return the caller's domain data with respect to the slot and head state.
type HeadDomainFetcher interface {
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
}
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
}
// HeadSyncSelectionProofDomain returns the sync committee selection proof domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
}
// HeadSyncContributionProofDomain returns the sync committee contribution and proof domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
}
// HeadCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return helpers.CurrentPeriodSyncSubcommitteeIndices(headState, index)
}
// HeadNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return helpers.NextPeriodSyncSubcommitteeIndices(headState, index)
}
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
nextSlotEpoch := helpers.SlotToEpoch(headState.Slot() + 1)
currEpoch := helpers.SlotToEpoch(headState.Slot())
var syncCommittee *ethpb.SyncCommittee
if helpers.SyncCommitteePeriod(currEpoch) == helpers.SyncCommitteePeriod(nextSlotEpoch) {
syncCommittee, err = headState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
} else {
syncCommittee, err = headState.NextSyncCommittee()
if err != nil {
return nil, err
}
}
return altair.SyncSubCommitteePubkeys(syncCommittee, committeeIndex)
}
// returns calculated domain using input `domain` and `slot`.
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return helpers.Domain(headState.Fork(), helpers.SlotToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
}
// returns the head state that is advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving using `slot` as key.
// On a cache miss, it processes the head state up to `slot` and fills the cache with `slot` as the key.
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
var headState state.BeaconState
var err error
// If a head state already exists for the requested slot, we don't need to process slots.
cachedState := syncCommitteeHeadStateCache.get(slot)
if cachedState != nil && !cachedState.IsNil() {
syncHeadStateHit.Inc()
headState = cachedState
} else {
headState, err = s.HeadState(ctx)
if err != nil {
return nil, err
}
if slot > headState.Slot() {
headState, err = core.ProcessSlots(ctx, headState, slot)
if err != nil {
return nil, err
}
}
syncHeadStateMiss.Inc()
syncCommitteeHeadStateCache.add(slot, headState)
}
return headState, nil
}
var syncCommitteeHeadStateCache = newSyncCommitteeHeadState()
// syncCommitteeHeadState caches the latest head state requested by a sync committee participant.
type syncCommitteeHeadState struct {
cache *lru.Cache
lock sync.RWMutex
}
// newSyncCommitteeHeadState initializes the lru cache for `syncCommitteeHeadState` with size of 1.
func newSyncCommitteeHeadState() *syncCommitteeHeadState {
c, err := lru.New(1) // only need size of 1 to avoid redundant state copy, HTR, and process slots.
if err != nil {
panic(err)
}
return &syncCommitteeHeadState{cache: c}
}
// add `slot` as key and `state` as value onto the lru cache.
func (c *syncCommitteeHeadState) add(slot types.Slot, state state.BeaconState) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(slot, state)
}
// get `state` using `slot` as key. Return nil if nothing is found.
func (c *syncCommitteeHeadState) get(slot types.Slot) state.BeaconState {
c.lock.RLock()
defer c.lock.RUnlock()
val, exists := c.cache.Get(slot)
if !exists {
return nil
}
if val == nil {
return nil
}
return val.(*stateAltair.BeaconState)
}

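A minimal, runnable sketch of the size-1 LRU pattern the cache above relies on, using the same hashicorp/golang-lru package the file imports. A single entry is enough because callers only ever ask for the latest head slot: a repeated request for the same slot skips the state copy and ProcessSlots work, while a request for a new slot simply evicts the previous entry. The fakeState type here is a stand-in for the real beacon state.

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

// fakeState stands in for state.BeaconState in this sketch.
type fakeState struct{ slot uint64 }

func main() {
    c, err := lru.New(1) // size 1: only the most recently requested slot is kept
    if err != nil {
        panic(err)
    }

    c.Add(uint64(100), &fakeState{slot: 100})
    if v, ok := c.Get(uint64(100)); ok {
        fmt.Println("hit for slot", v.(*fakeState).slot) // cache hit
    }

    c.Add(uint64(101), &fakeState{slot: 101}) // evicts the slot-100 entry
    if _, ok := c.Get(uint64(100)); !ok {
        fmt.Println("miss: slot 100 was evicted") // cache miss
    }
}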
@@ -0,0 +1,108 @@
package blockchain
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestService_HeadCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.HeadCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
require.Equal(t, 0, len(indices))
}
func TestService_HeadNextSyncCommitteeIndices(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.HeadNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
require.NoError(t, err)
// Any subcommittee should match the subcommittee size.
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
require.Equal(t, int(subCommitteeSize), len(pubkeys))
}
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
}
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
}
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
}
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
c := newSyncCommitteeHeadState()
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
require.NoError(t, beaconState.SetSlot(100))
cachedState := c.get(101)
require.Equal(t, nil, cachedState)
c.add(101, beaconState)
cachedState = c.get(101)
require.DeepEqual(t, beaconState, cachedState)
}

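As a quick sanity check on the assertion in TestService_HeadSyncCommitteePubKeys above, the expected subcommittee size follows directly from the config. A small sketch, assuming the Altair mainnet defaults of SYNC_COMMITTEE_SIZE = 512 and SYNC_COMMITTEE_SUBNET_COUNT = 4 (the test itself reads these from params.BeaconConfig()):

package main

import "fmt"

func main() {
    // Assumed Altair mainnet values; adjust if the config differs.
    const syncCommitteeSize = 512
    const syncCommitteeSubnetCount = 4

    subCommitteeSize := syncCommitteeSize / syncCommitteeSubnetCount
    fmt.Println(subCommitteeSize) // 128 public keys per subcommittee
}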
@@ -10,6 +10,7 @@ import (
     "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
     "github.com/prysmaticlabs/prysm/shared/params"
     "github.com/prysmaticlabs/prysm/shared/timeutils"
+    "github.com/prysmaticlabs/prysm/shared/version"
     "github.com/sirupsen/logrus"
 )
 
@@ -33,6 +34,12 @@ func logStateTransitionData(b block.BeaconBlock) {
     if len(b.Body().VoluntaryExits()) > 0 {
         log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
     }
+    if b.Version() == version.Altair {
+        agg, err := b.Body().SyncAggregate()
+        if err == nil {
+            log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
+        }
+    }
     log.Info("Finished applying state transition")
 }


@@ -3,15 +3,18 @@ package blockchain
import (
"context"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/version"
)
var (
@@ -107,6 +110,14 @@ var (
Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
},
)
syncHeadStateMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_head_state_miss",
Help: "The number of sync head state requests that are not present in the cache.",
})
syncHeadStateHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
)
// reportSlotMetrics reports slot related metrics.
@@ -206,14 +217,31 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))
var b *precompute.Balance
var v []*precompute.Validator
var err error
switch headState.Version() {
case version.Phase0:
// Validator participation should be viewed on the canonical chain.
v, b, err = precompute.New(ctx, headState)
if err != nil {
return err
}
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
if err != nil {
return err
}
case version.Altair:
v, b, err = altair.InitializeEpochValidators(ctx, headState)
if err != nil {
return err
}
_, b, err = altair.ProcessEpochParticipation(ctx, headState, b, v)
if err != nil {
return err
}
default:
return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
}
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))


@@ -62,6 +62,11 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
return nil
}
func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {


@@ -21,7 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
@@ -42,7 +41,7 @@ type ChainService struct {
Genesis time.Time
ValidatorsRoot [32]byte
CanonicalRoots map[[32]byte]bool
Fork *ethpb.Fork
ETH1Data *ethpb.Eth1Data
DB db.Database
stateNotifier statefeed.Notifier
@@ -52,6 +51,13 @@ type ChainService struct {
ForkChoiceStore *protoarray.Store
VerifyBlkDescendantErr error
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
CurrentSyncCommitteeIndices []types.CommitteeIndex
NextSyncCommitteeIndices []types.CommitteeIndex
SyncCommitteeDomain []byte
SyncSelectionProofDomain []byte
SyncContributionProofDomain []byte
PublicKey [48]byte
SyncCommitteePubkeys [][]byte
}
// StateNotifier mocks the same method in the chain service.
@@ -259,7 +265,7 @@ func (s *ChainService) HeadState(context.Context) (state.BeaconState, error) {
}
// CurrentFork mocks the same method in the chain service.
func (s *ChainService) CurrentFork() *ethpb.Fork {
return s.Fork
}
@@ -393,3 +399,43 @@ func (s *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
},
[]types.Slot{0, 1}
}
// HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always returns 0 and true.
func (s *ChainService) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
return 0, true
}
// HeadValidatorIndexToPublicKey mocks HeadValidatorIndexToPublicKey and always returns the mock's `PublicKey` and a nil error.
func (s *ChainService) HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error) {
return s.PublicKey, nil
}
// HeadCurrentSyncCommitteeIndices mocks HeadCurrentSyncCommitteeIndices and always returns `CurrentSyncCommitteeIndices`.
func (s *ChainService) HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
return s.CurrentSyncCommitteeIndices, nil
}
// HeadNextSyncCommitteeIndices mocks HeadNextSyncCommitteeIndices and always returns `NextSyncCommitteeIndices`.
func (s *ChainService) HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
return s.NextSyncCommitteeIndices, nil
}
// HeadSyncCommitteePubKeys mocks HeadSyncCommitteePubKeys and always returns `SyncCommitteePubkeys`.
func (s *ChainService) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
return s.SyncCommitteePubkeys, nil
}
// HeadSyncCommitteeDomain mocks HeadSyncCommitteeDomain and always returns `SyncCommitteeDomain`.
func (s *ChainService) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.SyncCommitteeDomain, nil
}
// HeadSyncSelectionProofDomain mocks HeadSyncSelectionProofDomain and always returns `SyncSelectionProofDomain`.
func (s *ChainService) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.SyncSelectionProofDomain, nil
}
// HeadSyncContributionProofDomain mocks HeadSyncContributionProofDomain and always returns `SyncContributionProofDomain`.
func (s *ChainService) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.SyncContributionProofDomain, nil
}


@@ -10,17 +10,21 @@ go_library(
"committees.go",
"common.go",
"doc.go",
"error.go",
"proposer_indices_type.go",
"skip_slot_cache.go",
"subnet_ids.go",
"sync_subnet_ids.go",
] + select({
"//fuzz:fuzzing_enabled": [
"committee_disabled.go",
"proposer_indices_disabled.go",
"sync_committee_disabled.go",
],
"//conditions:default": [
"committee.go",
"proposer_indices.go",
"sync_committee.go",
],
}),
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
@@ -32,9 +36,11 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/copyutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/rand:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
@@ -58,6 +64,8 @@ go_test(
"proposer_indices_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"sync_committee_test.go",
"sync_subnet_ids_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -66,6 +74,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_google_gofuzz//:go_default_library",


@@ -127,6 +127,35 @@ func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, e
return item.SortedIndices, nil
}
// ActiveBalance returns the total active balance of a given seed stored in cache.
func (c *CommitteeCache) ActiveBalance(seed [32]byte) (uint64, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists := c.CommitteeCache.Get(key(seed))
if exists {
CommitteeCacheHit.Inc()
} else {
CommitteeCacheMiss.Inc()
return 0, ErrNonCommitteeKey
}
item, ok := obj.(*Committees)
if !ok {
return 0, ErrNotCommittee
}
if item == nil {
return 0, errors.New("item is nil")
}
// Return `ErrNonCommitteeKey` if the active balance field doesn't exist in the item.
if !item.ActiveBalance.Exist {
return 0, ErrNonCommitteeKey
}
return item.ActiveBalance.Total, nil
}
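A minimal sketch (not part of this change) of how a caller might consult the cached balance and fall back to the state on a miss; the function name and variables are illustrative, and helpers.Seed, helpers.CurrentEpoch, and helpers.TotalActiveBalance are assumed from the existing helpers package.
func activeBalanceWithCache(c *cache.CommitteeCache, st state.BeaconState) (uint64, error) {
	seed, err := helpers.Seed(st, helpers.CurrentEpoch(st), params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		return 0, err
	}
	total, err := c.ActiveBalance(seed)
	if err == cache.ErrNonCommitteeKey {
		// Cache miss: compute the total active balance directly from the state.
		return helpers.TotalActiveBalance(st)
	}
	return total, err
}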
// ActiveIndicesCount returns the active indices count of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
c.lock.RLock()


@@ -41,6 +41,11 @@ func (c *FakeCommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
return 0, nil
}
// ActiveBalance returns the active balance of a given seed stored in cache.
func (c *FakeCommitteeCache) ActiveBalance(seed [32]byte) (uint64, error) {
return 0, nil
}
// ProposerIndices returns the proposer indices of a given seed.
func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
return nil, nil


@@ -88,6 +88,24 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
assert.Equal(t, len(item.SortedIndices), count)
}
func TestCommitteeCache_ActiveBalance(t *testing.T) {
cache := NewCommitteesCache()
balances := &Balance{
Exist: true,
Total: uint64(10000),
}
item := &Committees{Seed: [32]byte{'A'}, ActiveBalance: balances}
_, err := cache.ActiveBalance(item.Seed)
require.Equal(t, ErrNonCommitteeKey, err)
require.NoError(t, cache.AddCommitteeShuffledList(item))
got, err := cache.ActiveBalance(item.Seed)
require.NoError(t, err)
assert.Equal(t, balances.Total, got)
}
func TestCommitteeCache_CanRotate(t *testing.T) {
cache := NewCommitteesCache()


@@ -10,10 +10,22 @@ import (
// a Committee struct.
var ErrNotCommittee = errors.New("object is not a committee struct")
// ErrNonCommitteeKey will be returned when the committee key does not exist in cache.
var ErrNonCommitteeKey = errors.New("committee key does not exist")
// Committees defines the shuffled committees seed.
type Committees struct {
CommitteeCount uint64
Seed [32]byte
ShuffledIndices []types.ValidatorIndex
SortedIndices []types.ValidatorIndex
ActiveBalance *Balance
}
// Balance tracks the total active balance.
// Since the default uint64 value is 0, `Exist` is used to distinguish whether
// this field has been filled.
type Balance struct {
Exist bool
Total uint64
}
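A small illustrative example (not from this change): a zero balance is a legal cached value, so callers set Exist explicitly, letting ActiveBalance tell "not cached" apart from "cached as zero".
committees := &Committees{
	Seed:          [32]byte{'A'},
	ActiveBalance: &Balance{Exist: true, Total: 32000000000}, // e.g. one 32-ETH validator, in Gwei
}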

beacon-chain/cache/error.go (new file)

@@ -0,0 +1,9 @@
package cache
import "errors"
// Sync committee cache related errors.
// ErrNonExistingSyncCommitteeKey is returned when the sync committee key (root) does not exist in the cache.
var ErrNonExistingSyncCommitteeKey = errors.New("sync committee key does not exist")
var errNotSyncCommitteeIndexPosition = errors.New("object is not a syncCommitteeIndexPosition struct")

beacon-chain/cache/sync_committee.go (new file)

@@ -0,0 +1,187 @@
// +build !libfuzzer
package cache
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"k8s.io/client-go/tools/cache"
)
var (
maxSyncCommitteeSize = uint64(3) // Allows 3 forks to happen around `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary.
// SyncCommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
SyncCommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_committee_index_cache_miss",
Help: "The number of committee requests that aren't present in the sync committee index cache.",
})
// SyncCommitteeCacheHit tracks the number of committee requests that are in the cache.
SyncCommitteeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_committee_index_cache_hit",
Help: "The number of committee requests that are present in the sync committee index cache.",
})
)
// SyncCommitteeCache uses a FIFO cache to store validator positions within the sync committee.
// It is safe for concurrent reads and writes.
type SyncCommitteeCache struct {
cache *cache.FIFO
lock sync.RWMutex
}
// Index positions of all validators in the sync committee, where `currentSyncCommitteeRoot` is the
// key and `vIndexToPositionMap` is the value. Inside `vIndexToPositionMap`, validator positions
// are cached with the validator index as the key and a `positionInCommittee` struct as the value.
type syncCommitteeIndexPosition struct {
currentSyncCommitteeRoot [32]byte
vIndexToPositionMap map[types.ValidatorIndex]*positionInCommittee
}
// Index position of individual validator of current period and next period sync committee.
type positionInCommittee struct {
currentPeriod []types.CommitteeIndex
nextPeriod []types.CommitteeIndex
}
// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *SyncCommitteeCache {
return &SyncCommitteeCache{
cache: cache.NewFIFO(keyFn),
}
}
// CurrentPeriodIndexPosition returns the current period index positions of a validator index with respect to
// the sync committee. If the input validator index has no assignment, an empty list is returned.
// If the input root does not exist in the cache, ErrNonExistingSyncCommitteeKey is returned;
// in that case the caller should check the state directly for the validator's position.
func (s *SyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
s.lock.RLock()
defer s.lock.RUnlock()
pos, err := s.idxPositionInCommittee(root, valIdx)
if err != nil {
return nil, err
}
if pos == nil {
return []types.CommitteeIndex{}, nil
}
return pos.currentPeriod, nil
}
// NextPeriodIndexPosition returns the next period index positions of a validator index with respect to
// the sync committee. If the input validator index has no assignment, an empty list is returned.
// If the input root does not exist in the cache, ErrNonExistingSyncCommitteeKey is returned;
// in that case the caller should check the state directly for the validator's position.
func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
s.lock.RLock()
defer s.lock.RUnlock()
pos, err := s.idxPositionInCommittee(root, valIdx)
if err != nil {
return nil, err
}
if pos == nil {
return []types.CommitteeIndex{}, nil
}
return pos.nextPeriod, nil
}
// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` that returns the
// cached position(s) of a validator index within the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx types.ValidatorIndex,
) (*positionInCommittee, error) {
obj, exists, err := s.cache.GetByKey(key(root))
if err != nil {
return nil, err
}
if !exists {
SyncCommitteeCacheMiss.Inc()
return nil, ErrNonExistingSyncCommitteeKey
}
item, ok := obj.(*syncCommitteeIndexPosition)
if !ok {
return nil, errNotSyncCommitteeIndexPosition
}
idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
if !ok {
SyncCommitteeCacheMiss.Inc()
return nil, nil
}
SyncCommitteeCacheHit.Inc()
return idxInCommittee, nil
}
// UpdatePositionsInCommittee updates the cached validator positions in the sync committee for the
// current and next periods. This should be called when `current_sync_committee` and `next_sync_committee`
// change, which happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs.
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconStateAltair) error {
csc, err := st.CurrentSyncCommittee()
if err != nil {
return err
}
positionsMap := make(map[types.ValidatorIndex]*positionInCommittee)
for i, pubkey := range csc.Pubkeys {
p := bytesutil.ToBytes48(pubkey)
validatorIndex, ok := st.ValidatorIndexByPubkey(p)
if !ok {
continue
}
if _, ok := positionsMap[validatorIndex]; !ok {
m := &positionInCommittee{currentPeriod: []types.CommitteeIndex{types.CommitteeIndex(i)}, nextPeriod: []types.CommitteeIndex{}}
positionsMap[validatorIndex] = m
} else {
positionsMap[validatorIndex].currentPeriod = append(positionsMap[validatorIndex].currentPeriod, types.CommitteeIndex(i))
}
}
nsc, err := st.NextSyncCommittee()
if err != nil {
return err
}
for i, pubkey := range nsc.Pubkeys {
p := bytesutil.ToBytes48(pubkey)
validatorIndex, ok := st.ValidatorIndexByPubkey(p)
if !ok {
continue
}
if _, ok := positionsMap[validatorIndex]; !ok {
m := &positionInCommittee{nextPeriod: []types.CommitteeIndex{types.CommitteeIndex(i)}, currentPeriod: []types.CommitteeIndex{}}
positionsMap[validatorIndex] = m
} else {
positionsMap[validatorIndex].nextPeriod = append(positionsMap[validatorIndex].nextPeriod, types.CommitteeIndex(i))
}
}
s.lock.Lock()
defer s.lock.Unlock()
if err := s.cache.Add(&syncCommitteeIndexPosition{
currentSyncCommitteeRoot: syncCommitteeBoundaryRoot,
vIndexToPositionMap: positionsMap,
}); err != nil {
return err
}
trim(s.cache, maxSyncCommitteeSize)
return nil
}
// Given a `syncCommitteeIndexPosition` object, keyFn returns the key of the object,
// which is the `currentSyncCommitteeRoot` field.
// An error is returned if the input is not a `syncCommitteeIndexPosition` object.
func keyFn(obj interface{}) (string, error) {
info, ok := obj.(*syncCommitteeIndexPosition)
if !ok {
return "", errNotSyncCommitteeIndexPosition
}
return string(info.currentSyncCommitteeRoot[:]), nil
}
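A minimal sketch (not part of this change) of the intended read path described above: consult the cache first and repopulate it from the head state when the boundary root has not been cached yet. The names syncCommitteeCache, boundaryRoot, valIdx, and headState are illustrative.
positions, err := syncCommitteeCache.CurrentPeriodIndexPosition(boundaryRoot, valIdx)
if err == ErrNonExistingSyncCommitteeKey {
	// Cache miss: rebuild the cache from the head state and retry, or fall back
	// to reading the assignment directly from headState.CurrentSyncCommittee().
	if updateErr := syncCommitteeCache.UpdatePositionsInCommittee(boundaryRoot, headState); updateErr != nil {
		return nil, updateErr
	}
	positions, err = syncCommitteeCache.CurrentPeriodIndexPosition(boundaryRoot, valIdx)
}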


@@ -0,0 +1,32 @@
// +build libfuzzer
package cache
import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// FakeSyncCommitteeCache is a fake `SyncCommitteeCache` to satisfy fuzzing.
type FakeSyncCommitteeCache struct {
}
// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *FakeSyncCommitteeCache {
return &FakeSyncCommitteeCache{}
}
// CurrentPeriodIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
return nil, nil
}
// NextPeriodIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
return nil, nil
}
// UpdatePositionsInCommittee -- fake.
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconStateAltair) error {
return nil
}


@@ -0,0 +1,222 @@
package cache_test
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
numValidators := 101
deterministicState, _ := testutil.DeterministicGenesisStateAltair(t, uint64(numValidators))
pubKeys := make([][]byte, deterministicState.NumValidators())
for i, val := range deterministicState.Validators() {
pubKeys[i] = val.PublicKey
}
tests := []struct {
name string
currentSyncCommittee *ethpb.SyncCommittee
nextSyncCommittee *ethpb.SyncCommittee
currentSyncMap map[types.ValidatorIndex][]types.CommitteeIndex
nextSyncMap map[types.ValidatorIndex][]types.CommitteeIndex
}{
{
name: "only current epoch",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
2: {},
3: {},
},
},
{
name: "only next epoch",
currentSyncCommittee: convertToCommittee([][]byte{}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
2: {},
3: {},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
},
{
name: "some current epoch and some next epoch",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[7],
pubKeys[6],
pubKeys[5],
pubKeys[4],
pubKeys[7],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
7: {0, 4},
6: {1},
5: {2},
4: {3},
},
},
{
name: "some current epoch and some next epoch duplicated across",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[1],
pubKeys[2],
pubKeys[3],
pubKeys[2],
pubKeys[2],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[2],
pubKeys[1],
pubKeys[3],
pubKeys[2],
pubKeys[1],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {0},
2: {1, 3, 4},
3: {2},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {1, 4},
2: {0, 3},
3: {2},
},
},
{
name: "all duplicated",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
100: {0, 1, 2, 3},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
100: {0, 1, 2, 3},
},
},
{
name: "unknown keys",
currentSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
nextSyncCommittee: convertToCommittee([][]byte{
pubKeys[100],
pubKeys[100],
pubKeys[100],
pubKeys[100],
}),
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
},
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
1: {},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, uint64(numValidators))
require.NoError(t, s.SetCurrentSyncCommittee(tt.currentSyncCommittee))
require.NoError(t, s.SetNextSyncCommittee(tt.nextSyncCommittee))
cache := cache.NewSyncCommittee()
r := [32]byte{'a'}
require.NoError(t, cache.UpdatePositionsInCommittee(r, s))
for key, indices := range tt.currentSyncMap {
pos, err := cache.CurrentPeriodIndexPosition(r, key)
require.NoError(t, err)
require.DeepEqual(t, indices, pos)
}
for key, indices := range tt.nextSyncMap {
pos, err := cache.NextPeriodIndexPosition(r, key)
require.NoError(t, err)
require.DeepEqual(t, indices, pos)
}
})
}
}
func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
c := cache.NewSyncCommittee()
_, err := c.CurrentPeriodIndexPosition([32]byte{}, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
}
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
c := cache.NewSyncCommittee()
s, _ := testutil.DeterministicGenesisStateAltair(t, 64)
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))
_, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
_, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
require.NoError(t, err)
}
func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
var pubKeys [][]byte
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
if i < uint64(len(inputKeys)) {
pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
} else {
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
}
}
return &ethpb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
}

beacon-chain/cache/sync_subnet_ids.go (new file)

@@ -0,0 +1,120 @@
package cache
import (
"sync"
"time"
"github.com/patrickmn/go-cache"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/rand"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
type syncSubnetIDs struct {
sCommittee *cache.Cache
sCommiteeLock sync.RWMutex
}
// SyncSubnetIDs is the cache of sync committee subnet IDs for sync committee participants.
var SyncSubnetIDs = newSyncSubnetIDs()
func newSyncSubnetIDs() *syncSubnetIDs {
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
subLength := epochDuration * time.Duration(params.BeaconConfig().EpochsPerSyncCommitteePeriod)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &syncSubnetIDs{sCommittee: persistentCache}
}
// GetSyncCommitteeSubnets retrieves the sync committee subnets, the epoch they become active, and the
// expiration time of that validator's subscription.
func (s *syncSubnetIDs) GetSyncCommitteeSubnets(pubkey []byte, epoch types.Epoch) ([]uint64, types.Epoch, bool, time.Time) {
s.sCommiteeLock.RLock()
defer s.sCommiteeLock.RUnlock()
id, duration, ok := s.sCommittee.GetWithExpiration(keyBuilder(pubkey, epoch))
if !ok {
return []uint64{}, 0, ok, time.Time{}
}
// Type-assert the cached entry.
idxs, ok := id.([]uint64)
if !ok {
return []uint64{}, 0, ok, time.Time{}
}
// If no committees are saved, we return
// nothing.
if len(idxs) <= 1 {
return []uint64{}, 0, ok, time.Time{}
}
return idxs[1:], types.Epoch(idxs[0]), ok, duration
}
// GetAllSubnets retrieves all the non-expired subscribed subnets of all the validators
// in the cache. This method also takes the current epoch as an argument so that only
// assignments for epochs that have already started are returned.
func (s *syncSubnetIDs) GetAllSubnets(currEpoch types.Epoch) []uint64 {
s.sCommiteeLock.RLock()
defer s.sCommiteeLock.RUnlock()
itemsMap := s.sCommittee.Items()
var committees []uint64
for _, v := range itemsMap {
if v.Expired() {
continue
}
idxs, ok := v.Object.([]uint64)
if !ok {
continue
}
if len(idxs) <= 1 {
continue
}
// Check if the subnet is valid in the current epoch.
if types.Epoch(idxs[0]) > currEpoch {
continue
}
// Ignore the first index as that represents the
// epoch of the validator's assignments.
committees = append(committees, idxs[1:]...)
}
return sliceutil.SetUint64(committees)
}
// AddSyncCommitteeSubnets adds the relevant committee for that particular validator along with its
// expiration period. An Epoch argument here denotes the epoch from which the sync committee subnets
// will be active.
func (s *syncSubnetIDs) AddSyncCommitteeSubnets(pubkey []byte, epoch types.Epoch, comIndex []uint64, duration time.Duration) {
s.sCommiteeLock.Lock()
defer s.sCommiteeLock.Unlock()
subComCount := params.BeaconConfig().SyncCommitteeSubnetCount
// To join a sync committee subnet, select a random number of epochs before the end of the
// current sync committee period between 1 and SYNC_COMMITTEE_SUBNET_COUNT, inclusive.
diff := (rand.NewGenerator().Uint64() % subComCount) + 1
joinEpoch, err := epoch.SafeSub(diff)
if err != nil {
// If we underflow here, we simply set the join
// epoch to 0.
joinEpoch = 0
}
// Append the epoch of the subnet into the first index here.
s.sCommittee.Set(keyBuilder(pubkey, epoch), append([]uint64{uint64(joinEpoch)}, comIndex...), duration)
}
// EmptyAllCaches empties out all the related caches and flushes any stored
// entries in them. This should only ever be used for testing; in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *syncSubnetIDs) EmptyAllCaches() {
// Clear the cache.
s.sCommiteeLock.Lock()
s.sCommittee.Flush()
s.sCommiteeLock.Unlock()
}
func keyBuilder(pubkey []byte, epoch types.Epoch) string {
epochBytes := bytesutil.Bytes8(uint64(epoch))
return string(append(pubkey, epochBytes...))
}
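A short usage sketch (not part of this change; pubkey, startEpoch, subscriptionDuration, and currentEpoch are illustrative): a validator records its assigned subnets for an upcoming period, and the node later collects everything that is currently active.
SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey[:], startEpoch, []uint64{2, 3}, subscriptionDuration)
subnets, joinEpoch, ok, expiry := SyncSubnetIDs.GetSyncCommitteeSubnets(pubkey[:], startEpoch)
activeSubnets := SyncSubnetIDs.GetAllSubnets(currentEpoch) // only entries whose join epoch has passed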


@@ -0,0 +1,57 @@
package cache
import (
"testing"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
c := newSyncSubnetIDs()
for i := 0; i < 20; i++ {
pubkey := [48]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
for i := uint64(0); i < 20; i++ {
pubkey := [48]byte{byte(i)}
idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
if !ok {
t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
continue
}
require.Equal(t, i, idxs[0])
}
coms := c.GetAllSubnets(100)
assert.Equal(t, 20, len(coms))
}
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
c := newSyncSubnetIDs()
for i := 0; i < 20; i++ {
pubkey := [48]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
coms := c.GetAllSubnets(50)
assert.Equal(t, 0, len(coms))
for i := uint64(0); i < 20; i++ {
pubkey := [48]byte{byte(i)}
_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
if !ok {
t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
continue
}
require.Equal(t, true, uint64(jEpoch) >= 100-params.BeaconConfig().SyncCommitteeSubnetCount)
}
coms = c.GetAllSubnets(99)
assert.Equal(t, 20, len(coms))
}


@@ -3,19 +3,36 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"attestation.go",
"block.go",
"deposit.go",
"epoch_precompute.go",
"epoch_spec.go",
"fork_transition.go",
"reward.go",
"sync_committee.go",
"transition.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/altair",
visibility = [
"//beacon-chain:__subpackages__",
"//shared/testutil:__pkg__",
"//spectest:__subpackages__",
"//validator/client:__pkg__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
@@ -23,28 +40,46 @@ go_library(
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attestation_test.go",
"block_test.go",
"deposit_fuzz_test.go",
"deposit_test.go",
"epoch_precompute_test.go",
"epoch_spec_test.go",
"fork_transition_test.go",
"reward_test.go",
"sync_committee_test.go",
"transition_test.go",
"validator_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"//shared/timeutils:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)


@@ -0,0 +1,284 @@
package altair
import (
"bytes"
"context"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
type matchingTarget bool
type matchingSource bool
type matchingHead bool
// ProcessAttestations applies processing operations to a block's inner attestation
// records.
func ProcessAttestations(
ctx context.Context,
beaconState state.BeaconState,
b block.SignedBeaconBlock,
) (state.BeaconState, error) {
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return nil, err
}
var err error
for idx, attestation := range b.Block().Body().Attestations() {
beaconState, err = ProcessAttestation(ctx, beaconState, attestation)
if err != nil {
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
}
}
return beaconState, nil
}
// ProcessAttestation verifies an input attestation can pass through processing using the given beacon state.
//
// Spec code:
// def process_attestation(state: BeaconState, attestation: Attestation) -> None:
// data = attestation.data
// assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
// assert data.target.epoch == compute_epoch_at_slot(data.slot)
// assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
// assert data.index < get_committee_count_per_slot(state, data.target.epoch)
//
// committee = get_beacon_committee(state, data.slot, data.index)
// assert len(attestation.aggregation_bits) == len(committee)
//
// # Participation flag indices
// participation_flag_indices = get_attestation_participation_flag_indices(state, data, state.slot - data.slot)
//
// # Verify signature
// assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
//
// # Update epoch participation flags
// if data.target.epoch == get_current_epoch(state):
// epoch_participation = state.current_epoch_participation
// else:
// epoch_participation = state.previous_epoch_participation
//
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
//
// # Reward proposer
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
func ProcessAttestation(
ctx context.Context,
beaconState state.BeaconStateAltair,
att *ethpb.Attestation,
) (state.BeaconStateAltair, error) {
beaconState, err := ProcessAttestationNoVerifySignature(ctx, beaconState, att)
if err != nil {
return nil, err
}
return beaconState, blocks.VerifyAttestationSignature(ctx, beaconState, att)
}
// ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
// records. The only difference from ProcessAttestations is that attestation signatures are not verified.
func ProcessAttestationsNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
b block.SignedBeaconBlock,
) (state.BeaconState, error) {
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return nil, err
}
body := b.Block().Body()
var err error
for idx, attestation := range body.Attestations() {
beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, attestation)
if err != nil {
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
}
}
return beaconState, nil
}
// ProcessAttestationNoVerifySignature processes the attestation without verifying the attestation signature. This
// method is used to validate attestations whose signatures have already been verified or will be verified later.
func ProcessAttestationNoVerifySignature(
ctx context.Context,
beaconState state.BeaconStateAltair,
att *ethpb.Attestation,
) (state.BeaconStateAltair, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
defer span.End()
if err := blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att); err != nil {
return nil, err
}
delay, err := beaconState.Slot().SafeSubSlot(att.Data.Slot)
if err != nil {
return nil, err
}
participatedFlags, err := attestationParticipationFlagIndices(
beaconState,
att.Data,
delay)
if err != nil {
return nil, err
}
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
if err != nil {
return nil, err
}
var epochParticipation []byte
currentEpoch := helpers.CurrentEpoch(beaconState)
targetEpoch := att.Data.Target.Epoch
if targetEpoch == currentEpoch {
epochParticipation, err = beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
} else {
epochParticipation, err = beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
}
sourceFlagIndex := params.BeaconConfig().TimelySourceFlagIndex
targetFlagIndex := params.BeaconConfig().TimelyTargetFlagIndex
headFlagIndex := params.BeaconConfig().TimelyHeadFlagIndex
proposerRewardNumerator := uint64(0)
totalBalance, err := helpers.TotalActiveBalance(beaconState)
if err != nil {
return nil, errors.Wrap(err, "could not calculate active balance")
}
for _, index := range indices {
br, err := BaseRewardWithTotalBalance(beaconState, types.ValidatorIndex(index), totalBalance)
if err != nil {
return nil, err
}
if participatedFlags[sourceFlagIndex] && !HasValidatorFlag(epochParticipation[index], sourceFlagIndex) {
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
proposerRewardNumerator += br * params.BeaconConfig().TimelySourceWeight
}
if participatedFlags[targetFlagIndex] && !HasValidatorFlag(epochParticipation[index], targetFlagIndex) {
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
proposerRewardNumerator += br * params.BeaconConfig().TimelyTargetWeight
}
if participatedFlags[headFlagIndex] && !HasValidatorFlag(epochParticipation[index], headFlagIndex) {
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], headFlagIndex)
proposerRewardNumerator += br * params.BeaconConfig().TimelyHeadWeight
}
}
if targetEpoch == currentEpoch {
if err := beaconState.SetCurrentParticipationBits(epochParticipation); err != nil {
return nil, err
}
} else {
if err := beaconState.SetPreviousParticipationBits(epochParticipation); err != nil {
return nil, err
}
}
// Reward proposer.
if err := rewardProposer(beaconState, proposerRewardNumerator); err != nil {
return nil, err
}
return beaconState, nil
}
// This returns the matching statuses for the attestation data's source, target, and head.
func matchingStatus(beaconState state.BeaconState, data *ethpb.AttestationData, cp *ethpb.Checkpoint) (s matchingSource, t matchingTarget, h matchingHead, err error) {
s = matchingSource(attestationutil.CheckPointIsEqual(data.Source, cp))
r, err := helpers.BlockRoot(beaconState, data.Target.Epoch)
if err != nil {
return false, false, false, err
}
t = matchingTarget(bytes.Equal(r, data.Target.Root))
r, err = helpers.BlockRootAtSlot(beaconState, data.Slot)
if err != nil {
return false, false, false, err
}
h = matchingHead(bytes.Equal(r, data.BeaconBlockRoot))
return
}
// This rewards the proposer by increasing the proposer's balance using the input reward numerator and the calculated reward denominator.
func rewardProposer(beaconState state.BeaconState, proposerRewardNumerator uint64) error {
proposerRewardDenominator := (params.BeaconConfig().WeightDenominator - params.BeaconConfig().ProposerWeight) * params.BeaconConfig().WeightDenominator / params.BeaconConfig().ProposerWeight
proposerReward := proposerRewardNumerator / proposerRewardDenominator
i, err := helpers.BeaconProposerIndex(beaconState)
if err != nil {
return err
}
return helpers.IncreaseBalance(beaconState, i, proposerReward)
}
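For intuition, with the mainnet Altair weights (assumed here: WEIGHT_DENOMINATOR = 64, PROPOSER_WEIGHT = 8), the denominator above works out to (64 - 8) * 64 / 8 = 448, so the proposer is credited proposerRewardNumerator / 448 Gwei.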
// HasValidatorFlag returns true if the flag at the given position is set.
func HasValidatorFlag(flag, flagPosition uint8) bool {
return ((flag >> flagPosition) & 1) == 1
}
// AddValidatorFlag adds a new validator flag to an existing one.
func AddValidatorFlag(flag, flagPosition uint8) uint8 {
return flag | (1 << flagPosition)
}
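A tiny illustrative example (assuming the spec's flag indices of 0 for source, 1 for target, and 2 for head):
flags := AddValidatorFlag(0, params.BeaconConfig().TimelySourceFlagIndex)     // 0b001
flags = AddValidatorFlag(flags, params.BeaconConfig().TimelyTargetFlagIndex)  // 0b011
sourceSet := HasValidatorFlag(flags, params.BeaconConfig().TimelySourceFlagIndex) // true
headSet := HasValidatorFlag(flags, params.BeaconConfig().TimelyHeadFlagIndex)     // false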
// This retrieves a map of the participation flags an attestation qualifies for, based on Altair's participation flag indices.
// It is used to facilitate attestation processing during the state transition.
func attestationParticipationFlagIndices(beaconState state.BeaconStateAltair, data *ethpb.AttestationData, delay types.Slot) (map[uint8]bool, error) {
currEpoch := helpers.CurrentEpoch(beaconState)
var justifiedCheckpt *ethpb.Checkpoint
if data.Target.Epoch == currEpoch {
justifiedCheckpt = beaconState.CurrentJustifiedCheckpoint()
} else {
justifiedCheckpt = beaconState.PreviousJustifiedCheckpoint()
}
// Get matching participation flags.
matchingSource, matchingTarget, matchingHead, err := matchingStatus(beaconState, data, justifiedCheckpt)
if err != nil {
return nil, err
}
if !matchingSource {
return nil, errors.New("source epoch does not match")
}
// Process matched participation flags.
participatedFlags := make(map[uint8]bool)
sourceFlagIndex := params.BeaconConfig().TimelySourceFlagIndex
targetFlagIndex := params.BeaconConfig().TimelyTargetFlagIndex
headFlagIndex := params.BeaconConfig().TimelyHeadFlagIndex
if matchingSource && delay <= types.Slot(mathutil.IntegerSquareRoot(uint64(params.BeaconConfig().SlotsPerEpoch))) {
participatedFlags[sourceFlagIndex] = true
}
if matchingTarget && delay <= params.BeaconConfig().SlotsPerEpoch {
participatedFlags[targetFlagIndex] = true
}
matchingHeadTarget := bool(matchingHead) && bool(matchingTarget)
if matchingHeadTarget && delay == params.BeaconConfig().MinAttestationInclusionDelay {
participatedFlags[headFlagIndex] = true
}
return participatedFlags, nil
}
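As a concrete illustration of the timing conditions above, assuming mainnet values (SLOTS_PER_EPOCH = 32, so the integer square root is 5, and MIN_ATTESTATION_INCLUSION_DELAY = 1): an attestation included one slot after its attested slot can earn all three flags, one included four slots late can still earn the source and target flags but not head, and one included ten slots late can only earn the target flag, provided the corresponding roots match in each case.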


@@ -0,0 +1,344 @@
package altair_test
import (
"context"
"fmt"
"testing"
fuzz "github.com/google/gofuzz"
"github.com/prysmaticlabs/go-bitfield"
altair "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
attestations := []*ethpb.Attestation{
testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Slot: 5,
},
}),
}
b := testutil.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: attestations,
},
}
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
want := fmt.Sprintf(
"attestation slot %d + inclusion delay %d > state slot %d",
attestations[0].Data.Slot,
params.BeaconConfig().MinAttestationInclusionDelay,
beaconState.Slot(),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
att := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0}}})
b := testutil.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: []*ethpb.Attestation{att},
},
}
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch*4 + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
pfc := beaconState.PreviousJustifiedCheckpoint()
pfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
want := fmt.Sprintf(
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
att.Data.Target.Epoch,
helpers.PrevEpoch(beaconState),
helpers.CurrentEpoch(beaconState),
)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
attestations := []*ethpb.Attestation{
{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
},
AggregationBits: bitfield.Bitlist{0x09},
},
}
b := testutil.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: attestations,
},
}
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
want := "source check point not equal to current justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = helpers.CurrentEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
attestations := []*ethpb.Attestation{
{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
Slot: params.BeaconConfig().SlotsPerEpoch,
},
AggregationBits: aggBits,
},
}
b := testutil.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: attestations,
},
}
err := beaconState.SetSlot(beaconState.Slot() + 2*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
pfc := beaconState.PreviousJustifiedCheckpoint()
pfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
want := "source check point not equal to previous justified checkpoint"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = helpers.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Target.Epoch = helpers.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.ErrorContains(t, want, err)
}
func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(4)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
Target: &ethpb.Checkpoint{Epoch: 0}},
AggregationBits: aggBits,
}
b := testutil.NewBeaconBlockAltair()
b.Block = &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{
Attestations: []*ethpb.Attestation{att},
},
}
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = []byte("hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.ErrorContains(t, expected, err)
}
func TestProcessAttestations_OK(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := testutil.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
})
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := helpers.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
block := testutil.NewBeaconBlockAltair()
block.Block.Body.Attestations = []*ethpb.Attestation{att}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
require.NoError(t, err)
}
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
aggBits := bitfield.NewBitlist(2)
aggBits.SetBitAt(0, true)
aggBits.SetBitAt(1, true)
r, err := helpers.BlockRootAtSlot(beaconState, 0)
require.NoError(t, err)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: r,
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
}
zeroSig := [96]byte{}
att.Signature = zeroSig[:]
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, make([]byte, 32))
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
beaconState, err = altair.ProcessAttestationNoVerifySignature(context.Background(), beaconState, att)
require.NoError(t, err)
p, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
for _, index := range indices {
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex))
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyTargetFlagIndex))
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelySourceFlagIndex))
}
}
func TestValidatorFlag_AddHas(t *testing.T) {
tests := []struct {
name string
set []uint8
expectedTrue []uint8
expectedFalse []uint8
}{
{name: "none",
set: []uint8{},
expectedTrue: []uint8{},
expectedFalse: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "source",
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
expectedFalse: []uint8{params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
},
{
name: "source, target",
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
expectedFalse: []uint8{params.BeaconConfig().TimelyHeadFlagIndex},
},
{name: "source, target, head",
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
expectedFalse: []uint8{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := uint8(0)
for _, f := range tt.set {
b = altair.AddValidatorFlag(b, f)
}
for _, f := range tt.expectedFalse {
require.Equal(t, false, altair.HasValidatorFlag(b, f))
}
for _, f := range tt.expectedTrue {
require.Equal(t, true, altair.HasValidatorFlag(b, f))
}
})
}
}
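Illustrative aside (not part of the diff): the Add/Has helpers exercised above are plain bit operations on a participation byte. A minimal standalone sketch of the same idea, with flag indices 0, 1 and 2 assumed to mirror the spec's TIMELY_SOURCE / TIMELY_TARGET / TIMELY_HEAD ordering (not read from params.BeaconConfig()):

package main

import "fmt"

// addFlag sets the bit at flagIndex in the participation byte.
func addFlag(participation, flagIndex uint8) uint8 {
	return participation | (1 << flagIndex)
}

// hasFlag reports whether the bit at flagIndex is set.
func hasFlag(participation, flagIndex uint8) bool {
	return participation&(1<<flagIndex) != 0
}

func main() {
	const (
		timelySource = uint8(0) // assumed flag indices
		timelyTarget = uint8(1)
		timelyHead   = uint8(2)
	)
	var p uint8
	p = addFlag(p, timelySource)
	p = addFlag(p, timelyTarget)
	fmt.Println(hasFlag(p, timelySource)) // true
	fmt.Println(hasFlag(p, timelyHead))   // false
}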
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
if b.Block == nil {
b.Block = &ethpb.BeaconBlockAltair{}
}
s, err := stateAltair.InitializeFromProtoUnsafe(state)
require.NoError(t, err)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
r, err := altair.ProcessAttestationsNoVerifySignature(ctx, s, wsb)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
}
}
}


@@ -0,0 +1,130 @@
package altair
import (
"errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// ProcessSyncAggregate verifies the sync committee aggregate signature signing over the previous slot's block root and applies the participant and proposer rewards.
//
// Spec code:
// def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None:
// # Verify sync committee aggregate signature signing over the previous slot block root
// committee_pubkeys = state.current_sync_committee.pubkeys
// participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit]
// previous_slot = max(state.slot, Slot(1)) - Slot(1)
// domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
// signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
// assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
//
// # Compute participant and proposer rewards
// total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
// total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
// max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
// participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE)
// proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))
//
// # Apply participant and proposer rewards
// all_pubkeys = [v.pubkey for v in state.validators]
// committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
// for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits):
// if participation_bit:
// increase_balance(state, participant_index, participant_reward)
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// else:
// decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(state state.BeaconStateAltair, sync *ethpb.SyncAggregate) (state.BeaconStateAltair, error) {
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
committeeKeys := currentSyncCommittee.Pubkeys
votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))
votedIndices := make([]types.ValidatorIndex, 0, len(committeeKeys))
didntVoteIndices := make([]types.ValidatorIndex, 0, len(committeeKeys))
// Verify sync committee signature.
for i := uint64(0); i < sync.SyncCommitteeBits.Len(); i++ {
vIdx, exists := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
// Impossible scenario.
if !exists {
return nil, errors.New("validator public key does not exist in state")
}
if sync.SyncCommitteeBits.BitAt(i) {
pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
if err != nil {
return nil, err
}
votedKeys = append(votedKeys, pubKey)
votedIndices = append(votedIndices, vIdx)
} else {
didntVoteIndices = append(didntVoteIndices, vIdx)
}
}
ps := helpers.PrevSlot(state.Slot())
d, err := helpers.Domain(state.Fork(), helpers.SlotToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, state.GenesisValidatorRoot())
if err != nil {
return nil, err
}
pbr, err := helpers.BlockRootAtSlot(state, ps)
if err != nil {
return nil, err
}
sszBytes := p2pType.SSZBytes(pbr)
r, err := helpers.ComputeSigningRoot(&sszBytes, d)
if err != nil {
return nil, err
}
sig, err := bls.SignatureFromBytes(sync.SyncCommitteeSignature)
if err != nil {
return nil, err
}
if !sig.Eth2FastAggregateVerify(votedKeys, r) {
return nil, errors.New("could not verify sync committee signature")
}
// Calculate sync committee and proposer rewards
activeBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
return nil, err
}
totalActiveIncrements := activeBalance / params.BeaconConfig().EffectiveBalanceIncrement
totalBaseRewards := baseRewardPerIncrement(activeBalance) * totalActiveIncrements
maxParticipantRewards := totalBaseRewards * params.BeaconConfig().SyncRewardWeight / params.BeaconConfig().WeightDenominator / uint64(params.BeaconConfig().SlotsPerEpoch)
participantReward := maxParticipantRewards / params.BeaconConfig().SyncCommitteeSize
proposerReward := participantReward * params.BeaconConfig().ProposerWeight / (params.BeaconConfig().WeightDenominator - params.BeaconConfig().ProposerWeight)
// Apply sync committee rewards.
earnedProposerReward := uint64(0)
for _, index := range votedIndices {
if err := helpers.IncreaseBalance(state, index, participantReward); err != nil {
return nil, err
}
earnedProposerReward += proposerReward
}
// Apply proposer rewards.
proposerIndex, err := helpers.BeaconProposerIndex(state)
if err != nil {
return nil, err
}
if err := helpers.IncreaseBalance(state, proposerIndex, earnedProposerReward); err != nil {
return nil, err
}
// Apply sync committee penalties.
for _, index := range didntVoteIndices {
if err := helpers.DecreaseBalance(state, index, participantReward); err != nil {
return nil, err
}
}
return state, nil
}
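Illustrative aside (not part of the diff): the participant and proposer reward arithmetic in ProcessSyncAggregate is easy to sanity-check by hand. A standalone sketch under assumed mainnet-style constants (EFFECTIVE_BALANCE_INCREMENT = 10^9 Gwei, SYNC_REWARD_WEIGHT = 2, PROPOSER_WEIGHT = 8, WEIGHT_DENOMINATOR = 64, SLOTS_PER_EPOCH = 32, SYNC_COMMITTEE_SIZE = 512) and an assumed base reward per increment of 506 Gwei; none of these values is read from the config:

package main

import "fmt"

// Assumed mainnet-style constants, for illustration only.
const (
	effectiveBalanceIncrement = uint64(1_000_000_000)
	syncRewardWeight          = uint64(2)
	proposerWeight            = uint64(8)
	weightDenominator         = uint64(64)
	slotsPerEpoch             = uint64(32)
	syncCommitteeSize         = uint64(512)
)

func main() {
	// Suppose ~16M ETH of active balance and a base reward per increment of 506 Gwei.
	activeBalance := uint64(16_000_000) * effectiveBalanceIncrement
	baseRewardPerIncrement := uint64(506)

	totalActiveIncrements := activeBalance / effectiveBalanceIncrement
	totalBaseRewards := baseRewardPerIncrement * totalActiveIncrements
	maxParticipantRewards := totalBaseRewards * syncRewardWeight / weightDenominator / slotsPerEpoch
	participantReward := maxParticipantRewards / syncCommitteeSize
	proposerReward := participantReward * proposerWeight / (weightDenominator - proposerWeight)

	fmt.Println("participant reward per slot (Gwei):", participantReward)
	fmt.Println("proposer reward per included participant (Gwei):", proposerReward)
}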


@@ -0,0 +1,91 @@
package altair_test
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestProcessSyncCommittee_OK(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xff
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
ps := helpers.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
require.NoError(t, err)
sigs := make([]bls.Signature, len(indices))
for i, indice := range indices {
b := p2pType.SSZBytes(pbr)
sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: aggregatedSig,
}
beaconState, err = altair.ProcessSyncAggregate(beaconState, syncAggregate)
require.NoError(t, err)
// Use a non-sync committee index to compare profitability.
syncCommittee := make(map[types.ValidatorIndex]bool)
for _, index := range indices {
syncCommittee[index] = true
}
nonSyncIndex := types.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerCommittee + 1)
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxValidatorsPerCommittee; i++ {
if !syncCommittee[i] {
nonSyncIndex = i
break
}
}
// Sync committee should be more profitable than non sync committee
balances := beaconState.Balances()
require.Equal(t, true, balances[indices[0]] > balances[nonSyncIndex])
// Proposer should be more profitable than rest of the sync committee
proposerIndex, err := helpers.BeaconProposerIndex(beaconState)
require.NoError(t, err)
require.Equal(t, true, balances[proposerIndex] > balances[indices[0]])
// Sync committee members should have the same profits, except when the member is the proposer
for i := 1; i < len(indices); i++ {
if proposerIndex == indices[i-1] || proposerIndex == indices[i] {
continue
}
require.Equal(t, balances[indices[i-1]], balances[indices[i]])
}
// The number of validators with an increased balance should equal the sync committee size
increased := uint64(0)
for _, balance := range balances {
if balance > params.BeaconConfig().MaxEffectiveBalance {
increased++
}
}
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, increased)
}


@@ -8,13 +8,12 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair" "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2" stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil/require" "github.com/prysmaticlabs/prysm/shared/testutil/require"
) )
func TestFuzzProcessDeposits_10000(t *testing.T) { func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0) fuzzer := fuzz.NewWithSeed(0)
state := &statepb.BeaconStateAltair{} state := &ethpb.BeaconStateAltair{}
deposits := make([]*ethpb.Deposit, 100) deposits := make([]*ethpb.Deposit, 100)
ctx := context.Background() ctx := context.Background()
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {
@@ -33,7 +32,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
func TestFuzzProcessDeposit_10000(t *testing.T) { func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0) fuzzer := fuzz.NewWithSeed(0)
state := &statepb.BeaconStateAltair{} state := &ethpb.BeaconStateAltair{}
deposit := &ethpb.Deposit{} deposit := &ethpb.Deposit{}
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {


@@ -0,0 +1,257 @@
package altair
import (
"context"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// InitializeEpochValidators gets called at the beginning of the process-epoch cycle to return
// precomputed instances of validator attesting records and the total
// balances attested in an epoch.
func InitializeEpochValidators(ctx context.Context, st state.BeaconStateAltair) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.InitializeEpochValidators")
defer span.End()
pValidators := make([]*precompute.Validator, st.NumValidators())
bal := &precompute.Balance{}
prevEpoch := helpers.PrevEpoch(st)
inactivityScores, err := st.InactivityScores()
if err != nil {
return nil, nil, err
}
// This shouldn't happen with a correct beacon state, but we guard
// against index out-of-bound panics to be safe.
if st.NumValidators() > len(inactivityScores) {
return nil, nil, errors.New("num of validators can't be greater than length of inactivity scores")
}
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
// Was validator withdrawable or slashed
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch()
pVal := &precompute.Validator{
IsSlashed: val.Slashed(),
IsWithdrawableCurrentEpoch: withdrawable,
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
InactivityScore: inactivityScores[idx],
}
// Validator active current epoch
if helpers.IsActiveValidatorUsingTrie(val, helpers.CurrentEpoch(st)) {
pVal.IsActiveCurrentEpoch = true
bal.ActiveCurrentEpoch += val.EffectiveBalance()
}
// Validator active previous epoch
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
pVal.IsActivePrevEpoch = true
bal.ActivePrevEpoch += val.EffectiveBalance()
}
pValidators[idx] = pVal
return nil
}); err != nil {
return nil, nil, errors.Wrap(err, "could not initialize epoch validator")
}
return pValidators, bal, nil
}
// ProcessInactivityScores updates the inactivity scores on the beacon state and
// the precompute validator structs for later processing.
func ProcessInactivityScores(
ctx context.Context,
state state.BeaconState,
vals []*precompute.Validator,
) (state.BeaconState, []*precompute.Validator, error) {
if helpers.CurrentEpoch(state) == params.BeaconConfig().GenesisEpoch {
return state, vals, nil
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, nil, err
}
for i, v := range vals {
if v.IsPrevEpochTargetAttester && !v.IsSlashed {
// Decrease inactivity score when validator gets target correct.
if v.InactivityScore > 0 {
score := uint64(1)
if score > v.InactivityScore {
score = v.InactivityScore
}
v.InactivityScore -= score
}
} else {
v.InactivityScore += params.BeaconConfig().InactivityScoreBias
}
if !helpers.IsInInactivityLeak(helpers.PrevEpoch(state), state.FinalizedCheckpointEpoch()) {
score := params.BeaconConfig().InactivityScoreRecoveryRate
if score > v.InactivityScore {
score = v.InactivityScore
}
v.InactivityScore -= score
}
inactivityScores[i] = v.InactivityScore
}
if err := state.SetInactivityScores(inactivityScores); err != nil {
return nil, nil, err
}
return state, vals, nil
}
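Illustrative aside (not part of the diff): the per-validator score update above can be traced with small numbers. A standalone sketch assuming mainnet-style INACTIVITY_SCORE_BIAS = 4 and INACTIVITY_SCORE_RECOVERY_RATE = 16 (assumed values, not read from params.BeaconConfig()):

package main

import "fmt"

// Assumed mainnet-style values.
const (
	inactivityScoreBias         = uint64(4)
	inactivityScoreRecoveryRate = uint64(16)
)

// updateScore mirrors the per-validator rule in ProcessInactivityScores.
func updateScore(score uint64, votedTarget, slashed, inLeak bool) uint64 {
	if votedTarget && !slashed {
		if score > 0 {
			dec := uint64(1)
			if dec > score {
				dec = score
			}
			score -= dec
		}
	} else {
		score += inactivityScoreBias
	}
	if !inLeak {
		dec := inactivityScoreRecoveryRate
		if dec > score {
			dec = score
		}
		score -= dec
	}
	return score
}

func main() {
	fmt.Println(updateScore(5, false, false, false)) // missed target, no leak: 5+4=9, then -min(16,9) => 0
	fmt.Println(updateScore(5, true, false, false))  // voted target, no leak: 5-1=4, then -min(16,4) => 0
	fmt.Println(updateScore(5, false, false, true))  // missed target during a leak: 5+4 => 9
}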
// ProcessEpochParticipation processes the epoch participation in state and updates individual validators' precomputes;
// it also tracks and updates epoch attesting balances.
func ProcessEpochParticipation(
ctx context.Context,
state state.BeaconState,
bal *precompute.Balance,
vals []*precompute.Validator,
) ([]*precompute.Validator, *precompute.Balance, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
defer span.End()
cp, err := state.CurrentEpochParticipation()
if err != nil {
return nil, nil, err
}
for i, b := range cp {
if HasValidatorFlag(b, params.BeaconConfig().TimelyTargetFlagIndex) {
vals[i].IsCurrentEpochTargetAttester = true
}
}
pp, err := state.PreviousEpochParticipation()
if err != nil {
return nil, nil, err
}
for i, b := range pp {
if HasValidatorFlag(b, params.BeaconConfig().TimelySourceFlagIndex) {
vals[i].IsPrevEpochAttester = true
}
if HasValidatorFlag(b, params.BeaconConfig().TimelyTargetFlagIndex) {
vals[i].IsPrevEpochTargetAttester = true
}
if HasValidatorFlag(b, params.BeaconConfig().TimelyHeadFlagIndex) {
vals[i].IsPrevEpochHeadAttester = true
}
}
bal = precompute.UpdateBalance(vals, bal)
return vals, bal, nil
}
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validators.
// This is an optimized version that passes in precomputed validator attesting records and total epoch balances.
func ProcessRewardsAndPenaltiesPrecompute(
state state.BeaconStateAltair,
bal *precompute.Balance,
vals []*precompute.Validator,
) (state.BeaconStateAltair, error) {
// Don't process rewards and penalties in genesis epoch.
if helpers.CurrentEpoch(state) == 0 {
return state, nil
}
numOfVals := state.NumValidators()
// Guard against out-of-bounds access when using the validator balance precompute.
if len(vals) != numOfVals || len(vals) != state.BalancesLength() {
return state, errors.New("validator registries not the same length as state's validator registries")
}
attsRewards, attsPenalties, err := AttestationsDelta(state, bal, vals)
if err != nil {
return nil, errors.Wrap(err, "could not get attestation delta")
}
balances := state.Balances()
for i := 0; i < numOfVals; i++ {
vals[i].BeforeEpochTransitionBalance = balances[i]
// Compute the post balance of the validator after accounting for the
// attester and proposer rewards and penalties.
balances[i] = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
vals[i].AfterEpochTransitionBalance = balances[i]
}
if err := state.SetBalances(balances); err != nil {
return nil, errors.Wrap(err, "could not set validator balances")
}
return state, nil
}
// AttestationsDelta computes and returns the reward and penalty differences for individual validators based on their
// voting records.
func AttestationsDelta(state state.BeaconStateAltair, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
numOfVals := state.NumValidators()
rewards = make([]uint64, numOfVals)
penalties = make([]uint64, numOfVals)
prevEpoch := helpers.PrevEpoch(state)
finalizedEpoch := state.FinalizedCheckpointEpoch()
for i, v := range vals {
rewards[i], penalties[i] = attestationDelta(bal, v, prevEpoch, finalizedEpoch)
}
return rewards, penalties, nil
}
func attestationDelta(bal *precompute.Balance, v *precompute.Validator, prevEpoch, finalizedEpoch types.Epoch) (r, p uint64) {
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
if !eligible || bal.ActiveCurrentEpoch == 0 {
return 0, 0
}
ebi := params.BeaconConfig().EffectiveBalanceIncrement
eb := v.CurrentEpochEffectiveBalance
br := (eb / ebi) * (ebi * params.BeaconConfig().BaseRewardFactor / mathutil.IntegerSquareRoot(bal.ActiveCurrentEpoch))
activeCurrentEpochIncrements := bal.ActiveCurrentEpoch / ebi
r, p = uint64(0), uint64(0)
// Process source reward / penalty
if v.IsPrevEpochAttester && !v.IsSlashed {
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
rewardNumerator := br * params.BeaconConfig().TimelySourceWeight * (bal.PrevEpochAttested / ebi)
r += rewardNumerator / (activeCurrentEpochIncrements * params.BeaconConfig().WeightDenominator)
}
} else {
p += br * params.BeaconConfig().TimelySourceWeight / params.BeaconConfig().WeightDenominator
}
// Process target reward / penalty
if v.IsPrevEpochTargetAttester && !v.IsSlashed {
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
rewardNumerator := br * params.BeaconConfig().TimelyTargetWeight * (bal.PrevEpochTargetAttested / ebi)
r += rewardNumerator / (activeCurrentEpochIncrements * params.BeaconConfig().WeightDenominator)
}
} else {
p += br * params.BeaconConfig().TimelyTargetWeight / params.BeaconConfig().WeightDenominator
}
// Process head reward / penalty
if v.IsPrevEpochHeadAttester && !v.IsSlashed {
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
rewardNumerator := br * params.BeaconConfig().TimelyHeadWeight * (bal.PrevEpochHeadAttested / ebi)
r += rewardNumerator / (activeCurrentEpochIncrements * params.BeaconConfig().WeightDenominator)
}
}
// Process the inactivity penalty.
// Apply an additional penalty to validators that did not vote for the correct target or were slashed.
if !v.IsPrevEpochTargetAttester || v.IsSlashed {
penaltyNumerator := eb * v.InactivityScore
penaltyDenominator := params.BeaconConfig().InactivityScoreBias * params.BeaconConfig().InactivityPenaltyQuotientAltair
p += penaltyNumerator / penaltyDenominator
}
return r, p
}
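Illustrative aside (not part of the diff): a worked number for the source branch of attestationDelta, with TIMELY_SOURCE_WEIGHT = 14 and WEIGHT_DENOMINATOR = 64 assumed and the balances chosen purely for illustration:

package main

import "fmt"

// Assumed Altair mainnet weights, for illustration only.
const (
	timelySourceWeight = uint64(14)
	weightDenominator  = uint64(64)
)

func main() {
	// Suppose a base reward of 20,000 Gwei, 10,000 active increments
	// (10,000 ETH of effective balance) and 9,000 increments that
	// attested to the correct source.
	br := uint64(20_000)
	activeIncrements := uint64(10_000)
	sourceAttestedIncrements := uint64(9_000)

	// Reward for a timely source vote outside an inactivity leak.
	rewardNumerator := br * timelySourceWeight * sourceAttestedIncrements
	reward := rewardNumerator / (activeIncrements * weightDenominator)

	// Penalty for missing the source vote.
	penalty := br * timelySourceWeight / weightDenominator

	fmt.Println("source reward (Gwei):", reward)   // 3937
	fmt.Println("source penalty (Gwei):", penalty) // 4375
}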


@@ -0,0 +1,283 @@
package altair
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestInitializeEpochValidators_Ok(t *testing.T) {
ffe := params.BeaconConfig().FarFutureEpoch
s, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Slot: params.BeaconConfig().SlotsPerEpoch,
// Validator 0 is slashed
// Validator 1 is withdrawable
// Validator 2 is active prev epoch and current epoch
// Validator 3 is active prev epoch
Validators: []*ethpb.Validator{
{Slashed: true, WithdrawableEpoch: ffe, EffectiveBalance: 100},
{EffectiveBalance: 100},
{WithdrawableEpoch: ffe, ExitEpoch: ffe, EffectiveBalance: 100},
{WithdrawableEpoch: ffe, ExitEpoch: 1, EffectiveBalance: 100},
},
InactivityScores: []uint64{0, 1, 2, 3},
})
require.NoError(t, err)
v, b, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
assert.DeepEqual(t, &precompute.Validator{
IsSlashed: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 0,
}, v[0], "Incorrect validator 0 status")
assert.DeepEqual(t, &precompute.Validator{
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 1,
}, v[1], "Incorrect validator 1 status")
assert.DeepEqual(t, &precompute.Validator{
IsActivePrevEpoch: true,
IsActiveCurrentEpoch: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 2,
}, v[2], "Incorrect validator 2 status")
assert.DeepEqual(t, &precompute.Validator{
IsActivePrevEpoch: true,
CurrentEpochEffectiveBalance: 100,
InactivityScore: 3,
}, v[3], "Incorrect validator 3 status")
wantedBalances := &precompute.Balance{
ActiveCurrentEpoch: 100,
ActivePrevEpoch: 200,
}
assert.DeepEqual(t, wantedBalances, b, "Incorrect wanted balance")
}
func TestInitializeEpochValidators_BadState(t *testing.T) {
s, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: []*ethpb.Validator{{}},
InactivityScores: []uint64{},
})
require.NoError(t, err)
_, _, err = InitializeEpochValidators(context.Background(), s)
require.ErrorContains(t, "num of validators can't be greater than length of inactivity scores", err)
}
func TestProcessEpochParticipation(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
require.DeepEqual(t, &precompute.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}, validators[0])
require.DeepEqual(t, &precompute.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
IsPrevEpochAttester: true,
}, validators[1])
require.DeepEqual(t, &precompute.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
IsPrevEpochAttester: true,
IsCurrentEpochTargetAttester: true,
IsPrevEpochTargetAttester: true,
}, validators[2])
require.DeepEqual(t, &precompute.Validator{
IsActiveCurrentEpoch: true,
IsActivePrevEpoch: true,
IsWithdrawableCurrentEpoch: true,
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
IsPrevEpochAttester: true,
IsCurrentEpochTargetAttester: true,
IsPrevEpochTargetAttester: true,
IsPrevEpochHeadAttester: true,
}, validators[3])
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance*3, balance.PrevEpochAttested)
require.Equal(t, balance.CurrentEpochTargetAttested, params.BeaconConfig().MaxEffectiveBalance*2)
require.Equal(t, balance.PrevEpochTargetAttested, params.BeaconConfig().MaxEffectiveBalance*2)
require.Equal(t, balance.PrevEpochHeadAttested, params.BeaconConfig().MaxEffectiveBalance*1)
}
func TestAttestationsDelta(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(rewards); i++ {
require.Equal(t, true, rewards[i] > rewards[i-1])
}
// Penalty amount should decrease as validator index increases due to setup.
for i := 1; i < len(penalties); i++ {
require.Equal(t, true, penalties[i] <= penalties[i-1])
}
// First index should have 0 reward.
require.Equal(t, uint64(0), rewards[0])
// Last index should have 0 penalty.
require.Equal(t, uint64(0), penalties[len(penalties)-1])
}
func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
require.NoError(t, err)
balances := s.Balances()
// Reward amount should increase as validator index increases due to setup.
for i := 1; i < len(balances); i++ {
require.Equal(t, true, balances[i] >= balances[i-1])
}
wanted := make([]uint64, s.NumValidators())
rewards, penalties, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
for i := range rewards {
wanted[i] += rewards[i]
}
for i := range penalties {
if wanted[i] > penalties[i] {
wanted[i] -= penalties[i]
} else {
wanted[i] = 0
}
}
require.DeepEqual(t, wanted, balances)
}
func TestProcessRewardsAndPenaltiesPrecompute_InactivityLeak(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
sCopy := s.Copy()
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
require.NoError(t, err)
// Copied state where finality happened long ago
require.NoError(t, sCopy.SetSlot(params.BeaconConfig().SlotsPerEpoch*1000))
sCopy, err = ProcessRewardsAndPenaltiesPrecompute(sCopy, balance, validators)
require.NoError(t, err)
balances := s.Balances()
inactivityBalances := sCopy.Balances()
// Balances should be much less in inactivity leak cases.
for i := 0; i < len(balances); i++ {
require.Equal(t, true, balances[i] >= inactivityBalances[i])
}
}
func TestProcessInactivityScores_CanProcess(t *testing.T) {
s, err := testState()
require.NoError(t, err)
defaultScore := uint64(5)
require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().MinEpochsToInactivityPenalty+2)))
validators, balance, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
require.NoError(t, err)
inactivityScores, err := s.InactivityScores()
require.NoError(t, err)
// V0 and V1 didn't vote target. V2 and V3 did.
require.Equal(t, defaultScore+params.BeaconConfig().InactivityScoreBias, inactivityScores[0])
require.Equal(t, defaultScore+params.BeaconConfig().InactivityScoreBias, inactivityScores[1])
require.Equal(t, defaultScore-1, inactivityScores[2])
require.Equal(t, defaultScore-1, inactivityScores[3])
}
func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
require.NoError(t, s.SetSlot(0))
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
require.NoError(t, err)
balances := s.Balances()
// Nothing should happen at genesis epoch
for i := 1; i < len(balances); i++ {
require.Equal(t, true, balances[i] == balances[i-1])
}
}
func TestProcessRewardsAndPenaltiesPrecompute_BadState(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializeEpochValidators(context.Background(), s)
require.NoError(t, err)
_, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*precompute.Validator{})
require.ErrorContains(t, "validator registries not the same length as state's validator registries", err)
}
func testState() (state.BeaconState, error) {
generateParticipation := func(flags ...uint8) byte {
b := byte(0)
for _, flag := range flags {
b = AddValidatorFlag(b, flag)
}
return b
}
return stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Slot: 2 * params.BeaconConfig().SlotsPerEpoch,
Validators: []*ethpb.Validator{
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
},
CurrentEpochParticipation: []byte{
0,
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex),
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex),
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex),
},
PreviousEpochParticipation: []byte{
0,
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex),
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex),
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex),
},
InactivityScores: []uint64{0, 0, 0, 0},
Balances: []uint64{0, 0, 0, 0},
})
}


@@ -0,0 +1,116 @@
package altair
import (
"context"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// ProcessSyncCommitteeUpdates processes sync committee updates for the beacon state.
//
// Spec code:
// def process_sync_committee_updates(state: BeaconState) -> None:
// next_epoch = get_current_epoch(state) + Epoch(1)
// if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
// state.current_sync_committee = state.next_sync_committee
// state.next_sync_committee = get_next_sync_committee(state)
func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
nextEpoch := helpers.NextEpoch(beaconState)
if nextEpoch%params.BeaconConfig().EpochsPerSyncCommitteePeriod == 0 {
currentSyncCommittee, err := beaconState.NextSyncCommittee()
if err != nil {
return nil, err
}
if err := beaconState.SetCurrentSyncCommittee(currentSyncCommittee); err != nil {
return nil, err
}
nextCommittee, err := NextSyncCommittee(ctx, beaconState)
if err != nil {
return nil, err
}
if err := beaconState.SetNextSyncCommittee(nextCommittee); err != nil {
return nil, err
}
if err := helpers.UpdateSyncCommitteeCache(beaconState); err != nil {
return nil, err
}
}
return beaconState, nil
}
// ProcessParticipationFlagUpdates processes participation flag updates by rotating the current epoch participation bits into the previous epoch and resetting the current bits.
//
// Spec code:
// def process_participation_flag_updates(state: BeaconState) -> None:
// state.previous_epoch_participation = state.current_epoch_participation
// state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
func ProcessParticipationFlagUpdates(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
c, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
if err := beaconState.SetPreviousParticipationBits(c); err != nil {
return nil, err
}
if err := beaconState.SetCurrentParticipationBits(make([]byte, beaconState.NumValidators())); err != nil {
return nil, err
}
return beaconState, nil
}
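Illustrative aside (not part of the diff): the rotation amounts to copying the current participation bytes into the previous-epoch slot of the state and zero-filling the current bytes. A standalone sketch with plain slices standing in for the state accessors (the real accessors return copies, which this sketch glosses over):

package main

import "fmt"

func main() {
	// Participation bytes for four validators at the end of an epoch.
	current := []byte{0b111, 0b011, 0b001, 0}

	// Rotate: previous takes the old current, current resets to zeroes.
	previous := current
	current = make([]byte, len(previous))

	fmt.Println(previous) // [7 3 1 0]
	fmt.Println(current)  // [0 0 0 0]
}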
// ProcessSlashings processes the slashed validators during epoch processing.
// The function is modified to use PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR.
//
// Spec code:
// def process_slashings(state: BeaconState) -> None:
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
// penalty = penalty_numerator // total_balance * increment
// decrease_balance(state, ValidatorIndex(index), penalty)
func ProcessSlashings(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := helpers.CurrentEpoch(state)
totalBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
return nil, errors.Wrap(err, "could not get total active balance")
}
// Compute slashed balances in the current epoch
exitLength := params.BeaconConfig().EpochsPerSlashingsVector
// Compute the sum of state slashings
slashings := state.Slashings()
totalSlashing := uint64(0)
for _, slashing := range slashings {
totalSlashing += slashing
}
// A callback is used here to apply the penalty logic below to every
// validator in the registry.
increment := params.BeaconConfig().EffectiveBalanceIncrement
minSlashing := mathutil.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplierAltair, totalBalance)
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / totalBalance * increment
if err := helpers.DecreaseBalance(state, types.ValidatorIndex(idx), penalty); err != nil {
return false, val, err
}
return true, val, nil
}
return false, val, nil
})
return state, err
}
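Illustrative aside (not part of the diff): the integer arithmetic above reproduces the 30000000000 Gwei expectation in the first case of TestProcessSlashings_SlashedLess further down, assuming PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR = 2 and EFFECTIVE_BALANCE_INCREMENT = 10^9 Gwei (mainnet-style values, not read from the config):

package main

import "fmt"

func main() {
	const (
		increment        = uint64(1_000_000_000)  // assumed EFFECTIVE_BALANCE_INCREMENT
		multiplierAltair = uint64(2)               // assumed PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR
		maxEffective     = uint64(32_000_000_000)  // 32 ETH
	)
	totalBalance := maxEffective // only one active 32 ETH validator in that test case
	totalSlashing := uint64(1_000_000_000)

	adjusted := totalSlashing * multiplierAltair
	if adjusted > totalBalance {
		adjusted = totalBalance
	}
	penaltyNumerator := maxEffective / increment * adjusted // 32 * 2e9
	penalty := penaltyNumerator / totalBalance * increment  // 2 * 1e9

	fmt.Println("penalty (Gwei):", penalty)                   // 2000000000
	fmt.Println("post balance (Gwei):", maxEffective-penalty) // 30000000000
}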


@@ -0,0 +1,181 @@
package altair_test
import (
"context"
"fmt"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"google.golang.org/protobuf/proto"
)
func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
h := &ethpb.BeaconBlockHeader{
StateRoot: bytesutil.PadTo([]byte{'a'}, 32),
ParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
BodyRoot: bytesutil.PadTo([]byte{'c'}, 32),
}
require.NoError(t, s.SetLatestBlockHeader(h))
postState, err := altair.ProcessSyncCommitteeUpdates(context.Background(), s)
require.NoError(t, err)
current, err := postState.CurrentSyncCommittee()
require.NoError(t, err)
next, err := postState.NextSyncCommittee()
require.NoError(t, err)
require.DeepEqual(t, current, next)
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
require.NoError(t, err)
c, err := postState.CurrentSyncCommittee()
require.NoError(t, err)
n, err := postState.NextSyncCommittee()
require.NoError(t, err)
require.DeepEqual(t, current, c)
require.DeepEqual(t, next, n)
require.NoError(t, s.SetSlot(types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch-1))
postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
require.NoError(t, err)
c, err = postState.CurrentSyncCommittee()
require.NoError(t, err)
n, err = postState.NextSyncCommittee()
require.NoError(t, err)
require.NotEqual(t, current, c)
require.NotEqual(t, next, n)
require.DeepEqual(t, next, c)
// Test boundary condition.
slot := params.BeaconConfig().SlotsPerEpoch * types.Slot(helpers.CurrentEpoch(s)+params.BeaconConfig().EpochsPerSyncCommitteePeriod)
require.NoError(t, s.SetSlot(slot))
boundaryCommittee, err := altair.NextSyncCommittee(context.Background(), s)
require.NoError(t, err)
require.DeepNotEqual(t, boundaryCommittee, n)
}
func TestProcessParticipationFlagUpdates_CanRotate(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
c, err := s.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), c)
p, err := s.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), p)
newC := []byte{'a'}
newP := []byte{'b'}
require.NoError(t, s.SetCurrentParticipationBits(newC))
require.NoError(t, s.SetPreviousParticipationBits(newP))
c, err = s.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepEqual(t, newC, c)
p, err = s.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepEqual(t, newP, p)
s, err = altair.ProcessParticipationFlagUpdates(s)
require.NoError(t, err)
c, err = s.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), c)
p, err = s.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepEqual(t, newC, p)
}
func TestProcessSlashings_NotSlashed(t *testing.T) {
base := &ethpb.BeaconStateAltair{
Slot: 0,
Validators: []*ethpb.Validator{{Slashed: true}},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance},
Slashings: []uint64{0, 1e9},
}
s, err := stateAltair.InitializeFromProto(base)
require.NoError(t, err)
newState, err := altair.ProcessSlashings(s)
require.NoError(t, err)
wanted := params.BeaconConfig().MaxEffectiveBalance
assert.Equal(t, wanted, newState.Balances()[0], "Unexpected slashed balance")
}
func TestProcessSlashings_SlashedLess(t *testing.T) {
tests := []struct {
state *ethpb.BeaconStateAltair
want uint64
}{
{
state: &ethpb.BeaconStateAltair{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance}},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
Slashings: []uint64{0, 1e9},
},
want: uint64(30000000000),
},
{
state: &ethpb.BeaconStateAltair{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
Slashings: []uint64{0, 1e9},
},
want: uint64(31000000000),
},
{
state: &ethpb.BeaconStateAltair{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
Slashings: []uint64{0, 2 * 1e9},
},
want: uint64(30000000000),
},
{
state: &ethpb.BeaconStateAltair{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement}},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement},
Slashings: []uint64{0, 1e9},
},
want: uint64(29000000000),
},
}
for i, tt := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
helpers.ClearCache()
original := proto.Clone(tt.state)
s, err := stateAltair.InitializeFromProto(tt.state)
require.NoError(t, err)
newState, err := altair.ProcessSlashings(s)
require.NoError(t, err)
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
})
}
}


@@ -0,0 +1,115 @@
package altair
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
statealtair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// UpgradeToAltair upgrades the input phase 0 state and returns an Altair-version beacon state.
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconStateAltair, error) {
epoch := helpers.CurrentEpoch(state)
s := &ethpb.BeaconStateAltair{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: make([]byte, state.NumValidators()),
CurrentEpochParticipation: make([]byte, state.NumValidators()),
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: make([]uint64, state.NumValidators()),
}
newState, err := statealtair.InitializeFromProto(s)
if err != nil {
return nil, err
}
prevEpochAtts, err := state.PreviousEpochAttestations()
if err != nil {
return nil, err
}
newState, err = TranslateParticipation(newState, prevEpochAtts)
if err != nil {
return nil, err
}
committee, err := NextSyncCommittee(ctx, newState)
if err != nil {
return nil, err
}
if err := newState.SetCurrentSyncCommittee(committee); err != nil {
return nil, err
}
if err := newState.SetNextSyncCommittee(committee); err != nil {
return nil, err
}
return newState, nil
}
// TranslateParticipation translates pending attestations into participation bits, then inserts the bits into the beacon state.
// This is a helper function to convert a phase 0 beacon state (pending attestations) to an Altair beacon state (participation bits).
func TranslateParticipation(state *statealtair.BeaconState, atts []*ethpb.PendingAttestation) (*statealtair.BeaconState, error) {
for _, att := range atts {
epochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
participatedFlags, err := attestationParticipationFlagIndices(state, att.Data, att.InclusionDelay)
if err != nil {
return nil, err
}
committee, err := helpers.BeaconCommitteeFromState(state, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
if err != nil {
return nil, err
}
sourceFlagIndex := params.BeaconConfig().TimelySourceFlagIndex
targetFlagIndex := params.BeaconConfig().TimelyTargetFlagIndex
headFlagIndex := params.BeaconConfig().TimelyHeadFlagIndex
for _, index := range indices {
if participatedFlags[sourceFlagIndex] && !HasValidatorFlag(epochParticipation[index], sourceFlagIndex) {
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
}
if participatedFlags[targetFlagIndex] && !HasValidatorFlag(epochParticipation[index], targetFlagIndex) {
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
}
if participatedFlags[headFlagIndex] && !HasValidatorFlag(epochParticipation[index], headFlagIndex) {
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], headFlagIndex)
}
}
if err := state.SetPreviousParticipationBits(epochParticipation); err != nil {
return nil, err
}
}
return state, nil
}


@@ -0,0 +1,87 @@
package altair_test
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestTranslateParticipation(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, 64)
st, ok := s.(*stateAltair.BeaconState)
require.Equal(t, true, ok)
require.NoError(t, st.SetSlot(st.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
var err error
newState, err := altair.TranslateParticipation(st, nil)
require.NoError(t, err)
participation, err := newState.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, 64), participation)
aggBits := bitfield.NewBitlist(2)
aggBits.SetBitAt(0, true)
aggBits.SetBitAt(1, true)
r, err := helpers.BlockRootAtSlot(s, 0)
require.NoError(t, err)
var pendingAtts []*ethpb.PendingAttestation
for i := 0; i < 3; i++ {
pendingAtts = append(pendingAtts, &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
CommitteeIndex: types.CommitteeIndex(i),
BeaconBlockRoot: r,
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
InclusionDelay: 1,
})
}
newState, err = altair.TranslateParticipation(newState, pendingAtts)
require.NoError(t, err)
participation, err = newState.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepNotSSZEqual(t, make([]byte, 64), participation)
committee, err := helpers.BeaconCommitteeFromState(st, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestationutil.AttestingIndices(pendingAtts[0].AggregationBits, committee)
require.NoError(t, err)
for _, index := range indices {
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyHeadFlagIndex))
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyTargetFlagIndex))
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex))
}
}
func TestUpgradeToAltair(t *testing.T) {
st, _ := testutil.DeterministicGenesisState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
aState, err := altair.UpgradeToAltair(context.Background(), st)
require.NoError(t, err)
_, ok := aState.(state.BeaconStateAltair)
require.Equal(t, true, ok)
f := aState.Fork()
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: helpers.CurrentEpoch(st),
}, f)
csc, err := aState.CurrentSyncCommittee()
require.NoError(t, err)
nsc, err := aState.NextSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, nsc, csc)
}


@@ -0,0 +1,51 @@
package altair
import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
)
// BaseReward takes the state and a validator index and calculates
// the individual validator's base reward.
//
// Spec code:
// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
// """
// Return the base reward for the validator defined by ``index`` with respect to the current ``state``.
//
// Note: An optimally performing validator can earn one base reward per epoch over a long time horizon.
// This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal
// and sync committees).
// """
// increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
// return Gwei(increments * get_base_reward_per_increment(state))
func BaseReward(state state.ReadOnlyBeaconState, index types.ValidatorIndex) (uint64, error) {
totalBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
return 0, errors.Wrap(err, "could not calculate active balance")
}
return BaseRewardWithTotalBalance(state, index, totalBalance)
}
// BaseRewardWithTotalBalance calculates the base reward with the provided total balance.
func BaseRewardWithTotalBalance(state state.ReadOnlyBeaconState, index types.ValidatorIndex, totalBalance uint64) (uint64, error) {
val, err := state.ValidatorAtIndexReadOnly(index)
if err != nil {
return 0, err
}
increments := val.EffectiveBalance() / params.BeaconConfig().EffectiveBalanceIncrement
return increments * baseRewardPerIncrement(totalBalance), nil
}
// baseRewardPerIncrement returns the base reward per effective balance increment for the given total active balance.
//
// Spec code:
// def get_base_reward_per_increment(state: BeaconState) -> Gwei:
// return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)))
func baseRewardPerIncrement(activeBalance uint64) uint64 {
return params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().BaseRewardFactor / mathutil.IntegerSquareRoot(activeBalance)
}
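Illustrative aside (not part of the diff): a quick numeric check of the two helpers above, assuming mainnet-style EFFECTIVE_BALANCE_INCREMENT = 10^9 Gwei and BASE_REWARD_FACTOR = 64, and approximating the integer square root with math.Sqrt:

package main

import (
	"fmt"
	"math"
)

// Assumed mainnet-style constants.
const (
	ebIncrement      = uint64(1_000_000_000)
	baseRewardFactor = uint64(64)
)

func main() {
	// ~500k validators at 32 ETH of effective balance.
	totalActive := uint64(500_000) * 32 * ebIncrement
	perIncrement := ebIncrement * baseRewardFactor / uint64(math.Sqrt(float64(totalActive)))

	// A 32 ETH validator holds 32 increments, so its base reward is 32x that.
	increments := uint64(32)
	fmt.Println("base reward per increment (Gwei):", perIncrement)
	fmt.Println("validator base reward (Gwei):", increments*perIncrement)
}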


@@ -0,0 +1,28 @@
package altair_test
import (
"testing"
altair "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestBaseReward(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
r0, err := altair.BaseReward(s, 0)
require.NoError(t, err)
r1, err := altair.BaseReward(s, 1)
require.NoError(t, err)
require.Equal(t, r0, r1)
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
v.EffectiveBalance = v.EffectiveBalance + params.BeaconConfig().EffectiveBalanceIncrement
require.NoError(t, s.UpdateValidatorAtIndex(0, v))
r0, err = altair.BaseReward(s, 0)
require.NoError(t, err)
require.Equal(t, true, r0 > r1)
}


@@ -2,6 +2,8 @@ package altair
import (
"context"
+ "fmt"
+ "time"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
@@ -170,7 +172,7 @@ func SyncSubCommitteePubkeys(syncCommittee *ethpb.SyncCommittee, subComIdx types
// modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
// return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
- if len(sig) != params.BeaconConfig().BLSPubkeyLength {
+ if len(sig) != params.BeaconConfig().BLSSignatureLength {
return false, errors.New("incorrect sig length")
}
@@ -179,3 +181,41 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
hashedSig := hashutil.Hash(sig)
return bytesutil.FromBytes8(hashedSig[:8])%modulo == 0, nil
}
// ValidateSyncMessageTime validates that the sync message's slot falls within the
// allowable time range relative to the local clock, given the allowed clock disparity.
func ValidateSyncMessageTime(slot types.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := helpers.ValidateSlotClock(slot, uint64(genesisTime.Unix())); err != nil {
return err
}
messageTime, err := helpers.SlotToTime(uint64(genesisTime.Unix()), slot)
if err != nil {
return err
}
currentSlot := helpers.SlotsSince(genesisTime)
slotStartTime, err := helpers.SlotToTime(uint64(genesisTime.Unix()), currentSlot)
if err != nil {
return err
}
lowestSlotBound := slotStartTime.Add(-clockDisparity)
currentLowerBound := time.Now().Add(-clockDisparity)
// If the slot's start time is earlier than the current allowable lower bound,
// use the slot's start time as the lower bound instead.
if slotStartTime.Before(currentLowerBound) {
lowestSlotBound = slotStartTime
}
lowerBound := lowestSlotBound
upperBound := time.Now().Add(clockDisparity)
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
return fmt.Errorf(
"sync message slot %d not within allowable range of %d to %d (current slot)",
slot,
lowerBound.Unix(),
upperBound.Unix(),
)
}
return nil
}
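A simplified sketch of the acceptance window this function enforces, assuming 12-second slots and a 500 ms clock disparity; it is illustrative only and omits the slot-clock validation and the lower-bound adjustment branch above.

package main

import (
	"fmt"
	"time"
)

// Assumed parameters for this sketch only.
const (
	secondsPerSlot = 12
	clockDisparity = 500 * time.Millisecond
)

func main() {
	// Hypothetical chain that started 100 slots ago.
	genesis := time.Now().Add(-100 * secondsPerSlot * time.Second)

	// Start time of the slot carried by an incoming sync message.
	messageSlot := uint64(100)
	messageTime := genesis.Add(time.Duration(messageSlot*secondsPerSlot) * time.Second)

	// Acceptance window in the spirit of ValidateSyncMessageTime:
	// from the current slot's start minus the disparity up to now plus the disparity.
	currentSlot := uint64(time.Since(genesis) / (secondsPerSlot * time.Second))
	lower := genesis.Add(time.Duration(currentSlot*secondsPerSlot) * time.Second).Add(-clockDisparity)
	upper := time.Now().Add(clockDisparity)

	fmt.Println("accepted:", !messageTime.Before(lower) && !messageTime.After(upper))
}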

View File

@@ -3,6 +3,7 @@ package altair_test
import (
"context"
"testing"
+"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
@@ -11,7 +12,9 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
+"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
+"github.com/prysmaticlabs/prysm/shared/timeutils"
)
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
@@ -278,6 +281,107 @@ func TestSyncSubCommitteePubkeys_CanGet(t *testing.T) {
}
func Test_ValidateSyncMessageTime(t *testing.T) {
if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
}
type args struct {
syncMessageSlot types.Slot
genesisTime time.Time
}
tests := []struct {
name string
args args
wantedErr string
}{
{
name: "sync_message.slot == current_slot",
args: args{
syncMessageSlot: 15,
genesisTime: timeutils.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
},
{
name: "sync_message.slot == current_slot, received in middle of slot",
args: args{
syncMessageSlot: 15,
genesisTime: timeutils.Now().Add(
-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(-(time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second)),
},
},
{
name: "sync_message.slot == current_slot, received 200ms early",
args: args{
syncMessageSlot: 16,
genesisTime: timeutils.Now().Add(
-16 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(-200 * time.Millisecond),
},
},
{
name: "sync_message.slot > current_slot",
args: args{
syncMessageSlot: 16,
genesisTime: timeutils.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
},
wantedErr: "sync message slot 16 not within allowable range of",
},
{
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
args: args{
syncMessageSlot: 100,
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second - params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "",
},
{
name: "sync_message.slot == current_slot+CLOCK_DISPARITY+200ms",
args: args{
syncMessageSlot: 100,
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second - params.BeaconNetworkConfig().MaximumGossipClockDisparity - 200*time.Millisecond)),
},
wantedErr: "sync message slot 100 not within allowable range of",
},
{
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
args: args{
syncMessageSlot: 100,
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "",
},
{
name: "sync_message.slot > current_slot+CLOCK_DISPARITY",
args: args{
syncMessageSlot: 101,
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
},
wantedErr: "sync message slot 101 not within allowable range of",
},
{
name: "sync_message.slot is well beyond current slot",
args: args{
syncMessageSlot: 1 << 32,
genesisTime: timeutils.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantedErr: "which exceeds max allowed value relative to the local clock",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := altair.ValidateSyncMessageTime(tt.args.syncMessageSlot, tt.args.genesisTime,
params.BeaconNetworkConfig().MaximumGossipClockDisparity)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
assert.NoError(t, err)
}
})
}
}
func getState(t *testing.T, count uint64) *stateAltair.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := 0; i < len(validators); i++ {

View File

@@ -0,0 +1,110 @@
package altair
import (
"context"
"github.com/pkg/errors"
e "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"go.opencensus.io/trace"
)
// ProcessEpoch performs the per-epoch operations on the beacon state.
// It is optimized by precomputing validator attestation records and the epoch's total/attested balances upfront.
//
// Spec code:
// def process_epoch(state: BeaconState) -> None:
// process_justification_and_finalization(state) # [Modified in Altair]
// process_inactivity_updates(state) # [New in Altair]
// process_rewards_and_penalties(state) # [Modified in Altair]
// process_registry_updates(state)
// process_slashings(state) # [Modified in Altair]
// process_eth1_data_reset(state)
// process_effective_balance_updates(state)
// process_slashings_reset(state)
// process_randao_mixes_reset(state)
// process_historical_roots_update(state)
// process_participation_flag_updates(state) # [New in Altair]
// process_sync_committee_updates(state) # [New in Altair]
func ProcessEpoch(ctx context.Context, state state.BeaconStateAltair) (state.BeaconStateAltair, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch")
defer span.End()
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
}
vp, bp, err := InitializeEpochValidators(ctx, state)
if err != nil {
return nil, err
}
// New in Altair.
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
if err != nil {
return nil, err
}
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
if err != nil {
return nil, errors.Wrap(err, "could not process justification")
}
// New in Altair.
state, vp, err = ProcessInactivityScores(ctx, state, vp)
if err != nil {
return nil, errors.Wrap(err, "could not process inactivity updates")
}
// New in Altair.
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
if err != nil {
return nil, errors.Wrap(err, "could not process rewards and penalties")
}
state, err = e.ProcessRegistryUpdates(state)
if err != nil {
return nil, errors.Wrap(err, "could not process registry updates")
}
// Modified in Altair.
state, err = ProcessSlashings(state)
if err != nil {
return nil, err
}
state, err = e.ProcessEth1DataReset(state)
if err != nil {
return nil, err
}
state, err = e.ProcessEffectiveBalanceUpdates(state)
if err != nil {
return nil, err
}
state, err = e.ProcessSlashingsReset(state)
if err != nil {
return nil, err
}
state, err = e.ProcessRandaoMixesReset(state)
if err != nil {
return nil, err
}
state, err = e.ProcessHistoricalRootsUpdate(state)
if err != nil {
return nil, err
}
// New in Altair.
state, err = ProcessParticipationFlagUpdates(state)
if err != nil {
return nil, err
}
// New in Altair.
state, err = ProcessSyncCommitteeUpdates(ctx, state)
if err != nil {
return nil, err
}
return state, nil
}
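A hypothetical call site (not part of this diff) showing where ProcessEpoch would typically be invoked during the state transition: only when the next slot crosses an epoch boundary. The helper name is illustrative.

package transition

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// processEpochIfBoundary runs the Altair epoch transition only when the next slot
// starts a new epoch; otherwise the state is returned unchanged.
func processEpochIfBoundary(ctx context.Context, st state.BeaconStateAltair) (state.BeaconStateAltair, error) {
	if (st.Slot()+1)%params.BeaconConfig().SlotsPerEpoch != 0 {
		return st, nil // not an epoch boundary; nothing to do
	}
	st, err := altair.ProcessEpoch(ctx, st)
	if err != nil {
		return nil, errors.Wrap(err, "could not process epoch")
	}
	return st, nil
}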

View File

@@ -0,0 +1,34 @@
package altair_test
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestProcessEpoch_CanProcess(t *testing.T) {
epoch := types.Epoch(1)
slashing := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
base := &ethpb.BeaconStateAltair{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)) + 1,
BlockRoots: make([][]byte, 128),
Slashings: slashing,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
JustificationBits: bitfield.Bitvector4{0x00},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
}
s, err := stateAltair.InitializeFromProto(base)
require.NoError(t, err)
require.NoError(t, s.SetValidators([]*ethpb.Validator{}))
newState, err := altair.ProcessEpoch(context.Background(), s)
require.NoError(t, err)
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
}

View File

@@ -0,0 +1,86 @@
package altair
import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
)
// SlashValidator slashes the validator with the given index. Compared to phase 0, this Altair
// version uses MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR and weighs the proposer reward by PROPOSER_WEIGHT.
//
// def slash_validator(state: BeaconState,
// slashed_index: ValidatorIndex,
// whistleblower_index: ValidatorIndex=None) -> None:
// """
// Slash the validator with index ``slashed_index``.
// """
// epoch = get_current_epoch(state)
// initiate_validator_exit(state, slashed_index)
// validator = state.validators[slashed_index]
// validator.slashed = True
// validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
// state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
// decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR)
//
// # Apply proposer and whistleblower rewards
// proposer_index = get_beacon_proposer_index(state)
// if whistleblower_index is None:
// whistleblower_index = proposer_index
// whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
// proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR)
// increase_balance(state, proposer_index, proposer_reward)
// increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
func SlashValidator(state state.BeaconState, slashedIdx types.ValidatorIndex) (state.BeaconState, error) {
state, err := validators.InitiateValidatorExit(state, slashedIdx)
if err != nil {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
}
currentEpoch := helpers.SlotToEpoch(state.Slot())
validator, err := state.ValidatorAtIndex(slashedIdx)
if err != nil {
return nil, err
}
validator.Slashed = true
maxWithdrawableEpoch := types.MaxEpoch(validator.WithdrawableEpoch, currentEpoch+params.BeaconConfig().EpochsPerSlashingsVector)
validator.WithdrawableEpoch = maxWithdrawableEpoch
if err := state.UpdateValidatorAtIndex(slashedIdx, validator); err != nil {
return nil, err
}
// Accumulate the validator's effective balance into the slashings vector entry for the current epoch (indexed modulo EPOCHS_PER_SLASHINGS_VECTOR).
slashings := state.Slashings()
currentSlashing := slashings[currentEpoch%params.BeaconConfig().EpochsPerSlashingsVector]
if err := state.UpdateSlashingsAtIndex(
uint64(currentEpoch%params.BeaconConfig().EpochsPerSlashingsVector),
currentSlashing+validator.EffectiveBalance,
); err != nil {
return nil, err
}
if err := helpers.DecreaseBalance(state, slashedIdx, validator.EffectiveBalance/params.BeaconConfig().MinSlashingPenaltyQuotientAltair); err != nil {
return nil, err
}
proposerIdx, err := helpers.BeaconProposerIndex(state)
if err != nil {
return nil, errors.Wrap(err, "could not get proposer idx")
}
// In this implementation, proposer is the whistleblower.
whistleBlowerIdx := proposerIdx
whistleblowerReward := validator.EffectiveBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
proposerReward := whistleblowerReward * params.BeaconConfig().ProposerWeight / params.BeaconConfig().WeightDenominator
err = helpers.IncreaseBalance(state, proposerIdx, proposerReward)
if err != nil {
return nil, err
}
err = helpers.IncreaseBalance(state, whistleBlowerIdx, whistleblowerReward-proposerReward)
if err != nil {
return nil, err
}
return state, nil
}
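To make the penalty and reward arithmetic concrete, here is a self-contained sketch using assumed Altair mainnet constants (MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR = 64, WHISTLEBLOWER_REWARD_QUOTIENT = 512, PROPOSER_WEIGHT = 8, WEIGHT_DENOMINATOR = 64); it is illustrative only and does not read the Prysm config.

package main

import "fmt"

// Assumed Altair mainnet constants for this sketch only.
const (
	gweiPerEth                       = uint64(1_000_000_000)
	minSlashingPenaltyQuotientAltair = uint64(64)
	whistleBlowerRewardQuotient      = uint64(512)
	proposerWeight                   = uint64(8)
	weightDenominator                = uint64(64)
)

func main() {
	// A validator at the 32 ETH maximum effective balance.
	effectiveBalance := 32 * gweiPerEth

	// Immediate penalty applied to the slashed validator's balance.
	penalty := effectiveBalance / minSlashingPenaltyQuotientAltair

	// Whistleblower reward, with the proposer's share weighted by PROPOSER_WEIGHT.
	whistleblowerReward := effectiveBalance / whistleBlowerRewardQuotient
	proposerReward := whistleblowerReward * proposerWeight / weightDenominator

	fmt.Println("penalty (Gwei):", penalty)                                            // 500,000,000
	fmt.Println("proposer reward (Gwei):", proposerReward)                             // 7,812,500
	fmt.Println("whistleblower remainder (Gwei):", whistleblowerReward-proposerReward) // 54,687,500
}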

View File

@@ -0,0 +1,68 @@
package altair_test
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestSlashValidator_OK(t *testing.T) {
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
registry := make([]*ethpb.Validator, 0, validatorCount)
balances := make([]uint64, 0, validatorCount)
for i := uint64(0); i < validatorCount; i++ {
registry = append(registry, &ethpb.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
base := &ethpb.BeaconStateAltair{
Validators: registry,
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Balances: balances,
}
state, err := stateAltair.InitializeFromProto(base)
require.NoError(t, err)
slashedIdx := types.ValidatorIndex(2)
proposer, err := helpers.BeaconProposerIndex(state)
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := altair.SlashValidator(state, slashedIdx)
require.NoError(t, err, "Could not slash validator")
state, ok := slashedState.(*stateAltair.BeaconState)
require.Equal(t, true, ok)
v, err := state.ValidatorAtIndex(slashedIdx)
require.NoError(t, err)
assert.Equal(t, true, v.Slashed, "Validator was not marked as slashed")
assert.Equal(t, helpers.CurrentEpoch(state)+params.BeaconConfig().EpochsPerSlashingsVector, v.WithdrawableEpoch, "Withdrawable epoch not the expected value")
maxBalance := params.BeaconConfig().MaxEffectiveBalance
slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")
whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
bal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
// The proposer is the whistleblower in phase 0.
assert.Equal(t, proposerBal+whistleblowerReward, bal, "Did not get expected balance for proposer")
bal, err = state.BalanceAtIndex(slashedIdx)
require.NoError(t, err)
v, err = state.ValidatorAtIndex(slashedIdx)
require.NoError(t, err)
assert.Equal(t, maxBalance-(v.EffectiveBalance/params.BeaconConfig().MinSlashingPenaltyQuotientAltair), bal, "Did not get expected balance for slashed validator")
}

View File

@@ -36,6 +36,7 @@ go_library(
"//shared/depositutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/mathutil:go_default_library",
+"//shared/p2putils:go_default_library",
"//shared/params:go_default_library",
"//shared/slashutil:go_default_library",
"//shared/sliceutil:go_default_library",
@@ -65,6 +66,7 @@ go_test(
"proposer_slashing_regression_test.go",
"proposer_slashing_test.go",
"randao_test.go",
+"signature_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
View File

@@ -9,9 +9,10 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
+"github.com/prysmaticlabs/prysm/shared/p2putils"
"github.com/prysmaticlabs/prysm/shared/params"
)
@@ -21,7 +22,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet,
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
-signingData := &statepb.SigningData{
+signingData := &ethpb.SigningData{
ObjectRoot: signedData,
Domain: domain,
}
@@ -77,6 +78,27 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
return helpers.VerifyBlockSigningRoot(proposerPubKey, sig, domain, rootFunc)
}
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. It differs
// from the method above in that it does not use the fork data stored in the state, but instead derives
// the fork from the block's epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk block.SignedBeaconBlock) error {
currentEpoch := helpers.SlotToEpoch(blk.Block().Slot())
fork, err := p2putils.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := helpers.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(blk.Block().ProposerIndex())
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return helpers.VerifyBlockSigningRoot(proposerPubKey, blk.Signature(), domain, blk.Block().HashTreeRoot)
}
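A hypothetical wrapper (not part of this diff) showing the intended use: verifying a block whose epoch may differ from the verifying state's fork, so the fork data is derived from the block's own epoch. The function name is illustrative.

package verify

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
)

// checkProposerSignature verifies the proposer signature against fork data derived
// from the block's epoch rather than from the state's fork field.
func checkProposerSignature(st state.ReadOnlyBeaconState, blk block.SignedBeaconBlock) error {
	if err := blocks.VerifyBlockSignatureUsingCurrentFork(st, blk); err != nil {
		return errors.Wrap(err, "invalid proposer signature")
	}
	return nil
}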
// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state.
func BlockSignatureSet(beaconState state.ReadOnlyBeaconState,
proposerIndex types.ValidatorIndex,

View File

@@ -0,0 +1,41 @@
package blocks_test
import (
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
wrapperv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig()
bCfg.AltairForkEpoch = 100
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = 100
params.OverrideBeaconConfig(bCfg)
bState, keys := testutil.DeterministicGenesisState(t, 100)
altairBlk := testutil.NewBeaconBlockAltair()
altairBlk.Block.ProposerIndex = 0
altairBlk.Block.Slot = params.BeaconConfig().SlotsPerEpoch * 100
fData := &ethpb.Fork{
Epoch: 100,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
}
domain, err := helpers.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorRoot())
assert.NoError(t, err)
rt, err := helpers.ComputeSigningRoot(altairBlk.Block, domain)
assert.NoError(t, err)
sig := keys[0].Sign(rt[:]).Marshal()
altairBlk.Signature = sig
wsb, err := wrapperv2.WrappedAltairSignedBeaconBlock(altairBlk)
require.NoError(t, err)
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb))
}

View File

@@ -51,6 +51,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
+"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -75,6 +76,7 @@ go_test(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
+"//beacon-chain/state/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",

View File

@@ -17,10 +17,12 @@ import (
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
+log "github.com/sirupsen/logrus"
)
var committeeCache = cache.NewCommitteesCache()
var proposerIndicesCache = cache.NewProposerIndicesCache()
+var syncCommitteeCache = cache.NewSyncCommittee()
// SlotCommitteeCount returns the number of crosslink committees of a slot. The
// active validator count is provided as an argument rather than a imported implementation
@@ -271,26 +273,21 @@ func VerifyAttestationBitfieldLengths(state state.ReadOnlyBeaconState, att *ethp
return nil
}
-// ShuffledIndices uses input beacon state and returns the shuffled indices of the input epoch,
-// the shuffled indices then can be used to break up into committees.
-func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.ValidatorIndex, error) {
-seed, err := Seed(s, epoch, params.BeaconConfig().DomainBeaconAttester)
-if err != nil {
-return nil, errors.Wrapf(err, "could not get seed for epoch %d", epoch)
-}
+// Returns the active indices and the total active balance of the validators in input `state` and during input `epoch`.
+func activeIndicesAndBalance(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.ValidatorIndex, uint64, error) {
+balances := uint64(0)
indices := make([]types.ValidatorIndex, 0, s.NumValidators())
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
+balances += val.EffectiveBalance()
indices = append(indices, types.ValidatorIndex(idx))
}
return nil
}); err != nil {
-return nil, err
+return nil, 0, err
}
-// UnshuffleList is used as an optimized implementation for raw speed.
-return UnshuffleList(indices, seed)
+return indices, balances, nil
}
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
@@ -306,7 +303,13 @@ func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) er
return nil
}
-shuffledIndices, err := ShuffledIndices(state, e)
+indices, balance, err := activeIndicesAndBalance(state, e)
+if err != nil {
+return err
+}
+// Get the shuffled indices based on the seed.
+shuffledIndices, err := UnshuffleList(indices, seed)
if err != nil {
return err
}
@@ -322,11 +325,23 @@ func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) er
return sortedIndices[i] < sortedIndices[j]
})
+// Only update active balance field in cache if it's current epoch.
+// Using current epoch state to update next epoch field will cause insert an invalid
+// active balance value.
+b := &cache.Balance{}
+if e == epoch {
+b = &cache.Balance{
+Exist: true,
+Total: balance,
+}
+}
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
+ActiveBalance: b,
}); err != nil {
return err
}
@@ -383,10 +398,200 @@ func UpdateProposerIndicesInCache(state state.ReadOnlyBeaconState) error {
})
}
-// ClearCache clears the committee cache
+// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
committeeCache = cache.NewCommitteesCache()
proposerIndicesCache = cache.NewProposerIndicesCache()
+syncCommitteeCache = cache.NewSyncCommittee()
}
// IsCurrentPeriodSyncCommittee returns true if the validator at the input index belongs to the
// current period's sync committee.
// 1.) It first checks whether the validator's position exists in the sync committee cache.
// 2.) On a cache miss, it falls back to the current sync committee object in the input state.
func IsCurrentPeriodSyncCommittee(
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return false, nil
}
committee, err := st.CurrentSyncCommittee()
if err != nil {
return false, err
}
// Fill in the cache on miss.
go func() {
if err := syncCommitteeCache.UpdatePositionsInCommittee(bytesutil.ToBytes32(root), st); err != nil {
log.Errorf("Could not fill sync committee cache on miss: %v", err)
}
}()
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
}
if err != nil {
return false, err
}
return len(indices) > 0, nil
}
// IsNextPeriodSyncCommittee returns true if the validator at the input index belongs to the
// next period's sync committee.
// 1.) It first checks whether the validator's position exists in the sync committee cache.
// 2.) On a cache miss, it falls back to the next sync committee object in the input state.
func IsNextPeriodSyncCommittee(
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
indices, err := syncCommitteeCache.NextPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return false, nil
}
committee, err := st.NextSyncCommittee()
if err != nil {
return false, err
}
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
}
if err != nil {
return false, err
}
return len(indices) > 0, nil
}
// CurrentPeriodSyncSubcommitteeIndices returns the subcommittee indices of the
// current period sync committee for input validator.
func CurrentPeriodSyncSubcommitteeIndices(
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
) ([]types.CommitteeIndex, error) {
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return nil, nil
}
committee, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
// Fill in the cache on miss.
go func() {
if err := syncCommitteeCache.UpdatePositionsInCommittee(bytesutil.ToBytes32(root), st); err != nil {
log.Errorf("Could not fill sync committee cache on miss: %v", err)
}
}()
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
}
if err != nil {
return nil, err
}
return indices, nil
}
// NextPeriodSyncSubcommitteeIndices returns the subcommittee indices of the next period sync committee for input validator.
func NextPeriodSyncSubcommitteeIndices(
st state.BeaconStateAltair, valIdx types.ValidatorIndex,
) ([]types.CommitteeIndex, error) {
root, err := syncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
indices, err := syncCommitteeCache.NextPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
return nil, nil
}
committee, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
}
if err != nil {
return nil, err
}
return indices, nil
}
// UpdateSyncCommitteeCache updates the sync committee cache.
// It uses the state's latest block header root as the key. To avoid misuse, it rejects
// a block header whose state root is zeroed out.
func UpdateSyncCommitteeCache(st state.BeaconStateAltair) error {
nextSlot := st.Slot() + 1
if nextSlot%params.BeaconConfig().SlotsPerEpoch != 0 {
return errors.New("not at the end of the epoch to update cache")
}
if SlotToEpoch(nextSlot)%params.BeaconConfig().EpochsPerSyncCommitteePeriod != 0 {
return errors.New("not at sync committee period boundary to update cache")
}
header := st.LatestBlockHeader()
if bytes.Equal(header.StateRoot, params.BeaconConfig().ZeroHash[:]) {
return errors.New("zero hash state root can't be used to update cache")
}
prevBlockRoot, err := header.HashTreeRoot()
if err != nil {
return err
}
return syncCommitteeCache.UpdatePositionsInCommittee(prevBlockRoot, st)
}
// Loop through `pubKeys` for matching `pubKey` and get the indices where it matches.
func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []types.CommitteeIndex {
var indices []types.CommitteeIndex
for i, k := range pubKeys {
if bytes.Equal(k, pubKey) {
indices = append(indices, types.CommitteeIndex(i))
}
}
return indices
}
// Retrieve the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([]byte, error) {
// Can't call `BlockRoot` until the first slot.
if st.Slot() == params.BeaconConfig().GenesisSlot {
return params.BeaconConfig().ZeroHash[:], nil
}
startEpoch, err := SyncCommitteePeriodStartEpoch(CurrentEpoch(st))
if err != nil {
return nil, err
}
startEpochSlot, err := StartSlot(startEpoch)
if err != nil {
return nil, err
}
// Prevent underflow
if startEpochSlot >= 1 {
startEpochSlot--
}
return BlockRootAtSlot(st, startEpochSlot)
}
// This computes proposer indices of the current epoch and returns a list of proposer indices,
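A hypothetical helper (not part of this diff) sketching how the new cache-backed helpers compose for validator duty lookups: a cheap membership check first, then the subcommittee positions for a member of the current period's sync committee. The function name is illustrative.

package duties

import (
	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// syncDutyFor returns the subcommittee indices for a validator, or nil if the
// validator has no sync committee duty in the current period.
func syncDutyFor(st state.BeaconStateAltair, idx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
	inCommittee, err := helpers.IsCurrentPeriodSyncCommittee(st, idx)
	if err != nil {
		return nil, errors.Wrap(err, "could not check sync committee membership")
	}
	if !inCommittee {
		return nil, nil // no sync committee duty this period
	}
	return helpers.CurrentPeriodSyncSubcommitteeIndices(st, idx)
}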

View File

@@ -2,15 +2,16 @@
package helpers
import (
"fmt"
-"reflect"
"strconv"
"testing"
+"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
+"github.com/prysmaticlabs/prysm/beacon-chain/cache"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
+v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -35,7 +36,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
Slot: 200,
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
@@ -91,7 +92,7 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
epoch := types.Epoch(1)
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
})
require.NoError(t, err)
@@ -111,7 +112,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -142,7 +143,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -219,7 +220,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -243,7 +244,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
Slot: 2 * params.BeaconConfig().SlotsPerEpoch, // epoch 2
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -280,7 +281,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: activeRoots,
})
@@ -368,38 +369,6 @@
}
}
-func TestShuffledIndices_ShuffleRightLength(t *testing.T) {
-valiatorCount := 1000
-validators := make([]*ethpb.Validator, valiatorCount)
-indices := make([]uint64, valiatorCount)
-for i := 0; i < valiatorCount; i++ {
-validators[i] = &ethpb.Validator{
-ExitEpoch: params.BeaconConfig().FarFutureEpoch,
-}
-indices[i] = uint64(i)
-}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
-Validators: validators,
-RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
-})
-require.NoError(t, err)
-// Test for current epoch
-shuffledIndices, err := ShuffledIndices(state, 0)
-require.NoError(t, err)
-assert.Equal(t, valiatorCount, len(shuffledIndices), "Incorrect shuffled indices count")
-if reflect.DeepEqual(indices, shuffledIndices) {
-t.Error("Shuffling did not happen")
-}
-// Test for next epoch
-shuffledIndices, err = ShuffledIndices(state, 1)
-require.NoError(t, err)
-assert.Equal(t, valiatorCount, len(shuffledIndices), "Incorrect shuffled indices count")
-if reflect.DeepEqual(indices, shuffledIndices) {
-t.Error("Shuffling did not happen")
-}
-}
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
@@ -407,11 +376,12 @@
indices := make([]types.ValidatorIndex, validatorCount)
for i := types.ValidatorIndex(0); uint64(i) < validatorCount; i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
+EffectiveBalance: 1,
}
indices[i] = i
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
@@ -426,6 +396,13 @@
indices, err = committeeCache.Committee(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
+// Total active balance should be `MinGenesisActiveValidatorCount` given each validator has effective balance of 1.
+seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
+require.NoError(t, err)
+balance, err := committeeCache.ActiveBalance(seed)
+require.NoError(t, err)
+require.Equal(t, validatorCount, balance)
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
@@ -435,7 +412,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
@@ -469,7 +446,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
@@ -503,7 +480,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
@@ -538,7 +515,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
@@ -573,7 +550,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
@@ -610,7 +587,7 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch,
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -635,7 +612,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
}
}
-state, err := v1.InitializeFromProto(&statepb.BeaconState{
+state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
@@ -659,3 +636,373 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
}
assert.DeepEqual(t, wantedProposerIndices, proposerIndices, "Did not precompute proposer indices correctly")
}
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
require.NoError(t, err)
require.Equal(t, false, ok)
}
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
ok, err := IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
require.NoError(t, err)
require.Equal(t, false, ok)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
root, err := syncPeriodBoundaryRoot(state)
require.NoError(t, err)
// Test that cache was empty.
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
// Test that helper can retrieve the index given empty cache.
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
// Test that cache was able to fill on miss.
time.Sleep(100 * time.Millisecond)
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), 0)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex(nil), index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ClearCache()
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex(nil), index)
}
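
The two *_DoesNotExist cases fix the lookup's contract: a validator with no seat in the committee yields a nil slice rather than an error. A minimal caller-side sketch of that contract, written against the in-package helper these tests call (the inSyncCommittee name and the state parameter type are ours, purely illustrative):

// inSyncCommittee is an illustrative wrapper, not part of the package API.
// It consumes the convention the tests above pin down: a nil/empty slice
// means the validator holds no position in the current-period sync committee.
func inSyncCommittee(st state.BeaconState, idx types.ValidatorIndex) (bool, error) {
	positions, err := CurrentPeriodSyncSubcommitteeIndices(st, idx)
	if err != nil {
		return false, err
	}
	return len(positions) > 0, nil
}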
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 1,
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at the end of the epoch to update cache", err)
state, err = v1.InitializeFromProto(&ethpb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch - 1,
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
}
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
}
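
The two failure cases above encode the preconditions for refreshing the sync committee cache. A sketch of that guard, reconstructed only from the error strings these tests assert (the function name and the exact checks are ours, not the package's implementation; it assumes the standard bytes and errors imports):

// canUpdateSyncCommitteeCache is illustrative only: it restates the three
// preconditions the tests above exercise. The cache may only be refreshed
// from the final slot of the final epoch in a sync committee period, and the
// latest block header must carry a real (non-zero) state root.
func canUpdateSyncCommitteeCache(st state.BeaconState) error {
	nextSlot := st.Slot() + 1
	if !IsEpochStart(nextSlot) {
		return errors.New("not at the end of the epoch to update cache")
	}
	if SlotToEpoch(nextSlot)%params.BeaconConfig().EpochsPerSyncCommitteePeriod != 0 {
		return errors.New("not at sync committee period boundary to update cache")
	}
	if bytes.Equal(st.LatestBlockHeader().StateRoot, params.BeaconConfig().ZeroHash[:]) {
		return errors.New("zero hash state root can't be used to update cache")
	}
	return nil
}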


@@ -1,7 +1,9 @@
 package helpers

 import (
+	"github.com/pkg/errors"
 	types "github.com/prysmaticlabs/eth2-types"
+	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/shared/params"
 )
@@ -47,8 +49,23 @@ func TotalBalance(state state.ReadOnlyValidators, indices []types.ValidatorIndex
 //    """
 //    return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state))))
 func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
-	total := uint64(0)
+	// Check if the active balance exists in cache.
 	epoch := SlotToEpoch(s.Slot())
+	seed, err := Seed(s, epoch, params.BeaconConfig().DomainBeaconAttester)
+	if err != nil {
+		return 0, errors.Wrap(err, "could not get seed")
+	}
+	activeBalance, err := committeeCache.ActiveBalance(seed)
+	if err == nil {
+		return activeBalance, nil
+	}
+	if err != cache.ErrNonCommitteeKey {
+		return 0, errors.Wrap(err, "could not interface with committee cache")
+	}
+	// Cache miss. Manually compute the active balance and fill the cache.
+	total := uint64(0)
 	if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
 		if IsActiveValidatorUsingTrie(val, epoch) {
 			total += val.EffectiveBalance()
@@ -57,6 +74,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
 	}); err != nil {
 		return 0, err
 	}
 	return total, nil
 }
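
The new fast path above is a cache-aside lookup keyed by the epoch's attester-domain seed, with cache.ErrNonCommitteeKey acting as the sentinel for "not cached yet". A generic, standard-library-only restatement of that branch structure (the names here are ours, not part of the package):

// errCacheMiss plays the role cache.ErrNonCommitteeKey plays above: it lets a
// caller tell an ordinary miss apart from a real failure talking to the cache.
var errCacheMiss = errors.New("cache miss")

// lookupOrCompute is illustrative only: hit -> return the cached value,
// sentinel miss -> fall back to the full computation, anything else -> fail.
func lookupOrCompute(get func() (uint64, error), compute func() (uint64, error)) (uint64, error) {
	v, err := get()
	if err == nil {
		return v, nil
	}
	if !errors.Is(err, errCacheMiss) {
		return 0, err
	}
	return compute()
}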


@@ -4,16 +4,16 @@ import (
"testing" "testing"
types "github.com/prysmaticlabs/eth2-types" types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert" "github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require" "github.com/prysmaticlabs/prysm/shared/testutil/require"
) )
func TestTotalBalance_OK(t *testing.T) { func TestTotalBalance_OK(t *testing.T) {
state, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{ state, err := v1.InitializeFromProto(&ethpb.BeaconState{Validators: []*ethpb.Validator{
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9}, {EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
{EffectiveBalance: 32 * 1e9}, {EffectiveBalance: 40 * 1e9}, {EffectiveBalance: 32 * 1e9}, {EffectiveBalance: 40 * 1e9},
}}) }})
@@ -26,7 +26,7 @@ func TestTotalBalance_OK(t *testing.T) {
} }
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) { func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
state, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{}}) state, err := v1.InitializeFromProto(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
require.NoError(t, err) require.NoError(t, err)
balance := TotalBalance(state, []types.ValidatorIndex{}) balance := TotalBalance(state, []types.ValidatorIndex{})
@@ -35,7 +35,7 @@ func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
} }
func TestTotalActiveBalance_OK(t *testing.T) { func TestTotalActiveBalance_OK(t *testing.T) {
state, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{ state, err := v1.InitializeFromProto(&ethpb.BeaconState{Validators: []*ethpb.Validator{
{ {
EffectiveBalance: 32 * 1e9, EffectiveBalance: 32 * 1e9,
ExitEpoch: params.BeaconConfig().FarFutureEpoch, ExitEpoch: params.BeaconConfig().FarFutureEpoch,
@@ -55,6 +55,14 @@ func TestTotalActiveBalance_OK(t *testing.T) {
}}) }})
require.NoError(t, err) require.NoError(t, err)
// Validate that cache miss to start with.
epoch := SlotToEpoch(state.Slot())
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
_, err = committeeCache.ActiveBalance(seed)
require.Equal(t, cache.ErrNonCommitteeKey, err)
// Validate manual calculation passes.
balance, err := TotalActiveBalance(state) balance, err := TotalActiveBalance(state)
assert.NoError(t, err) assert.NoError(t, err)
wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance + wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
@@ -74,7 +82,7 @@ func TestGetBalance_OK(t *testing.T) {
{i: 2, b: []uint64{0, 0, 0}}, {i: 2, b: []uint64{0, 0, 0}},
} }
for _, test := range tests { for _, test := range tests {
state, err := v1.InitializeFromProto(&statepb.BeaconState{Balances: test.b}) state, err := v1.InitializeFromProto(&ethpb.BeaconState{Balances: test.b})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, test.b[test.i], state.Balances()[test.i], "Incorrect Validator balance") assert.Equal(t, test.b[test.i], state.Balances()[test.i], "Incorrect Validator balance")
} }
@@ -92,7 +100,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9}, {i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
} }
for _, test := range tests { for _, test := range tests {
state, err := v1.InitializeFromProto(&statepb.BeaconState{ state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},
Balances: test.b, Balances: test.b,
@@ -116,7 +124,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0}, {i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
} }
for _, test := range tests { for _, test := range tests {
state, err := v1.InitializeFromProto(&statepb.BeaconState{ state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 3}}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 3}},
Balances: test.b, Balances: test.b,
@@ -179,7 +187,7 @@ func TestIsInInactivityLeak(t *testing.T) {
assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false") assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
} }
func buildState(slot types.Slot, validatorCount uint64) *statepb.BeaconState { func buildState(slot types.Slot, validatorCount uint64) *ethpb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount) validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ { for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{ validators[i] = &ethpb.Validator{
@@ -205,7 +213,7 @@ func buildState(slot types.Slot, validatorCount uint64) *statepb.BeaconState {
for i := 0; i < len(latestRandaoMixes); i++ { for i := 0; i < len(latestRandaoMixes); i++ {
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:] latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
} }
return &statepb.BeaconState{ return &ethpb.BeaconState{
Slot: slot, Slot: slot,
Balances: validatorBalances, Balances: validatorBalances,
Validators: validators, Validators: validators,


@@ -208,8 +208,8 @@ func PrevSlot(slot types.Slot) types.Slot {
 // Spec code:
 // def compute_sync_committee_period(epoch: Epoch) -> uint64:
 //    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
-func SyncCommitteePeriod(e types.Epoch) uint64 {
-	return uint64(e / params.BeaconConfig().EpochsPerSyncCommitteePeriod)
+func SyncCommitteePeriod(epoch types.Epoch) uint64 {
+	return uint64(epoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod)
 }

 // SyncCommitteePeriodStartEpoch returns the start epoch of a sync committee period.
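
A worked example of the integer division above, assuming the mainnet preset of 256 epochs per sync committee period (example-style function, illustrative only, assuming the fmt import):

// ExampleSyncCommitteePeriod shows the period boundaries under the assumed
// mainnet preset EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256.
func ExampleSyncCommitteePeriod() {
	fmt.Println(SyncCommitteePeriod(255), SyncCommitteePeriod(256), SyncCommitteePeriod(513))
	// Output: 0 1 2
}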


@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/cache"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
@@ -45,7 +44,7 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
{a: 64, b: true}, {a: 64, b: true},
} }
val := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100} val := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{val}}) beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{Validators: []*ethpb.Validator{val}})
require.NoError(t, err) require.NoError(t, err)
for _, test := range tests { for _, test := range tests {
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0) readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
@@ -210,7 +209,7 @@ func TestIsSlashableValidatorUsingTrie_OK(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{Validators: []*ethpb.Validator{test.validator}}) beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{Validators: []*ethpb.Validator{test.validator}})
require.NoError(t, err) require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0) readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err) require.NoError(t, err)
@@ -234,7 +233,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
} }
} }
state, err := v1.InitializeFromProto(&statepb.BeaconState{ state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators, Validators: validators,
Slot: 0, Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -293,7 +292,7 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
roots[i] = make([]byte, 32) roots[i] = make([]byte, 32)
} }
state, err := v1.InitializeFromProto(&statepb.BeaconState{ state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators, Validators: validators,
Slot: 0, Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -317,7 +316,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
} }
} }
state, err := v1.InitializeFromProto(&statepb.BeaconState{ state, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: validators, Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
}) })
@@ -364,7 +363,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
ExitEpoch: params.BeaconConfig().FarFutureEpoch, ExitEpoch: params.BeaconConfig().FarFutureEpoch,
} }
} }
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{ beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 0, Slot: 0,
Validators: validators, Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -400,7 +399,7 @@ func TestChurnLimit_OK(t *testing.T) {
} }
} }
beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{ beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 1, Slot: 1,
Validators: validators, Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -415,8 +414,8 @@ func TestChurnLimit_OK(t *testing.T) {
} }
func TestDomain_OK(t *testing.T) { func TestDomain_OK(t *testing.T) {
state := &statepb.BeaconState{ state := &ethpb.BeaconState{
Fork: &statepb.Fork{ Fork: &ethpb.Fork{
Epoch: 3, Epoch: 3,
PreviousVersion: []byte{0, 0, 0, 2}, PreviousVersion: []byte{0, 0, 0, 2},
CurrentVersion: []byte{0, 0, 0, 3}, CurrentVersion: []byte{0, 0, 0, 3},
@@ -445,7 +444,7 @@ func TestDomain_OK(t *testing.T) {
func TestActiveValidatorIndices(t *testing.T) { func TestActiveValidatorIndices(t *testing.T) {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch farFutureEpoch := params.BeaconConfig().FarFutureEpoch
type args struct { type args struct {
state *statepb.BeaconState state *ethpb.BeaconState
epoch types.Epoch epoch types.Epoch
} }
tests := []struct { tests := []struct {
@@ -457,7 +456,7 @@ func TestActiveValidatorIndices(t *testing.T) {
{ {
name: "all_active_epoch_10", name: "all_active_epoch_10",
args: args{ args: args{
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
@@ -481,7 +480,7 @@ func TestActiveValidatorIndices(t *testing.T) {
{ {
name: "some_active_epoch_10", name: "some_active_epoch_10",
args: args{ args: args{
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
@@ -505,7 +504,7 @@ func TestActiveValidatorIndices(t *testing.T) {
{ {
name: "some_active_with_recent_new_epoch_10", name: "some_active_with_recent_new_epoch_10",
args: args{ args: args{
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
@@ -533,7 +532,7 @@ func TestActiveValidatorIndices(t *testing.T) {
{ {
name: "some_active_with_recent_new_epoch_10", name: "some_active_with_recent_new_epoch_10",
args: args{ args: args{
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
@@ -561,7 +560,7 @@ func TestActiveValidatorIndices(t *testing.T) {
{ {
name: "some_active_with_recent_new_epoch_10", name: "some_active_with_recent_new_epoch_10",
args: args{ args: args{
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
@@ -698,7 +697,7 @@ func TestComputeProposerIndex(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
bState := &statepb.BeaconState{Validators: tt.args.validators} bState := &ethpb.BeaconState{Validators: tt.args.validators}
stTrie, err := v1.InitializeFromProtoUnsafe(bState) stTrie, err := v1.InitializeFromProtoUnsafe(bState)
require.NoError(t, err) require.NoError(t, err)
got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed) got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
@@ -739,20 +738,20 @@ func TestIsIsEligibleForActivation(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
validator *ethpb.Validator validator *ethpb.Validator
state *statepb.BeaconState state *ethpb.BeaconState
want bool want bool
}{ }{
{"Eligible", {"Eligible",
&ethpb.Validator{ActivationEligibilityEpoch: 1, ActivationEpoch: params.BeaconConfig().FarFutureEpoch}, &ethpb.Validator{ActivationEligibilityEpoch: 1, ActivationEpoch: params.BeaconConfig().FarFutureEpoch},
&statepb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2}}, &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2}},
true}, true},
{"Not yet finalized", {"Not yet finalized",
&ethpb.Validator{ActivationEligibilityEpoch: 1, ActivationEpoch: params.BeaconConfig().FarFutureEpoch}, &ethpb.Validator{ActivationEligibilityEpoch: 1, ActivationEpoch: params.BeaconConfig().FarFutureEpoch},
&statepb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}, &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}},
false}, false},
{"Incorrect activation epoch", {"Incorrect activation epoch",
&ethpb.Validator{ActivationEligibilityEpoch: 1}, &ethpb.Validator{ActivationEligibilityEpoch: 1},
&statepb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2}}, &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2}},
false}, false},
} }
for _, tt := range tests { for _, tt := range tests {


@@ -192,6 +192,7 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			helpers.ClearCache()
 			got, err := helpers.IsWithinWeakSubjectivityPeriod(tt.epoch, tt.genWsState(), tt.genWsCheckpoint())
 			if tt.wantedErr != "" {
 				assert.Equal(t, false, got)


@@ -26,6 +26,7 @@ go_library(
], ],
deps = [ deps = [
"//beacon-chain/cache:go_default_library", "//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library", "//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library", "//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library", "//beacon-chain/core/epoch/precompute:go_default_library",
@@ -43,6 +44,7 @@ go_library(
"//shared/mathutil:go_default_library", "//shared/mathutil:go_default_library",
"//shared/params:go_default_library", "//shared/params:go_default_library",
"//shared/traceutil:go_default_library", "//shared/traceutil:go_default_library",
"//shared/version:go_default_library",
"@com_github_pkg_errors//:go_default_library", "@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -56,6 +58,7 @@ go_test(
name = "go_default_test", name = "go_default_test",
size = "small", size = "small",
srcs = [ srcs = [
"altair_transition_no_verify_sig_test.go",
"benchmarks_test.go", "benchmarks_test.go",
"skip_slot_cache_test.go", "skip_slot_cache_test.go",
"state_fuzz_test.go", "state_fuzz_test.go",
@@ -71,8 +74,10 @@ go_test(
embed = [":go_default_library"], embed = [":go_default_library"],
shard_count = 3, shard_count = 3,
deps = [ deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library", "//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/state:go_default_library", "//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library", "//beacon-chain/state/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library", "//proto/prysm/v1alpha1:go_default_library",
@@ -81,6 +86,7 @@ go_test(
"//shared/benchutil:go_default_library", "//shared/benchutil:go_default_library",
"//shared/bls:go_default_library", "//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library", "//shared/bytesutil:go_default_library",
"//shared/copyutil:go_default_library",
"//shared/hashutil:go_default_library", "//shared/hashutil:go_default_library",
"//shared/params:go_default_library", "//shared/params:go_default_library",
"//shared/testutil:go_default_library", "//shared/testutil:go_default_library",


@@ -0,0 +1,221 @@
package state_test
import (
"context"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/copyutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
eth1Data := &ethpb.Eth1Data{
DepositCount: 100,
DepositRoot: bytesutil.PadTo([]byte{2}, 32),
BlockHash: make([]byte, 32),
}
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
e := beaconState.Eth1Data()
e.DepositCount = 100
require.NoError(t, beaconState.SetEth1Data(e))
bh := beaconState.LatestBlockHeader()
bh.Slot = beaconState.Slot()
require.NoError(t, beaconState.SetLatestBlockHeader(bh))
require.NoError(t, beaconState.SetEth1DataVotes([]*ethpb.Eth1Data{eth1Data}))
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+1))
epoch := helpers.CurrentEpoch(beaconState)
randaoReveal, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := core.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(nextSlotState)
require.NoError(t, err)
block := testutil.NewBeaconBlockAltair()
block.Block.ProposerIndex = proposerIdx
block.Block.Slot = beaconState.Slot() + 1
block.Block.ParentRoot = parentRoot[:]
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xff
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
h := copyutil.CopyBeaconBlockHeader(beaconState.LatestBlockHeader())
prevStateRoot, err := beaconState.HashTreeRoot(context.Background())
require.NoError(t, err)
h.StateRoot = prevStateRoot[:]
pbr, err := h.HashTreeRoot()
require.NoError(t, err)
syncSigs := make([]bls.Signature, len(indices))
for i, indice := range indices {
b := p2pType.SSZBytes(pbr[:])
sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
syncSigs[i] = sig
}
aggregatedSig := bls.AggregateSignatures(syncSigs).Marshal()
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: aggregatedSig,
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := core.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
c := beaconState.Copy()
sig, err := testutil.BlockSignatureAltair(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
require.Equal(t, true, verified, "Could not verify signature set")
}
func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
eth1Data := &ethpb.Eth1Data{
DepositCount: 100,
DepositRoot: bytesutil.PadTo([]byte{2}, 32),
BlockHash: make([]byte, 32),
}
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
e := beaconState.Eth1Data()
e.DepositCount = 100
require.NoError(t, beaconState.SetEth1Data(e))
bh := beaconState.LatestBlockHeader()
bh.Slot = beaconState.Slot()
require.NoError(t, beaconState.SetLatestBlockHeader(bh))
require.NoError(t, beaconState.SetEth1DataVotes([]*ethpb.Eth1Data{eth1Data}))
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+1))
epoch := helpers.CurrentEpoch(beaconState)
randaoReveal, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := core.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(nextSlotState)
require.NoError(t, err)
block := testutil.NewBeaconBlockAltair()
block.Block.ProposerIndex = proposerIdx
block.Block.Slot = beaconState.Slot() + 1
block.Block.ParentRoot = parentRoot[:]
block.Block.Body.RandaoReveal = randaoReveal
block.Block.Body.Eth1Data = eth1Data
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xff
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
h := copyutil.CopyBeaconBlockHeader(beaconState.LatestBlockHeader())
prevStateRoot, err := beaconState.HashTreeRoot(context.Background())
require.NoError(t, err)
h.StateRoot = prevStateRoot[:]
pbr, err := h.HashTreeRoot()
require.NoError(t, err)
syncSigs := make([]bls.Signature, len(indices))
for i, indice := range indices {
b := p2pType.SSZBytes(pbr[:])
sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
syncSigs[i] = sig
}
aggregatedSig := bls.AggregateSignatures(syncSigs).Marshal()
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
SyncCommitteeSignature: aggregatedSig,
}
block.Block.Body.SyncAggregate = syncAggregate
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
stateRoot, err := core.CalculateStateRoot(context.Background(), beaconState, wsb)
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
c := beaconState.Copy()
sig, err := testutil.BlockSignatureAltair(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
_, _, err = core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "could not validate state root", err)
}
func TestExecuteStateTransitionNoVerifyAnySig_PassesProcessingConditions(t *testing.T) {
beaconState, block := createFullAltairBlockWithOperations(t)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
set, _, err := core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
require.NoError(t, err)
// Test Signature set verifies.
verified, err := set.Verify()
require.NoError(t, err)
require.Equal(t, true, verified, "Could not verify signature set")
}
func createFullAltairBlockWithOperations(t *testing.T) (state.BeaconStateAltair,
*ethpb.SignedBeaconBlockAltair) {
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 32)
sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
assert.NoError(t, err)
assert.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
tState := beaconState.Copy()
blk, err := testutil.GenerateFullBlockAltair(tState, privKeys,
&testutil.BlockGenConfig{NumAttestations: 1, NumVoluntaryExits: 0, NumDeposits: 0}, 1)
require.NoError(t, err)
return beaconState, blk
}
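
Both tests above build their SyncAggregate the same way: every committee member signs the previous block root under the sync committee domain, and the signatures are BLS-aggregated with all participation bits set. A sketch that distills those steps into one helper (the name and parameter types are ours, inferred from how the tests use these values and assuming the eth2-types import; this is not an exported testutil API):

// buildFullSyncAggregate is illustrative only: it repackages the signing and
// aggregation steps performed inline in the tests above.
func buildFullSyncAggregate(t *testing.T, st state.BeaconStateAltair, prevBlockRoot [32]byte, privKeys []bls.SecretKey, indices []types.ValidatorIndex) *ethpb.SyncAggregate {
	// Mark every sync committee participant as having contributed.
	bits := bitfield.NewBitvector512()
	for i := range bits {
		bits[i] = 0xff
	}
	sigs := make([]bls.Signature, len(indices))
	for i, idx := range indices {
		// Each member signs the previous block root under DomainSyncCommittee.
		msg := p2pType.SSZBytes(prevBlockRoot[:])
		sb, err := helpers.ComputeDomainAndSign(st, helpers.CurrentEpoch(st), &msg, params.BeaconConfig().DomainSyncCommittee, privKeys[idx])
		require.NoError(t, err)
		sig, err := bls.SignatureFromBytes(sb)
		require.NoError(t, err)
		sigs[i] = sig
	}
	return &ethpb.SyncAggregate{
		SyncCommitteeBits:      bits,
		SyncCommitteeSignature: bls.AggregateSignatures(sigs).Marshal(),
	}
}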


@@ -19,7 +19,7 @@ var SkipSlotCache = cache.NewSkipSlotCache()
 // The key for skip slot cache is mixed between state root and state slot.
 // state root is in the mix to defend against different forks with same skip slots
 // to hit the same cache. We don't want beacon states mixed up between different chains.
-func cacheKey(ctx context.Context, state state.ReadOnlyBeaconState) ([32]byte, error) {
+func CacheKey(ctx context.Context, state state.ReadOnlyBeaconState) ([32]byte, error) {
 	bh := state.LatestBlockHeader()
 	if bh == nil {
 		return [32]byte{}, errors.New("block head in state can't be nil")
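
The hunk above only shows the renamed signature and the nil-header guard; the body that actually mixes slot and state root is truncated. For intuition, a self-contained sketch of one way such a key could be derived (standard library only, using encoding/binary and crypto/sha256; this is not the real implementation):

// exampleCacheKey is illustrative only. It mixes the state's slot with the
// latest block header's state root, so two forks that skip the same slots
// still map to different cache entries.
func exampleCacheKey(st state.ReadOnlyBeaconState) [32]byte {
	var slotBytes [8]byte
	binary.LittleEndian.PutUint64(slotBytes[:], uint64(st.Slot()))
	return sha256.Sum256(append(slotBytes[:], st.LatestBlockHeader().StateRoot...))
}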


@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/state" "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil" "github.com/prysmaticlabs/prysm/shared/testutil"
@@ -42,7 +41,7 @@ func TestGenesisBeaconState_OK(t *testing.T) {
// Misc fields checks. // Misc fields checks.
assert.Equal(t, types.Slot(0), newState.Slot(), "Slot was not correctly initialized") assert.Equal(t, types.Slot(0), newState.Slot(), "Slot was not correctly initialized")
if !proto.Equal(newState.Fork(), &statepb.Fork{ if !proto.Equal(newState.Fork(), &ethpb.Fork{
PreviousVersion: genesisForkVersion, PreviousVersion: genesisForkVersion,
CurrentVersion: genesisForkVersion, CurrentVersion: genesisForkVersion,
Epoch: genesisEpoch, Epoch: genesisEpoch,
@@ -76,10 +75,10 @@ func TestGenesisBeaconState_OK(t *testing.T) {
assert.DeepEqual(t, make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector), newState.Slashings(), "Slashings was not correctly initialized") assert.DeepEqual(t, make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector), newState.Slashings(), "Slashings was not correctly initialized")
currAtt, err := newState.CurrentEpochAttestations() currAtt, err := newState.CurrentEpochAttestations()
require.NoError(t, err) require.NoError(t, err)
assert.DeepSSZEqual(t, []*statepb.PendingAttestation{}, currAtt, "CurrentEpochAttestations was not correctly initialized") assert.DeepSSZEqual(t, []*ethpb.PendingAttestation{}, currAtt, "CurrentEpochAttestations was not correctly initialized")
prevAtt, err := newState.CurrentEpochAttestations() prevAtt, err := newState.CurrentEpochAttestations()
require.NoError(t, err) require.NoError(t, err)
assert.DeepSSZEqual(t, []*statepb.PendingAttestation{}, prevAtt, "PreviousEpochAttestations was not correctly initialized") assert.DeepSSZEqual(t, []*ethpb.PendingAttestation{}, prevAtt, "PreviousEpochAttestations was not correctly initialized")
zeroHash := params.BeaconConfig().ZeroHash[:] zeroHash := params.BeaconConfig().ZeroHash[:]
// History root checks. // History root checks.


@@ -11,6 +11,7 @@ import (
 	"github.com/pkg/errors"
 	types "github.com/prysmaticlabs/eth2-types"
 	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
 	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
 	e "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
@@ -21,6 +22,7 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/mathutil"
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/traceutil"
+	"github.com/prysmaticlabs/prysm/shared/version"
 	"go.opencensus.io/trace"
 )
@@ -221,7 +223,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
 	}
 	highestSlot := state.Slot()
-	key, err := cacheKey(ctx, state)
+	key, err := CacheKey(ctx, state)
 	if err != nil {
 		return nil, err
 	}
@@ -272,16 +274,35 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
 			return nil, errors.Wrap(err, "could not process slot")
 		}
 		if CanProcessEpoch(state) {
-			state, err = ProcessEpochPrecompute(ctx, state)
-			if err != nil {
-				traceutil.AnnotateError(span, err)
-				return nil, errors.Wrap(err, "could not process epoch with optimizations")
+			switch state.Version() {
+			case version.Phase0:
+				state, err = ProcessEpochPrecompute(ctx, state)
+				if err != nil {
+					traceutil.AnnotateError(span, err)
+					return nil, errors.Wrap(err, "could not process epoch with optimizations")
+				}
+			case version.Altair:
+				state, err = altair.ProcessEpoch(ctx, state)
+				if err != nil {
+					traceutil.AnnotateError(span, err)
+					return nil, errors.Wrap(err, "could not process epoch")
+				}
+			default:
+				return nil, errors.New("beacon state should have a version")
 			}
 		}
 		if err := state.SetSlot(state.Slot() + 1); err != nil {
 			traceutil.AnnotateError(span, err)
 			return nil, errors.Wrap(err, "failed to increment state slot")
 		}
+		// Transition to Altair state.
+		if helpers.IsEpochStart(state.Slot()) && helpers.SlotToEpoch(state.Slot()) == params.BeaconConfig().AltairForkEpoch {
+			state, err = altair.UpgradeToAltair(ctx, state)
+			if err != nil {
+				return nil, err
+			}
+		}
 	}
 	if highestSlot < state.Slot() {
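
The last added block is the only place the Phase 0 to Altair state upgrade happens: it fires exactly when the slot just reached is the first slot of AltairForkEpoch. A minimal restatement of that boundary condition (the function name is ours; with, say, AltairForkEpoch = 5 and 32 slots per epoch it is true only at slot 160):

// isAltairUpgradeSlot is an illustrative predicate extracted from the fork
// check above; it is not a function in this package.
func isAltairUpgradeSlot(slot types.Slot) bool {
	return helpers.IsEpochStart(slot) && helpers.SlotToEpoch(slot) == params.BeaconConfig().AltairForkEpoch
}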


@@ -6,6 +6,7 @@ import (
 	"fmt"

 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
 	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/state/interop"
@@ -15,6 +16,7 @@ import (
 	"github.com/prysmaticlabs/prysm/shared/bls"
 	"github.com/prysmaticlabs/prysm/shared/featureconfig"
 	"github.com/prysmaticlabs/prysm/shared/traceutil"
+	"github.com/prysmaticlabs/prysm/shared/version"
 	"go.opencensus.io/trace"
 )
@@ -150,6 +152,16 @@ func CalculateStateRoot(
 	if err != nil {
 		return [32]byte{}, errors.Wrap(err, "could not process block")
 	}
+	if signed.Version() == version.Altair {
+		sa, err := signed.Block().Body().SyncAggregate()
+		if err != nil {
+			return [32]byte{}, err
+		}
+		state, err = altair.ProcessSyncAggregate(state, sa)
+		if err != nil {
+			return [32]byte{}, err
+		}
+	}
 	return state.HashTreeRoot(ctx)
 }
@@ -182,6 +194,16 @@ func ProcessBlockNoVerifyAnySig(
 	if err != nil {
 		return nil, nil, err
 	}
+	if signed.Version() == version.Altair {
+		sa, err := signed.Block().Body().SyncAggregate()
+		if err != nil {
+			return nil, nil, err
+		}
+		state, err = altair.ProcessSyncAggregate(state, sa)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
 	bSet, err := b.BlockSignatureSet(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
 	if err != nil {
@@ -240,22 +262,22 @@ func ProcessOperationsNoVerifyAttsSigs(
 		return nil, errors.Wrap(err, "could not verify operation lengths")
 	}
-	state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not process block proposer slashings")
-	}
-	state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not process block attester slashings")
-	}
-	state, err = b.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not process block attestations")
-	}
-	state, err = b.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits())
-	if err != nil {
-		return nil, errors.Wrap(err, "could not process block validator deposits")
+	var err error
+	switch signedBeaconBlock.Version() {
+	case version.Phase0:
+		state, err = phase0Operations(ctx, state, signedBeaconBlock)
+		if err != nil {
+			return nil, err
+		}
+	case version.Altair:
+		state, err = altairOperations(ctx, state, signedBeaconBlock)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		return nil, errors.New("block does not have correct version")
 	}
 	state, err = b.ProcessVoluntaryExits(ctx, state, signedBeaconBlock.Block().Body().VoluntaryExits())
 	if err != nil {
 		return nil, errors.Wrap(err, "could not process validator exits")
@@ -309,3 +331,43 @@ func ProcessBlockForStateRoot(
 	return state, nil
 }
+
+// This calls altair specific block operations.
+func altairOperations(
+	ctx context.Context,
+	state state.BeaconState,
+	signedBeaconBlock block.SignedBeaconBlock) (state.BeaconState, error) {
+	state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), altair.SlashValidator)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not process block proposer slashings")
+	}
+	state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), altair.SlashValidator)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not process block attester slashings")
+	}
+	state, err = altair.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not process block attestations")
+	}
+	return altair.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits())
+}
+
+// This calls phase 0 specific block operations.
+func phase0Operations(
+	ctx context.Context,
+	state state.BeaconStateAltair,
+	signedBeaconBlock block.SignedBeaconBlock) (state.BeaconState, error) {
+	state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not process block proposer slashings")
+	}
+	state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not process block attester slashings")
+	}
+	state, err = b.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not process block attestations")
+	}
+	return b.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits())
+}


@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/attestationutil" "github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/bls"
@@ -33,7 +32,7 @@ func init() {
} }
func TestExecuteStateTransition_IncorrectSlot(t *testing.T) { func TestExecuteStateTransition_IncorrectSlot(t *testing.T) {
base := &statepb.BeaconState{ base := &ethpb.BeaconState{
Slot: 5, Slot: 5,
} }
beaconState, err := v1.InitializeFromProto(base) beaconState, err := v1.InitializeFromProto(base)
@@ -243,7 +242,7 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
cp := beaconState.CurrentJustifiedCheckpoint() cp := beaconState.CurrentJustifiedCheckpoint()
cp.Root = []byte("hello-world") cp.Root = []byte("hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp)) require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{})) require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
_, err = core.VerifyOperationLengths(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block)) _, err = core.VerifyOperationLengths(context.Background(), beaconState, wrapper.WrappedPhase0SignedBeaconBlock(block))
wanted := "number of voluntary exits (17) in block body exceeds allowed threshold of 16" wanted := "number of voluntary exits (17) in block body exceeds allowed threshold of 16"
assert.ErrorContains(t, wanted, err) assert.ErrorContains(t, wanted, err)
@@ -269,7 +268,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
copy(mockRoot[:], "hello-world") copy(mockRoot[:], "hello-world")
cp.Root = mockRoot[:] cp.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp)) require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{})) require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
proposerSlashIdx := types.ValidatorIndex(3) proposerSlashIdx := types.ValidatorIndex(3)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
@@ -445,9 +444,9 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) {
func TestProcessEpochPrecompute_CanProcess(t *testing.T) { func TestProcessEpochPrecompute_CanProcess(t *testing.T) {
epoch := types.Epoch(1) epoch := types.Epoch(1)
atts := []*statepb.PendingAttestation{{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}, InclusionDelay: 1}} atts := []*ethpb.PendingAttestation{{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}, InclusionDelay: 1}}
slashing := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector) slashing := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
base := &statepb.BeaconState{ base := &ethpb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)) + 1, Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)) + 1,
BlockRoots: make([][]byte, 128), BlockRoots: make([][]byte, 128),
Slashings: slashing, Slashings: slashing,
@@ -489,7 +488,7 @@ func BenchmarkProcessBlk_65536Validators_FullBlock(b *testing.B) {
randaoMixes[i] = params.BeaconConfig().ZeroHash[:] randaoMixes[i] = params.BeaconConfig().ZeroHash[:]
} }
base := &statepb.BeaconState{ base := &ethpb.BeaconState{
Slot: 20, Slot: 20,
LatestBlockHeader: &ethpb.BeaconBlockHeader{}, LatestBlockHeader: &ethpb.BeaconBlockHeader{},
BlockRoots: make([][]byte, 254), BlockRoots: make([][]byte, 254),
@@ -500,7 +499,7 @@ func BenchmarkProcessBlk_65536Validators_FullBlock(b *testing.B) {
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{ CurrentJustifiedCheckpoint: &ethpb.Checkpoint{
Root: []byte("hello-world"), Root: []byte("hello-world"),
}, },
Fork: &statepb.Fork{ Fork: &ethpb.Fork{
PreviousVersion: []byte{0, 0, 0, 0}, PreviousVersion: []byte{0, 0, 0, 0},
CurrentVersion: []byte{0, 0, 0, 0}, CurrentVersion: []byte{0, 0, 0, 0},
}, },
@@ -567,7 +566,7 @@ func BenchmarkProcessBlk_65536Validators_FullBlock(b *testing.B) {
binary.LittleEndian.PutUint64(buf, 0) binary.LittleEndian.PutUint64(buf, 0)
domain, err := helpers.Domain(s.Fork(), 0, params.BeaconConfig().DomainRandao, s.GenesisValidatorRoot()) domain, err := helpers.Domain(s.Fork(), 0, params.BeaconConfig().DomainRandao, s.GenesisValidatorRoot())
require.NoError(b, err) require.NoError(b, err)
ctr := &statepb.SigningData{ ctr := &ethpb.SigningData{
ObjectRoot: buf, ObjectRoot: buf,
Domain: domain, Domain: domain,
} }
@@ -730,7 +729,7 @@ func TestCanProcessEpoch_TrueOnEpochs(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
b := &statepb.BeaconState{Slot: tt.slot} b := &ethpb.BeaconState{Slot: tt.slot}
s, err := v1.InitializeFromProto(b) s, err := v1.InitializeFromProto(b)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tt.canProcessEpoch, core.CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot) assert.Equal(t, tt.canProcessEpoch, core.CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
@@ -797,7 +796,7 @@ func TestProcessBlock_OverMaxVoluntaryExits(t *testing.T) {
} }
func TestProcessBlock_IncorrectDeposits(t *testing.T) { func TestProcessBlock_IncorrectDeposits(t *testing.T) {
base := &statepb.BeaconState{ base := &ethpb.BeaconState{
Eth1Data: &ethpb.Eth1Data{DepositCount: 100}, Eth1Data: &ethpb.Eth1Data{DepositCount: 100},
Eth1DepositIndex: 98, Eth1DepositIndex: 98,
} }
@@ -818,7 +817,7 @@ func TestProcessBlock_IncorrectDeposits(t *testing.T) {
func TestProcessSlots_SameSlotAsParentState(t *testing.T) { func TestProcessSlots_SameSlotAsParentState(t *testing.T) {
slot := types.Slot(2) slot := types.Slot(2)
parentState, err := v1.InitializeFromProto(&statepb.BeaconState{Slot: slot}) parentState, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: slot})
require.NoError(t, err) require.NoError(t, err)
_, err = core.ProcessSlots(context.Background(), parentState, slot) _, err = core.ProcessSlots(context.Background(), parentState, slot)
@@ -827,7 +826,7 @@ func TestProcessSlots_SameSlotAsParentState(t *testing.T) {
func TestProcessSlots_LowerSlotAsParentState(t *testing.T) { func TestProcessSlots_LowerSlotAsParentState(t *testing.T) {
slot := types.Slot(2) slot := types.Slot(2)
parentState, err := v1.InitializeFromProto(&statepb.BeaconState{Slot: slot}) parentState, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: slot})
require.NoError(t, err) require.NoError(t, err)
_, err = core.ProcessSlots(context.Background(), parentState, slot-1) _, err = core.ProcessSlots(context.Background(), parentState, slot-1)


@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert" "github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require" "github.com/prysmaticlabs/prysm/shared/testutil/require"
@@ -41,7 +40,7 @@ func TestHasVoted_OK(t *testing.T) {
func TestInitiateValidatorExit_AlreadyExited(t *testing.T) { func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
exitEpoch := types.Epoch(199) exitEpoch := types.Epoch(199)
base := &statepb.BeaconState{Validators: []*ethpb.Validator{{ base := &ethpb.BeaconState{Validators: []*ethpb.Validator{{
ExitEpoch: exitEpoch}, ExitEpoch: exitEpoch},
}} }}
state, err := v1.InitializeFromProto(base) state, err := v1.InitializeFromProto(base)
@@ -56,7 +55,7 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
func TestInitiateValidatorExit_ProperExit(t *testing.T) { func TestInitiateValidatorExit_ProperExit(t *testing.T) {
exitedEpoch := types.Epoch(100) exitedEpoch := types.Epoch(100)
idx := types.ValidatorIndex(3) idx := types.ValidatorIndex(3)
base := &statepb.BeaconState{Validators: []*ethpb.Validator{ base := &ethpb.BeaconState{Validators: []*ethpb.Validator{
{ExitEpoch: exitedEpoch}, {ExitEpoch: exitedEpoch},
{ExitEpoch: exitedEpoch + 1}, {ExitEpoch: exitedEpoch + 1},
{ExitEpoch: exitedEpoch + 2}, {ExitEpoch: exitedEpoch + 2},
@@ -74,7 +73,7 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) { func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
exitedEpoch := types.Epoch(100) exitedEpoch := types.Epoch(100)
idx := types.ValidatorIndex(4) idx := types.ValidatorIndex(4)
base := &statepb.BeaconState{Validators: []*ethpb.Validator{ base := &ethpb.BeaconState{Validators: []*ethpb.Validator{
{ExitEpoch: exitedEpoch + 2}, {ExitEpoch: exitedEpoch + 2},
{ExitEpoch: exitedEpoch + 2}, {ExitEpoch: exitedEpoch + 2},
{ExitEpoch: exitedEpoch + 2}, {ExitEpoch: exitedEpoch + 2},
@@ -110,7 +109,7 @@ func TestSlashValidator_OK(t *testing.T) {
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance) balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
} }
base := &statepb.BeaconState{ base := &ethpb.BeaconState{
Validators: registry, Validators: registry,
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector), Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
@@ -153,11 +152,11 @@ func TestSlashValidator_OK(t *testing.T) {
func TestActivatedValidatorIndices(t *testing.T) { func TestActivatedValidatorIndices(t *testing.T) {
tests := []struct { tests := []struct {
state *statepb.BeaconState state *ethpb.BeaconState
wanted []types.ValidatorIndex wanted []types.ValidatorIndex
}{ }{
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
ActivationEpoch: 0, ActivationEpoch: 0,
@@ -179,7 +178,7 @@ func TestActivatedValidatorIndices(t *testing.T) {
wanted: []types.ValidatorIndex{0, 1, 3}, wanted: []types.ValidatorIndex{0, 1, 3},
}, },
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
ActivationEpoch: helpers.ActivationExitEpoch(10), ActivationEpoch: helpers.ActivationExitEpoch(10),
@@ -189,7 +188,7 @@ func TestActivatedValidatorIndices(t *testing.T) {
wanted: []types.ValidatorIndex{}, wanted: []types.ValidatorIndex{},
}, },
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
ActivationEpoch: 0, ActivationEpoch: 0,
@@ -210,11 +209,11 @@ func TestActivatedValidatorIndices(t *testing.T) {
func TestSlashedValidatorIndices(t *testing.T) { func TestSlashedValidatorIndices(t *testing.T) {
tests := []struct { tests := []struct {
state *statepb.BeaconState state *ethpb.BeaconState
wanted []types.ValidatorIndex wanted []types.ValidatorIndex
}{ }{
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector, WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector,
@@ -233,7 +232,7 @@ func TestSlashedValidatorIndices(t *testing.T) {
wanted: []types.ValidatorIndex{0, 2}, wanted: []types.ValidatorIndex{0, 2},
}, },
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector, WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector,
@@ -243,7 +242,7 @@ func TestSlashedValidatorIndices(t *testing.T) {
wanted: []types.ValidatorIndex{}, wanted: []types.ValidatorIndex{},
}, },
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector, WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector,
@@ -264,11 +263,11 @@ func TestSlashedValidatorIndices(t *testing.T) {
func TestExitedValidatorIndices(t *testing.T) { func TestExitedValidatorIndices(t *testing.T) {
tests := []struct { tests := []struct {
state *statepb.BeaconState state *ethpb.BeaconState
wanted []types.ValidatorIndex wanted []types.ValidatorIndex
}{ }{
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -290,7 +289,7 @@ func TestExitedValidatorIndices(t *testing.T) {
wanted: []types.ValidatorIndex{0, 2}, wanted: []types.ValidatorIndex{0, 2},
}, },
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -302,7 +301,7 @@ func TestExitedValidatorIndices(t *testing.T) {
wanted: []types.ValidatorIndex{}, wanted: []types.ValidatorIndex{},
}, },
{ {
state: &statepb.BeaconState{ state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{ Validators: []*ethpb.Validator{
{ {
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library( go_library(
name = "go_default_library", name = "go_default_library",
srcs = [ srcs = [
"altair.go",
"archived_point.go", "archived_point.go",
"backup.go", "backup.go",
"blocks.go", "blocks.go",
@@ -40,6 +41,7 @@ go_library(
"//beacon-chain/state:go_default_library", "//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library", "//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/v1:go_default_library", "//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library", "//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library", "//proto/prysm/v1alpha1/block:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library", "//proto/prysm/v1alpha1/wrapper:go_default_library",
@@ -50,6 +52,7 @@ go_library(
"//shared/progressutil:go_default_library", "//shared/progressutil:go_default_library",
"//shared/sliceutil:go_default_library", "//shared/sliceutil:go_default_library",
"//shared/traceutil:go_default_library", "//shared/traceutil:go_default_library",
"//shared/version:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library", "@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library", "@com_github_ferranbt_fastssz//:go_default_library",
@@ -71,6 +74,7 @@ go_test(
srcs = [ srcs = [
"archived_point_test.go", "archived_point_test.go",
"backup_test.go", "backup_test.go",
"block_altair_test.go",
"blocks_test.go", "blocks_test.go",
"checkpoint_test.go", "checkpoint_test.go",
"deposit_contract_test.go", "deposit_contract_test.go",

View File

@@ -0,0 +1,11 @@
package kv
import "bytes"
// In order for an encoding to be Altair compatible, it must be prefixed with the Altair key.
func hasAltairKey(enc []byte) bool {
if len(altairKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(altairKey)], altairKey)
}
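For reference, a minimal standalone sketch of how this prefix check behaves. It reuses the same altairKey bytes declared in the kv keys file; the payload values are placeholders for compressed block or state encodings:

package main

import (
	"bytes"
	"fmt"
)

// Same value as the altairKey declared in the kv keys file.
var altairKey = []byte("altair")

// hasAltairKey reports whether enc starts with the Altair prefix and carries a payload.
func hasAltairKey(enc []byte) bool {
	if len(altairKey) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(altairKey)], altairKey)
}

func main() {
	payload := []byte{0x01, 0x02} // placeholder payload
	tagged := append(append([]byte{}, altairKey...), payload...)
	fmt.Println(hasAltairKey(tagged))    // true
	fmt.Println(hasAltairKey(payload))   // false
	fmt.Println(hasAltairKey(altairKey)) // false: the prefix alone has no payload
}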

View File

@@ -0,0 +1,534 @@
package kv
import (
"context"
"sort"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
v2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"google.golang.org/protobuf/proto"
)
func TestStore_SaveAltairBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
db := setupDB(t)
slot := types.Slot(20)
ctx := context.Background()
// First we save a previous block to ensure the cache max size is reached.
prevBlock := testutil.NewBeaconBlockAltair()
prevBlock.Block.Slot = slot - 1
prevBlock.Block.ParentRoot = bytesutil.PadTo([]byte{1, 2, 3}, 32)
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(prevBlock)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
block := testutil.NewBeaconBlockAltair()
block.Block.Slot = slot
block.Block.ParentRoot = bytesutil.PadTo([]byte{1, 2, 3}, 32)
// Even with a full cache, saving new blocks should not cause
// duplicated blocks in the DB.
for i := 0; i < 100; i++ {
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
}
f := filters.NewFilter().SetStartSlot(slot).SetEndSlot(slot)
retrieved, _, err := db.Blocks(ctx, f)
require.NoError(t, err)
assert.Equal(t, 1, len(retrieved))
// We reset the block cache size.
BlockCacheSize = 256
}
func TestStore_AltairBlocksCRUD(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
block := testutil.NewBeaconBlockAltair()
block.Block.Slot = 20
block.Block.ParentRoot = bytesutil.PadTo([]byte{1, 2, 3}, 32)
blockRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err)
retrievedBlock, err := db.Block(ctx, blockRoot)
require.NoError(t, err)
assert.DeepEqual(t, nil, retrievedBlock, "Expected nil block")
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
retrievedBlock, err = db.Block(ctx, blockRoot)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block, retrievedBlock.Proto()), "Wanted: %v, received: %v", block, retrievedBlock)
require.NoError(t, db.deleteBlock(ctx, blockRoot))
assert.Equal(t, false, db.HasBlock(ctx, blockRoot), "Expected block to have been deleted from the db")
}
func TestStore_AltairBlocksBatchDelete(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
numBlocks := 10
totalBlocks := make([]block.SignedBeaconBlock, numBlocks)
blockRoots := make([][32]byte, 0)
oddBlocks := make([]block.SignedBeaconBlock, 0)
for i := 0; i < len(totalBlocks); i++ {
b := testutil.NewBeaconBlockAltair()
b.Block.Slot = types.Slot(i)
b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
wb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
totalBlocks[i] = wb
if i%2 == 0 {
r, err := totalBlocks[i].Block().HashTreeRoot()
require.NoError(t, err)
blockRoots = append(blockRoots, r)
} else {
oddBlocks = append(oddBlocks, totalBlocks[i])
}
}
require.NoError(t, db.SaveBlocks(ctx, totalBlocks))
retrieved, _, err := db.Blocks(ctx, filters.NewFilter().SetParentRoot(bytesutil.PadTo([]byte("parent"), 32)))
require.NoError(t, err)
assert.Equal(t, numBlocks, len(retrieved), "Unexpected number of blocks received")
// We delete all even indexed blocks.
require.NoError(t, db.deleteBlocks(ctx, blockRoots))
// When we retrieve the data, only the odd indexed blocks should remain.
retrieved, _, err = db.Blocks(ctx, filters.NewFilter().SetParentRoot(bytesutil.PadTo([]byte("parent"), 32)))
require.NoError(t, err)
sort.Slice(retrieved, func(i, j int) bool {
return retrieved[i].Block().Slot() < retrieved[j].Block().Slot()
})
for i, block := range retrieved {
assert.Equal(t, true, proto.Equal(block.Proto(), oddBlocks[i].Proto()), "Wanted: %v, received: %v", block, oddBlocks[i])
}
}
func TestStore_AltairBlocksHandleZeroCase(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
numBlocks := 10
totalBlocks := make([]block.SignedBeaconBlock, numBlocks)
for i := 0; i < len(totalBlocks); i++ {
b := testutil.NewBeaconBlockAltair()
b.Block.Slot = types.Slot(i)
b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
wb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
totalBlocks[i] = wb
_, err = totalBlocks[i].Block().HashTreeRoot()
require.NoError(t, err)
}
require.NoError(t, db.SaveBlocks(ctx, totalBlocks))
zeroFilter := filters.NewFilter().SetStartSlot(0).SetEndSlot(0)
retrieved, _, err := db.Blocks(ctx, zeroFilter)
require.NoError(t, err)
assert.Equal(t, 1, len(retrieved), "Unexpected number of blocks received, expected one")
}
func TestStore_AltairBlocksHandleInvalidEndSlot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
numBlocks := 10
totalBlocks := make([]block.SignedBeaconBlock, numBlocks)
// Save blocks from slot 1 onwards.
for i := 0; i < len(totalBlocks); i++ {
b := testutil.NewBeaconBlockAltair()
b.Block.Slot = types.Slot(i) + 1
b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
wb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
totalBlocks[i] = wb
_, err = totalBlocks[i].Block().HashTreeRoot()
require.NoError(t, err)
}
require.NoError(t, db.SaveBlocks(ctx, totalBlocks))
badFilter := filters.NewFilter().SetStartSlot(5).SetEndSlot(1)
_, _, err := db.Blocks(ctx, badFilter)
require.ErrorContains(t, errInvalidSlotRange.Error(), err)
goodFilter := filters.NewFilter().SetStartSlot(0).SetEndSlot(1)
requested, _, err := db.Blocks(ctx, goodFilter)
require.NoError(t, err)
assert.Equal(t, 1, len(requested), "Unexpected number of blocks received, only expected one")
}
func TestStore_AltairBlocksCRUD_NoCache(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
block := testutil.NewBeaconBlockAltair()
block.Block.Slot = 20
block.Block.ParentRoot = bytesutil.PadTo([]byte{1, 2, 3}, 32)
blockRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err)
retrievedBlock, err := db.Block(ctx, blockRoot)
require.NoError(t, err)
require.DeepEqual(t, nil, retrievedBlock, "Expected nil block")
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
db.blockCache.Del(string(blockRoot[:]))
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
retrievedBlock, err = db.Block(ctx, blockRoot)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block, retrievedBlock.Proto()), "Wanted: %v, received: %v", block, retrievedBlock)
require.NoError(t, db.deleteBlock(ctx, blockRoot))
assert.Equal(t, false, db.HasBlock(ctx, blockRoot), "Expected block to have been deleted from the db")
}
func TestStore_AltairBlocks_FiltersCorrectly(t *testing.T) {
db := setupDB(t)
b4 := testutil.NewBeaconBlockAltair()
b4.Block.Slot = 4
b4.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
b5 := testutil.NewBeaconBlockAltair()
b5.Block.Slot = 5
b5.Block.ParentRoot = bytesutil.PadTo([]byte("parent2"), 32)
b6 := testutil.NewBeaconBlockAltair()
b6.Block.Slot = 6
b6.Block.ParentRoot = bytesutil.PadTo([]byte("parent2"), 32)
b7 := testutil.NewBeaconBlockAltair()
b7.Block.Slot = 7
b7.Block.ParentRoot = bytesutil.PadTo([]byte("parent3"), 32)
b8 := testutil.NewBeaconBlockAltair()
b8.Block.Slot = 8
b8.Block.ParentRoot = bytesutil.PadTo([]byte("parent4"), 32)
blocks := make([]block.SignedBeaconBlock, 0)
for _, b := range []*v2.SignedBeaconBlockAltair{b4, b5, b6, b7, b8} {
blk, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
blocks = append(blocks, blk)
}
ctx := context.Background()
require.NoError(t, db.SaveBlocks(ctx, blocks))
tests := []struct {
filter *filters.QueryFilter
expectedNumBlocks int
}{
{
filter: filters.NewFilter().SetParentRoot(bytesutil.PadTo([]byte("parent2"), 32)),
expectedNumBlocks: 2,
},
{
// No block meets the criteria below.
filter: filters.NewFilter().SetParentRoot(bytesutil.PadTo([]byte{3, 4, 5}, 32)),
expectedNumBlocks: 0,
},
{
// Block slot range filter criteria.
filter: filters.NewFilter().SetStartSlot(5).SetEndSlot(7),
expectedNumBlocks: 3,
},
{
filter: filters.NewFilter().SetStartSlot(7).SetEndSlot(7),
expectedNumBlocks: 1,
},
{
filter: filters.NewFilter().SetStartSlot(4).SetEndSlot(8),
expectedNumBlocks: 5,
},
{
filter: filters.NewFilter().SetStartSlot(4).SetEndSlot(5),
expectedNumBlocks: 2,
},
{
filter: filters.NewFilter().SetStartSlot(5).SetEndSlot(9),
expectedNumBlocks: 4,
},
{
filter: filters.NewFilter().SetEndSlot(7),
expectedNumBlocks: 4,
},
{
filter: filters.NewFilter().SetEndSlot(8),
expectedNumBlocks: 5,
},
{
filter: filters.NewFilter().SetStartSlot(5).SetEndSlot(10),
expectedNumBlocks: 4,
},
{
// Composite filter criteria.
filter: filters.NewFilter().
SetParentRoot(bytesutil.PadTo([]byte("parent2"), 32)).
SetStartSlot(6).
SetEndSlot(8),
expectedNumBlocks: 1,
},
}
for _, tt := range tests {
retrievedBlocks, _, err := db.Blocks(ctx, tt.filter)
require.NoError(t, err)
assert.Equal(t, tt.expectedNumBlocks, len(retrievedBlocks), "Unexpected number of blocks")
}
}
func TestStore_AltairBlocks_VerifyBlockRoots(t *testing.T) {
ctx := context.Background()
db := setupDB(t)
b1 := testutil.NewBeaconBlockAltair()
b1.Block.Slot = 1
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b2 := testutil.NewBeaconBlockAltair()
b2.Block.Slot = 2
r2, err := b2.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*v2.SignedBeaconBlockAltair{b1, b2} {
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
}
filter := filters.NewFilter().SetStartSlot(b1.Block.Slot).SetEndSlot(b2.Block.Slot)
roots, err := db.BlockRoots(ctx, filter)
require.NoError(t, err)
assert.DeepEqual(t, [][32]byte{r1, r2}, roots)
}
func TestStore_AltairBlocks_Retrieve_SlotRange(t *testing.T) {
db := setupDB(t)
totalBlocks := make([]block.SignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
b := testutil.NewBeaconBlockAltair()
b.Block.Slot = types.Slot(i)
b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
wb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
totalBlocks[i] = wb
}
ctx := context.Background()
require.NoError(t, db.SaveBlocks(ctx, totalBlocks))
retrieved, _, err := db.Blocks(ctx, filters.NewFilter().SetStartSlot(100).SetEndSlot(399))
require.NoError(t, err)
assert.Equal(t, 300, len(retrieved))
}
func TestStore_AltairBlocks_Retrieve_Epoch(t *testing.T) {
db := setupDB(t)
slots := params.BeaconConfig().SlotsPerEpoch.Mul(7)
totalBlocks := make([]block.SignedBeaconBlock, slots)
for i := types.Slot(0); i < slots; i++ {
b := testutil.NewBeaconBlockAltair()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
wb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
totalBlocks[i] = wb
}
ctx := context.Background()
require.NoError(t, db.SaveBlocks(ctx, totalBlocks))
retrieved, _, err := db.Blocks(ctx, filters.NewFilter().SetStartEpoch(5).SetEndEpoch(6))
require.NoError(t, err)
want := params.BeaconConfig().SlotsPerEpoch.Mul(2)
assert.Equal(t, uint64(want), uint64(len(retrieved)))
retrieved, _, err = db.Blocks(ctx, filters.NewFilter().SetStartEpoch(0).SetEndEpoch(0))
require.NoError(t, err)
want = params.BeaconConfig().SlotsPerEpoch
assert.Equal(t, uint64(want), uint64(len(retrieved)))
}
func TestStore_AltairBlocks_Retrieve_SlotRangeWithStep(t *testing.T) {
db := setupDB(t)
totalBlocks := make([]block.SignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
b := testutil.NewBeaconBlockAltair()
b.Block.Slot = types.Slot(i)
b.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
wb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
totalBlocks[i] = wb
}
const step = 2
ctx := context.Background()
require.NoError(t, db.SaveBlocks(ctx, totalBlocks))
retrieved, _, err := db.Blocks(ctx, filters.NewFilter().SetStartSlot(100).SetEndSlot(399).SetSlotStep(step))
require.NoError(t, err)
assert.Equal(t, 150, len(retrieved))
for _, b := range retrieved {
assert.Equal(t, types.Slot(0), (b.Block().Slot()-100)%step, "Unexpected block slot %d", b.Block().Slot())
}
}
func TestStore_SaveAltairBlock_CanGetHighestAt(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
block1 := testutil.NewBeaconBlockAltair()
block1.Block.Slot = 1
block2 := testutil.NewBeaconBlockAltair()
block2.Block.Slot = 10
block3 := testutil.NewBeaconBlockAltair()
block3.Block.Slot = 100
for _, b := range []*v2.SignedBeaconBlockAltair{block1, block2, block3} {
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
}
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
require.NoError(t, err)
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
assert.Equal(t, true, proto.Equal(block1, highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 11)
require.NoError(t, err)
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
assert.Equal(t, true, proto.Equal(block2, highestAt[0].Proto()), "Wanted: %v, received: %v", block2, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 101)
require.NoError(t, err)
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
assert.Equal(t, true, proto.Equal(block3, highestAt[0].Proto()), "Wanted: %v, received: %v", block3, highestAt[0])
r3, err := block3.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.deleteBlock(ctx, r3))
highestAt, err = db.HighestSlotBlocksBelow(ctx, 101)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block2, highestAt[0].Proto()), "Wanted: %v, received: %v", block2, highestAt[0])
}
func TestStore_GenesisAltairBlock_CanGetHighestAt(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
genesisBlock := testutil.NewBeaconBlockAltair()
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisRoot))
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(genesisBlock)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
block1 := testutil.NewBeaconBlockAltair()
block1.Block.Slot = 1
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block1)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block1, highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 1)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock, highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
highestAt, err = db.HighestSlotBlocksBelow(ctx, 0)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock, highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
}
func TestStore_SaveAltairBlocks_HasCachedBlocks(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
var err error
b := make([]block.SignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
blk := testutil.NewBeaconBlockAltair()
blk.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
blk.Block.Slot = types.Slot(i)
b[i], err = wrapper.WrappedAltairSignedBeaconBlock(blk)
require.NoError(t, err)
}
require.NoError(t, db.SaveBlock(ctx, b[0]))
require.NoError(t, db.SaveBlocks(ctx, b))
f := filters.NewFilter().SetStartSlot(0).SetEndSlot(500)
blks, _, err := db.Blocks(ctx, f)
require.NoError(t, err)
assert.Equal(t, 500, len(blks), "Did not get wanted blocks")
}
func TestStore_SaveAltairBlocks_HasRootsMatched(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
var err error
b := make([]block.SignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
blk := testutil.NewBeaconBlockAltair()
blk.Block.ParentRoot = bytesutil.PadTo([]byte("parent"), 32)
blk.Block.Slot = types.Slot(i)
b[i], err = wrapper.WrappedAltairSignedBeaconBlock(blk)
require.NoError(t, err)
}
require.NoError(t, db.SaveBlocks(ctx, b))
f := filters.NewFilter().SetStartSlot(0).SetEndSlot(500)
blks, roots, err := db.Blocks(ctx, f)
require.NoError(t, err)
assert.Equal(t, 500, len(blks), "Did not get wanted blocks")
for i, blk := range blks {
rt, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, roots[i], rt, "mismatch of block roots")
}
}
func TestStore_AltairBlocksBySlot_BlockRootsBySlot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b1 := testutil.NewBeaconBlockAltair()
b1.Block.Slot = 20
b2 := testutil.NewBeaconBlockAltair()
b2.Block.Slot = 100
b2.Block.ParentRoot = bytesutil.PadTo([]byte("parent1"), 32)
b3 := testutil.NewBeaconBlockAltair()
b3.Block.Slot = 100
b3.Block.ParentRoot = bytesutil.PadTo([]byte("parent2"), 32)
for _, b := range []*v2.SignedBeaconBlockAltair{b1, b2, b3} {
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
}
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
r2, err := b2.Block.HashTreeRoot()
require.NoError(t, err)
r3, err := b3.Block.HashTreeRoot()
require.NoError(t, err)
hasBlocks, retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
assert.Equal(t, false, hasBlocks, "Expected no blocks")
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(b1, retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b1, retrievedBlocks[0])
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(b2, retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
assert.Equal(t, true, proto.Equal(b3, retrievedBlocks[1].Proto()), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)
require.NoError(t, err)
assert.DeepEqual(t, [][32]byte{}, retrievedBlockRoots)
assert.Equal(t, false, hasBlockRoots, "Expected no block roots")
hasBlockRoots, retrievedBlockRoots, err = db.BlockRootsBySlot(ctx, 20)
require.NoError(t, err)
assert.DeepEqual(t, [][32]byte{r1}, retrievedBlockRoots)
assert.Equal(t, true, hasBlockRoots, "Expected block roots")
hasBlockRoots, retrievedBlockRoots, err = db.BlockRootsBySlot(ctx, 100)
require.NoError(t, err)
assert.DeepEqual(t, [][32]byte{r2, r3}, retrievedBlockRoots)
assert.Equal(t, true, hasBlockRoots, "Expected block roots")
}

View File

@@ -5,6 +5,7 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/golang/snappy"
"github.com/pkg/errors" "github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types" types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -15,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil" "github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/prysmaticlabs/prysm/shared/version"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace" "go.opencensus.io/trace"
) )
@@ -30,24 +32,25 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (block.SignedBeac
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok { if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
return v.(block.SignedBeaconBlock), nil return v.(block.SignedBeaconBlock), nil
} }
var block *ethpb.SignedBeaconBlock var block block.SignedBeaconBlock
err := s.db.View(func(tx *bolt.Tx) error { err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket) bkt := tx.Bucket(blocksBucket)
enc := bkt.Get(blockRoot[:]) enc := bkt.Get(blockRoot[:])
if enc == nil { if enc == nil {
return nil return nil
} }
block = &ethpb.SignedBeaconBlock{} var err error
return decode(ctx, enc, block) block, err = unmarshalBlock(ctx, enc)
return err
}) })
return wrapper.WrappedPhase0SignedBeaconBlock(block), err return block, err
} }
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain. // HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) { func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock") ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
defer span.End() defer span.End()
var headBlock *ethpb.SignedBeaconBlock var headBlock block.SignedBeaconBlock
err := s.db.View(func(tx *bolt.Tx) error { err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket) bkt := tx.Bucket(blocksBucket)
headRoot := bkt.Get(headBlockRootKey) headRoot := bkt.Get(headBlockRootKey)
@@ -58,10 +61,11 @@ func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error)
if enc == nil { if enc == nil {
return nil return nil
} }
headBlock = &ethpb.SignedBeaconBlock{} var err error
return decode(ctx, enc, headBlock) headBlock, err = unmarshalBlock(ctx, enc)
return err
}) })
return wrapper.WrappedPhase0SignedBeaconBlock(headBlock), err return headBlock, err
} }
// Blocks retrieves a list of beacon blocks and its respective roots by filter criteria. // Blocks retrieves a list of beacon blocks and its respective roots by filter criteria.
@@ -81,11 +85,11 @@ func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]block.Sig
for i := 0; i < len(keys); i++ { for i := 0; i < len(keys); i++ {
encoded := bkt.Get(keys[i]) encoded := bkt.Get(keys[i])
block := &ethpb.SignedBeaconBlock{} block, err := unmarshalBlock(ctx, encoded)
if err := decode(ctx, encoded, block); err != nil { if err != nil {
return err return err
} }
blocks = append(blocks, wrapper.WrappedPhase0SignedBeaconBlock(block)) blocks = append(blocks, block)
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i])) blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
} }
return nil return nil
@@ -153,11 +157,11 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []bloc
for i := 0; i < len(keys); i++ { for i := 0; i < len(keys); i++ {
encoded := bkt.Get(keys[i]) encoded := bkt.Get(keys[i])
block := &ethpb.SignedBeaconBlock{} block, err := unmarshalBlock(ctx, encoded)
if err := decode(ctx, encoded, block); err != nil { if err != nil {
return err return err
} }
blocks = append(blocks, wrapper.WrappedPhase0SignedBeaconBlock(block)) blocks = append(blocks, block)
} }
return nil return nil
}) })
@@ -196,11 +200,11 @@ func (s *Store) deleteBlock(ctx context.Context, blockRoot [32]byte) error {
if enc == nil { if enc == nil {
return nil return nil
} }
block := &ethpb.SignedBeaconBlock{} block, err := unmarshalBlock(ctx, enc)
if err := decode(ctx, enc, block); err != nil { if err != nil {
return err return err
} }
indicesByBucket := createBlockIndicesFromBlock(ctx, wrapper.WrappedPhase0BeaconBlock(block.Block)) indicesByBucket := createBlockIndicesFromBlock(ctx, block.Block())
if err := deleteValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil { if err := deleteValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil {
return errors.Wrap(err, "could not delete root for DB indices") return errors.Wrap(err, "could not delete root for DB indices")
} }
@@ -221,11 +225,11 @@ func (s *Store) deleteBlocks(ctx context.Context, blockRoots [][32]byte) error {
if enc == nil { if enc == nil {
return nil return nil
} }
block := &ethpb.SignedBeaconBlock{} block, err := unmarshalBlock(ctx, enc)
if err := decode(ctx, enc, block); err != nil { if err != nil {
return err return err
} }
indicesByBucket := createBlockIndicesFromBlock(ctx, wrapper.WrappedPhase0BeaconBlock(block.Block)) indicesByBucket := createBlockIndicesFromBlock(ctx, block.Block())
if err := deleteValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil { if err := deleteValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil {
return errors.Wrap(err, "could not delete root for DB indices") return errors.Wrap(err, "could not delete root for DB indices")
} }
@@ -269,7 +273,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blocks []block.SignedBeaconBlock
if existingBlock := bkt.Get(blockRoot[:]); existingBlock != nil { if existingBlock := bkt.Get(blockRoot[:]); existingBlock != nil {
continue continue
} }
enc, err := encode(ctx, block.Proto()) enc, err := marshalBlock(ctx, block)
if err != nil { if err != nil {
return err return err
} }
@@ -307,7 +311,7 @@ func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
func (s *Store) GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, error) { func (s *Store) GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlock") ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlock")
defer span.End() defer span.End()
var block *ethpb.SignedBeaconBlock var block block.SignedBeaconBlock
err := s.db.View(func(tx *bolt.Tx) error { err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket) bkt := tx.Bucket(blocksBucket)
root := bkt.Get(genesisBlockRootKey) root := bkt.Get(genesisBlockRootKey)
@@ -315,10 +319,11 @@ func (s *Store) GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, erro
if enc == nil { if enc == nil {
return nil return nil
} }
block = &ethpb.SignedBeaconBlock{} var err error
return decode(ctx, enc, block) block, err = unmarshalBlock(ctx, enc)
return err
}) })
return wrapper.WrappedPhase0SignedBeaconBlock(block), err return block, err
} }
// SaveGenesisBlockRoot to the db. // SaveGenesisBlockRoot to the db.
@@ -579,3 +584,46 @@ func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter)
} }
return indicesByBucket, nil return indicesByBucket, nil
} }
// unmarshalBlock decodes stored block bytes (snappy-compressed SSZ) into the appropriate versioned beacon block type.
func unmarshalBlock(ctx context.Context, enc []byte) (block.SignedBeaconBlock, error) {
var err error
enc, err = snappy.Decode(nil, enc)
if err != nil {
return nil, err
}
switch {
case hasAltairKey(enc):
// Unmarshal block bytes into an Altair beacon block.
rawBlock := &ethpb.SignedBeaconBlockAltair{}
err := rawBlock.UnmarshalSSZ(enc[len(altairKey):])
if err != nil {
return nil, err
}
return wrapper.WrappedAltairSignedBeaconBlock(rawBlock)
default:
// Unmarshal block bytes into a phase 0 beacon block.
rawBlock := &ethpb.SignedBeaconBlock{}
err = rawBlock.UnmarshalSSZ(enc)
if err != nil {
return nil, err
}
return wrapper.WrappedPhase0SignedBeaconBlock(rawBlock), nil
}
}
// marshalBlock encodes a versioned beacon block into snappy-compressed bytes for storage, prefixing the Altair key for Altair blocks.
func marshalBlock(ctx context.Context, blk block.SignedBeaconBlock) ([]byte, error) {
obj, err := blk.MarshalSSZ()
if err != nil {
return nil, err
}
switch blk.Version() {
case version.Altair:
return snappy.Encode(nil, append(altairKey, obj...)), nil
case version.Phase0:
return snappy.Encode(nil, obj), nil
default:
return nil, errors.New("unknown block version")
}
}
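The two helpers above form a round trip: marshalBlock prepends altairKey to the SSZ bytes before snappy compression, and unmarshalBlock strips it after decompression before picking the block type. A rough, self-contained sketch of that pattern, with placeholder payloads instead of real SSZ-encoded blocks:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

var altairKey = []byte("altair")

// encodeVersioned snappy-compresses raw bytes, prepending the Altair key when tagging.
func encodeVersioned(raw []byte, isAltair bool) []byte {
	if isAltair {
		return snappy.Encode(nil, append(append([]byte{}, altairKey...), raw...))
	}
	return snappy.Encode(nil, raw)
}

// decodeVersioned decompresses, reports whether the payload was Altair-tagged,
// and returns the payload with the key stripped.
func decodeVersioned(enc []byte) ([]byte, bool, error) {
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, false, err
	}
	if len(dec) > len(altairKey) && bytes.Equal(dec[:len(altairKey)], altairKey) {
		return dec[len(altairKey):], true, nil
	}
	return dec, false, nil
}

func main() {
	enc := encodeVersioned([]byte{0xAA, 0xBB}, true) // placeholder bytes, not real SSZ
	payload, isAltair, err := decodeVersioned(enc)
	fmt.Println(payload, isAltair, err) // [170 187] true <nil>
}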

View File

@@ -7,7 +7,6 @@ import (
types "github.com/prysmaticlabs/eth2-types" types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters" "github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -57,7 +56,7 @@ func TestStore_BlocksCRUD(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
retrievedBlock, err := db.Block(ctx, blockRoot) retrievedBlock, err := db.Block(ctx, blockRoot)
require.NoError(t, err) require.NoError(t, err)
assert.DeepEqual(t, (*ethpb.SignedBeaconBlock)(nil), retrievedBlock.Proto(), "Expected nil block") assert.DeepEqual(t, nil, retrievedBlock, "Expected nil block")
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block))) require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db") assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
retrievedBlock, err = db.Block(ctx, blockRoot) retrievedBlock, err = db.Block(ctx, blockRoot)
@@ -173,7 +172,7 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
retrievedBlock, err := db.Block(ctx, blockRoot) retrievedBlock, err := db.Block(ctx, blockRoot)
require.NoError(t, err) require.NoError(t, err)
require.DeepEqual(t, (*ethpb.SignedBeaconBlock)(nil), retrievedBlock.Proto(), "Expected nil block") require.DeepEqual(t, nil, retrievedBlock, "Expected nil block")
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block))) require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
db.blockCache.Del(string(blockRoot[:])) db.blockCache.Del(string(blockRoot[:]))
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db") assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")

View File

@@ -43,6 +43,9 @@ var (
justifiedCheckpointKey = []byte("justified-checkpoint") justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint") finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data") powchainDataKey = []byte("powchain-data")
// Altair key used to identify whether an object is Altair compatible.
// Objects that are only compatible with Altair should be prefixed with this key.
altairKey = []byte("altair")
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations. // Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived") lastArchivedIndexKey = []byte("last-archived")

View File

@@ -11,8 +11,8 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis" "github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
v1alpha "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/featureconfig"
@@ -27,7 +27,6 @@ import (
func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) { func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.State") ctx, span := trace.StartSpan(ctx, "BeaconDB.State")
defer span.End() defer span.End()
var st *statepb.BeaconState
enc, err := s.stateBytes(ctx, blockRoot) enc, err := s.stateBytes(ctx, blockRoot)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -36,18 +35,13 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
if len(enc) == 0 { if len(enc) == 0 {
return nil, nil return nil, nil
} }
// get the validator entries of the state // get the validator entries of the state
valEntries, valErr := s.validatorEntries(ctx, blockRoot) valEntries, valErr := s.validatorEntries(ctx, blockRoot)
if valErr != nil { if valErr != nil {
return nil, valErr return nil, valErr
} }
st, err = s.createState(ctx, enc, valEntries) return s.unmarshalState(ctx, enc, valEntries)
if err != nil {
return nil, err
}
return v1.InitializeFromProtoUnsafe(st)
} }
// GenesisState returns the genesis state in beacon chain. // GenesisState returns the genesis state in beacon chain.
@@ -65,8 +59,8 @@ func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
return cached, nil return cached, nil
} }
var st *statepb.BeaconState var st state.BeaconState
if err = s.db.View(func(tx *bolt.Tx) error { err = s.db.View(func(tx *bolt.Tx) error {
// Retrieve genesis block's signing root from blocks bucket, // Retrieve genesis block's signing root from blocks bucket,
// to look up what the genesis state is. // to look up what the genesis state is.
bucket := tx.Bucket(blocksBucket) bucket := tx.Bucket(blocksBucket)
@@ -77,7 +71,6 @@ func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
if enc == nil { if enc == nil {
return nil return nil
} }
// get the validator entries of the genesis state // get the validator entries of the genesis state
valEntries, valErr := s.validatorEntries(ctx, bytesutil.ToBytes32(genesisBlockRoot)) valEntries, valErr := s.validatorEntries(ctx, bytesutil.ToBytes32(genesisBlockRoot))
if valErr != nil { if valErr != nil {
@@ -85,15 +78,16 @@ func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
} }
var crtErr error var crtErr error
st, crtErr = s.createState(ctx, enc, valEntries) st, err = s.unmarshalState(ctx, enc, valEntries)
return crtErr return crtErr
}); err != nil { })
if err != nil {
return nil, err return nil, err
} }
if st == nil { if st == nil || st.IsNil() {
return nil, nil return nil, nil
} }
return v1.InitializeFromProtoUnsafe(st) return st, nil
} }
// SaveState stores a state to the db using block's signing root which was used to generate the state. // SaveState stores a state to the db using block's signing root which was used to generate the state.
@@ -119,14 +113,11 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
} }
multipleEncs := make([][]byte, len(states)) multipleEncs := make([][]byte, len(states))
for i, st := range states { for i, st := range states {
pbState, err := v1.ProtobufBeaconState(st.InnerStateUnsafe()) stateBytes, err := marshalState(ctx, st)
if err != nil {
return err
}
multipleEncs[i], err = encode(ctx, pbState)
if err != nil { if err != nil {
return err return err
} }
multipleEncs[i] = stateBytes
} }
return s.db.Update(func(tx *bolt.Tx) error { return s.db.Update(func(tx *bolt.Tx) error {
@@ -151,8 +142,8 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
if states == nil { if states == nil {
return errors.New("nil state") return errors.New("nil state")
} }
validatorsEntries := make(map[string]*v1alpha.Validator) // It's a map to make sure that you store only new validator entries. validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys. validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
for i, st := range states { for i, st := range states {
pbState, err := v1.ProtobufBeaconState(st.InnerStateUnsafe()) pbState, err := v1.ProtobufBeaconState(st.InnerStateUnsafe())
if err != nil { if err != nil {
@@ -195,7 +186,7 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
return err return err
} }
valEntries := pbState.Validators valEntries := pbState.Validators
pbState.Validators = make([]*v1alpha.Validator, 0) pbState.Validators = make([]*ethpb.Validator, 0)
encodedState, err := encode(ctx, pbState) encodedState, err := encode(ctx, pbState)
if err != nil { if err != nil {
return err return err
@@ -270,9 +261,9 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(checkpointBucket) bkt = tx.Bucket(checkpointBucket)
enc := bkt.Get(finalizedCheckpointKey) enc := bkt.Get(finalizedCheckpointKey)
checkpoint := &v1alpha.Checkpoint{} checkpoint := &ethpb.Checkpoint{}
if enc == nil { if enc == nil {
checkpoint = &v1alpha.Checkpoint{Root: genesisBlockRoot} checkpoint = &ethpb.Checkpoint{Root: genesisBlockRoot}
} else if err := decode(ctx, enc, checkpoint); err != nil { } else if err := decode(ctx, enc, checkpoint); err != nil {
return err return err
} }
@@ -342,36 +333,86 @@ func (s *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error {
return nil return nil
} }
// creates state from marshaled proto state bytes. Also add the validator entries retrieved // unmarshal state from marshaled proto state bytes to versioned state struct type.
// from the validator bucket and complete the state construction. func (s *Store) unmarshalState(ctx context.Context, enc []byte, validatorEntries []*ethpb.Validator) (state.BeaconState, error) {
func (s *Store) createState(ctx context.Context, enc []byte, validatorEntries []*v1alpha.Validator) (*statepb.BeaconState, error) { var err error
protoState := &statepb.BeaconState{} enc, err = snappy.Decode(nil, enc)
if err := decode(ctx, enc, protoState); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil { if err != nil {
return protoState, err return nil, err
} }
if ok {
protoState.Validators = validatorEntries switch {
case hasAltairKey(enc):
// Unmarshal state bytes into an Altair beacon state.
protoState := &ethpb.BeaconStateAltair{}
if err := protoState.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for altair")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return v2.InitializeFromProtoUnsafe(protoState)
default:
// Unmarshal state bytes into a phase 0 beacon state.
protoState := &ethpb.BeaconState{}
if err := protoState.UnmarshalSSZ(enc); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return v1.InitializeFromProtoUnsafe(protoState)
}
}
// marshalState encodes a versioned beacon state into bytes for storage.
func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, error) {
switch st.InnerStateUnsafe().(type) {
case *ethpb.BeaconState:
rState, ok := st.InnerStateUnsafe().(*ethpb.BeaconState)
if !ok {
return nil, errors.New("invalid inner state")
}
return encode(ctx, rState)
case *ethpb.BeaconStateAltair:
rState, ok := st.InnerStateUnsafe().(*ethpb.BeaconStateAltair)
if !ok {
return nil, errors.New("invalid inner state")
}
if rState == nil {
return nil, errors.New("nil state")
}
rawObj, err := rState.MarshalSSZ()
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(altairKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
} }
return protoState, nil
} }
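marshalState above picks an encoding by type-switching on the state's inner proto object: phase 0 states go through the existing encode path, while Altair states are SSZ-marshaled, prefixed with altairKey, and snappy-compressed. A minimal standalone sketch of that dispatch, using hypothetical stand-in types rather than the real BeaconState protos:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the phase 0 and Altair state protos; real fields elided.
type phase0State struct{ slot uint64 }
type altairState struct{ slot uint64 }

// marshalByVersion dispatches on the concrete inner type, mirroring marshalState.
// The real code compresses proto or SSZ bytes and prefixes altairKey for Altair states.
func marshalByVersion(inner interface{}) ([]byte, error) {
	switch st := inner.(type) {
	case *phase0State:
		return []byte(fmt.Sprintf("phase0:%d", st.slot)), nil
	case *altairState:
		return []byte(fmt.Sprintf("altair:%d", st.slot)), nil
	default:
		return nil, errors.New("invalid inner state")
	}
}

func main() {
	out, err := marshalByVersion(&altairState{slot: 100})
	fmt.Println(string(out), err) // altair:100 <nil>
}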
// Retrieve the validator entries for a given block root. These entries are stored in a // Retrieve the validator entries for a given block root. These entries are stored in a
// separate bucket to reduce state size. // separate bucket to reduce state size.
func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*v1alpha.Validator, error) { func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*ethpb.Validator, error) {
ok, err := s.isStateValidatorMigrationOver() ok, err := s.isStateValidatorMigrationOver()
if err != nil { if err != nil {
return nil, err return nil, err
} }
if !ok { if !ok {
return make([]*v1alpha.Validator, 0), nil return make([]*ethpb.Validator, 0), nil
} }
ctx, span := trace.StartSpan(ctx, "BeaconDB.validatorEntries") ctx, span := trace.StartSpan(ctx, "BeaconDB.validatorEntries")
defer span.End() defer span.End()
var validatorEntries []*v1alpha.Validator var validatorEntries []*ethpb.Validator
err = s.db.View(func(tx *bolt.Tx) error { err = s.db.View(func(tx *bolt.Tx) error {
// get the validator keys from the index bucket // get the validator keys from the index bucket
idxBkt := tx.Bucket(blockRootValidatorHashesBucket) idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
@@ -396,7 +437,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*v1
// get the entry bytes from the cache or from the DB. // get the entry bytes from the cache or from the DB.
v, ok := s.validatorEntryCache.Get(key) v, ok := s.validatorEntryCache.Get(key)
if ok { if ok {
valEntry, vType := v.(*v1alpha.Validator) valEntry, vType := v.(*ethpb.Validator)
if vType { if vType {
s.validatorEntryCache.Set(key, valEntry, int64(valEntry.SizeSSZ())) s.validatorEntryCache.Set(key, valEntry, int64(valEntry.SizeSSZ()))
validatorEntries = append(validatorEntries, valEntry) validatorEntries = append(validatorEntries, valEntry)
@@ -411,7 +452,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*v1
if len(valEntryBytes) == 0 { if len(valEntryBytes) == 0 {
return errors.New("could not find validator entry") return errors.New("could not find validator entry")
} }
encValEntry := &v1alpha.Validator{} encValEntry := &ethpb.Validator{}
decodeErr := decode(ctx, valEntryBytes, encValEntry) decodeErr := decode(ctx, valEntryBytes, encValEntry)
if decodeErr != nil { if decodeErr != nil {
return errors.Wrap(decodeErr, "failed to decode validator entry keys") return errors.Wrap(decodeErr, "failed to decode validator entry keys")
@@ -425,7 +466,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*v1
return validatorEntries, err return validatorEntries, err
} }
/// retrieves and assembles the state information from multiple buckets. // retrieves and assembles the state information from multiple buckets.
func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) { func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.stateBytes") ctx, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
defer span.End() defer span.End()
@@ -468,16 +509,16 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
return 0, errors.New("state enc can't be nil") return 0, errors.New("state enc can't be nil")
} }
// no need to construct the validator entries as it is not used here. // no need to construct the validator entries as it is not used here.
s, err := s.createState(ctx, enc, nil) s, err := s.unmarshalState(ctx, enc, nil)
if err != nil { if err != nil {
return 0, err return 0, err
} }
if s == nil { if s == nil || s.IsNil() {
return 0, errors.New("state can't be nil") return 0, errors.New("state can't be nil")
} }
return s.Slot, nil return s.Slot(), nil
} }
b := &v1alpha.SignedBeaconBlock{} b := &ethpb.SignedBeaconBlock{}
err := decode(ctx, enc, b) err := decode(ctx, enc, b)
if err != nil { if err != nil {
return 0, err return 0, err
@@ -487,7 +528,7 @@ func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []by
} }
return b.Block.Slot, nil return b.Block.Slot, nil
} }
stateSummary := &statepb.StateSummary{} stateSummary := &ethpb.StateSummary{}
if err := decode(ctx, enc, stateSummary); err != nil { if err := decode(ctx, enc, stateSummary); err != nil {
return 0, err return 0, err
} }

View File

@@ -532,6 +532,48 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
} }
} }
func TestAltairState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := testutil.DeterministicGenesisStateAltair(t, 1)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.InnerStateUnsafe(), savedS.InnerStateUnsafe())
savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestAltairState_CanDelete(t *testing.T) {
db := setupDB(t)
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := testutil.DeterministicGenesisStateAltair(t, 1)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))
require.NoError(t, db.DeleteState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func validators(limit int) []*ethpb.Validator { func validators(limit int) []*ethpb.Validator {
var vals []*ethpb.Validator var vals []*ethpb.Validator
for i := 0; i < limit; i++ { for i := 0; i < limit; i++ {

View File

@@ -65,7 +65,7 @@ func DefaultConfig(enableDebugRPCEndpoints bool) MuxConfig {
) )
v1Alpha1PbHandler := gateway.PbMux{ v1Alpha1PbHandler := gateway.PbMux{
Registrations: v1Alpha1Registrations, Registrations: v1Alpha1Registrations,
Patterns: []string{"/eth/v1alpha1/"}, Patterns: []string{"/prysm/v1alpha1/"},
Mux: v1Alpha1Mux, Mux: v1Alpha1Mux,
} }
v1PbHandler := gateway.PbMux{ v1PbHandler := gateway.PbMux{

View File

@@ -16,7 +16,7 @@ func TestDefaultConfig(t *testing.T) {
assert.Equal(t, 4, len(cfg.V1PbMux.Registrations)) assert.Equal(t, 4, len(cfg.V1PbMux.Registrations))
assert.NotNil(t, cfg.V1Alpha1PbMux.Mux) assert.NotNil(t, cfg.V1Alpha1PbMux.Mux)
require.Equal(t, 1, len(cfg.V1Alpha1PbMux.Patterns)) require.Equal(t, 1, len(cfg.V1Alpha1PbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1Alpha1PbMux.Patterns[0]) assert.Equal(t, "/prysm/v1alpha1/", cfg.V1Alpha1PbMux.Patterns[0])
assert.Equal(t, 4, len(cfg.V1Alpha1PbMux.Registrations)) assert.Equal(t, 4, len(cfg.V1Alpha1PbMux.Registrations))
}) })
@@ -28,7 +28,7 @@ func TestDefaultConfig(t *testing.T) {
assert.Equal(t, 5, len(cfg.V1PbMux.Registrations)) assert.Equal(t, 5, len(cfg.V1PbMux.Registrations))
assert.NotNil(t, cfg.V1Alpha1PbMux.Mux) assert.NotNil(t, cfg.V1Alpha1PbMux.Mux)
require.Equal(t, 1, len(cfg.V1Alpha1PbMux.Patterns)) require.Equal(t, 1, len(cfg.V1Alpha1PbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1Alpha1PbMux.Patterns[0]) assert.Equal(t, "/prysm/v1alpha1/", cfg.V1Alpha1PbMux.Patterns[0])
assert.Equal(t, 5, len(cfg.V1Alpha1PbMux.Registrations)) assert.Equal(t, 5, len(cfg.V1Alpha1PbMux.Registrations))
}) })
} }

View File

@@ -26,6 +26,7 @@ go_library(
"//beacon-chain/node/registration:go_default_library", "//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library", "//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library", "//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library", "//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library", "//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library", "//beacon-chain/powchain:go_default_library",

View File

@@ -28,6 +28,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/node/registration" "github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings" "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p" "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain" "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
@@ -59,23 +60,24 @@ const testSkipPowFlag = "test-skip-pow"
// full PoS node. It handles the lifecycle of the entire system and registers // full PoS node. It handles the lifecycle of the entire system and registers
// services to a service registry. // services to a service registry.
type BeaconNode struct { type BeaconNode struct {
cliCtx *cli.Context cliCtx *cli.Context
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
services *shared.ServiceRegistry services *shared.ServiceRegistry
lock sync.RWMutex lock sync.RWMutex
stop chan struct{} // Channel to wait for termination notifications. stop chan struct{} // Channel to wait for termination notifications.
db db.Database db db.Database
attestationPool attestations.Pool attestationPool attestations.Pool
exitPool voluntaryexits.PoolManager exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager slashingsPool slashings.PoolManager
depositCache *depositcache.DepositCache syncCommitteePool synccommittee.Pool
stateFeed *event.Feed depositCache *depositcache.DepositCache
blockFeed *event.Feed stateFeed *event.Feed
opFeed *event.Feed blockFeed *event.Feed
forkChoiceStore forkchoice.ForkChoicer opFeed *event.Feed
stateGen *stategen.State forkChoiceStore forkchoice.ForkChoicer
collector *bcnodeCollector stateGen *stategen.State
collector *bcnodeCollector
} }
// New creates a new node instance, sets up configuration options, and registers // New creates a new node instance, sets up configuration options, and registers
@@ -95,21 +97,25 @@ func New(cliCtx *cli.Context) (*BeaconNode, error) {
configureNetwork(cliCtx) configureNetwork(cliCtx)
configureInteropConfig(cliCtx) configureInteropConfig(cliCtx)
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := shared.NewServiceRegistry() registry := shared.NewServiceRegistry()
ctx, cancel := context.WithCancel(cliCtx.Context) ctx, cancel := context.WithCancel(cliCtx.Context)
beacon := &BeaconNode{ beacon := &BeaconNode{
cliCtx: cliCtx, cliCtx: cliCtx,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
services: registry, services: registry,
stop: make(chan struct{}), stop: make(chan struct{}),
stateFeed: new(event.Feed), stateFeed: new(event.Feed),
blockFeed: new(event.Feed), blockFeed: new(event.Feed),
opFeed: new(event.Feed), opFeed: new(event.Feed),
attestationPool: attestations.NewPool(), attestationPool: attestations.NewPool(),
exitPool: voluntaryexits.NewPool(), exitPool: voluntaryexits.NewPool(),
slashingsPool: slashings.NewPool(), slashingsPool: slashings.NewPool(),
syncCommitteePool: synccommittee.NewPool(),
} }
depositAddress, err := registration.DepositContractAddress() depositAddress, err := registration.DepositContractAddress()
@@ -500,6 +506,7 @@ func (b *BeaconNode) registerSyncService() error {
AttPool: b.attestationPool, AttPool: b.attestationPool,
ExitPool: b.exitPool, ExitPool: b.exitPool,
SlashingPool: b.slashingsPool, SlashingPool: b.slashingsPool,
SyncCommsPool: b.syncCommitteePool,
StateGen: b.stateGen, StateGen: b.stateGen,
}) })
@@ -588,6 +595,7 @@ func (b *BeaconNode) registerRPCService() error {
AttestationsPool: b.attestationPool, AttestationsPool: b.attestationPool,
ExitPool: b.exitPool, ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool, SlashingsPool: b.slashingsPool,
SyncCommitteeObjectPool: b.syncCommitteePool,
POWChainService: web3Service, POWChainService: web3Service,
ChainStartFetcher: chainStartFetcher, ChainStartFetcher: chainStartFetcher,
MockEth1Votes: mockEth1DataVotes, MockEth1Votes: mockEth1DataVotes,

View File

@@ -5,7 +5,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types" types "github.com/prysmaticlabs/eth2-types"
prysmv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/copyutil" "github.com/prysmaticlabs/prysm/shared/copyutil"
"github.com/prysmaticlabs/prysm/shared/queue" "github.com/prysmaticlabs/prysm/shared/queue"
) )
@@ -16,7 +16,7 @@ const syncCommitteeMaxQueueSize = 4
// SaveSyncCommitteeContribution saves a sync committee contribution into a priority queue.
// The priority queue is capped at syncCommitteeMaxQueueSize contributions.
func (s *Store) SaveSyncCommitteeContribution(cont *prysmv2.SyncCommitteeContribution) error { func (s *Store) SaveSyncCommitteeContribution(cont *ethpb.SyncCommitteeContribution) error {
if cont == nil { if cont == nil {
return errNilContribution return errNilContribution
} }
@@ -33,7 +33,7 @@ func (s *Store) SaveSyncCommitteeContribution(cont *prysmv2.SyncCommitteeContrib
// Contributions exist in the queue. Append instead of insert new. // Contributions exist in the queue. Append instead of insert new.
if item != nil { if item != nil {
contributions, ok := item.Value.([]*prysmv2.SyncCommitteeContribution) contributions, ok := item.Value.([]*ethpb.SyncCommitteeContribution)
if !ok { if !ok {
return errors.New("not typed []ethpb.SyncCommitteeContribution") return errors.New("not typed []ethpb.SyncCommitteeContribution")
} }
@@ -50,7 +50,7 @@ func (s *Store) SaveSyncCommitteeContribution(cont *prysmv2.SyncCommitteeContrib
// Contribution does not exist. Insert new. // Contribution does not exist. Insert new.
if err := s.contributionCache.Push(&queue.Item{ if err := s.contributionCache.Push(&queue.Item{
Key: syncCommitteeKey(cont.Slot), Key: syncCommitteeKey(cont.Slot),
Value: []*prysmv2.SyncCommitteeContribution{copied}, Value: []*ethpb.SyncCommitteeContribution{copied},
Priority: int64(cont.Slot), Priority: int64(cont.Slot),
}); err != nil { }); err != nil {
return err return err
@@ -69,7 +69,7 @@ func (s *Store) SaveSyncCommitteeContribution(cont *prysmv2.SyncCommitteeContrib
// SyncCommitteeContributions returns sync committee contributions by slot from the priority queue. // SyncCommitteeContributions returns sync committee contributions by slot from the priority queue.
// Upon retrieval, the contribution is removed from the queue. // Upon retrieval, the contribution is removed from the queue.
func (s *Store) SyncCommitteeContributions(slot types.Slot) ([]*prysmv2.SyncCommitteeContribution, error) { func (s *Store) SyncCommitteeContributions(slot types.Slot) ([]*ethpb.SyncCommitteeContribution, error) {
s.contributionLock.RLock() s.contributionLock.RLock()
defer s.contributionLock.RUnlock() defer s.contributionLock.RUnlock()
@@ -78,9 +78,9 @@ func (s *Store) SyncCommitteeContributions(slot types.Slot) ([]*prysmv2.SyncComm
return nil, nil return nil, nil
} }
contributions, ok := item.Value.([]*prysmv2.SyncCommitteeContribution) contributions, ok := item.Value.([]*ethpb.SyncCommitteeContribution)
if !ok { if !ok {
return nil, errors.New("not typed []prysmv2.SyncCommitteeContribution") return nil, errors.New("not typed []ethpb.SyncCommitteeContribution")
} }
return contributions, nil return contributions, nil
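For reference, a minimal usage sketch of the contribution pool shown above. It assumes the package lives at beacon-chain/operations/synccommittee (the import path is not shown in this diff) and mirrors the capping behaviour exercised by the round-trip test below: after saving contributions for slots 1 through 6, only the four most recent slots remain in the queue.

package main

import (
	"fmt"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

func main() {
	store := synccommittee.NewStore()

	// Save one contribution per slot from 1 through 6. The queue is keyed by
	// slot and capped at syncCommitteeMaxQueueSize (4) slots, so the oldest
	// slots fall out once the cap is exceeded.
	for slot := types.Slot(1); slot <= 6; slot++ {
		if err := store.SaveSyncCommitteeContribution(&ethpb.SyncCommitteeContribution{
			Slot:              slot,
			SubcommitteeIndex: 0,
			Signature:         []byte{'x'},
		}); err != nil {
			panic(err)
		}
	}

	// Slots 1 and 2 have been evicted; slot 6 is still retrievable.
	old, _ := store.SyncCommitteeContributions(1)
	recent, _ := store.SyncCommitteeContributions(6)
	fmt.Println(len(old), len(recent)) // expected: 0 1
}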


@@ -3,7 +3,7 @@ package synccommittee
import ( import (
"testing" "testing"
prysmv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil/require" "github.com/prysmaticlabs/prysm/shared/testutil/require"
) )
@@ -15,7 +15,7 @@ func TestSyncCommitteeContributionCache_Nil(t *testing.T) {
func TestSyncCommitteeContributionCache_RoundTrip(t *testing.T) { func TestSyncCommitteeContributionCache_RoundTrip(t *testing.T) {
store := NewStore() store := NewStore()
conts := []*prysmv2.SyncCommitteeContribution{ conts := []*ethpb.SyncCommitteeContribution{
{Slot: 1, SubcommitteeIndex: 0, Signature: []byte{'a'}}, {Slot: 1, SubcommitteeIndex: 0, Signature: []byte{'a'}},
{Slot: 1, SubcommitteeIndex: 1, Signature: []byte{'b'}}, {Slot: 1, SubcommitteeIndex: 1, Signature: []byte{'b'}},
{Slot: 2, SubcommitteeIndex: 0, Signature: []byte{'c'}}, {Slot: 2, SubcommitteeIndex: 0, Signature: []byte{'c'}},
@@ -36,36 +36,36 @@ func TestSyncCommitteeContributionCache_RoundTrip(t *testing.T) {
conts, err := store.SyncCommitteeContributions(1) conts, err := store.SyncCommitteeContributions(1)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution(nil), conts) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
conts, err = store.SyncCommitteeContributions(2) conts, err = store.SyncCommitteeContributions(2)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution(nil), conts) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
conts, err = store.SyncCommitteeContributions(3) conts, err = store.SyncCommitteeContributions(3)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 3, SubcommitteeIndex: 0, Signature: []byte{'e'}}, {Slot: 3, SubcommitteeIndex: 0, Signature: []byte{'e'}},
{Slot: 3, SubcommitteeIndex: 1, Signature: []byte{'f'}}, {Slot: 3, SubcommitteeIndex: 1, Signature: []byte{'f'}},
}, conts) }, conts)
conts, err = store.SyncCommitteeContributions(4) conts, err = store.SyncCommitteeContributions(4)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 4, SubcommitteeIndex: 0, Signature: []byte{'g'}}, {Slot: 4, SubcommitteeIndex: 0, Signature: []byte{'g'}},
{Slot: 4, SubcommitteeIndex: 1, Signature: []byte{'h'}}, {Slot: 4, SubcommitteeIndex: 1, Signature: []byte{'h'}},
}, conts) }, conts)
conts, err = store.SyncCommitteeContributions(5) conts, err = store.SyncCommitteeContributions(5)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 5, SubcommitteeIndex: 0, Signature: []byte{'i'}}, {Slot: 5, SubcommitteeIndex: 0, Signature: []byte{'i'}},
{Slot: 5, SubcommitteeIndex: 1, Signature: []byte{'j'}}, {Slot: 5, SubcommitteeIndex: 1, Signature: []byte{'j'}},
}, conts) }, conts)
conts, err = store.SyncCommitteeContributions(6) conts, err = store.SyncCommitteeContributions(6)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 6, SubcommitteeIndex: 0, Signature: []byte{'k'}}, {Slot: 6, SubcommitteeIndex: 0, Signature: []byte{'k'}},
{Slot: 6, SubcommitteeIndex: 1, Signature: []byte{'l'}}, {Slot: 6, SubcommitteeIndex: 1, Signature: []byte{'l'}},
}, conts) }, conts)
@@ -73,35 +73,35 @@ func TestSyncCommitteeContributionCache_RoundTrip(t *testing.T) {
// All the contributions should persist after get. // All the contributions should persist after get.
conts, err = store.SyncCommitteeContributions(1) conts, err = store.SyncCommitteeContributions(1)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution(nil), conts) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
conts, err = store.SyncCommitteeContributions(2) conts, err = store.SyncCommitteeContributions(2)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution(nil), conts) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution(nil), conts)
conts, err = store.SyncCommitteeContributions(3) conts, err = store.SyncCommitteeContributions(3)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 3, SubcommitteeIndex: 0, Signature: []byte{'e'}}, {Slot: 3, SubcommitteeIndex: 0, Signature: []byte{'e'}},
{Slot: 3, SubcommitteeIndex: 1, Signature: []byte{'f'}}, {Slot: 3, SubcommitteeIndex: 1, Signature: []byte{'f'}},
}, conts) }, conts)
conts, err = store.SyncCommitteeContributions(4) conts, err = store.SyncCommitteeContributions(4)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 4, SubcommitteeIndex: 0, Signature: []byte{'g'}}, {Slot: 4, SubcommitteeIndex: 0, Signature: []byte{'g'}},
{Slot: 4, SubcommitteeIndex: 1, Signature: []byte{'h'}}, {Slot: 4, SubcommitteeIndex: 1, Signature: []byte{'h'}},
}, conts) }, conts)
conts, err = store.SyncCommitteeContributions(5) conts, err = store.SyncCommitteeContributions(5)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 5, SubcommitteeIndex: 0, Signature: []byte{'i'}}, {Slot: 5, SubcommitteeIndex: 0, Signature: []byte{'i'}},
{Slot: 5, SubcommitteeIndex: 1, Signature: []byte{'j'}}, {Slot: 5, SubcommitteeIndex: 1, Signature: []byte{'j'}},
}, conts) }, conts)
conts, err = store.SyncCommitteeContributions(6) conts, err = store.SyncCommitteeContributions(6)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeContribution{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeContribution{
{Slot: 6, SubcommitteeIndex: 0, Signature: []byte{'k'}}, {Slot: 6, SubcommitteeIndex: 0, Signature: []byte{'k'}},
{Slot: 6, SubcommitteeIndex: 1, Signature: []byte{'l'}}, {Slot: 6, SubcommitteeIndex: 1, Signature: []byte{'l'}},
}, conts) }, conts)


@@ -3,14 +3,14 @@ package synccommittee
import ( import (
"github.com/pkg/errors" "github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types" types "github.com/prysmaticlabs/eth2-types"
prysmv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/copyutil" "github.com/prysmaticlabs/prysm/shared/copyutil"
"github.com/prysmaticlabs/prysm/shared/queue" "github.com/prysmaticlabs/prysm/shared/queue"
) )
// SaveSyncCommitteeMessage saves a sync committee message into a priority queue.
// The priority queue is capped at syncCommitteeMaxQueueSize messages.
func (s *Store) SaveSyncCommitteeMessage(msg *prysmv2.SyncCommitteeMessage) error { func (s *Store) SaveSyncCommitteeMessage(msg *ethpb.SyncCommitteeMessage) error {
if msg == nil { if msg == nil {
return errNilMessage return errNilMessage
} }
@@ -26,7 +26,7 @@ func (s *Store) SaveSyncCommitteeMessage(msg *prysmv2.SyncCommitteeMessage) erro
copied := copyutil.CopySyncCommitteeMessage(msg) copied := copyutil.CopySyncCommitteeMessage(msg)
// Messages exist in the queue. Append instead of insert new. // Messages exist in the queue. Append instead of insert new.
if item != nil { if item != nil {
messages, ok := item.Value.([]*prysmv2.SyncCommitteeMessage) messages, ok := item.Value.([]*ethpb.SyncCommitteeMessage)
if !ok { if !ok {
return errors.New("not typed []ethpb.SyncCommitteeMessage") return errors.New("not typed []ethpb.SyncCommitteeMessage")
} }
@@ -43,7 +43,7 @@ func (s *Store) SaveSyncCommitteeMessage(msg *prysmv2.SyncCommitteeMessage) erro
// Message does not exist. Insert new. // Message does not exist. Insert new.
if err := s.messageCache.Push(&queue.Item{ if err := s.messageCache.Push(&queue.Item{
Key: syncCommitteeKey(msg.Slot), Key: syncCommitteeKey(msg.Slot),
Value: []*prysmv2.SyncCommitteeMessage{copied}, Value: []*ethpb.SyncCommitteeMessage{copied},
Priority: int64(msg.Slot), Priority: int64(msg.Slot),
}); err != nil { }); err != nil {
return err return err
@@ -62,7 +62,7 @@ func (s *Store) SaveSyncCommitteeMessage(msg *prysmv2.SyncCommitteeMessage) erro
// SyncCommitteeMessages returns sync committee messages by slot from the priority queue. // SyncCommitteeMessages returns sync committee messages by slot from the priority queue.
// Upon retrieval, the message is removed from the queue. // Upon retrieval, the message is removed from the queue.
func (s *Store) SyncCommitteeMessages(slot types.Slot) ([]*prysmv2.SyncCommitteeMessage, error) { func (s *Store) SyncCommitteeMessages(slot types.Slot) ([]*ethpb.SyncCommitteeMessage, error) {
s.messageLock.RLock() s.messageLock.RLock()
defer s.messageLock.RUnlock() defer s.messageLock.RUnlock()
@@ -71,9 +71,9 @@ func (s *Store) SyncCommitteeMessages(slot types.Slot) ([]*prysmv2.SyncCommittee
return nil, nil return nil, nil
} }
messages, ok := item.Value.([]*prysmv2.SyncCommitteeMessage) messages, ok := item.Value.([]*ethpb.SyncCommitteeMessage)
if !ok { if !ok {
return nil, errors.New("not typed []prysmv2.SyncCommitteeMessage") return nil, errors.New("not typed []ethpb.SyncCommitteeMessage")
} }
return messages, nil return messages, nil


@@ -3,7 +3,7 @@ package synccommittee
import ( import (
"testing" "testing"
prysmv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil/require" "github.com/prysmaticlabs/prysm/shared/testutil/require"
) )
@@ -15,7 +15,7 @@ func TestSyncCommitteeSignatureCache_Nil(t *testing.T) {
func TestSyncCommitteeSignatureCache_RoundTrip(t *testing.T) { func TestSyncCommitteeSignatureCache_RoundTrip(t *testing.T) {
store := NewStore() store := NewStore()
msgs := []*prysmv2.SyncCommitteeMessage{ msgs := []*ethpb.SyncCommitteeMessage{
{Slot: 1, ValidatorIndex: 0, Signature: []byte{'a'}}, {Slot: 1, ValidatorIndex: 0, Signature: []byte{'a'}},
{Slot: 1, ValidatorIndex: 1, Signature: []byte{'b'}}, {Slot: 1, ValidatorIndex: 1, Signature: []byte{'b'}},
{Slot: 2, ValidatorIndex: 0, Signature: []byte{'c'}}, {Slot: 2, ValidatorIndex: 0, Signature: []byte{'c'}},
@@ -36,36 +36,36 @@ func TestSyncCommitteeSignatureCache_RoundTrip(t *testing.T) {
msgs, err := store.SyncCommitteeMessages(1) msgs, err := store.SyncCommitteeMessages(1)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage(nil), msgs) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage(nil), msgs)
msgs, err = store.SyncCommitteeMessages(2) msgs, err = store.SyncCommitteeMessages(2)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage(nil), msgs) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage(nil), msgs)
msgs, err = store.SyncCommitteeMessages(3) msgs, err = store.SyncCommitteeMessages(3)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 3, ValidatorIndex: 0, Signature: []byte{'e'}}, {Slot: 3, ValidatorIndex: 0, Signature: []byte{'e'}},
{Slot: 3, ValidatorIndex: 1, Signature: []byte{'f'}}, {Slot: 3, ValidatorIndex: 1, Signature: []byte{'f'}},
}, msgs) }, msgs)
msgs, err = store.SyncCommitteeMessages(4) msgs, err = store.SyncCommitteeMessages(4)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 4, ValidatorIndex: 0, Signature: []byte{'g'}}, {Slot: 4, ValidatorIndex: 0, Signature: []byte{'g'}},
{Slot: 4, ValidatorIndex: 1, Signature: []byte{'h'}}, {Slot: 4, ValidatorIndex: 1, Signature: []byte{'h'}},
}, msgs) }, msgs)
msgs, err = store.SyncCommitteeMessages(5) msgs, err = store.SyncCommitteeMessages(5)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 5, ValidatorIndex: 0, Signature: []byte{'i'}}, {Slot: 5, ValidatorIndex: 0, Signature: []byte{'i'}},
{Slot: 5, ValidatorIndex: 1, Signature: []byte{'j'}}, {Slot: 5, ValidatorIndex: 1, Signature: []byte{'j'}},
}, msgs) }, msgs)
msgs, err = store.SyncCommitteeMessages(6) msgs, err = store.SyncCommitteeMessages(6)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 6, ValidatorIndex: 0, Signature: []byte{'k'}}, {Slot: 6, ValidatorIndex: 0, Signature: []byte{'k'}},
{Slot: 6, ValidatorIndex: 1, Signature: []byte{'l'}}, {Slot: 6, ValidatorIndex: 1, Signature: []byte{'l'}},
}, msgs) }, msgs)
@@ -73,36 +73,36 @@ func TestSyncCommitteeSignatureCache_RoundTrip(t *testing.T) {
// Messages should persist after retrieval. // Messages should persist after retrieval.
msgs, err = store.SyncCommitteeMessages(1) msgs, err = store.SyncCommitteeMessages(1)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage(nil), msgs) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage(nil), msgs)
msgs, err = store.SyncCommitteeMessages(2) msgs, err = store.SyncCommitteeMessages(2)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage(nil), msgs) require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage(nil), msgs)
msgs, err = store.SyncCommitteeMessages(3) msgs, err = store.SyncCommitteeMessages(3)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 3, ValidatorIndex: 0, Signature: []byte{'e'}}, {Slot: 3, ValidatorIndex: 0, Signature: []byte{'e'}},
{Slot: 3, ValidatorIndex: 1, Signature: []byte{'f'}}, {Slot: 3, ValidatorIndex: 1, Signature: []byte{'f'}},
}, msgs) }, msgs)
msgs, err = store.SyncCommitteeMessages(4) msgs, err = store.SyncCommitteeMessages(4)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 4, ValidatorIndex: 0, Signature: []byte{'g'}}, {Slot: 4, ValidatorIndex: 0, Signature: []byte{'g'}},
{Slot: 4, ValidatorIndex: 1, Signature: []byte{'h'}}, {Slot: 4, ValidatorIndex: 1, Signature: []byte{'h'}},
}, msgs) }, msgs)
msgs, err = store.SyncCommitteeMessages(5) msgs, err = store.SyncCommitteeMessages(5)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 5, ValidatorIndex: 0, Signature: []byte{'i'}}, {Slot: 5, ValidatorIndex: 0, Signature: []byte{'i'}},
{Slot: 5, ValidatorIndex: 1, Signature: []byte{'j'}}, {Slot: 5, ValidatorIndex: 1, Signature: []byte{'j'}},
}, msgs) }, msgs)
msgs, err = store.SyncCommitteeMessages(6) msgs, err = store.SyncCommitteeMessages(6)
require.NoError(t, err) require.NoError(t, err)
require.DeepSSZEqual(t, []*prysmv2.SyncCommitteeMessage{ require.DeepSSZEqual(t, []*ethpb.SyncCommitteeMessage{
{Slot: 6, ValidatorIndex: 0, Signature: []byte{'k'}}, {Slot: 6, ValidatorIndex: 0, Signature: []byte{'k'}},
{Slot: 6, ValidatorIndex: 1, Signature: []byte{'l'}}, {Slot: 6, ValidatorIndex: 1, Signature: []byte{'l'}},
}, msgs) }, msgs)


@@ -2,7 +2,7 @@ package synccommittee
import ( import (
types "github.com/prysmaticlabs/eth2-types" types "github.com/prysmaticlabs/eth2-types"
prysmv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
) )
var _ = Pool(&Store{}) var _ = Pool(&Store{})
@@ -13,12 +13,12 @@ var _ = Pool(&Store{})
// sync aggregators. // sync aggregators.
type Pool interface { type Pool interface {
// Methods for Sync Contributions. // Methods for Sync Contributions.
SaveSyncCommitteeContribution(contr *prysmv2.SyncCommitteeContribution) error SaveSyncCommitteeContribution(contr *ethpb.SyncCommitteeContribution) error
SyncCommitteeContributions(slot types.Slot) ([]*prysmv2.SyncCommitteeContribution, error) SyncCommitteeContributions(slot types.Slot) ([]*ethpb.SyncCommitteeContribution, error)
// Methods for Sync Committee Messages. // Methods for Sync Committee Messages.
SaveSyncCommitteeMessage(sig *prysmv2.SyncCommitteeMessage) error SaveSyncCommitteeMessage(sig *ethpb.SyncCommitteeMessage) error
SyncCommitteeMessages(slot types.Slot) ([]*prysmv2.SyncCommitteeMessage, error) SyncCommitteeMessages(slot types.Slot) ([]*ethpb.SyncCommitteeMessage, error)
} }
// NewPool returns the sync committee store fulfilling the pool interface. // NewPool returns the sync committee store fulfilling the pool interface.
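Because consumers receive the Pool interface rather than the concrete Store, they can be handed either the real queue-backed pool or a test double. A hedged sketch of a hypothetical consumer follows; messageRecorder and its methods are illustrative names, not part of this change, and the synccommittee import path is assumed.

package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// messageRecorder is a hypothetical consumer that only depends on the Pool
// interface, so any implementation can be injected.
type messageRecorder struct {
	pool synccommittee.Pool
}

func newMessageRecorder() *messageRecorder {
	return &messageRecorder{pool: synccommittee.NewPool()}
}

// record buffers an incoming message and returns everything pooled for its slot.
func (r *messageRecorder) record(msg *ethpb.SyncCommitteeMessage) ([]*ethpb.SyncCommitteeMessage, error) {
	if err := r.pool.SaveSyncCommitteeMessage(msg); err != nil {
		return nil, err
	}
	return r.pool.SyncCommitteeMessages(msg.Slot)
}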


@@ -11,6 +11,7 @@ go_library(
"discovery.go", "discovery.go",
"doc.go", "doc.go",
"fork.go", "fork.go",
"fork_watcher.go",
"gossip_scoring_params.go", "gossip_scoring_params.go",
"gossip_topic_mappings.go", "gossip_topic_mappings.go",
"handshake.go", "handshake.go",
@@ -39,6 +40,7 @@ go_library(
], ],
deps = [ deps = [
"//beacon-chain/cache:go_default_library", "//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/helpers:go_default_library",
@@ -53,6 +55,7 @@ go_library(
"//proto/prysm/v1alpha1/metadata:go_default_library", "//proto/prysm/v1alpha1/metadata:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library", "//proto/prysm/v1alpha1/wrapper:go_default_library",
"//shared:go_default_library", "//shared:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library", "//shared/featureconfig:go_default_library",
"//shared/fileutil:go_default_library", "//shared/fileutil:go_default_library",
"//shared/hashutil:go_default_library", "//shared/hashutil:go_default_library",
@@ -68,8 +71,10 @@ go_library(
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library", "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library", "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_ipfs_go_ipfs_addr//:go_default_library", "@com_github_ipfs_go_ipfs_addr//:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library", "@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_kr_pretty//:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library", "@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//config:go_default_library", "@com_github_libp2p_go_libp2p//config:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library", "@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library",
@@ -147,6 +152,7 @@ go_test(
"//shared/testutil/assert:go_default_library", "//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library", "//shared/testutil/require:go_default_library",
"//shared/timeutils:go_default_library", "//shared/timeutils:go_default_library",
"//shared/version:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library", "@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library", "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library", "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
@@ -164,6 +170,7 @@ go_test(
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library", "@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_libp2p_swarm//testing:go_default_library", "@com_github_libp2p_go_libp2p_swarm//testing:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library", "@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library", "@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library",


@@ -7,8 +7,12 @@ import (
"reflect" "reflect"
"time" "time"
ssz "github.com/ferranbt/fastssz"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/traceutil" "github.com/prysmaticlabs/prysm/shared/traceutil"
@@ -20,7 +24,8 @@ import (
// GossipTypeMapping. // GossipTypeMapping.
var ErrMessageNotMapped = errors.New("message type is not mapped to a PubSub topic") var ErrMessageNotMapped = errors.New("message type is not mapped to a PubSub topic")
-// Broadcast a message to the p2p network.
+// Broadcast a message to the p2p network. The message is assumed to be
+// broadcast on the current fork.
func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error { func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
ctx, span := trace.StartSpan(ctx, "p2p.Broadcast") ctx, span := trace.StartSpan(ctx, "p2p.Broadcast")
defer span.End() defer span.End()
@@ -29,7 +34,7 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
ctx, cancel := context.WithTimeout(ctx, twoSlots) ctx, cancel := context.WithTimeout(ctx, twoSlots)
defer cancel() defer cancel()
-	forkDigest, err := s.forkDigest()
+	forkDigest, err := s.currentForkDigest()
if err != nil { if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest") err := errors.Wrap(err, "could not retrieve fork digest")
traceutil.AnnotateError(span, err) traceutil.AnnotateError(span, err)
@@ -41,14 +46,19 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
traceutil.AnnotateError(span, ErrMessageNotMapped) traceutil.AnnotateError(span, ErrMessageNotMapped)
return ErrMessageNotMapped return ErrMessageNotMapped
} }
-	return s.broadcastObject(ctx, msg, fmt.Sprintf(topic, forkDigest))
+	castMsg, ok := msg.(ssz.Marshaler)
+	if !ok {
+		return errors.Errorf("message of %T does not support marshaller interface", msg)
+	}
+	return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))
} }
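With this change, Broadcast refuses anything that does not implement fastssz's Marshaler before it ever reaches the topic lookup. The guard in isolation looks roughly like the following sketch; asMarshaler is a hypothetical helper, the real check is inline above.

package example

import (
	"fmt"

	ssz "github.com/ferranbt/fastssz"
)

// asMarshaler mirrors the guard added to Broadcast: only values that carry
// fastssz-generated marshalling can be published to gossip topics.
func asMarshaler(msg interface{}) (ssz.Marshaler, error) {
	castMsg, ok := msg.(ssz.Marshaler)
	if !ok {
		return nil, fmt.Errorf("message of %T does not support marshaller interface", msg)
	}
	return castMsg, nil
}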
-// BroadcastAttestation broadcasts an attestation to the p2p network.
+// BroadcastAttestation broadcasts an attestation to the p2p network. The message
+// is assumed to be broadcast on the current fork.
func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *eth.Attestation) error { func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *eth.Attestation) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastAttestation") ctx, span := trace.StartSpan(ctx, "p2p.BroadcastAttestation")
defer span.End() defer span.End()
-	forkDigest, err := s.forkDigest()
+	forkDigest, err := s.currentForkDigest()
if err != nil { if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest") err := errors.Wrap(err, "could not retrieve fork digest")
traceutil.AnnotateError(span, err) traceutil.AnnotateError(span, err)
@@ -61,6 +71,24 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
return nil return nil
} }
// BroadcastSyncCommitteeMessage broadcasts a sync committee message to the p2p network.
// The message is assumed to be broadcast on the current fork.
func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastSyncCommitteeMessage")
defer span.End()
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
traceutil.AnnotateError(span, err)
return err
}
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastSyncCommittee(ctx, subnet, sMsg, forkDigest)
return nil
}
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *eth.Attestation, forkDigest [4]byte) { func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *eth.Attestation, forkDigest [4]byte) {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastAttestation") ctx, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
defer span.End() defer span.End()
@@ -100,6 +128,13 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
traceutil.AnnotateError(span, err) traceutil.AnnotateError(span, err)
} }
} }
// In the event our attestation is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
currSlot := helpers.CurrentSlot(uint64(s.genesisTime.Unix()))
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.Warnf("Attestation is too old to broadcast, discarding it. Current Slot: %d , Attestation Slot: %d", currSlot, att.Data.Slot)
return
}
if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil { if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast attestation") log.WithError(err).Error("Failed to broadcast attestation")
@@ -107,8 +142,63 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
} }
} }
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
oneSlot := time.Duration(1*params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()
// Ensure we have peers with this subnet.
// This adds in a special value to the subnet
// to ensure that we can re-use the same subnet locker.
wrappedSubIdx := subnet + syncLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithSubnet(syncCommitteeToTopic(subnet, forkDigest))
s.subnetLocker(wrappedSubIdx).RUnlock()
span.AddAttributes(
trace.BoolAttribute("hasPeer", hasPeer),
trace.Int64Attribute("slot", int64(sMsg.Slot)),
trace.Int64Attribute("subnet", int64(subnet)),
)
if !hasPeer {
syncCommitteeBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, syncCommitteeToTopic(subnet, forkDigest), subnet, 1)
if err != nil {
return err
}
if ok {
savedSyncCommitteeBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
traceutil.AnnotateError(span, err)
}
}
// In the event our sync message is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
if err := altair.ValidateSyncMessageTime(sMsg.Slot, s.genesisTime, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
log.Warnf("Sync Committee Message is too old to broadcast, discarding it. %v", err)
return
}
if err := s.broadcastObject(ctx, sMsg, syncCommitteeToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast sync committee message")
traceutil.AnnotateError(span, err)
}
}
 // method to broadcast messages to other peers in our gossip mesh.
-func (s *Service) broadcastObject(ctx context.Context, obj interface{}, topic string) error {
+func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
_, span := trace.StartSpan(ctx, "p2p.broadcastObject") _, span := trace.StartSpan(ctx, "p2p.broadcastObject")
defer span.End() defer span.End()
@@ -126,7 +216,6 @@ func (s *Service) broadcastObject(ctx context.Context, obj interface{}, topic st
messageLen := int64(buf.Len()) messageLen := int64(buf.Len())
span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/) span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/)
} }
if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil { if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
err := errors.Wrap(err, "could not publish message") err := errors.Wrap(err, "could not publish message")
traceutil.AnnotateError(span, err) traceutil.AnnotateError(span, err)
@@ -138,3 +227,7 @@ func (s *Service) broadcastObject(ctx context.Context, obj interface{}, topic st
func attestationToTopic(subnet uint64, forkDigest [4]byte) string { func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet) return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
} }
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}
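The new topic helper only differs from attestationToTopic in its format string. Assuming SyncCommitteeSubnetTopicFormat follows the consensus-spec shape "/eth2/%x/sync_committee_%d" (the constant itself is defined elsewhere in the p2p package), formatting works out to:

package example

import "fmt"

// Assumed to match the consensus-spec gossip topic; the actual constant
// SyncCommitteeSubnetTopicFormat lives in the p2p package.
const syncCommitteeSubnetTopicFormat = "/eth2/%x/sync_committee_%d"

func exampleSyncCommitteeTopic() string {
	forkDigest := [4]byte{0x01, 0x02, 0x03, 0x04}
	subnet := uint64(5)
	// Yields "/eth2/01020304/sync_committee_5".
	return fmt.Sprintf(syncCommitteeSubnetTopicFormat, forkDigest, subnet)
}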


@@ -18,8 +18,8 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
testpb "github.com/prysmaticlabs/prysm/proto/testing" testpb "github.com/prysmaticlabs/prysm/proto/testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -46,7 +46,7 @@ func TestService_Broadcast(t *testing.T) {
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
} }
msg := &statepb.Fork{ msg := &ethpb.Fork{
Epoch: 55, Epoch: 55,
CurrentVersion: []byte("fooo"), CurrentVersion: []byte("fooo"),
PreviousVersion: []byte("barr"), PreviousVersion: []byte("barr"),
@@ -55,7 +55,7 @@ func TestService_Broadcast(t *testing.T) {
topic := "/eth2/%x/testing" topic := "/eth2/%x/testing"
// Set a test gossip mapping for testpb.TestSimpleMessage. // Set a test gossip mapping for testpb.TestSimpleMessage.
GossipTypeMapping[reflect.TypeOf(msg)] = topic GossipTypeMapping[reflect.TypeOf(msg)] = topic
-	digest, err := p.forkDigest()
+	digest, err := p.currentForkDigest()
require.NoError(t, err) require.NoError(t, err)
topic = fmt.Sprintf(topic, digest) topic = fmt.Sprintf(topic, digest)
@@ -77,7 +77,7 @@ func TestService_Broadcast(t *testing.T) {
incomingMessage, err := sub.Next(ctx) incomingMessage, err := sub.Next(ctx)
require.NoError(t, err) require.NoError(t, err)
result := &statepb.Fork{} result := &ethpb.Fork{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result)) require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
if !proto.Equal(result, msg) { if !proto.Equal(result, msg) {
tt.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg) tt.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg)
@@ -169,7 +169,7 @@ func TestService_BroadcastAttestation(t *testing.T) {
topic := AttestationSubnetTopicFormat topic := AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(msg)] = topic GossipTypeMapping[reflect.TypeOf(msg)] = topic
-	digest, err := p.forkDigest()
+	digest, err := p.currentForkDigest()
require.NoError(t, err) require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet) topic = fmt.Sprintf(topic, digest, subnet)
@@ -326,7 +326,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
msg := testutil.HydrateAttestation(&eth.Attestation{AggregationBits: bitfield.NewBitlist(7)}) msg := testutil.HydrateAttestation(&eth.Attestation{AggregationBits: bitfield.NewBitlist(7)})
topic := AttestationSubnetTopicFormat topic := AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(msg)] = topic GossipTypeMapping[reflect.TypeOf(msg)] = topic
-	digest, err := p.forkDigest()
+	digest, err := p.currentForkDigest()
require.NoError(t, err) require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet) topic = fmt.Sprintf(topic, digest, subnet)
@@ -365,3 +365,66 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
t.Error("Failed to receive pubsub within 4s") t.Error("Failed to receive pubsub within 4s")
} }
} }
func TestService_BroadcastSyncCommittee(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
if len(p1.BHost.Network().Peers()) == 0 {
t.Fatal("No peers")
}
p := &Service{
host: p1.BHost,
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}
msg := testutil.HydrateSyncCommittee(&pb.SyncCommitteeMessage{})
subnet := uint64(5)
topic := SyncCommitteeSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(msg)] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
incomingMessage, err := sub.Next(ctx)
require.NoError(t, err)
result := &pb.SyncCommitteeMessage{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
if !proto.Equal(result, msg) {
tt.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg)
}
}(t)
// Broadcast to peers and wait.
require.NoError(t, p.BroadcastSyncCommitteeMessage(context.Background(), subnet, msg))
if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Error("Failed to receive pubsub within 1s")
}
}


@@ -16,6 +16,9 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/version"
) )
// Listener defines the discovery V5 network interface that is used // Listener defines the discovery V5 network interface that is used
@@ -36,7 +39,7 @@ type Listener interface {
// to be dynamically discoverable by others given our tracked committee ids. // to be dynamically discoverable by others given our tracked committee ids.
 func (s *Service) RefreshENR() {
 	// return early if discv5 isn't running
-	if s.dv5Listener == nil {
+	if s.dv5Listener == nil || !s.isInitialized() {
 		return
 	}
 	bitV := bitfield.NewBitvector64()
@@ -44,16 +47,43 @@ func (s *Service) RefreshENR() {
 	for _, idx := range committees {
 		bitV.SetBitAt(idx, true)
 	}
-	currentBitV, err := bitvector(s.dv5Listener.Self().Record())
+	currentBitV, err := attBitvector(s.dv5Listener.Self().Record())
 	if err != nil {
-		log.Errorf("Could not retrieve bitfield: %v", err)
+		log.Errorf("Could not retrieve att bitfield: %v", err)
 		return
 	}
-	if bytes.Equal(bitV, currentBitV) {
-		// return early if bitfield hasn't changed
-		return
+	// Compare current epoch with our fork epochs
+	currEpoch := helpers.SlotToEpoch(helpers.CurrentSlot(uint64(s.genesisTime.Unix())))
+	altairForkEpoch := params.BeaconConfig().AltairForkEpoch
+	switch {
+	// Altair Behaviour
+	case currEpoch >= altairForkEpoch:
+		// Retrieve sync subnets from application level
+		// cache.
+		bitS := bitfield.Bitvector4{byte(0x00)}
+		committees = cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
+		for _, idx := range committees {
+			bitS.SetBitAt(idx, true)
+		}
+		currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
+		if err != nil {
+			log.Errorf("Could not retrieve sync bitfield: %v", err)
+			return
+		}
+		if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
+			s.Metadata().Version() == version.Altair {
+			// return early if bitfields haven't changed
+			return
+		}
+		s.updateSubnetRecordWithMetadataV2(bitV, bitS)
+	default:
+		// Phase 0 behaviour.
+		if bytes.Equal(bitV, currentBitV) {
+			// return early if bitfield hasn't changed
+			return
+		}
+		s.updateSubnetRecordWithMetadata(bitV)
 	}
-	s.updateSubnetRecordWithMetadata(bitV)
 	// ping all peers to inform them of new metadata
 	s.pingPeers()
 }
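The fork-boundary decision above reduces to a single epoch comparison; a sketch of that predicate (the helper name is hypothetical, not part of this change):

package example

import types "github.com/prysmaticlabs/eth2-types"

// useAltairMetadata captures the branch in RefreshENR: once the node's
// current epoch reaches AltairForkEpoch, it advertises Altair metadata
// (attnets + syncnets); before that it sticks to phase 0 metadata
// (attnets only).
func useAltairMetadata(currEpoch, altairForkEpoch types.Epoch) bool {
	return currEpoch >= altairForkEpoch
}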
@@ -206,7 +236,8 @@ func (s *Service) createLocalNode(
if err != nil { if err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr") return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
} }
-	return intializeAttSubnets(localNode), nil
+	localNode = initializeAttSubnets(localNode)
+	return initializeSyncCommSubnets(localNode), nil
} }
func (s *Service) startDiscoveryV5( func (s *Service) startDiscoveryV5(


@@ -22,6 +22,7 @@ import (
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
"github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
@@ -33,8 +34,10 @@ import (
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/iputils" "github.com/prysmaticlabs/prysm/shared/iputils"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert" "github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require" "github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/prysmaticlabs/prysm/shared/version"
logTest "github.com/sirupsen/logrus/hooks/test" logTest "github.com/sirupsen/logrus/hooks/test"
) )
@@ -340,3 +343,163 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
})) }))
return id return id
} }
func TestRefreshENR_ForkBoundaries(t *testing.T) {
params.SetupTestConfigCleanup(t)
// Clean up caches after usage.
defer cache.SubnetIDs.EmptyAllCaches()
defer cache.SyncSubnetIDs.EmptyAllCaches()
tests := []struct {
name string
svcBuilder func(t *testing.T) *Service
postValidation func(t *testing.T, s *Service)
}{
{
name: "metadata no change",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
listener, err := s.createListener(ipAddr, pkey)
assert.NoError(t, err)
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.DeepEqual(t, bitfield.NewBitvector64(), s.metaData.AttnetsBitfield())
},
},
{
name: "metadata updated",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
listener, err := s.createListener(ipAddr, pkey)
assert.NoError(t, err)
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
cache.SubnetIDs.AddPersistentCommittee([]byte{'A'}, []uint64{1, 2, 3, 23}, 0)
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
},
},
{
name: "metadata updated at fork epoch",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
listener, err := s.createListener(ipAddr, pkey)
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
cache.SubnetIDs.AddPersistentCommittee([]byte{'A'}, []uint64{1, 2, 3, 23}, 0)
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.Equal(t, version.Altair, s.metaData.Version())
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
},
},
{
name: "metadata updated at fork epoch with no bitfield",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
listener, err := s.createListener(ipAddr, pkey)
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.Equal(t, version.Altair, s.metaData.Version())
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
assert.DeepEqual(t, bitfield.Bitvector64{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
},
},
{
name: "metadata updated past fork epoch with bitfields",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now().Add(-6 * oneEpochDuration()),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
listener, err := s.createListener(ipAddr, pkey)
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(pb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
cache.SubnetIDs.AddPersistentCommittee([]byte{'A'}, []uint64{1, 2, 3, 23}, 0)
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'A'}, 0, []uint64{0, 1}, 0)
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.Equal(t, version.Altair, s.metaData.Version())
assert.DeepEqual(t, bitfield.Bitvector4{0x03}, s.metaData.MetadataObjV1().Syncnets)
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.svcBuilder(t)
s.RefreshENR()
tt.postValidation(t, s)
s.dv5Listener.Close()
cache.SubnetIDs.EmptyAllCaches()
cache.SyncSubnetIDs.EmptyAllCaches()
})
}
}


@@ -2,20 +2,22 @@ package encoder
import ( import (
"io" "io"
ssz "github.com/ferranbt/fastssz"
) )
// NetworkEncoding represents an encoder compatible with Ethereum consensus p2p. // NetworkEncoding represents an encoder compatible with Ethereum consensus p2p.
 type NetworkEncoding interface {
 	// DecodeGossip to the provided gossip message. The interface must be a pointer to the decoding destination.
-	DecodeGossip([]byte, interface{}) error
+	DecodeGossip([]byte, ssz.Unmarshaler) error
 	// DecodeWithMaxLength a bytes from a reader with a varint length prefix. The interface must be a pointer to the
 	// decoding destination. The length of the message should not be more than the provided limit.
-	DecodeWithMaxLength(io.Reader, interface{}) error
+	DecodeWithMaxLength(io.Reader, ssz.Unmarshaler) error
 	// EncodeGossip an arbitrary gossip message to the provided writer. The interface must be a pointer object to encode.
-	EncodeGossip(io.Writer, interface{}) (int, error)
+	EncodeGossip(io.Writer, ssz.Marshaler) (int, error)
 	// EncodeWithMaxLength an arbitrary message to the provided writer with a varint length prefix. The interface must be
 	// a pointer object to encode. The encoded message should not be bigger than the provided limit.
-	EncodeWithMaxLength(io.Writer, interface{}) (int, error)
+	EncodeWithMaxLength(io.Writer, ssz.Marshaler) (int, error)
 	// ProtocolSuffix returns the last part of the protocol ID to indicate the encoding scheme.
 	ProtocolSuffix() string
 }
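Any type with fastssz-generated methods now satisfies these interfaces directly. A round-trip sketch using ethpb.Fork, mirroring the encoder tests further down in this diff:

package example

import (
	"bytes"
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

func roundTripFork() error {
	e := encoder.SszNetworkEncoder{}
	msg := &ethpb.Fork{
		PreviousVersion: []byte("fooo"),
		CurrentVersion:  []byte("barr"),
		Epoch:           9001,
	}

	buf := new(bytes.Buffer)
	// *ethpb.Fork has generated SSZ methods, so it satisfies ssz.Marshaler.
	if _, err := e.EncodeGossip(buf, msg); err != nil {
		return err
	}

	// ...and ssz.Unmarshaler, so it can be decoded in place.
	decoded := &ethpb.Fork{}
	if err := e.DecodeGossip(buf.Bytes(), decoded); err != nil {
		return err
	}
	fmt.Println(decoded.Epoch) // 9001
	return nil
}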


@@ -33,15 +33,12 @@ type SszNetworkEncoder struct{}
// ProtocolSuffixSSZSnappy is the last part of the topic string to identify the encoding protocol. // ProtocolSuffixSSZSnappy is the last part of the topic string to identify the encoding protocol.
const ProtocolSuffixSSZSnappy = "ssz_snappy" const ProtocolSuffixSSZSnappy = "ssz_snappy"
-func (e SszNetworkEncoder) doEncode(msg interface{}) ([]byte, error) {
-	if v, ok := msg.(fastssz.Marshaler); ok {
-		return v.MarshalSSZ()
-	}
-	return nil, errors.Errorf("non-supported type: %T", msg)
+func (e SszNetworkEncoder) doEncode(msg fastssz.Marshaler) ([]byte, error) {
+	return msg.MarshalSSZ()
 }
// EncodeGossip the proto gossip message to the io.Writer. // EncodeGossip the proto gossip message to the io.Writer.
-func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg interface{}) (int, error) {
+func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg fastssz.Marshaler) (int, error) {
if msg == nil { if msg == nil {
return 0, nil return 0, nil
} }
@@ -58,7 +55,7 @@ func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg interface{}) (int, erro
// EncodeWithMaxLength the proto message to the io.Writer. This encoding prefixes the byte slice with a protobuf varint // EncodeWithMaxLength the proto message to the io.Writer. This encoding prefixes the byte slice with a protobuf varint
// to indicate the size of the message. This checks that the encoded message isn't larger than the provided max limit. // to indicate the size of the message. This checks that the encoded message isn't larger than the provided max limit.
-func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}) (int, error) {
+func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg fastssz.Marshaler) (int, error) {
if msg == nil { if msg == nil {
return 0, nil return 0, nil
} }
@@ -81,15 +78,12 @@ func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}) (in
return writeSnappyBuffer(w, b) return writeSnappyBuffer(w, b)
} }
-func (e SszNetworkEncoder) doDecode(b []byte, to interface{}) error {
-	if v, ok := to.(fastssz.Unmarshaler); ok {
-		return v.UnmarshalSSZ(b)
-	}
-	return errors.Errorf("non-supported type: %T", to)
+func (e SszNetworkEncoder) doDecode(b []byte, to fastssz.Unmarshaler) error {
+	return to.UnmarshalSSZ(b)
 }
// DecodeGossip decodes the bytes to the protobuf gossip message provided. // DecodeGossip decodes the bytes to the protobuf gossip message provided.
-func (e SszNetworkEncoder) DecodeGossip(b []byte, to interface{}) error {
+func (e SszNetworkEncoder) DecodeGossip(b []byte, to fastssz.Unmarshaler) error {
b, err := DecodeSnappy(b, MaxGossipSize) b, err := DecodeSnappy(b, MaxGossipSize)
if err != nil { if err != nil {
return err return err
@@ -115,7 +109,7 @@ func DecodeSnappy(msg []byte, maxSize uint64) ([]byte, error) {
// DecodeWithMaxLength the bytes from io.Reader to the protobuf message provided. // DecodeWithMaxLength the bytes from io.Reader to the protobuf message provided.
// This checks that the decoded message isn't larger than the provided max limit. // This checks that the decoded message isn't larger than the provided max limit.
-func (e SszNetworkEncoder) DecodeWithMaxLength(r io.Reader, to interface{}) error {
+func (e SszNetworkEncoder) DecodeWithMaxLength(r io.Reader, to fastssz.Unmarshaler) error {
msgLen, err := readVarint(r) msgLen, err := readVarint(r)
if err != nil { if err != nil {
return err return err


@@ -10,7 +10,7 @@ import (
gogo "github.com/gogo/protobuf/proto" gogo "github.com/gogo/protobuf/proto"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil" "github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert" "github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -26,7 +26,7 @@ func TestSszNetworkEncoder_RoundTrip(t *testing.T) {
func TestSszNetworkEncoder_FailsSnappyLength(t *testing.T) { func TestSszNetworkEncoder_FailsSnappyLength(t *testing.T) {
e := &encoder.SszNetworkEncoder{} e := &encoder.SszNetworkEncoder{}
att := &statepb.Fork{} att := &ethpb.Fork{}
data := make([]byte, 32) data := make([]byte, 32)
binary.PutUvarint(data, encoder.MaxGossipSize+32) binary.PutUvarint(data, encoder.MaxGossipSize+32)
err := e.DecodeGossip(data, att) err := e.DecodeGossip(data, att)
@@ -35,14 +35,14 @@ func TestSszNetworkEncoder_FailsSnappyLength(t *testing.T) {
func testRoundTripWithLength(t *testing.T, e *encoder.SszNetworkEncoder) { func testRoundTripWithLength(t *testing.T, e *encoder.SszNetworkEncoder) {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
msg := &statepb.Fork{ msg := &ethpb.Fork{
PreviousVersion: []byte("fooo"), PreviousVersion: []byte("fooo"),
CurrentVersion: []byte("barr"), CurrentVersion: []byte("barr"),
Epoch: 9001, Epoch: 9001,
} }
_, err := e.EncodeWithMaxLength(buf, msg) _, err := e.EncodeWithMaxLength(buf, msg)
require.NoError(t, err) require.NoError(t, err)
decoded := &statepb.Fork{} decoded := &ethpb.Fork{}
require.NoError(t, e.DecodeWithMaxLength(buf, decoded)) require.NoError(t, e.DecodeWithMaxLength(buf, decoded))
if !proto.Equal(decoded, msg) { if !proto.Equal(decoded, msg) {
t.Logf("decoded=%+v\n", decoded) t.Logf("decoded=%+v\n", decoded)
@@ -52,14 +52,14 @@ func testRoundTripWithLength(t *testing.T, e *encoder.SszNetworkEncoder) {
func testRoundTripWithGossip(t *testing.T, e *encoder.SszNetworkEncoder) { func testRoundTripWithGossip(t *testing.T, e *encoder.SszNetworkEncoder) {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
msg := &statepb.Fork{ msg := &ethpb.Fork{
PreviousVersion: []byte("fooo"), PreviousVersion: []byte("fooo"),
CurrentVersion: []byte("barr"), CurrentVersion: []byte("barr"),
Epoch: 9001, Epoch: 9001,
} }
_, err := e.EncodeGossip(buf, msg) _, err := e.EncodeGossip(buf, msg)
require.NoError(t, err) require.NoError(t, err)
decoded := &statepb.Fork{} decoded := &ethpb.Fork{}
require.NoError(t, e.DecodeGossip(buf.Bytes(), decoded)) require.NoError(t, e.DecodeGossip(buf.Bytes(), decoded))
if !proto.Equal(decoded, msg) { if !proto.Equal(decoded, msg) {
t.Logf("decoded=%+v\n", decoded) t.Logf("decoded=%+v\n", decoded)
@@ -69,7 +69,7 @@ func testRoundTripWithGossip(t *testing.T, e *encoder.SszNetworkEncoder) {
func TestSszNetworkEncoder_EncodeWithMaxLength(t *testing.T) { func TestSszNetworkEncoder_EncodeWithMaxLength(t *testing.T) {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
msg := &statepb.Fork{ msg := &ethpb.Fork{
PreviousVersion: []byte("fooo"), PreviousVersion: []byte("fooo"),
CurrentVersion: []byte("barr"), CurrentVersion: []byte("barr"),
Epoch: 9001, Epoch: 9001,
@@ -86,7 +86,7 @@ func TestSszNetworkEncoder_EncodeWithMaxLength(t *testing.T) {
func TestSszNetworkEncoder_DecodeWithMaxLength(t *testing.T) { func TestSszNetworkEncoder_DecodeWithMaxLength(t *testing.T) {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
msg := &statepb.Fork{ msg := &ethpb.Fork{
PreviousVersion: []byte("fooo"), PreviousVersion: []byte("fooo"),
CurrentVersion: []byte("barr"), CurrentVersion: []byte("barr"),
Epoch: 4242, Epoch: 4242,
@@ -99,7 +99,7 @@ func TestSszNetworkEncoder_DecodeWithMaxLength(t *testing.T) {
params.OverrideBeaconNetworkConfig(c) params.OverrideBeaconNetworkConfig(c)
_, err := e.EncodeGossip(buf, msg) _, err := e.EncodeGossip(buf, msg)
require.NoError(t, err) require.NoError(t, err)
decoded := &statepb.Fork{} decoded := &ethpb.Fork{}
err = e.DecodeWithMaxLength(buf, decoded) err = e.DecodeWithMaxLength(buf, decoded)
wanted := fmt.Sprintf("goes over the provided max limit of %d", maxChunkSize) wanted := fmt.Sprintf("goes over the provided max limit of %d", maxChunkSize)
assert.ErrorContains(t, wanted, err) assert.ErrorContains(t, wanted, err)
@@ -115,13 +115,13 @@ func TestSszNetworkEncoder_DecodeWithMultipleFrames(t *testing.T) {
maxChunkSize := uint64(1 << 22) maxChunkSize := uint64(1 << 22)
c.MaxChunkSize = maxChunkSize c.MaxChunkSize = maxChunkSize
params.OverrideBeaconNetworkConfig(c) params.OverrideBeaconNetworkConfig(c)
_, err := e.EncodeWithMaxLength(buf, st.InnerStateUnsafe()) _, err := e.EncodeWithMaxLength(buf, st.InnerStateUnsafe().(*ethpb.BeaconState))
require.NoError(t, err) require.NoError(t, err)
// Max snappy block size // Max snappy block size
if buf.Len() <= 76490 { if buf.Len() <= 76490 {
t.Errorf("buffer smaller than expected, wanted > %d but got %d", 76490, buf.Len()) t.Errorf("buffer smaller than expected, wanted > %d but got %d", 76490, buf.Len())
} }
decoded := new(statepb.BeaconState) decoded := new(ethpb.BeaconState)
err = e.DecodeWithMaxLength(buf, decoded) err = e.DecodeWithMaxLength(buf, decoded)
assert.NoError(t, err) assert.NoError(t, err)
} }
@@ -144,7 +144,7 @@ func TestSszNetworkEncoder_MaxInt64(t *testing.T) {
func TestSszNetworkEncoder_DecodeWithBadSnappyStream(t *testing.T) { func TestSszNetworkEncoder_DecodeWithBadSnappyStream(t *testing.T) {
st := newBadSnappyStream() st := newBadSnappyStream()
e := &encoder.SszNetworkEncoder{} e := &encoder.SszNetworkEncoder{}
decoded := new(statepb.Fork) decoded := new(ethpb.Fork)
err := e.DecodeWithMaxLength(st, decoded) err := e.DecodeWithMaxLength(st, decoded)
assert.ErrorContains(t, io.EOF.Error(), err) assert.ErrorContains(t, io.EOF.Error(), err)
} }


@@ -3,11 +3,11 @@ package p2p
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"math"
"time" "time"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/enr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/p2putils" "github.com/prysmaticlabs/prysm/shared/p2putils"
@@ -20,16 +20,12 @@ import (
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
// ForkDigest returns the current fork digest of // ForkDigest returns the current fork digest of
// the node. // the node according to the local clock.
func (s *Service) forkDigest() ([4]byte, error) { func (s *Service) currentForkDigest() ([4]byte, error) {
if s.currentForkDigest != [4]byte{} { if !s.isInitialized() {
return s.currentForkDigest, nil return [4]byte{}, errors.New("state is not initialized")
} }
fd, err := p2putils.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot) return p2putils.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
s.currentForkDigest = fd
}
return fd, err
} }
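The CreateForkDigest implementation itself is not part of this diff; below is a rough sketch of what the clock-based derivation amounts to, built only from helpers that do appear elsewhere in the diff (SlotToEpoch, ComputeForkDigest, the [4]byte-keyed ForkVersionSchedule). The function name and local variables are illustrative, not the actual p2putils code:

package p2p

import (
	"time"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// Illustrative sketch: work out the epoch from the local clock, pick the fork
// version active at that epoch, then fold it with the genesis validators root,
// i.e. digest = hash_tree_root(ForkData{version, genesis_validators_root})[:4].
func currentForkDigestSketch(genesisTime time.Time, genesisValidatorsRoot []byte) ([4]byte, error) {
	secondsElapsed := uint64(time.Since(genesisTime).Seconds())
	epoch := helpers.SlotToEpoch(types.Slot(secondsElapsed / params.BeaconConfig().SecondsPerSlot))

	// Latest fork version whose activation epoch has already passed.
	var version [4]byte
	copy(version[:], params.BeaconConfig().GenesisForkVersion)
	activated := types.Epoch(0)
	for v, e := range params.BeaconConfig().ForkVersionSchedule {
		if e <= epoch && e >= activated {
			version, activated = v, e
		}
	}
	return helpers.ComputeForkDigest(version[:], genesisValidatorsRoot)
}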
// Compares fork ENRs between an incoming peer's record and our node's // Compares fork ENRs between an incoming peer's record and our node's
@@ -97,20 +93,10 @@ func addForkEntry(
if timeutils.Now().Before(genesisTime) { if timeutils.Now().Before(genesisTime) {
currentEpoch = 0 currentEpoch = 0
} }
fork, err := p2putils.Fork(currentEpoch) nextForkVersion, nextForkEpoch := p2putils.NextForkData(currentEpoch)
if err != nil {
return nil, err
}
nextForkEpoch := params.BeaconConfig().NextForkEpoch
nextForkVersion := params.BeaconConfig().NextForkVersion
// Set to the current fork version if our next fork is not planned.
if nextForkEpoch == math.MaxUint64 {
nextForkVersion = fork.CurrentVersion
}
enrForkID := &pb.ENRForkID{ enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:], CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion, NextForkVersion: nextForkVersion[:],
NextForkEpoch: nextForkEpoch, NextForkEpoch: nextForkEpoch,
} }
enc, err := enrForkID.MarshalSSZ() enc, err := enrForkID.MarshalSSZ()
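p2putils.NextForkData is likewise not shown here; a plausible sketch of how it replaces the removed NextForkEpoch/NextForkVersion config fields under the new fork schedule is below. The function name and return convention are assumptions; the fallback to the active version when no fork is planned mirrors the deleted MaxUint64 branch:

package p2p

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// Illustrative sketch of next-fork selection against the fork schedule.
func nextForkDataSketch(currentEpoch types.Epoch) ([4]byte, types.Epoch) {
	cfg := params.BeaconConfig()
	// Fork version currently in effect, kept as the fallback.
	var currentVersion [4]byte
	copy(currentVersion[:], cfg.GenesisForkVersion)
	currentActivation := types.Epoch(0)
	// Earliest fork scheduled strictly after the current epoch.
	nextVersion, nextEpoch := currentVersion, cfg.FarFutureEpoch
	for v, e := range cfg.ForkVersionSchedule {
		if e <= currentEpoch && e >= currentActivation {
			currentVersion, currentActivation = v, e
		}
		if e > currentEpoch && e < nextEpoch {
			nextVersion, nextEpoch = v, e
		}
	}
	if nextEpoch == cfg.FarFutureEpoch {
		// No upcoming fork planned: advertise the active version, as before.
		nextVersion = currentVersion
	}
	return nextVersion, nextEpoch
}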


@@ -144,7 +144,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
c := params.BeaconConfig() c := params.BeaconConfig()
nextForkEpoch := types.Epoch(i) nextForkEpoch := types.Epoch(i)
c.NextForkEpoch = nextForkEpoch c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
params.OverrideBeaconConfig(c) params.OverrideBeaconConfig(c)
// We give every peer a different genesis validators root, which // We give every peer a different genesis validators root, which
@@ -209,14 +209,12 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) { func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
c := params.BeaconConfig() c := params.BeaconConfig()
c.ForkVersionSchedule = map[types.Epoch][]byte{ c.ForkVersionSchedule = map[[4]byte]types.Epoch{
0: params.BeaconConfig().GenesisForkVersion, bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
1: {0, 0, 0, 1}, {0, 0, 0, 1}: 1,
} }
nextForkEpoch := types.Epoch(1) nextForkEpoch := types.Epoch(1)
nextForkVersion := []byte{0, 0, 0, 1} nextForkVersion := []byte{0, 0, 0, 1}
c.NextForkEpoch = nextForkEpoch
c.NextForkVersion = nextForkVersion
params.OverrideBeaconConfig(c) params.OverrideBeaconConfig(c)
genesisTime := time.Now() genesisTime := time.Now()
@@ -255,6 +253,7 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
} }
func TestAddForkEntry_Genesis(t *testing.T) { func TestAddForkEntry_Genesis(t *testing.T) {
params.SetupTestConfigCleanup(t)
temp := t.TempDir() temp := t.TempDir()
randNum := rand.Int() randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum)) tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -264,6 +263,11 @@ func TestAddForkEntry_Genesis(t *testing.T) {
db, err := enode.OpenDB("") db, err := enode.OpenDB("")
require.NoError(t, err) require.NoError(t, err)
bCfg := params.BeaconConfig()
bCfg.ForkVersionSchedule = map[[4]byte]types.Epoch{}
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
params.OverrideBeaconConfig(bCfg)
localNode := enode.NewLocalNode(db, pkey) localNode := enode.NewLocalNode(db, pkey)
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32)) localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
require.NoError(t, err) require.NoError(t, err)


@@ -0,0 +1,30 @@
package p2p
import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
)
// A background routine which listens for new and upcoming forks and
// updates the node's discovery service to reflect any new fork version
// changes.
func (s *Service) forkWatcher() {
slotTicker := slotutil.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case currSlot := <-slotTicker.C():
currEpoch := helpers.SlotToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch {
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not add fork entry")
}
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
slotTicker.Done()
return
}
}
}
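The call site that launches this watcher is not included in this section; presumably the p2p service starts it as a background goroutine (something like go s.forkWatcher()) once it has a genesis time and a discv5 listener. Note that the routine only rewrites the ENR fork entry while the current epoch equals AltairForkEpoch; every other slot tick is a no-op.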


@@ -21,9 +21,15 @@ const (
// aggregateWeight specifies the scoring weight that we apply to // aggregateWeight specifies the scoring weight that we apply to
// our aggregate topic. // our aggregate topic.
aggregateWeight = 0.5 aggregateWeight = 0.5
// syncContributionWeight specifies the scoring weight that we apply to
// our sync contribution topic.
syncContributionWeight = 0.2
// attestationTotalWeight specifies the scoring weight that we apply to // attestationTotalWeight specifies the scoring weight that we apply to
// our attestation subnet topic. // our attestation subnet topic.
attestationTotalWeight = 1 attestationTotalWeight = 1
// syncCommitteesTotalWeight specifies the scoring weight that we apply to
// our sync subnet topic.
syncCommitteesTotalWeight = 0.4
// attesterSlashingWeight specifies the scoring weight that we apply to // attesterSlashingWeight specifies the scoring weight that we apply to
// our attester slashing topic. // our attester slashing topic.
attesterSlashingWeight = 0.05 attesterSlashingWeight = 0.05
@@ -92,6 +98,10 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultAggregateTopicParams(activeValidators) return defaultAggregateTopicParams(activeValidators)
case strings.Contains(topic, "beacon_attestation"): case strings.Contains(topic, "beacon_attestation"):
return defaultAggregateSubnetTopicParams(activeValidators) return defaultAggregateSubnetTopicParams(activeValidators)
case strings.Contains(topic, GossipSyncCommitteeMessage):
return defaultSyncSubnetTopicParams(activeValidators)
case strings.Contains(topic, "sync_committee_contribution_and_proof"):
return defaultSyncContributionTopicParams()
case strings.Contains(topic, "voluntary_exit"): case strings.Contains(topic, "voluntary_exit"):
return defaultVoluntaryExitTopicParams(), nil return defaultVoluntaryExitTopicParams(), nil
case strings.Contains(topic, "proposer_slashing"): case strings.Contains(topic, "proposer_slashing"):
@@ -215,6 +225,48 @@ func defaultAggregateTopicParams(activeValidators uint64) (*pubsub.TopicScorePar
}, nil }, nil
} }
func defaultSyncContributionTopicParams() (*pubsub.TopicScoreParams, error) {
// Determine the expected message rate for the particular gossip topic.
aggPerSlot := params.BeaconConfig().SyncCommitteeSubnetCount * params.BeaconConfig().TargetAggregatorsPerSyncSubcommittee
firstMessageCap, err := decayLimit(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot*2/gossipSubD))
if err != nil {
log.Warnf("skipping initializing topic scoring: %v", err)
return nil, nil
}
firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
meshThreshold, err := decayThreshold(scoreDecay(1*oneEpochDuration()), float64(aggPerSlot)/dampeningFactor)
if err != nil {
log.Warnf("skipping initializing topic scoring: %v", err)
return nil, nil
}
meshWeight := -scoreByWeight(syncContributionWeight, meshThreshold)
meshCap := 4 * meshThreshold
if !meshDeliveryIsScored {
// Set the mesh weight to zero as a temporary measure, to avoid
// penalising average nodes.
meshWeight = 0
}
return &pubsub.TopicScoreParams{
TopicWeight: syncContributionWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: firstMessageWeight,
FirstMessageDeliveriesDecay: scoreDecay(1 * oneEpochDuration()),
FirstMessageDeliveriesCap: firstMessageCap,
MeshMessageDeliveriesWeight: meshWeight,
MeshMessageDeliveriesDecay: scoreDecay(1 * oneEpochDuration()),
MeshMessageDeliveriesCap: meshCap,
MeshMessageDeliveriesThreshold: meshThreshold,
MeshMessageDeliveriesWindow: 2 * time.Second,
MeshMessageDeliveriesActivation: 1 * oneEpochDuration(),
MeshFailurePenaltyWeight: meshWeight,
MeshFailurePenaltyDecay: scoreDecay(1 * oneEpochDuration()),
InvalidMessageDeliveriesWeight: -maxScore() / syncContributionWeight,
InvalidMessageDeliveriesDecay: scoreDecay(50 * oneEpochDuration()),
}, nil
}
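As a rough sanity check, assuming the mainnet presets (4 sync committee subnets and a target of 16 aggregators per subcommittee, neither value shown in this diff, with a gossipsub mesh target D of 8): aggPerSlot = 4 * 16 = 64, so the first-delivery decay target passed to decayLimit is 64 * 2 / 8 = 16 messages, and the mesh threshold tracks 64 / dampeningFactor.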
func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) { func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) {
subnetCount := params.BeaconNetworkConfig().AttestationSubnetCount subnetCount := params.BeaconNetworkConfig().AttestationSubnetCount
// Get weight for each specific subnet. // Get weight for each specific subnet.
@@ -238,8 +290,13 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicSc
firstDecay = 4 firstDecay = 4
meshDecay = 16 meshDecay = 16
} }
rate := numPerSlot * 2 / gossipSubD
if rate == 0 {
log.Warn("rate is 0, skipping initializing topic scoring")
return nil, nil
}
// Determine expected first deliveries based on the message rate. // Determine expected first deliveries based on the message rate.
firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(numPerSlot*2/gossipSubD)) firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
if err != nil { if err != nil {
log.Warnf("skipping initializing topic scoring: %v", err) log.Warnf("skipping initializing topic scoring: %v", err)
return nil, nil return nil, nil
@@ -279,6 +336,69 @@ func defaultAggregateSubnetTopicParams(activeValidators uint64) (*pubsub.TopicSc
}, nil }, nil
} }
func defaultSyncSubnetTopicParams(activeValidators uint64) (*pubsub.TopicScoreParams, error) {
subnetCount := params.BeaconConfig().SyncCommitteeSubnetCount
// Get weight for each specific subnet.
topicWeight := syncCommitteesTotalWeight / float64(subnetCount)
syncComSize := params.BeaconConfig().SyncCommitteeSize
// Set the max as the sync committee size
if activeValidators > syncComSize {
activeValidators = syncComSize
}
subnetWeight := activeValidators / subnetCount
if subnetWeight == 0 {
log.Warn("Subnet weight is 0, skipping initializing topic scoring")
return nil, nil
}
firstDecay := time.Duration(1)
meshDecay := time.Duration(4)
rate := subnetWeight * 2 / gossipSubD
if rate == 0 {
log.Warn("rate is 0, skipping initializing topic scoring")
return nil, nil
}
// Determine expected first deliveries based on the message rate.
firstMessageCap, err := decayLimit(scoreDecay(firstDecay*oneEpochDuration()), float64(rate))
if err != nil {
log.Warnf("skipping initializing topic scoring: %v", err)
return nil, nil
}
firstMessageWeight := maxFirstDeliveryScore / firstMessageCap
// Determine expected mesh deliveries based on message rate applied with a dampening factor.
meshThreshold, err := decayThreshold(scoreDecay(meshDecay*oneEpochDuration()), float64(subnetWeight)/dampeningFactor)
if err != nil {
log.Warnf("skipping initializing topic scoring: %v", err)
return nil, nil
}
meshWeight := -scoreByWeight(topicWeight, meshThreshold)
meshCap := 4 * meshThreshold
if !meshDeliveryIsScored {
// Set the mesh weight to zero as a temporary measure, to avoid
// penalising average nodes.
meshWeight = 0
}
return &pubsub.TopicScoreParams{
TopicWeight: topicWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: firstMessageWeight,
FirstMessageDeliveriesDecay: scoreDecay(firstDecay * oneEpochDuration()),
FirstMessageDeliveriesCap: firstMessageCap,
MeshMessageDeliveriesWeight: meshWeight,
MeshMessageDeliveriesDecay: scoreDecay(meshDecay * oneEpochDuration()),
MeshMessageDeliveriesCap: meshCap,
MeshMessageDeliveriesThreshold: meshThreshold,
MeshMessageDeliveriesWindow: 2 * time.Second,
MeshMessageDeliveriesActivation: 1 * oneEpochDuration(),
MeshFailurePenaltyWeight: meshWeight,
MeshFailurePenaltyDecay: scoreDecay(meshDecay * oneEpochDuration()),
InvalidMessageDeliveriesWeight: -maxScore() / topicWeight,
InvalidMessageDeliveriesDecay: scoreDecay(50 * oneEpochDuration()),
}, nil
}
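By the same token, assuming a mainnet sync committee size of 512 (not shown in this diff): activeValidators is capped at 512, each of the 4 subnets is credited 512 / 4 = 128 validators, the per-subnet topic weight is 0.4 / 4 = 0.1, and the expected first-delivery rate is 128 * 2 / 8 = 32 with a mesh target of 8.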
func defaultAttesterSlashingTopicParams() *pubsub.TopicScoreParams { func defaultAttesterSlashingTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{ return &pubsub.TopicScoreParams{
TopicWeight: attesterSlashingWeight, TopicWeight: attesterSlashingWeight,
@@ -401,8 +521,9 @@ func scoreByWeight(weight, threshold float64) float64 {
// maxScore attainable by a peer. // maxScore attainable by a peer.
func maxScore() float64 { func maxScore() float64 {
totalWeight := beaconBlockWeight + aggregateWeight + attestationTotalWeight + totalWeight := beaconBlockWeight + aggregateWeight + syncContributionWeight +
attesterSlashingWeight + proposerSlashingWeight + voluntaryExitWeight attestationTotalWeight + syncCommitteesTotalWeight + attesterSlashingWeight +
proposerSlashingWeight + voluntaryExitWeight
return (maxInMeshScore + maxFirstDeliveryScore) * totalWeight return (maxInMeshScore + maxFirstDeliveryScore) * totalWeight
} }


@@ -3,27 +3,53 @@ package p2p
import ( import (
"reflect" "reflect"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
) )
// GossipTopicMappings represent the protocol ID to protobuf message type map for easy // gossipTopicMappings represent the protocol ID to protobuf message type map for easy
// lookup. // lookup.
var GossipTopicMappings = map[string]proto.Message{ var gossipTopicMappings = map[string]proto.Message{
BlockSubnetTopicFormat: &pb.SignedBeaconBlock{}, BlockSubnetTopicFormat: &pb.SignedBeaconBlock{},
AttestationSubnetTopicFormat: &pb.Attestation{}, AttestationSubnetTopicFormat: &pb.Attestation{},
ExitSubnetTopicFormat: &pb.SignedVoluntaryExit{}, ExitSubnetTopicFormat: &pb.SignedVoluntaryExit{},
ProposerSlashingSubnetTopicFormat: &pb.ProposerSlashing{}, ProposerSlashingSubnetTopicFormat: &pb.ProposerSlashing{},
AttesterSlashingSubnetTopicFormat: &pb.AttesterSlashing{}, AttesterSlashingSubnetTopicFormat: &pb.AttesterSlashing{},
AggregateAndProofSubnetTopicFormat: &pb.SignedAggregateAttestationAndProof{}, AggregateAndProofSubnetTopicFormat: &pb.SignedAggregateAttestationAndProof{},
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
}
// GossipTopicMappings returns the protobuf message type mapped to a gossip topic,
// versioned by the fork epoch.
func GossipTopicMappings(topic string, epoch types.Epoch) proto.Message {
if topic == BlockSubnetTopicFormat && epoch >= params.BeaconConfig().AltairForkEpoch {
return &ethpb.SignedBeaconBlockAltair{}
}
return gossipTopicMappings[topic]
}
// AllTopics returns all topics stored in our
// gossip mapping.
func AllTopics() []string {
topics := []string{}
for k := range gossipTopicMappings {
topics = append(topics, k)
}
return topics
} }
// GossipTypeMapping is the inverse of GossipTopicMappings so that an arbitrary protobuf message // GossipTypeMapping is the inverse of GossipTopicMappings so that an arbitrary protobuf message
// can be mapped to a protocol ID string. // can be mapped to a protocol ID string.
var GossipTypeMapping = make(map[reflect.Type]string, len(GossipTopicMappings)) var GossipTypeMapping = make(map[reflect.Type]string, len(gossipTopicMappings))
func init() { func init() {
for k, v := range GossipTopicMappings { for k, v := range gossipTopicMappings {
GossipTypeMapping[reflect.TypeOf(v)] = k GossipTypeMapping[reflect.TypeOf(v)] = k
} }
// Specially handle Altair Objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
} }
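A hedged sketch of how a consumer of the new accessor might decode an incoming block payload across the fork boundary. Only GossipTopicMappings, BlockSubnetTopicFormat, and the SszNetworkEncoder come from this diff; the fastssz import path, the proto.Clone-based flow, and the function name are assumptions:

package p2p

import (
	fastssz "github.com/ferranbt/fastssz"
	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
	"google.golang.org/protobuf/proto"
)

// Illustrative: pick the protobuf type registered for the block topic at a
// given epoch and decode a gossip payload into a fresh instance of it.
func decodeBlockForEpochSketch(enc *encoder.SszNetworkEncoder, payload []byte, epoch types.Epoch) (proto.Message, error) {
	base := GossipTopicMappings(BlockSubnetTopicFormat, epoch) // phase0 or Altair block type
	msg := proto.Clone(base)
	u, ok := msg.(fastssz.Unmarshaler)
	if !ok {
		return nil, errors.Errorf("%T does not support SSZ unmarshalling", msg)
	}
	if err := enc.DecodeGossip(payload, u); err != nil {
		return nil, err
	}
	return msg, nil
}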


@@ -3,14 +3,39 @@ package p2p
import ( import (
"reflect" "reflect"
"testing" "testing"
eth2types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
) )
func TestMappingHasNoDuplicates(t *testing.T) { func TestMappingHasNoDuplicates(t *testing.T) {
m := make(map[reflect.Type]bool) m := make(map[reflect.Type]bool)
for _, v := range GossipTopicMappings { for _, v := range gossipTopicMappings {
if _, ok := m[reflect.TypeOf(v)]; ok { if _, ok := m[reflect.TypeOf(v)]; ok {
t.Errorf("%T is duplicated in the topic mapping", v) t.Errorf("%T is duplicated in the topic mapping", v)
} }
m[reflect.TypeOf(v)] = true m[reflect.TypeOf(v)] = true
} }
} }
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig()
forkEpoch := eth2types.Epoch(100)
bCfg.AltairForkEpoch = forkEpoch
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = eth2types.Epoch(100)
params.OverrideBeaconConfig(bCfg)
// Before Fork
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
assert.Equal(t, true, ok)
// After Fork
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, forkEpoch)
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
assert.Equal(t, true, ok)
}


@@ -35,6 +35,7 @@ type P2P interface {
type Broadcaster interface { type Broadcaster interface {
Broadcast(context.Context, proto.Message) error Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
} }
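For orientation, a hedged example of how a caller might use the new method. The subnet derivation shown is a stand-in for illustration, not the actual helper used by the validator client:

package p2p

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// Illustrative only: broadcast a validator's sync committee message on the
// subnet derived from its position in the committee.
func broadcastSyncMessageSketch(ctx context.Context, b Broadcaster, msg *ethpb.SyncCommitteeMessage, committeeIndex uint64) error {
	subnetSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
	return b.BroadcastSyncCommitteeMessage(ctx, committeeIndex/subnetSize, msg)
}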
// SetStreamHandler configures p2p to handle streams of a certain topic ID. // SetStreamHandler configures p2p to handle streams of a certain topic ID.


@@ -25,6 +25,16 @@ var (
Name: "p2p_attestation_subnet_attempted_broadcasts", Name: "p2p_attestation_subnet_attempted_broadcasts",
Help: "The number of attestations that were attempted to be broadcast.", Help: "The number of attestations that were attempted to be broadcast.",
}) })
savedSyncCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_sync_committee_subnet_recovered_broadcasts",
Help: "The number of sync committee messages that were attempted to be broadcast with no peers on " +
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
"until a subnet peer can be found.",
})
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
Help: "The number of sync committee that were attempted to be broadcast.",
})
) )
func (s *Service) updateMetrics() { func (s *Service) updateMetrics() {


@@ -235,7 +235,7 @@ func (p *Status) SetMetadata(pid peer.ID, metaData metadata.Metadata) {
defer p.store.Unlock() defer p.store.Unlock()
peerData := p.store.PeerDataGetOrCreate(pid) peerData := p.store.PeerDataGetOrCreate(pid)
peerData.MetaData = metaData peerData.MetaData = metaData.Copy()
} }
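Storing metaData.Copy() rather than the caller's object is a defensive copy: the status registry no longer aliases the metadata passed in, so later mutation by the caller (for example bumping a sequence number) cannot silently change what is recorded for the peer.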
// Metadata returns a copy of the metadata corresponding to the provided // Metadata returns a copy of the metadata corresponding to the provided


@@ -2,16 +2,21 @@ package p2p
import ( import (
"context" "context"
"encoding/hex"
"strings"
"time" "time"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/p2putils"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
) )
@@ -36,6 +41,11 @@ const (
randomSubD = 6 // random gossip target randomSubD = 6 // random gossip target
) )
var errInvalidTopic = errors.New("invalid topic format")
// digestLength is the fixed byte length of the fork digest embedded in gossip topic names.
const digestLength = 4
// JoinTopic will join PubSub topic, if not already joined. // JoinTopic will join PubSub topic, if not already joined.
func (s *Service) JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error) { func (s *Service) JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error) {
s.joinedTopicsLock.Lock() s.joinedTopicsLock.Lock()
@@ -132,7 +142,26 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
// Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of // Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of
// the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data, // the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data,
// i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]`. // i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]`.
func msgIDFunction(pmsg *pubsub_pb.Message) string { func (s *Service) msgIDFunction(pmsg *pubsub_pb.Message) string {
digest, err := ExtractGossipDigest(*pmsg.Topic)
if err != nil {
// Impossible condition that should
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
_, fEpoch, err := p2putils.RetrieveForkDataFromDigest(digest, s.genesisValidatorsRoot)
if err != nil {
// Impossible condition that should
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
if fEpoch >= params.BeaconConfig().AltairForkEpoch {
return s.altairMsgID(pmsg)
}
decodedData, err := encoder.DecodeSnappy(pmsg.Data, params.BeaconNetworkConfig().GossipMaxSize) decodedData, err := encoder.DecodeSnappy(pmsg.Data, params.BeaconNetworkConfig().GossipMaxSize)
if err != nil { if err != nil {
combinedData := append(params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:], pmsg.Data...) combinedData := append(params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:], pmsg.Data...)
@@ -144,6 +173,43 @@ func msgIDFunction(pmsg *pubsub_pb.Message) string {
return string(h[:20]) return string(h[:20])
} }
// Spec:
// The derivation of the message-id has changed starting with Altair to incorporate the message topic along with the message data.
// These are fields of the Message Protobuf, and interpreted as empty byte strings if missing. The message-id MUST be the following
// 20 byte value computed from the message:
//
// If message.data has a valid snappy decompression, set message-id to the first 20 bytes of the SHA256 hash of the concatenation of
// the following data: MESSAGE_DOMAIN_VALID_SNAPPY, the length of the topic byte string (encoded as little-endian uint64), the topic
// byte string, and the snappy decompressed message data: i.e. SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic)))
// + message.topic + snappy_decompress(message.data))[:20]. Otherwise, set message-id to the first 20 bytes of the SHA256 hash of the concatenation
// of the following data: MESSAGE_DOMAIN_INVALID_SNAPPY, the length of the topic byte string (encoded as little-endian uint64),
// the topic byte string, and the raw message data: i.e. SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20].
func (s *Service) altairMsgID(pmsg *pubsub_pb.Message) string {
topic := *pmsg.Topic
topicLen := uint64(len(topic))
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
decodedData, err := encoder.DecodeSnappy(pmsg.Data, params.BeaconNetworkConfig().GossipMaxSize)
if err != nil {
totalLength := len(params.BeaconNetworkConfig().MessageDomainInvalidSnappy) + len(topicLenBytes) + int(topicLen) + len(pmsg.Data)
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:]...)
combinedData = append(combinedData, topicLenBytes...)
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, pmsg.Data...)
h := hashutil.Hash(combinedData)
return string(h[:20])
}
totalLength := len(params.BeaconNetworkConfig().MessageDomainValidSnappy) + len(topicLenBytes) + int(topicLen) + len(decodedData)
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainValidSnappy[:]...)
combinedData = append(combinedData, topicLenBytes...)
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, decodedData...)
h := hashutil.Hash(combinedData)
return string(h[:20])
}
// creates a custom gossipsub parameter set. // creates a custom gossipsub parameter set.
func pubsubGossipParam() pubsub.GossipSubParams { func pubsubGossipParam() pubsub.GossipSubParams {
gParams := pubsub.DefaultGossipSubParams() gParams := pubsub.DefaultGossipSubParams()
@@ -184,3 +250,27 @@ func convertTopicScores(topicMap map[string]*pubsub.TopicScoreSnapshot) map[stri
} }
return newMap return newMap
} }
// ExtractGossipDigest extracts the relevant fork digest from the gossip topic.
func ExtractGossipDigest(topic string) ([4]byte, error) {
splitParts := strings.Split(topic, "/")
parts := []string{}
for _, p := range splitParts {
if p == "" {
continue
}
parts = append(parts, p)
}
if len(parts) < 2 {
return [4]byte{}, errors.Wrapf(errInvalidTopic, "it only has %d parts: %v", len(parts), parts)
}
strDigest := parts[1]
digest, err := hex.DecodeString(strDigest)
if err != nil {
return [4]byte{}, err
}
if len(digest) != digestLength {
return [4]byte{}, errors.Errorf("invalid digest length wanted %d but got %d", digestLength, len(digest))
}
return bytesutil.ToBytes4(digest), nil
}


@@ -8,11 +8,18 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/shared/p2putils"
"github.com/prysmaticlabs/prysm/shared/params"
) )
var _ pubsub.SubscriptionFilter = (*Service)(nil) var _ pubsub.SubscriptionFilter = (*Service)(nil)
const pubsubSubscriptionRequestLimit = 100 // It is set at this limit to handle the possibility
// of double topic subscriptions at fork boundaries.
// -> 64 Attestation Subnets * 2.
// -> 4 Sync Committee Subnets * 2.
// -> Block,Aggregate,ProposerSlashing,AttesterSlashing,Exits,SyncContribution * 2.
const pubsubSubscriptionRequestLimit = 200
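Worked out from the list above, a node straddling a fork boundary can legitimately be subscribed to 2*64 attestation subnets + 2*4 sync committee subnets + 2*6 global topics = 148 topics, so the new limit of 200 leaves headroom where the old limit of 100 would have rejected valid subscription sets.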
// CanSubscribe returns true if the topic is of interest and we could subscribe to it. // CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool { func (s *Service) CanSubscribe(topic string) bool {
@@ -30,12 +37,17 @@ func (s *Service) CanSubscribe(topic string) bool {
if parts[1] != "eth2" { if parts[1] != "eth2" {
return false return false
} }
fd, err := s.forkDigest() fd, err := s.currentForkDigest()
if err != nil { if err != nil {
log.WithError(err).Error("Could not determine fork digest") log.WithError(err).Error("Could not determine fork digest")
return false return false
} }
if parts[2] != fmt.Sprintf("%x", fd) { digest, err := p2putils.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine next fork digest")
return false
}
if parts[2] != fmt.Sprintf("%x", fd) && parts[2] != fmt.Sprintf("%x", digest) {
return false return false
} }
if parts[4] != encoder.ProtocolSuffixSSZSnappy { if parts[4] != encoder.ProtocolSuffixSSZSnappy {
@@ -43,7 +55,7 @@ func (s *Service) CanSubscribe(topic string) bool {
} }
// Check the incoming topic matches any topic mapping. This includes a check for part[3]. // Check the incoming topic matches any topic mapping. This includes a check for part[3].
for gt := range GossipTopicMappings { for gt := range gossipTopicMappings {
if _, err := scanfcheck(strings.Join(parts[0:4], "/"), gt); err == nil { if _, err := scanfcheck(strings.Join(parts[0:4], "/"), gt); err == nil {
return true return true
} }


@@ -14,7 +14,8 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/p2putils"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/timeutils" "github.com/prysmaticlabs/prysm/shared/timeutils"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -22,6 +23,10 @@ import (
func TestService_CanSubscribe(t *testing.T) { func TestService_CanSubscribe(t *testing.T) {
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04} currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
valRoot := [32]byte{}
digest, err := p2putils.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type test struct { type test struct {
name string name string
topic string topic string
@@ -30,7 +35,7 @@ func TestService_CanSubscribe(t *testing.T) {
tests := []test{ tests := []test{
{ {
name: "block topic on current fork", name: "block topic on current fork",
topic: fmt.Sprintf(BlockSubnetTopicFormat, currentFork) + validProtocolSuffix, topic: fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix,
want: true, want: true,
}, },
{ {
@@ -60,17 +65,17 @@ func TestService_CanSubscribe(t *testing.T) {
}, },
{ {
name: "bad prefix", name: "bad prefix",
topic: fmt.Sprintf("/eth3/%x/foobar", currentFork) + validProtocolSuffix, topic: fmt.Sprintf("/eth3/%x/foobar", digest) + validProtocolSuffix,
want: false, want: false,
}, },
{ {
name: "topic not in gossip mapping", name: "topic not in gossip mapping",
topic: fmt.Sprintf("/eth2/%x/foobar", currentFork) + validProtocolSuffix, topic: fmt.Sprintf("/eth2/%x/foobar", digest) + validProtocolSuffix,
want: false, want: false,
}, },
{ {
name: "att subnet topic on current fork", name: "att subnet topic on current fork",
topic: fmt.Sprintf(AttestationSubnetTopicFormat, currentFork, 55 /*subnet*/) + validProtocolSuffix, topic: fmt.Sprintf(AttestationSubnetTopicFormat, digest, 55 /*subnet*/) + validProtocolSuffix,
want: true, want: true,
}, },
{ {
@@ -81,11 +86,11 @@ func TestService_CanSubscribe(t *testing.T) {
} }
// Ensure all gossip topic mappings pass validation. // Ensure all gossip topic mappings pass validation.
for topic := range GossipTopicMappings { for _, topic := range AllTopics() {
formatting := []interface{}{currentFork} formatting := []interface{}{digest}
// Special case for attestation subnets which have a second formatting placeholder. // Special case for attestation subnets which have a second formatting placeholder.
if topic == AttestationSubnetTopicFormat { if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */) formatting = append(formatting, 0 /* some subnet ID */)
} }
@@ -99,9 +104,8 @@ func TestService_CanSubscribe(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
s := &Service{ s := &Service{
currentForkDigest: currentFork, genesisValidatorsRoot: valRoot[:],
genesisValidatorsRoot: make([]byte, 32), genesisTime: genesisTime,
genesisTime: time.Now(),
} }
if got := s.CanSubscribe(tt.topic); got != tt.want { if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want) t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
@@ -189,7 +193,7 @@ func Test_scanfcheck(t *testing.T) {
func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testing.T) { func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testing.T) {
// scanfcheck only supports integer based substitutions at the moment. Any others will // scanfcheck only supports integer based substitutions at the moment. Any others will
// inaccurately fail validation. // inaccurately fail validation.
for topic := range GossipTopicMappings { for _, topic := range AllTopics() {
t.Run(topic, func(t *testing.T) { t.Run(topic, func(t *testing.T) {
for i, c := range topic { for i, c := range topic {
if string(c) == "%" { if string(c) == "%" {
@@ -204,8 +208,11 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
} }
func TestService_FilterIncomingSubscriptions(t *testing.T) { func TestService_FilterIncomingSubscriptions(t *testing.T) {
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
valRoot := [32]byte{}
digest, err := p2putils.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type args struct { type args struct {
id peer.ID id peer.ID
subs []*pubsubpb.RPC_SubOpts subs []*pubsubpb.RPC_SubOpts
@@ -241,7 +248,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
return &b return &b
}(), }(),
Topicid: func() *string { Topicid: func() *string {
s := fmt.Sprintf(BlockSubnetTopicFormat, currentFork) + validProtocolSuffix s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
return &s return &s
}(), }(),
}, },
@@ -255,7 +262,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
return &b return &b
}(), }(),
Topicid: func() *string { Topicid: func() *string {
s := fmt.Sprintf(BlockSubnetTopicFormat, currentFork) + validProtocolSuffix s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
return &s return &s
}(), }(),
}, },
@@ -271,7 +278,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
return &b return &b
}(), }(),
Topicid: func() *string { Topicid: func() *string {
s := fmt.Sprintf(BlockSubnetTopicFormat, currentFork) + validProtocolSuffix s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
return &s return &s
}(), }(),
}, },
@@ -281,7 +288,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
return &b return &b
}(), }(),
Topicid: func() *string { Topicid: func() *string {
s := fmt.Sprintf(BlockSubnetTopicFormat, currentFork) + validProtocolSuffix s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
return &s return &s
}(), }(),
}, },
@@ -295,7 +302,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
return &b return &b
}(), }(),
Topicid: func() *string { Topicid: func() *string {
s := fmt.Sprintf(BlockSubnetTopicFormat, currentFork) + validProtocolSuffix s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
return &s return &s
}(), }(),
}, },
@@ -305,9 +312,8 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
s := &Service{ s := &Service{
currentForkDigest: currentFork, genesisValidatorsRoot: valRoot[:],
genesisValidatorsRoot: make([]byte, 32), genesisTime: genesisTime,
genesisTime: time.Now(),
} }
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs) got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
@@ -350,14 +356,4 @@ func TestService_MonitorsStateForkUpdates(t *testing.T) {
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
require.True(t, s.isInitialized()) require.True(t, s.isInitialized())
require.NotEmpty(t, s.currentForkDigest)
}
func TestService_doesntSupportForksYet(t *testing.T) {
// Part of phase 1 will include a state transition which updates the state's fork. In phase 0,
// there are no forks or fork schedule planned. As such, we'll work on supporting fork upgrades
// in phase 1 changes.
if len(params.BeaconConfig().ForkVersionSchedule) > 0 {
t.Fatal("pubsub subscription filters do not support fork schedule (yet)")
}
} }


@@ -9,9 +9,12 @@ import (
"github.com/golang/snappy" "github.com/golang/snappy"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/pkg/errors"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
testp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" testp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert" "github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -58,16 +61,112 @@ func TestService_PublishToTopicConcurrentMapWrite(t *testing.T) {
} }
func TestMessageIDFunction_HashesCorrectly(t *testing.T) { func TestMessageIDFunction_HashesCorrectly(t *testing.T) {
s := &Service{
cfg: &Config{
TCPPort: 0,
UDPPort: 0,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
d, err := s.currentForkDigest()
assert.NoError(t, err)
tpc := fmt.Sprintf(BlockSubnetTopicFormat, d)
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'} invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
pMsg := &pubsubpb.Message{Data: invalidSnappy[:]} pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
hashedData := hashutil.Hash(append(params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:], pMsg.Data...)) hashedData := hashutil.Hash(append(params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
msgID := string(hashedData[:20]) msgID := string(hashedData[:20])
assert.Equal(t, msgID, msgIDFunction(pMsg), "Got incorrect msg id") assert.Equal(t, msgID, s.msgIDFunction(pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'} validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:]) enc := snappy.Encode(nil, validObj[:])
nMsg := &pubsubpb.Message{Data: enc} nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
hashedData = hashutil.Hash(append(params.BeaconNetworkConfig().MessageDomainValidSnappy[:], validObj[:]...)) hashedData = hashutil.Hash(append(params.BeaconNetworkConfig().MessageDomainValidSnappy[:], validObj[:]...))
msgID = string(hashedData[:20]) msgID = string(hashedData[:20])
assert.Equal(t, msgID, msgIDFunction(nMsg), "Got incorrect msg id") assert.Equal(t, msgID, s.msgIDFunction(nMsg), "Got incorrect msg id")
}
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
s := &Service{
cfg: &Config{
TCPPort: 0,
UDPPort: 0,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
d, err := helpers.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, s.genesisValidatorsRoot)
assert.NoError(t, err)
tpc := fmt.Sprintf(BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
// Create object to hash
combinedObj := append(params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:], topicLenBytes...)
combinedObj = append(combinedObj, tpc...)
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hashutil.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, s.msgIDFunction(pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
// Create object to hash
combinedObj = append(params.BeaconNetworkConfig().MessageDomainValidSnappy[:], topicLenBytes...)
combinedObj = append(combinedObj, tpc...)
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hashutil.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, s.msgIDFunction(nMsg), "Got incorrect msg id")
}
func TestExtractGossipDigest(t *testing.T) {
tests := []struct {
name string
topic string
want [4]byte
wantErr bool
error error
}{
{
name: "too short topic",
topic: "/eth2/",
want: [4]byte{},
wantErr: true,
error: errors.New("invalid topic format"),
},
{
name: "invalid digest in topic",
topic: "/eth2/zzxxyyaa/beacon_block" + "/" + encoder.ProtocolSuffixSSZSnappy,
want: [4]byte{},
wantErr: true,
error: errors.New("encoding/hex: invalid byte"),
},
{
name: "short digest",
topic: fmt.Sprintf(BlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f}) + "/" + encoder.ProtocolSuffixSSZSnappy,
want: [4]byte{},
wantErr: true,
error: errors.New("invalid digest length wanted"),
},
{
name: "valid topic",
topic: fmt.Sprintf(BlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy,
want: [4]byte{0xb5, 0x30, 0x3f, 0x2a},
wantErr: false,
error: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ExtractGossipDigest(tt.topic)
assert.Equal(t, err != nil, tt.wantErr)
if tt.wantErr {
assert.ErrorContains(t, tt.error.Error(), err)
}
assert.DeepEqual(t, tt.want, got)
})
}
} }


@@ -7,56 +7,82 @@ import (
types "github.com/prysmaticlabs/eth2-types" types "github.com/prysmaticlabs/eth2-types"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types" p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
) )
// SchemaVersionV1 specifies the schema version for our rpc protocol ID. // SchemaVersionV1 specifies the schema version for our rpc protocol ID.
const SchemaVersionV1 = "/1" const SchemaVersionV1 = "/1"
// SchemaVersionV2 specifies the next schema version for our rpc protocol ID.
const SchemaVersionV2 = "/2"
// Specifies the protocol prefix for all our Req/Resp topics. // Specifies the protocol prefix for all our Req/Resp topics.
const protocolPrefix = "/eth2/beacon_chain/req" const protocolPrefix = "/eth2/beacon_chain/req"
// Specifies the name for the status message topic. // StatusMessageName specifies the name for the status message topic.
const statusMessageName = "/status" const StatusMessageName = "/status"
// Specifies the name for the goodbye message topic. // GoodbyeMessageName specifies the name for the goodbye message topic.
const goodbyeMessageName = "/goodbye" const GoodbyeMessageName = "/goodbye"
// Specifies the name for the beacon blocks by range message topic. // BeaconBlocksByRangeMessageName specifies the name for the beacon blocks by range message topic.
const beaconBlocksByRangeMessageName = "/beacon_blocks_by_range" const BeaconBlocksByRangeMessageName = "/beacon_blocks_by_range"
// Specifies the name for the beacon blocks by root message topic. // BeaconBlocksByRootsMessageName specifies the name for the beacon blocks by root message topic.
const beaconBlocksByRootsMessageName = "/beacon_blocks_by_root" const BeaconBlocksByRootsMessageName = "/beacon_blocks_by_root"
// Specifies the name for the ping message topic. // PingMessageName specifies the name for the ping message topic.
const pingMessageName = "/ping" const PingMessageName = "/ping"
// Specifies the name for the metadata message topic. // MetadataMessageName specifies the name for the metadata message topic.
const metadataMessageName = "/metadata" const MetadataMessageName = "/metadata"
const ( const (
// V1 RPC Topics // V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method. // RPCStatusTopicV1 defines the v1 topic for the status rpc method.
RPCStatusTopicV1 = protocolPrefix + statusMessageName + SchemaVersionV1 RPCStatusTopicV1 = protocolPrefix + StatusMessageName + SchemaVersionV1
// RPCGoodByeTopicV1 defines the v1 topic for the goodbye rpc method. // RPCGoodByeTopicV1 defines the v1 topic for the goodbye rpc method.
RPCGoodByeTopicV1 = protocolPrefix + goodbyeMessageName + SchemaVersionV1 RPCGoodByeTopicV1 = protocolPrefix + GoodbyeMessageName + SchemaVersionV1
// RPCBlocksByRangeTopicV1 defines the v1 topic for the blocks by range rpc method. // RPCBlocksByRangeTopicV1 defines the v1 topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV1 = protocolPrefix + beaconBlocksByRangeMessageName + SchemaVersionV1 RPCBlocksByRangeTopicV1 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV1
// RPCBlocksByRootTopicV1 defines the v1 topic for the blocks by root rpc method. // RPCBlocksByRootTopicV1 defines the v1 topic for the blocks by root rpc method.
RPCBlocksByRootTopicV1 = protocolPrefix + beaconBlocksByRootsMessageName + SchemaVersionV1 RPCBlocksByRootTopicV1 = protocolPrefix + BeaconBlocksByRootsMessageName + SchemaVersionV1
// RPCPingTopicV1 defines the v1 topic for the ping rpc method. // RPCPingTopicV1 defines the v1 topic for the ping rpc method.
RPCPingTopicV1 = protocolPrefix + pingMessageName + SchemaVersionV1 RPCPingTopicV1 = protocolPrefix + PingMessageName + SchemaVersionV1
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method. // RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
RPCMetaDataTopicV1 = protocolPrefix + metadataMessageName + SchemaVersionV1 RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1
// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
// RPCBlocksByRootTopicV2 defines the v2 topic for the blocks by root rpc method.
RPCBlocksByRootTopicV2 = protocolPrefix + BeaconBlocksByRootsMessageName + SchemaVersionV2
// RPCMetaDataTopicV2 defines the v2 topic for the metadata rpc method.
RPCMetaDataTopicV2 = protocolPrefix + MetadataMessageName + SchemaVersionV2
)
// RPC errors for topic parsing.
const (
invalidRPCMessageType = "provided message type doesn't have a registered mapping"
) )
// RPCTopicMappings map the base message type to the rpc request. // RPCTopicMappings map the base message type to the rpc request.
var RPCTopicMappings = map[string]interface{}{ var RPCTopicMappings = map[string]interface{}{
RPCStatusTopicV1: new(pb.Status), // RPC Status Message
RPCGoodByeTopicV1: new(types.SSZUint64), RPCStatusTopicV1: new(pb.Status),
// RPC Goodbye Message
RPCGoodByeTopicV1: new(types.SSZUint64),
// RPC Block By Range Message
RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest), RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest),
RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq), RPCBlocksByRangeTopicV2: new(pb.BeaconBlocksByRangeRequest),
RPCPingTopicV1: new(types.SSZUint64), // RPC Block By Root Message
RPCMetaDataTopicV1: new(interface{}), RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
RPCBlocksByRootTopicV2: new(p2ptypes.BeaconBlockByRootsReq),
// RPC Ping Message
RPCPingTopicV1: new(types.SSZUint64),
// RPC Metadata Message
RPCMetaDataTopicV1: new(interface{}),
RPCMetaDataTopicV2: new(interface{}),
} }
// Maps all registered protocol prefixes. // Maps all registered protocol prefixes.
@@ -67,16 +93,24 @@ var protocolMapping = map[string]bool{
// Maps all the protocol message names for the different rpc // Maps all the protocol message names for the different rpc
// topics. // topics.
var messageMapping = map[string]bool{ var messageMapping = map[string]bool{
statusMessageName: true, StatusMessageName: true,
goodbyeMessageName: true, GoodbyeMessageName: true,
beaconBlocksByRangeMessageName: true, BeaconBlocksByRangeMessageName: true,
beaconBlocksByRootsMessageName: true, BeaconBlocksByRootsMessageName: true,
pingMessageName: true, PingMessageName: true,
metadataMessageName: true, MetadataMessageName: true,
}
// Maps all the RPC messages which are to be updated in Altair.
var altairMapping = map[string]bool{
BeaconBlocksByRangeMessageName: true,
BeaconBlocksByRootsMessageName: true,
MetadataMessageName: true,
} }
var versionMapping = map[string]bool{ var versionMapping = map[string]bool{
SchemaVersionV1: true, SchemaVersionV1: true,
SchemaVersionV2: true,
} }
// VerifyTopicMapping verifies that the topic and its accompanying // VerifyTopicMapping verifies that the topic and its accompanying
@@ -187,3 +221,17 @@ func (r RPCTopic) Version() string {
} }
return version return version
} }
// TopicFromMessage constructs the rpc topic from the provided message
// type and epoch.
func TopicFromMessage(msg string, epoch types.Epoch) (string, error) {
if !messageMapping[msg] {
return "", errors.Errorf("%s: %s", invalidRPCMessageType, msg)
}
version := SchemaVersionV1
isAltair := epoch >= params.BeaconConfig().AltairForkEpoch
if isAltair && altairMapping[msg] {
version = SchemaVersionV2
}
return protocolPrefix + msg + version, nil
}
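A hedged example of how an RPC caller could use the new helper to pick the versioned protocol ID; the encoding-suffix handling and function name are assumptions, only TopicFromMessage and the message name constant come from this diff:

package p2p

import (
	types "github.com/prysmaticlabs/eth2-types"
)

// Illustrative: pick the versioned blocks-by-range protocol ID for an epoch.
// Before AltairForkEpoch this yields the /1 topic, afterwards the /2 topic.
func blocksByRangeTopicSketch(currentEpoch types.Epoch) (string, error) {
	topic, err := TopicFromMessage(BeaconBlocksByRangeMessageName, currentEpoch)
	if err != nil {
		return "", err
	}
	// Callers append the encoding suffix before opening the stream (assumed).
	return topic + "/ssz_snappy", nil
}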


@@ -1,10 +1,15 @@
package p2p package p2p
import ( import (
"fmt"
"strings"
"testing" "testing"
eth2types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/types" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert" "github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require" "github.com/prysmaticlabs/prysm/shared/testutil/require"
) )
@@ -34,9 +39,9 @@ func TestTopicDeconstructor(t *testing.T) {
}, },
{ {
name: "valid status topic", name: "valid status topic",
topic: protocolPrefix + statusMessageName + SchemaVersionV1, topic: protocolPrefix + StatusMessageName + SchemaVersionV1,
expectedError: "", expectedError: "",
output: []string{protocolPrefix, statusMessageName, SchemaVersionV1}, output: []string{protocolPrefix, StatusMessageName, SchemaVersionV1},
}, },
{ {
name: "malformed status topic", name: "malformed status topic",
@@ -46,13 +51,13 @@ func TestTopicDeconstructor(t *testing.T) {
}, },
{ {
name: "valid beacon block by range topic", name: "valid beacon block by range topic",
topic: protocolPrefix + beaconBlocksByRangeMessageName + SchemaVersionV1 + "/ssz_snappy", topic: protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV1 + "/ssz_snappy",
expectedError: "", expectedError: "",
output: []string{protocolPrefix, beaconBlocksByRangeMessageName, SchemaVersionV1}, output: []string{protocolPrefix, BeaconBlocksByRangeMessageName, SchemaVersionV1},
}, },
{ {
name: "beacon block by range topic with malformed version", name: "beacon block by range topic with malformed version",
topic: protocolPrefix + beaconBlocksByRangeMessageName + "/v" + "/ssz_snappy", topic: protocolPrefix + BeaconBlocksByRangeMessageName + "/v" + "/ssz_snappy",
expectedError: "unable to find a valid schema version for /eth2/beacon_chain/req/beacon_blocks_by_range/v/ssz_snappy", expectedError: "unable to find a valid schema version for /eth2/beacon_chain/req/beacon_blocks_by_range/v/ssz_snappy",
output: []string{""}, output: []string{""},
}, },
@@ -73,3 +78,45 @@ func TestTopicDeconstructor(t *testing.T) {
}) })
} }
} }
func TestTopicFromMessage_CorrectType(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig()
forkEpoch := eth2types.Epoch(100)
bCfg.AltairForkEpoch = forkEpoch
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = eth2types.Epoch(100)
params.OverrideBeaconConfig(bCfg)
// Garbage Message
badMsg := "wljdjska"
_, err := TopicFromMessage(badMsg, 0)
assert.ErrorContains(t, fmt.Sprintf("%s: %s", invalidRPCMessageType, badMsg), err)
// Before Fork
for m := range messageMapping {
topic, err := TopicFromMessage(m, 0)
assert.NoError(t, err)
assert.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
_, _, version, err := TopicDeconstructor(topic)
assert.NoError(t, err)
assert.Equal(t, SchemaVersionV1, version)
}
// After Fork
for m := range messageMapping {
topic, err := TopicFromMessage(m, forkEpoch)
assert.NoError(t, err)
if altairMapping[m] {
assert.Equal(t, true, strings.Contains(topic, SchemaVersionV2))
_, _, version, err := TopicDeconstructor(topic)
assert.NoError(t, err)
assert.Equal(t, SchemaVersionV2, version)
} else {
assert.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
_, _, version, err := TopicDeconstructor(topic)
assert.NoError(t, err)
assert.Equal(t, SchemaVersionV1, version)
}
}
}

Some files were not shown because too many files have changed in this diff.