Compare commits


294 Commits

Author SHA1 Message Date
nisdas
765122dc05 fix one more test 2021-08-04 22:27:26 +08:00
nisdas
a283fa58fb fixes 2021-08-04 21:25:23 +08:00
nisdas
12fe2d91ed fix tests 2021-08-04 19:47:35 +08:00
nisdas
d44905329c Merge branch 'hf1' of https://github.com/prysmaticlabs/geth-sharding into interopFixes 2021-08-04 16:22:21 +08:00
Nishant Das
e2238bd6d1 Fix Spec Edge Case For Altair (#9295)
* fix edge case

* Update validator/client/sync_committee.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-08-04 16:22:01 +08:00
nisdas
c0310ad534 remove tracer 2021-08-04 15:25:59 +08:00
nisdas
e10ac0af02 Merge branch 'hf1' of https://github.com/prysmaticlabs/geth-sharding into interopFixes 2021-08-04 15:22:11 +08:00
nisdas
6e358da5ed checkpoint fixes from the last few days 2021-08-04 15:19:45 +08:00
terence tsao
df291e2ffb Update BUILD.bazel 2021-07-30 10:49:26 -07:00
terence tsao
5ba5b303d3 Fix spec test 2021-07-30 10:49:16 -07:00
terence tsao
f2ce4dcab3 Fix validator build 2021-07-30 10:19:10 -07:00
terence tsao
8765c3ac42 Add contexts to sync committee functions 2021-07-30 09:53:45 -07:00
terence tsao
57fff2d88e Update BUILD.bazel 2021-07-30 09:31:47 -07:00
terence tsao
c010a972e7 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-30 09:31:24 -07:00
terence tsao
c02ed805b0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-29 10:32:53 -07:00
Raul Jordan
93adf4980a Merge branch 'develop' into hf1 2021-07-29 09:26:19 -05:00
nisdas
3fe969992a fix bad merge 2021-07-29 19:25:34 +08:00
nisdas
2135108830 deepsource 2021-07-29 18:35:07 +08:00
nisdas
4c146dc896 gaz 2021-07-29 18:31:56 +08:00
nisdas
042a3cda02 fix again 2021-07-29 18:31:31 +08:00
nisdas
b8676480f0 fix again 2021-07-29 16:56:42 +08:00
nisdas
711022d34e fix p2p 2021-07-29 16:47:25 +08:00
nisdas
eec93be4ed fix 2021-07-29 16:33:20 +08:00
nisdas
21d096622f fix tests 2021-07-29 15:51:17 +08:00
nisdas
62846d61b8 fix spec tests 2021-07-29 15:45:12 +08:00
nisdas
a228a407be fix tests so far 2021-07-29 15:35:11 +08:00
Raul Jordan
f527b676da val tests pass 2021-07-28 22:43:44 -05:00
Raul Jordan
5bd4e10dd6 fix build 2021-07-28 22:04:54 -05:00
Raul Jordan
d19e13352b beacon builds 2021-07-28 21:56:10 -05:00
rauljordan
6bda9a0bf2 fix conflict in proto file 2021-07-28 20:19:46 -05:00
rauljordan
2da6b7bb97 attempt fix merge confs 2021-07-28 20:19:00 -05:00
Raul Jordan
7faed861c4 Merge branch 'develop' into hf1 2021-07-28 11:30:41 -05:00
nisdas
8b9129d84e merge fixes 2021-07-28 23:11:49 +08:00
nisdas
8b219b14da Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-07-28 20:32:15 +08:00
Raul Jordan
5bf9bd3d73 fix up conflicts with develop 2021-07-27 12:27:24 -05:00
Raul Jordan
59f12c8ac1 sync develop 2021-07-27 12:02:49 -05:00
terence tsao
1094ca0838 Update field_trie.go 2021-07-27 08:52:51 -07:00
terence tsao
ebe4b309c0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-27 08:52:42 -07:00
rauljordan
ea94f0e70d add changes from develop 2021-07-26 21:16:03 -05:00
rauljordan
47443e130d merge slasher proto 2021-07-26 21:13:44 -05:00
rauljordan
4dfa5c2757 rem slashing proto 2021-07-26 21:02:09 -05:00
terence tsao
1851d40f74 Go fmt 2021-07-26 17:31:59 -07:00
terence tsao
eee1d47655 Fix conflicts 2021-07-26 17:14:57 -07:00
terence tsao
7ce76652fb Merge 2021-07-26 16:57:49 -07:00
prestonvanloon
19e6f0c19a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-23 11:46:41 -05:00
Raul Jordan
6470e2718a Merge branch 'develop' into hf1 2021-07-23 11:01:22 -05:00
Raul Jordan
30cd5c076e fix confs 2021-07-23 11:00:47 -05:00
terence tsao
03d8af5cda Core/altair: sync committee tests clean up 2021-07-23 08:27:32 -07:00
Raul Jordan
194f0cb76d merge with develop 2021-07-23 10:04:22 -05:00
terence tsao
2a0e8510d4 Update skip_slot_cache_test.go 2021-07-23 07:32:36 -07:00
Nishant Das
5e35f778b9 unregister rpc topics (#9241) 2021-07-23 13:00:44 +08:00
terence tsao
972ae7f169 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-07-22 08:12:58 -07:00
terence tsao
80fafaddff Move TestSyncCommitteePeriod 2021-07-22 08:12:35 -07:00
rauljordan
e6ecdfde0d regen protos 2021-07-21 17:38:01 -05:00
rauljordan
1daf51788d merge with develop 2021-07-21 17:34:53 -05:00
terence tsao
35055539a7 Revamp head state + sync committee operations and the bug fix (#9200) 2021-07-21 08:04:18 -07:00
Nishant Das
81ab3ca46c Enable Sync Evaluator For E2E (#9240) 2021-07-21 06:56:26 -07:00
terence tsao
5895b10678 Fix next epoch sync duty calculation (#9233)
* Fix next epoch duty assignment

* Regression test

* Update comment

* Go fmt

* Fix sync committee disable namings

* Fix test comment
2021-07-21 10:08:59 +08:00
prestonvanloon
7ea645ed37 Merge commit '8d1c9fe1e22630d9fd03c4be861a1d33f35b7a11' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 09:46:46 -05:00
prestonvanloon
a900792160 Merge commit '412ddbb29ef73466b880e7ba4101252ac88cf078' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 09:44:45 -05:00
prestonvanloon
cd87bfd8ab Merge commit '8a7010f5aaeb95abd1f7036fcd356817a2b339fe' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 09:38:53 -05:00
terence tsao
98477a0286 Merge commit '1beb0071b5730f0ec0fb1beb9056c06d55ef562b' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 07:26:55 -07:00
prestonvanloon
2d1a63d9f4 Merge commit '2a0c4e0d5fe8e8a422e3bb47a7a692db56ec55c9' of github.com:prysmaticlabs/prysm into hf1 2021-07-20 08:58:58 -05:00
terence tsao
52be270f0a Update TargetAggregatorsPerSyncSubcommittee to 16 (#9229) 2021-07-20 10:56:06 +08:00
prestonvanloon
895a86fd53 Merge commit '15bfcf8ff6f43db6029c5d8eee0b71a7bead86b0' of github.com:prysmaticlabs/prysm into hf1 2021-07-19 21:52:37 -05:00
terence tsao
af6246a5f9 Merge commit '15704053e112b8bb89472c75cba77e67e26e2c49' of github.com:prysmaticlabs/prysm into hf1 2021-07-19 18:29:13 -07:00
terence tsao
5e80ceeff9 Remove unused sync committee feed type 2021-07-19 14:01:00 -07:00
Nishant Das
ee661971f0 Add Fixes Observed From Devnet (#9205)
* add changes and test cases

* gaz

* add one more test

* comment

* deep source

* fix
2021-07-18 12:32:32 +08:00
nisdas
cc7e36776d gaz 2021-07-17 18:34:12 +08:00
nisdas
14a9d9a1ad Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-07-17 18:33:45 +08:00
terence tsao
2b9fb29ed2 Sync with develop 2021-07-16 15:20:45 -07:00
Nishant Das
9300d1026f Add E2E Support For Altair (#9173) 2021-07-16 08:06:54 -07:00
Nishant Das
48345eb68e Fix Gossip Message ID Versioning (#9206) 2021-07-16 08:03:35 -07:00
Nishant Das
60d14f1806 Allow For Proper Registration and Deregistration of Topics (#9194)
* add fixes

* gaz

* fix

* fix

* Update beacon-chain/sync/fork_watcher.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* preston's review

* add unit test

* gaz

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2021-07-16 10:50:40 +08:00
terence tsao
d80d4d01a6 Update spec test (#9198) 2021-07-15 16:13:59 -07:00
terence tsao
275192680f Fix build 2021-07-15 07:10:19 -07:00
terence tsao
9ca958064e Fix build 2021-07-15 06:06:03 -07:00
Nishant Das
604958da6c Add List Blocks RPC For Altair (#9186)
* add new method in

* fix

* preston's review

* gaz
2021-07-15 13:22:40 +08:00
terence tsao
ade94444f2 Add active balance to committee cache (#9187) 2021-07-14 18:32:17 -07:00
terence tsao
4df2f4c790 Fix signSlotWithSelectionProof signature 2021-07-14 15:56:50 -07:00
terence tsao
3f8f5edb3f Clean up deposits 2021-07-14 13:25:28 -07:00
terence tsao
0ad4e433a5 Sync with develop 2021-07-14 10:08:08 -07:00
terence tsao
1be2503e82 Disallow sync committee cache update if state root is 0s (#9190) 2021-07-14 08:00:23 -07:00
terence tsao
0dd228bb94 Sync committee pool can retrieve without pop (#9179)
* `RetrieveByKey` for priority queue

* Sync committee pool to use `RetrieveByKey`

* Update message_test.go

* remove err

* fix test

Co-authored-by: nisdas <nishdas93@gmail.com>
2021-07-13 16:13:21 +08:00
terence tsao
78450ea557 Remove validator pubkey to index map (#9177) 2021-07-12 13:44:09 -07:00
terence tsao
f0e6d4a0bd Sync committee cache: fill on miss (#9176) 2021-07-12 10:49:49 -07:00
terence tsao
97901c90a5 Update sync committee cache using block root at boundary slot - 1 (#9171) 2021-07-12 08:06:47 -07:00
nisdas
1379dbfc23 Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-07-12 14:01:52 +08:00
terence tsao
19dbc7e249 Revamp head state + sync committee operations (#9167) 2021-07-09 15:02:03 -07:00
terence tsao
76a70065f2 Add more metrics (#9168) 2021-07-09 08:01:15 -05:00
Nishant Das
51f513b246 Remove Non Spec Config Variables (#9170)
* only one way

* cleanup

* extra

* fix

* fix test
2021-07-09 07:57:26 -05:00
Nishant Das
2b349a1b06 Interop Fixes (#9163) 2021-07-08 20:33:44 -07:00
terence tsao
a819caca16 State gen's reply can upgrade state (#9164) 2021-07-08 15:38:30 -07:00
Nishant Das
e7116d4ea8 Fix Goimports and DeepSource (#9148)
* add goimports

* fix again

* imports

* deep source

* assignment

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-07-08 13:31:16 -05:00
prestonvanloon
f8cd989161 Fix build 2021-07-08 10:42:50 -05:00
prestonvanloon
4c19265ac5 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-08 10:33:11 -05:00
Nishant Das
f361bf781f Altair Networking Support (#8994)
* add method and test

* fix message type across forks

* invert fork version schedule

* add support for forks in metadata

* start fixing discovery

* fix fork version stuff

* fix up fork digest usage

* add fork transition mechanism

* gaz

* add fixes to mappings

* checking in current progress

* fix aggregates

* fix scheduling

* fix

* fix copy

* fix rpc stuff

* fixes

* add new topics and files

* gaz

* add pool

* finish pipeline for sync aggregate proofs

* gaz

* Update beacon-chain/cache/sync_subnet_ids.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* add support for sync committee pipeline

* fix

* check better

* fix

* fix again

* remove redundant code

* remove older code

* checkpoint

* get it working

* gaz

* regen

* checkpoint

* gaz

* build

* fix edge case

* fix all errors

* sync with hf1

* fix issues

* gaz

* fix bad rpc respones

* fix it all

* hash

* save everything

* fix all remaining tests

* comments

* fix build

* terence's review

* preston's review

* build

* gaz

* gaz

* add signature test

* add in more tests

* gaz

* add more tests

* fix goimports

* Revert "fix goimports"

This reverts commit 41bf7b4a5c.

* fix tests

* fix all tests

* fix all tests

* fix everything

* fix last test

* fix rpc registration

* fix build

* fix build

* remove outdated method

* add recovery

* Update subscriber.go

* terence's review

* fix bad merge

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-07-08 11:25:41 +08:00
terence tsao
a458e556e0 Align sync committee pipelines to alpha8 (#9160) 2021-07-07 10:54:48 -07:00
Nishant Das
773b259cd5 add in (#9158)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-07-07 11:03:16 -05:00
terence tsao
2bb3da1ba3 Clean up sync committee p2p pipelines (#9153) 2021-07-07 07:35:01 -07:00
Raul Jordan
47367d98b4 Resolve Deep Source Warnings (#9154) 2021-07-07 06:33:40 -07:00
terence tsao
1ff18c07a4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-06 20:17:17 -07:00
terence tsao
279a95deba Fix spec tests 2021-07-06 13:26:17 -07:00
terence tsao
c0bfa6ef79 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1
# Conflicts:
#	beacon-chain/core/state/BUILD.bazel
#	beacon-chain/core/state/transition.go
#	beacon-chain/core/state/transition_no_verify_sig.go
#	beacon-chain/db/kv/BUILD.bazel
#	beacon-chain/db/kv/blocks.go
#	beacon-chain/db/kv/blocks_test.go
#	beacon-chain/rpc/eth/v1/beacon/BUILD.bazel
#	beacon-chain/rpc/prysm/v1alpha1/beacon/BUILD.bazel
#	beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel
#	beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go
#	beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go
#	beacon-chain/rpc/service.go
#	beacon-chain/state/stategen/BUILD.bazel
#	beacon-chain/state/stategen/replay.go
#	beacon-chain/sync/BUILD.bazel
#	beacon-chain/sync/rpc_chunked_response.go
#	proto/eth/v1alpha1/wrapper/beacon_block.go
#	shared/interfaces/BUILD.bazel
#	shared/testutil/BUILD.bazel
#	shared/testutil/block_test.go
2021-07-06 13:15:35 -07:00
terence tsao
7e961c2be9 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-07-06 11:09:43 -07:00
terence tsao
c7c7f9bf1b Fix missing 3rd arguments for gomock's Do 2021-07-06 11:09:32 -07:00
Nishant Das
7ce85cac31 Fix Stategen For Altair (#9152) 2021-07-06 10:43:36 -07:00
terence tsao
2d836f485d Move test only functions to testutil (#9137) 2021-07-06 12:08:52 -05:00
terence tsao
780253b786 Go fmt 2021-07-05 10:10:08 -07:00
terence tsao
710bb98575 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-07-05 09:38:49 -07:00
Nishant Das
d5387851d0 Validator Fixes From Interop (#9110)
* validator fixes

* gaz

* fix validator

* pull in fixes

* add in test

* terence's review

* fix references

* fix all tests
2021-07-02 13:50:16 +08:00
terence tsao
ab8dd3788f Altair: Green minimal spec tests (#9078) 2021-06-30 19:28:44 -07:00
terence tsao
bf1b550b7d Green Altair e2e tests (#9131) 2021-06-30 16:33:08 -07:00
prestonvanloon
705564108c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-30 14:27:43 -05:00
terence tsao
3df82e7540 Use sync committee start period root as cache key (#9128) 2021-06-30 08:54:31 -07:00
Raul Jordan
d0a749ce4b ensure build 2021-06-30 10:49:54 -05:00
Raul Jordan
081c80998c fix conflicts and merge with develop 2021-06-30 10:44:18 -05:00
terence tsao
8c62f10b74 Altair: proposer pack sync aggregate (#9124)
* Pack sync aggregate into beacon block

* Build.bazel
2021-06-29 15:22:16 -05:00
terence tsao
e232b3ce30 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 12:14:37 -07:00
terence tsao
17153bb4e9 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 10:26:38 -07:00
Raul Jordan
329a45c06a fix confs 2021-06-29 12:24:47 -05:00
Raul Jordan
1c82394a69 sync changes 2021-06-29 12:21:09 -05:00
terence tsao
856081c80c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 09:17:56 -07:00
terence tsao
6fff327864 Gazelle 2021-06-29 09:12:21 -07:00
terence tsao
e2879f8352 Resolve conflict 2021-06-29 09:12:14 -07:00
terence tsao
523fe58f61 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-29 09:10:07 -07:00
terence tsao
04a303c8d2 Add logging (#9115) 2021-06-29 08:27:41 +08:00
Raul Jordan
0844bd62ea sync changes 2021-06-28 13:47:42 -05:00
Nishant Das
816dc47b17 Add New V2 Streaming Blocks Method (#9094)
* add latest changes in

* gaz

* regen mocks

* build file

* gazelle

* import

* import

* add iface check

* add one more case

* add one more case

* add proper type

* fix wierd imports

* fix mocks

* go imports

* fix build

* fix go mocks

* pass in non nil objects

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-28 16:06:53 +08:00
terence tsao
caeec851d4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-27 10:58:31 -07:00
terence tsao
062933af35 Update spec test to alpha8 (#9103) 2021-06-25 13:21:58 -07:00
terence tsao
169573c32e Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-25 12:27:56 -07:00
Raul Jordan
e7a7b2861e Merge branch 'develop' into hf1 2021-06-24 18:12:57 -06:00
Nishant Das
fe6c80fe95 Add Sync Subnet Id Cache (#9089)
* add stuff

* remove log

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-24 11:56:02 -05:00
Nishant Das
2f52dfe96e Add Updated Fork Utils (#9090)
* add fork utils

* fix it

* more unit tests

* fix test

* fix test

* fix tests

* fix tests

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-24 11:00:27 -05:00
Raul Jordan
93a7b96f16 skip test for validator at current release 2021-06-24 09:43:00 -06:00
Raul Jordan
f078b62c3e Merge branch 'develop' into hf1 2021-06-24 09:42:12 -06:00
Preston Van Loon
f476c39708 Use same version of spectests and spec in WORKSPACE (#9084)
* Use same version of spectests and spec in WORKSPACE

* Fix test loading eth2 spec files
2021-06-23 14:59:32 -05:00
terence tsao
c02e507422 Use correct fork version object for domain data (#9081)
* Use correct fork version object

* Use p2putils

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-06-23 13:40:03 -05:00
Raul Jordan
ece07e5fbb Implement Beacon Validator RPC V2 In Proper Package (#9083)
* implement v2 endpoints in own folder

* passing tests

* naming
2021-06-23 13:15:23 -05:00
prestonvanloon
6bbe3dbd10 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-23 12:48:22 -05:00
Raul Jordan
18bfc2a34e move rpc packages (#9082) 2021-06-23 12:27:19 -05:00
Raul Jordan
b774af9535 Altair: Use Validator Index for Sync Committee Caching (#9074)
* begin using validator indices for sync committee caching

* amend to use validator indices in rpc

* cache tests pass

* refactor for deduplication

* broken file

* amend core helper tests

* fix tests

* comment

* fix fuzzer iface

* imports

* fix up tests
2021-06-23 12:09:38 -05:00
Raul Jordan
719a5fca02 V2 Prysm Proto Definitions (#9062)
* begin v2 prysm protos

* define

* v2 protos build

* add in generated files

* imports

* do not modify v1alpha1

* revert imports

* use alias pattern

* attempt alias

* attempt new fastssz

* edit

* versioning all works

* edit protos

* edit terms

* edit to reflect proto versioning

* fix more build issues

* beacon chain builds

* beacon chain and shared packages build

* begin helping validator client build

* define protos

* gaz

* build

* tidy check

* gazelle

* revert
2021-06-23 11:27:06 -05:00
Nishant Das
b4a0e4375a add new changes (#9079) 2021-06-23 22:35:35 +08:00
terence tsao
4d276d2fdf Add sync committee head state cache (#9077) 2021-06-23 07:57:11 +08:00
terence tsao
8797179cfb Altair: Update sync committee cache (#9043)
* Update sync committee cache

* Remove debug log

* Gazelle
2021-06-23 07:39:22 +08:00
terence tsao
7cc38108aa Remove copy when push to priority queue (#9076)
* No reflect copy and clean up `SaveSyncCommitteeMessage`

* Update BUILD.bazel

* Go mod tidy
2021-06-23 07:38:30 +08:00
Raul Jordan
365ced285e Merge branch 'develop' into hf1 2021-06-22 14:23:33 -06:00
terence tsao
97e5730fd9 Altair: validator submit SignedContributionAndProof (#9064)
* Contribution and proof protos

* Completed `GetSyncCommitteeContribution` and `SubmitSignedContributionAndProof`

* Update beacon_validator_client_mock.go

* sync committee tests

* Update deposit_test.go
2021-06-22 18:58:56 +08:00
terence tsao
3919b49000 Fixed failing tests from resolving conflict 2021-06-21 21:56:13 -06:00
terence tsao
7f13396e44 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-21 08:10:13 -07:00
terence tsao
ae3e5718e6 Altair: check validator is an aggregator for sync committee (#9058) 2021-06-18 11:09:26 -07:00
Nishant Das
97a49240ba Add Block Proposals In Altair (#9056)
* add protobufs

* add tests

* mockgen

* fix mocks

* terence's review

* add ugly hack

* hack

* make ineffassign happy

* add back
2021-06-18 15:42:07 +08:00
terence tsao
232d519445 Fix sync committee duty off by 1 (#9057)
* Fix sync committee duty off by 1

* Update sync_committee_test.go
2021-06-18 11:57:55 +08:00
terence tsao
afd815bb5d Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-17 11:00:57 -07:00
terence tsao
56d383a354 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-06-17 10:44:25 -07:00
terence tsao
733023df03 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-17 10:09:04 -07:00
Nishant Das
9d9ce13753 Use Proper Test Util Methods For Altair (#9054)
* fix

* fix block

* terence's review

* fix
2021-06-17 14:22:12 +08:00
terence tsao
975e7a76bf Altair: Validator can submit sync committee msg (#9048)
* Grpc for submitting sync committee message

* RPC server implementation

* RPC client validator implementation

* Tests

* Use slot to get block root

* Gazelle

* Update tests

* Update tests

* Underflow check
2021-06-17 12:21:33 +08:00
Nishant Das
20ae23bd42 Add Sync Committee Pipelines (#9016)
* add sync pipelines

* gaz

* fix iface

* finally have 1 test working

* add all test cases

* gaz

* fix visibility

* ugly hack

* add all new test cases

* add more stuff

* all tests passing

* gaz

* fix lint
2021-06-17 10:42:34 +08:00
terence tsao
63cf429fa0 Add fallback check to sync committee helper (#9051)
* Add fallback to helper for sync committee check

* Gazelle

* Satisfy fuzz test

* Errors in its own file

* Update BUILD.bazel

* Gazelle
2021-06-17 10:28:21 +08:00
terence tsao
2aab4e2efe Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-16 10:44:45 -07:00
terence tsao
e2156f25e0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-14 20:30:50 -07:00
terence tsao
a537833f75 Altair: add sync committee to duty rpc (#9037)
* Helpers to update sync committee cache

* Update grpc

* Update server implementation

* Update client implementation

* Fix test setup & deepsrc complains
2021-06-14 21:48:31 +00:00
terence tsao
61bf95e4e2 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-14 10:54:00 -07:00
terence tsao
357d3f3b6a Altair: Reuse validator wrapper (#9027) 2021-06-11 13:44:43 -07:00
terence tsao
b0bbfcab7f Run go mod tidy 2021-06-11 13:06:52 -07:00
terence tsao
06801a5230 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-11 12:01:27 -07:00
terence tsao
278857d576 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-10 20:29:44 -07:00
terence tsao
9c5c70fb32 Altair: sign block can handle versioning (#9013) 2021-06-10 15:44:49 -07:00
terence tsao
54326af141 Sync committee to use priority queue (#9018) 2021-06-10 15:44:28 -07:00
terence tsao
6020682ad1 Altair: beacon block operation in DB (#9012) 2021-06-10 15:16:32 -07:00
terence tsao
3f45d54986 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-09 14:27:24 -07:00
terence tsao
ecb51dc55d Altair: save state in DB (#9004)
* Altair: save state in DB

* Simplify return for `hasAltairKey`

* Go fmt
2021-06-09 11:34:42 +08:00
terence tsao
cbf4aeb859 Align to alpha7 (#9005) 2021-06-09 09:17:31 +08:00
terence tsao
b107bd2a5a Copy sync aggregate (#9003) 2021-06-09 09:17:12 +08:00
terence tsao
a5e2c3f551 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-08 08:06:20 -07:00
terence tsao
efffaeb359 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-06-08 08:05:12 -07:00
terence tsao
144576cf36 Altair Add back minimal spec tests (#8993) 2021-06-08 07:22:56 -07:00
Victor Farazdagi
fcf2be08d8 Altair: ssz_static tests (#8847) 2021-06-07 20:57:05 -07:00
terence tsao
2b5cd139f0 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-07 10:53:12 -07:00
terence tsao
77a4fdb509 Altair: fork transition and spec tests (#8919) 2021-06-04 15:43:36 -07:00
Nishant Das
a14d37b0ad Handle Multiple Block Types in Req/Resp (#8905)
* add in fork version

* fix

* add in for minimal

* params added in

* fix up

* checkpoint changes

* clean up

* add new stuff

* add new stuff

* fix rpc context

* fix context

* trigger gh hook

* trigger gh hook

* fix all tests

* fix perms

* support fast ssz everywhere

* fix tests

* fix

* map checks

* add initializer

* terence's review

* add metadata test

* add tests

* fix imports

* fix build

* remove junk
2021-06-04 11:30:53 +08:00
terence tsao
38e28af51e Gazelle 2021-06-03 19:34:27 -07:00
terence tsao
6dbe6cfd8c Add caches for sync committee signature and contribution (#8982) 2021-06-03 17:16:00 -07:00
rauljordan
c156c1fb91 rewrite imports and use builtin ethereumapis 2021-06-03 14:50:27 -05:00
rauljordan
393a744091 merge in develop 2021-06-03 14:33:43 -05:00
terence tsao
f83993b211 Altair: Utilize block interface for clean up (#8969) 2021-06-02 09:31:18 -07:00
nisdas
41433f8b2e Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-06-02 14:02:51 +08:00
terence tsao
95f62de465 Add sync committee test (#8967) 2021-06-01 12:32:12 -07:00
terence tsao
fbb140eff7 Gazelle 2021-06-01 11:22:25 -07:00
terence tsao
22483a285a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-06-01 11:10:21 -07:00
terence tsao
1d835d9859 Altair: Add Altair block wrapper (#8949)
* Add altair block wrapper

* Update comment

* fix test

Co-authored-by: nisdas <nishdas93@gmail.com>
2021-05-28 17:25:51 +08:00
terence tsao
98f8ab331a Altair: More spec tests (#8941) 2021-05-26 14:10:01 -07:00
terence tsao
0e88418b12 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-26 11:34:31 -07:00
terence tsao
ef3ff6f1d5 Sync with develop 2021-05-26 10:44:10 -07:00
terence tsao
7c22496c65 Fix sha256 2021-05-24 11:00:17 -07:00
terence tsao
c5256d09e0 Altair: update spec tests to alpha.6 (#8931) 2021-05-24 10:46:20 -07:00
terence tsao
d3d1eb833e Add fork spec tests (#8917) 2021-05-21 14:35:22 -05:00
terence tsao
2d9fd4ea29 Update to alpha.5 (#8915) 2021-05-20 13:32:51 -07:00
terence tsao
be168e4034 Altair: Add upgrade logic and tests (#8912) 2021-05-20 12:22:33 -07:00
terence tsao
f7c2b9c197 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-20 11:00:10 -07:00
Nishant Das
89b7cf9be3 Add In Altair Fork Data (#8903)
* add in fork version

* fix

* add in for minimal

* params added in

* fix up

* radek's review
2021-05-20 16:56:19 +08:00
Preston Van Loon
3591f85a66 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-19 12:34:58 -05:00
terence tsao
6ba5ad0325 Altair: align to alpha.4 release (#8900) 2021-05-18 11:14:56 -07:00
Preston Van Loon
76b16a8989 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-18 12:41:43 -05:00
terence tsao
74a19741b4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-18 09:16:24 -07:00
nisdas
fdb68c482e Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into hf1 2021-05-18 13:23:49 +08:00
terence tsao
b51729bd2f Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-05-17 14:08:26 -07:00
terence tsao
aef1269223 Sync with develop, remove gogo 2021-05-17 14:08:18 -07:00
terence tsao
9fc1683ec7 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-17 12:01:34 -07:00
terence tsao
3790c5edb2 Altair: Add validator helpers for sync committee (#8891)
* Add validator helpers for sync committee

* Fix `TestAssignedToSyncCommittee`

* Update beacon-chain/core/altair/sync_committee.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Better error message

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2021-05-17 13:53:38 -05:00
terence tsao
c6c7f8234d Altair: sync committee cache (#8884)
* Starting sync committee cache

* Update index position in committee

* Better comments and tests

* Go fmt

* Manually update BUILD.bazel

* Missing a comma with gazelle

* Unhandle error

* Add cache disabled for fuzzing

* Better place for to lock
2021-05-17 18:23:52 +00:00
terence tsao
c66ea88da8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-17 10:33:04 -07:00
terence tsao
9a8facd76b Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-13 10:54:26 -07:00
terence tsao
ca7e0e4807 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-12 16:10:44 -07:00
terence tsao
6acedb7dfd Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-10 09:41:15 -07:00
terence tsao
a3183bc33e Add SyncCommitteeSigningData Protobuf (#8870) 2021-05-07 17:35:39 +00:00
terence tsao
f6caf627e1 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-05-06 22:25:41 -07:00
Victor Farazdagi
fa696a883d Altair: "sanity" spec tests (#8831)
* Altair: "sanity" spec tests

* Apply suggestions from code review

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

* fix imports

* update cache disable

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2021-05-03 22:19:22 +03:00
terence tsao
cbbf188637 Fix an epoch offset for sync committee rotation (#8857)
* Fix an epoch offset for sync committee rotation

* Regression condition
2021-05-03 21:53:59 +03:00
terence tsao
4d3e65bdcd Altair: Fix field roots computations (#8856)
* Fix sync committee and participation roots

* Skip HTR test because generic fssz impelemntation is wrong

* Gazelle

* Update beacon-chain/state/state-altair/field_root_sync_committee.go

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2021-05-03 11:23:01 +03:00
terence tsao
0be2bde4cc Altair: Add Eth2FastAggregateVerify (#8836)
* Add

* Add test

* Add stub

* Add stub
2021-04-30 08:35:51 +02:00
terence tsao
d5662556bc Use process inactivity updates (#8841) 2021-04-30 08:28:46 +02:00
terence tsao
26a10ca56e Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-04-28 07:20:52 -07:00
terence tsao
bda70352ca Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-28 07:20:33 -07:00
Victor Farazdagi
fbd45dbf50 Altair: "epoch_processing" spec tests (#8806)
* effective_balance_updates tests

* eth1_data_reset tests

* historical_roots_update tests

* justification_and_finalization tests

* randao_mixes_reset tests

* registry -> registry_updates

* registry_updates tests

* rewards_and_penalties tests

* slashings and slashings_reset tests
2021-04-28 17:15:21 +03:00
Victor Farazdagi
119ef0f8fa Merge branch 'develop' into hf1 2021-04-27 22:20:04 +03:00
Victor Farazdagi
04f38324ba Altair: "rewards" spec tests (#8818)
* Altair: "rewards" spec tests

* better listing of tests

* update RunPrecomputeRewardsAndPenaltiesTests

* remove redundant code
2021-04-27 22:19:25 +03:00
terence tsao
fa27b6e24c Apply inactivity when validator is slashed (#8820)
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2021-04-27 09:22:19 +03:00
terence tsao
fe647e99fc Move ProcessInactivityScores outside of ProcessRewards (#8813)
* Move ProcessInactivityScores out of rewards

* Fix tests
2021-04-27 07:57:25 +03:00
terence tsao
bbcaa7eaf2 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-25 08:44:34 -07:00
terence tsao
1c1b2eb811 Fix precision (#8808) 2021-04-23 13:08:28 -07:00
Victor Farazdagi
427e792073 Altair: "operations" spec tests (#8795)
* altair/operations/attestation test

* altair/operations/proposer_slashing tests

* altair/operations/block_header tests

* altair/operations/deposit tests

* altair/operations/proposer_slashing tests

* altair/operations/voluntary_exit tests

* ignoring minimal/altair for time being
2021-04-23 17:40:15 +03:00
Victor Farazdagi
463481febe Merge branch 'develop' into hf1 2021-04-23 15:14:15 +03:00
Victor Farazdagi
6e41923388 Merge branch 'develop' into hf1 2021-04-22 20:20:16 +03:00
Victor Farazdagi
17798f878a Merge branch 'develop' into hf1 2021-04-22 18:21:27 +03:00
terence tsao
d502f0825a Altair: ExecuteStateTransitionNoVerifyAnySig for state transition (#8796) 2021-04-21 14:29:00 -07:00
terence tsao
96fe2b76bf Altair: base reward implementation (#8797)
* Add altair basereward

* Test
2021-04-21 23:05:21 +03:00
terence tsao
a51a4ca9eb Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-04-21 09:34:36 -07:00
terence tsao
9dd8a1737c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-21 07:50:14 -07:00
Victor Farazdagi
c97f74ccef Merge branch 'develop' into hf1 2021-04-21 14:24:26 +03:00
terence tsao
806a923974 Altair: Add ProcessBlockNoVerifyAnySig (#8784) 2021-04-19 12:02:54 -07:00
terence tsao
4b4c2b97b7 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-16 14:49:33 -07:00
terence tsao
9d22ea840e Altair: Implement ProcessSlots and tests (#8774) 2021-04-16 09:09:22 -07:00
terence tsao
8a507d749a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-15 14:19:24 -07:00
terence tsao
2850581611 Altair: Implement process epoch (#8772) 2021-04-15 09:06:39 -07:00
terence tsao
59bc0c679c Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-14 09:38:46 -07:00
terence tsao
969dec8ad2 Altair: Add SetInactivityScores and test (#8762)
* Starting epoch precompute Altair

* Add ProcessRewardsAndPenaltiesPrecompute and tests

* Apply suggestions from code review

* remove redundant err checking

* combine params

* gazelle

* Update beacon-chain/core/altair/epoch_precompute.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/core/altair/epoch_precompute.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Named returns

* Combine function parameters

* Fix ineffectual asssignment

* Add process_inactivity_updates and test

* Fix ordering

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2021-04-13 13:19:33 -05:00
terence tsao
91fb8eea8c Altair: add modified process rewards and penalties (#8753) 2021-04-13 10:37:32 -07:00
terence tsao
e7ebdb11be Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-13 09:55:28 -07:00
terence tsao
ff3bb0aa8a Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-12 12:31:07 -07:00
terence tsao
5945849cb4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-11 14:35:25 -07:00
terence tsao
3435a61413 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-09 17:00:24 -07:00
terence tsao
a3b69600ef Altair: Slashing for block and epoch (#8732) 2021-04-09 16:41:08 -07:00
terence tsao
01841434ec Add ProcessParticipationFlagUpdates (#8731) 2021-04-09 13:44:23 -05:00
terence tsao
f60edb055c Altair: process attestation for block (#8729) 2021-04-08 20:53:16 -07:00
terence tsao
ee3d106a36 Altair: Process sync committee updates for epoch transition (#8728) 2021-04-08 12:53:07 -07:00
terence tsao
9b41a069eb Altair: Process sync committee (#8721) 2021-04-08 06:37:02 -07:00
terence tsao
dc1d5b778b Altair: Sync committee helpers (#8717)
* Add sync commitee helpers

* Remove extra i++
2021-04-07 16:44:24 +03:00
terence tsao
224b92781f Add Altair state trie black box tests (#8716) 2021-04-06 17:22:08 -07:00
terence tsao
6f54a9d057 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-06 09:49:59 -07:00
terence tsao
7906e571a8 Altair: genesis state (#8713) 2021-04-06 07:54:09 -07:00
terence tsao
458817d5ad Altair: Process deposits (#8712) 2021-04-05 13:57:20 -07:00
terence tsao
06290c6805 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-04-05 09:57:05 -07:00
terence tsao
1adf1f1bef Merge commit '9aa2dd1ae65a1c31bdc61413282871d7df8b1e8f' into hf1 2021-04-01 11:38:18 -07:00
terence tsao
af57cf5e96 Merge commit 'fe4a852e786f01eed02293f4578cd7a4f5e8bc4d' into hf1 2021-04-01 11:38:06 -07:00
terence tsao
d59ba818f0 Merge commit '3d405910e7d16c1af315097a74f0b8682a9a1eeb' into hf1 2021-04-01 11:33:30 -07:00
terence tsao
9aa2dd1ae6 Part of 3 of beacon state Altair pkg - copy and hash tree root (#8690) 2021-03-31 09:39:02 -07:00
terence tsao
f3abe70838 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-30 16:46:22 -07:00
terence tsao
fe4a852e78 Part of 2 of beacon state Altair pkg - getters and setters (#8658) 2021-03-30 16:35:54 -07:00
terence tsao
6af0f619c9 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-29 06:55:46 -07:00
terence tsao
3d405910e7 Part of 1 of beacon state Altair pkg (#8670) 2021-03-25 10:49:39 -07:00
terence tsao
2779daee32 Sync with develop 2021-03-25 09:57:35 -07:00
terence tsao
a0ba4a8563 Sync with develop 2021-03-25 09:50:02 -07:00
terence tsao
926b3725a1 Part of 1 of beacon state Altair pkg (#8652)
* Part 1 of stateAltair pkg- trie and partial field root

* Gazelle

* Sync with develop

* Update visibility
2021-03-24 13:25:38 +00:00
terence tsao
5cc9f4df0b Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-23 15:03:05 -07:00
terence tsao
fd297999b8 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-23 14:01:16 -07:00
terence tsao
0d45eeac56 Add altair config (#8642)
* Add altair config

* Add domain config and fix test

* Remove old configs

* Use Altair
2021-03-22 20:50:30 -07:00
terence tsao
e2fcd25039 Rename v1 to altair. Add inactivity_scores field (#8641) 2021-03-22 20:50:19 -07:00
terence tsao
2436d84370 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-22 09:47:23 -07:00
terence tsao
5418d8c367 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-19 13:21:40 -07:00
terence tsao
55e5dee7ab Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-18 21:02:40 -07:00
terence tsao
6a06a4bf98 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-18 07:08:04 -07:00
terence tsao
a9d981dce1 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-17 07:57:24 -07:00
terence tsao
a69947ba51 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-16 11:22:08 -07:00
terence tsao
6a32b18ca9 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-15 07:47:52 -07:00
terence tsao
9ebf8651b4 Merge branch 'hf1' of github.com:prysmaticlabs/prysm into hf1 2021-03-15 07:47:45 -07:00
terence tsao
8467485aec Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-12 13:22:52 -08:00
terence tsao
fdb6cf9b57 HF1: Beacon state protobuf definition (#8581) 2021-03-11 15:51:33 -08:00
terence tsao
3da55ad7a4 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-11 06:17:53 -08:00
terence tsao
773d561361 Merge branch 'develop' of github.com:prysmaticlabs/prysm into hf1 2021-03-09 10:13:12 -08:00
terence tsao
7f6d3ccb36 HF1: Add penalty config values (#8522)
* Add new configs

* More tests

* Fix message name
2021-02-25 14:59:47 -06:00
4036 changed files with 180564 additions and 383894 deletions
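For reference, a similar commit list and change summary can be reproduced locally with standard git commands. This is only a sketch: the endpoint names develop and hf1 are assumptions inferred from the merge commits above, and the actual compare endpoints of this page are not shown.

    # Inside a checkout of prysmaticlabs/prysm, with local branches develop and hf1:
    git log --oneline develop..hf1     # commits reachable from hf1 but not from develop
    git diff --stat develop hf1        # per-file summary of additions and deletions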

.bazelignore (new file, 0 lines changed)

.bazelrc (230 lines changed)

@@ -1,10 +1,9 @@
# Import bazelrc presets
import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# Print warnings for tests with inappropriate test size or timeout.
test --test_verbose_timeout_warnings
# Only build test targets when running bazel test //...
test --build_tests_only
test --test_output=errors
# E2E run with debug gotag
test:e2e --define gotags=debug
@@ -12,39 +11,232 @@ test:e2e --define gotags=debug
# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1
# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
build --host_force_python=PY2
test --host_force_python=PY2
run --host_force_python=PY2
# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. Network sandboxing only works on linux.
build --sandbox_default_allow_network=false
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --workspace_status_command=./scripts/workspace_status.sh
build --stamp
# Use mainnet protobufs at runtime
run --define ssz=mainnet
test --define ssz=mainnet
build --define ssz=mainnet
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
test --incompatible_strict_action_env
run --incompatible_strict_action_env
# Disable kafka by default, it takes a long time to build...
build --define kafka_enabled=false
test --define kafka_enabled=false
run --define kafka_enabled=false
build --define blst_disabled=false
test --define blst_disabled=false
run --define blst_disabled=false
build:kafka_enabled --define kafka_enabled=true
build:kafka_enabled --define gotags=kafka_enabled
build:blst_disabled --define blst_disabled=true
build:blst_disabled --define gotags=blst_disabled
build:minimal --//proto:network=minimal
build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1
build:release --config=llvm
# LLVM compiler for building C/C++ dependencies.
build:llvm --crosstool_top=@llvm_toolchain//:toolchain
build:llvm --define compiler=llvm
build:llvm --copt -fno-sanitize=vptr,function
build:llvm --linkopt -fno-sanitize=vptr,function
build:asan --copt -fsanitize=address,undefined
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address,undefined
build:asan --copt -fno-sanitize=vptr,function
build:asan --linkopt -fno-sanitize=vptr,function
build:asan --copt -DADDRESS_SANITIZER=1
build:asan --copt -D__SANITIZE_ADDRESS__
build:asan --linkopt -ldl
build:llvm-asan --config=llvm
build:llvm-asan --config=asan
build:llvm-asan --linkopt -fuse-ld=ld.lld
build:fuzz --define=gotags=libfuzzer
build:fuzz --config=llvm-asan
build:fuzz --copt=-fsanitize=fuzzer-no-link
build:fuzz --linkopt=-fsanitize=fuzzer
build:fuzz --copt=-fno-omit-frame-pointer
build:fuzz --define=FUZZING_ENGINE=libfuzzer
build:fuzz --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
build:fuzz --linkopt -Wl,--no-as-needed
build:fuzz --define=gc_goopts=-d=libfuzzer,checkptr
build:fuzz --run_under=//tools:fuzz_wrapper
build:fuzz --compilation_mode=opt
build:fuzz --define=blst_disabled=true
test:fuzz --local_test_jobs="HOST_CPUS*.5"
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --config=llvm
build:cgo_symbolizer --copt=-g
build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# multi-arch cross-compiling toolchain configs:
#-----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
# osx_amd64 config for cross compiler toolchain
build:osx_amd64 --config=cross
build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
build:osx_amd64 --compiler=osxcross
# windows
build:windows_amd64 --config=cross
build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
build:windows_amd64 --compiler=mingw-w64
# linux_arm64 conifg for cross compiler toolchain
build:linux_arm64 --config=cross
build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
build:linux_arm64 --copt=-funsafe-math-optimizations
build:linux_arm64 --copt=-ftree-vectorize
build:linux_arm64 --copt=-fomit-frame-pointer
build:linux_arm64 --cpu=aarch64
build:linux_arm64 --compiler=clang
build:linux_arm64 --copt=-march=armv8-a
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
build:debug --toolchain_resolution_debug=".*"
build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# Set bazel gotag
build --define gotags=bazel
# windows debug
build:windows_amd64_debug --config=windows_amd64
build:windows_amd64_debug --config=debug
# Abseil requires c++14 or greater.
build --cxxopt=-std=c++20
build --host_cxxopt=-std=c++20
# osx_amd64 debug config
build:osx_amd64_debug --config=debug
build:osx_amd64_debug --config=osx_amd64
# linux_arm64_debug
build:linux_arm64_debug --config=linux_arm64
build:linux_arm64_debug --config=debug
# linux_amd64_debug
build:linux_amd64_debug --config=linux_amd64
build:linux_amd64_debug --config=debug
# Docker Sandbox Configs
#-----------------------
# Note all docker sandbox configs must run from a linux x86_64 host
# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
build:docker-sandbox --define=EXECUTOR=remote
build:docker-sandbox --experimental_docker_verbose
build:docker-sandbox --experimental_enable_docker_sandbox
build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
# windows_amd64 docker sandbox build config
build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
# osx_amd64 docker sandbox build config
build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
# linux_arm64 docker sandbox build config
build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
# linux_amd64 docker sandbox build config
build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
# Remote Build Execution
#-----------------------
# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
#
# Depending on how many machines are in the remote execution instance, setting
# this higher can make builds faster by allowing more jobs to run in parallel.
# Setting it too high can result in jobs that timeout, however, while waiting
# for a remote machine to execute them.
build:remote --jobs=50
# Set several flags related to specifying the platform, toolchain and java
# properties.
# These flags should only be used as is for the rbe-ubuntu16-04 container
# and need to be adapted to work with other toolchain containers.
build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
build:remote --javabase=@rbe_ubuntu_clang//java:jdk
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:remote --host_platform=@rbe_ubuntu_clang//config:platform
build:remote --platforms=@rbe_ubuntu_clang//config:platform
# Starting with Bazel 0.27.0 strategies do not need to be explicitly
# defined. See https://github.com/bazelbuild/bazel/issues/7480
build:remote --define=EXECUTOR=remote
# Enable remote execution so actions are performed on the remote systems.
# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote --incompatible_strict_action_env=true
# Set a higher timeout value, just in case.
build:remote --remote_timeout=3600
# Enable authentication. This will pick up application default credentials by
# default. You can use --google_credentials=some_file.json to use a service
# account credential instead.
# build:remote --google_default_credentials=true
# Enable build without the bytes
# See: https://github.com/bazelbuild/bazel/issues/6862
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
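The .bazelrc content above defines named configurations (for example build:release, build:minimal, and build:fuzz) that are selected with Bazel's --config flag. A minimal usage sketch; the //beacon-chain target label is illustrative and not confirmed by this diff:

    # Optimized, stamped release build using the build:release config defined above
    bazel build --config=release //beacon-chain

    # Minimal-spec build using the build:minimal config
    bazel build --config=minimal //beacon-chain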


@@ -1 +1 @@
6.4.0
3.7.0


@@ -9,51 +9,37 @@
#build:remote-cache --strategy=Genrule=standalone
# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_minimal
build:remote-cache --experimental_remote_build_event_upload=minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
build:remote-cache --experimental_action_cache_store_output_metadata
build:remote-cache --experimental_remote_cache_compression
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true
build --experimental_use_hermetic_linux_sandbox
# Import workspace options.
import %workspace%/.bazelrc
# Enable blake3 once it is supported in remote cache. See: https://github.com/buchgr/bazel-remote/issues/710
# startup --digest_function=blake3
startup --host_jvm_args=-Xmx8g --host_jvm_args=-Xms4g
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
build --experimental_repository_cache_hardlinks
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc
build --show_progress_rate_limit=5
build --curses=no --color=no
build --curses=yes --color=no
build --keep_going
build --test_output=errors
build --flaky_test_attempts=5
build --build_runfile_links=false # Only build runfile symlink forest when required by local action, test, or run command.
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race
# Better caching
build:nostamp --nostamp
# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh
# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io
# Disable flaky test detection for fuzzing.
test:fuzz --flaky_test_attempts=1


@@ -11,7 +11,7 @@ name = "go"
enabled = true
[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm/v4"]
import_paths = ["github.com/prysmaticlabs/prysm"]
[[analyzers]]
name = "test-coverage"

.github/CODEOWNERS (vendored, 2 lines changed)

@@ -5,4 +5,4 @@
*.bzl @prestonvanloon
# Anyone on prylabs team can approve dependency updates.
deps.bzl @prysmaticlabs/core-team
deps.bzl @prysmaticlabs/core-team

.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 59 lines changed)

@@ -0,0 +1,59 @@
---
name: "\U0001F41EBug report"
about: Report a bug or problem with running Prysm
---
<!--💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎
Hellooo! 😄
To help us tend to your issue faster, please search our currently open issues before submitting a new one.
Existing issues often contain information about workarounds, resolution, or progress updates.
💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎💎-->
# 🐞 Bug Report
### Description
<!-- ✍️--> A clear and concise description of the problem...
### Has this worked before in a previous version?
<!-- Did this behavior use to work in the previous version? -->
<!-- ✍️--> Yes, the previous version in which this bug was not present was: ....
## 🔬 Minimal Reproduction
<!--
Please let us know how we can reproduce this issue. Include the exact method you used to run Prysm along with any flags used in your beacon chain and/or validator. Make sure you don't upload any confidential files or private keys.
-->
## 🔥 Error
<pre><code>
<!-- If the issue is accompanied by an error, please share the error logs with us below. If you have a lot of logs, place make a paste bin with your logs and share the link with us here: -->
<!-- ✍️-->
</code></pre>
## 🌍 Your Environment
**Operating System:**
<pre>
<code>
</code>
</pre>
**What version of Prysm are you running? (Which release)**
<pre>
<code>
</code>
</pre>
**Anything else relevant (validator index / public key)?**


@@ -1,66 +0,0 @@
name: 🐞 Bug report
description: Report a bug or problem with running Prysm
labels: ["Bug"]
body:
- type: markdown
attributes:
value: |
To help us tend to your issue faster, please search our currently open issues before submitting a new one.
Existing issues often contain information about workarounds, resolution, or progress updates.
- type: textarea
id: what-happened
attributes:
label: Describe the bug
description: |
A clear and concise description of the problem...
validations:
required: true
- type: textarea
id: previous-version
attributes:
label: Has this worked before in a previous version?
description: Did this behavior use to work in the previous version?
render: text
- type: textarea
id: reproduction-steps
attributes:
label: 🔬 Minimal Reproduction
description: |
Please let us know how we can reproduce this issue.
Include the exact method you used to run Prysm along with any flags used in your beacon chain and/or validator.
Make sure you don't upload any confidential files or private keys.
placeholder: |
Steps to reproduce:
1. Start '...'
2. Then '...'
3. Check '...'
4. See error
- type: textarea
id: errors
attributes:
label: Error
description: |
If the issue is accompanied by an error, please share the error logs with us below.
If you have a lot of logs, place make a paste bin with your logs and share the link with us here:
render: text
- type: dropdown
id: platform
attributes:
label: Platform(s)
description: What platform(s) did this occur on?
multiple: true
options:
- Linux (x86)
- Linux (ARM)
- Mac (Intel)
- Mac (Apple Silicon)
- Windows (x86)
- Windows (ARM)
- type: input
attributes:
label: What version of Prysm are you running? (Which release)
description: You can check your Prysm version by running your beacon node or validator with the `--version` flag.
- type: textarea
attributes:
label: Anything else relevant (validator index / public key)?


@@ -16,12 +16,12 @@ Existing issues often contain information about workarounds, resolution, or prog
### Description
<!-- ✍️ A clear and concise description of the problem or missing capability... -->
<!-- ✍️--> A clear and concise description of the problem or missing capability...
### Describe the solution you'd like
<!-- ✍️ If you have a solution in mind, please describe it. -->
<!-- ✍️--> If you have a solution in mind, please describe it.
### Describe alternatives you've considered
<!-- ✍️ Have you considered any alternative solutions or workarounds? -->
<!-- ✍️--> Have you considered any alternative solutions or workarounds?

.github/actions/gofmt/Dockerfile (vendored, new file, 5 lines changed)

@@ -0,0 +1,5 @@
FROM cytopia/gofmt
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

.github/actions/gofmt/action.yml (vendored, new file, 12 lines changed)

@@ -0,0 +1,12 @@
name: 'Gofmt checker'
description: 'Checks that all project files have been properly formatted.'
inputs:
path:
description: 'Path to check'
required: true
default: './'
runs:
using: 'docker'
image: 'Dockerfile'
args:
- ${{ inputs.path }}

.github/actions/gofmt/entrypoint.sh (vendored, new executable file, 15 lines changed)

@@ -0,0 +1,15 @@
#!/bin/sh -l
set -e
cd $GITHUB_WORKSPACE
# Check if any files are not formatted.
nonformatted="$(gofmt -l $1 2>&1)"
# Return if `go fmt` passes.
[ -z "$nonformatted" ] && exit 0
# Notify of issues with formatting.
echo "Following files need to be properly formatted:"
echo "$nonformatted"
exit 1
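For illustration only, a rough Go equivalent of the check above (a hypothetical standalone helper, not part of the action) that shells out to `gofmt -l` and fails when any file needs formatting:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Default to the repository root, mirroring the action's default path input.
	path := "./"
	if len(os.Args) > 1 {
		path = os.Args[1]
	}
	// gofmt -l prints the names of files whose formatting differs from gofmt's.
	out, err := exec.Command("gofmt", "-l", path).CombinedOutput()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	files := strings.TrimSpace(string(out))
	if files == "" {
		return // everything is formatted
	}
	fmt.Println("Following files need to be properly formatted:")
	fmt.Println(files)
	os.Exit(1)
}
```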


@@ -1,4 +1,4 @@
FROM golang:1.21-alpine
FROM golang:alpine
COPY entrypoint.sh /entrypoint.sh


@@ -1,13 +1,13 @@
#!/bin/sh -l
set -e
export PATH="$PATH:/usr/local/go/bin"
export PATH=$PATH:/usr/local/go/bin
cd "$GITHUB_WORKSPACE"
cd $GITHUB_WORKSPACE
cp go.mod go.mod.orig
cp go.sum go.sum.orig
go mod tidy -compat=1.17
go mod tidy
echo "Checking go.mod and go.sum:"
checks=0


@@ -0,0 +1,41 @@
name: Update DAppNodePackages
on:
push:
tags:
- '*'
jobs:
dappnode-update-beacon-chain:
name: Trigger a beacon-chain release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage-prysm-beacon-chain
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-beacon-chain
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches
dappnode-update-validator:
name: Trigger a validator release
runs-on: ubuntu-latest
steps:
- name: Get latest tag
id: get_tag
run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
- name: Send dispatch event to DAppNodePackage validator repository
env:
DISPATCH_REPO: dappnode/DAppNodePackage-prysm-validator
run: |
curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
-H "Accept: application/vnd.github.everest-preview+json" \
-H "Content-Type: application/json" \
--data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
https://api.github.com/repos/$DISPATCH_REPO/dispatches
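For context, a hedged Go sketch of the same `repository_dispatch` call the two curl steps make; the repository name, tag source, and token environment variable here are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

// Sends a `new_release` repository_dispatch event, mirroring the curl calls
// in the workflow above. The PAT and tag are read from the environment
// purely for illustration.
func main() {
	repo := "dappnode/DAppNodePackage-prysm-beacon-chain"
	tag := os.Getenv("TAG")
	body := []byte(fmt.Sprintf(`{"event_type":"new_release","client_payload":{"tag":"%s"}}`, tag))

	req, err := http.NewRequest(http.MethodPost,
		"https://api.github.com/repos/"+repo+"/dispatches", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/vnd.github.everest-preview+json")
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token "+os.Getenv("PAT_GITHUB"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("dispatch status:", resp.Status)
}
```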


@@ -1,45 +0,0 @@
name: "fuzz"
on:
workflow_dispatch:
schedule:
- cron: "0 12 * * *"
permissions:
contents: write
pull-requests: write
jobs:
list:
runs-on: ubuntu-latest
timeout-minutes: 180
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.21.5'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:
tags: fuzz,develop
outputs:
fuzz-tests: ${{steps.list.outputs.fuzz-tests}}
fuzz:
runs-on: ubuntu-latest
timeout-minutes: 360
needs: list
strategy:
fail-fast: false
matrix:
include: ${{fromJson(needs.list.outputs.fuzz-tests)}}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.21.5'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}
fuzz-regexp: ${{ matrix.func }}
fuzz-time: "20m"
tags: fuzz,develop
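As a sketch of what this workflow discovers and runs, a hypothetical native Go fuzz target guarded by the same `fuzz,develop` build tags; the package and function names are illustrative:

```go
//go:build fuzz && develop

package encoder_test // illustrative package name

import "testing"

// FuzzRoundtrip matches the ^Fuzz pattern listed by the workflow above and
// would be run for the configured 20 minutes per target.
func FuzzRoundtrip(f *testing.F) {
	f.Add([]byte("seed input"))
	f.Fuzz(func(t *testing.T, data []byte) {
		// Call the code under test here; the fuzzer mainly checks that it
		// does not panic or hang on arbitrary inputs.
		_ = len(data)
	})
}
```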


@@ -5,57 +5,31 @@ on:
branches: [ master ]
pull_request:
branches: [ '*' ]
merge_group:
types: [checks_requested]
jobs:
formatting:
name: Formatting
check:
name: Check
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v1
- name: Go mod tidy checker
id: gomodtidy
uses: ./.github/actions/gomodtidy
gosec:
name: Gosec scan
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.21
uses: actions/setup-go@v3
- name: Gofmt checker
id: gofmt
uses: ./.github/actions/gofmt
with:
go-version: '1.21.5'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
gosec -exclude-generated -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.21
uses: actions/setup-go@v3
path: ./
- name: GoImports checker
id: goimports
uses: Jerome1337/goimports-action@v1.0.2
with:
go-version: '1.21.5'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.55.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
goimports-path: ./
build:
name: Build
@@ -64,7 +38,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: '1.21.5'
go-version: ^1.14
id: go
- name: Check out code into the Go module directory
@@ -77,14 +51,6 @@ jobs:
- name: Build
# Use blst tag to allow go and bazel builds for blst.
run: go build -v ./...
env:
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"
# fuzz leverage go tag based stubs at compile time.
# Building and testing with these tags should be checked and enforced at pre-submit.
- name: Test for fuzzing
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
env:
CGO_CFLAGS: "-O2 -D__BLST_PORTABLE__"
# Tests run via Bazel for now...
# - name: Test
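For context on the Gosec step in this workflow, a minimal, hypothetical snippet of the kind of finding it reports (rule G404, use of math/rand where a secure source is expected); the function names are illustrative:

```go
package token // illustrative package

import (
	"crypto/rand"
	"encoding/hex"
	mrand "math/rand"
)

// weakToken is the sort of code gosec rule G404 flags: math/rand is not a
// cryptographically secure source of randomness.
func weakToken() string {
	b := make([]byte, 16)
	mrand.Read(b) // insecure on purpose, for illustration
	return hex.EncodeToString(b)
}

// strongToken uses crypto/rand and passes the scan.
func strongToken() (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}
```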


@@ -1,22 +0,0 @@
name: Horusec Security Scan
on:
schedule:
# Runs at 00:00 UTC every Sunday
- cron: '0 0 * * SUN'
jobs:
Horusec_Scan:
name: horusec-Scan
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/develop'
steps:
- name: Check out code
uses: actions/checkout@v2
with: # Required when commit authors is enabled
fetch-depth: 0
- name: Running Security Scan
run: |
curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

13
.gitignore vendored

@@ -30,14 +30,9 @@ password.txt
# Dist files
dist
# libfuzzer
oom-*
crash-*
# deepsource cli
bin
# p2p metaData
metaData
# execution API authentication
jwt.hex
# manual testing
tmp


@@ -1,94 +1,69 @@
run:
skip-files:
- validator/web/site_data.go
- .*_test.go
skip-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.21.5'
linters-settings:
govet:
check-shadowing: true
settings:
printf:
funcs:
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
golint:
min-confidence: 0
gocyclo:
min-complexity: 10
maligned:
suggest-new: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
depguard:
list-type: blacklist
packages:
# logging is allowed only by logutils.Log, logrus
# is allowed to use only in logutils package
- github.com/sirupsen/logrus
misspell:
locale: US
lll:
line-length: 140
goimports:
local-prefixes: github.com/golangci/golangci-lint
gocritic:
enabled-tags:
- performance
- style
- experimental
disabled-checks:
- wrapperFunc
linters:
enable-all: true
disable:
# Deprecated linters:
enable:
- deadcode
- exhaustivestruct
- golint
- govet
- ifshort
- interfacer
- maligned
- nosnakecase
- scopelint
- structcheck
- varcheck
# Disabled for now:
- asasalint
- bodyclose
- containedctx
- contextcheck
- cyclop
- depguard
- dogsled
- dupl
- durationcheck
- errorlint
- exhaustive
- exhaustruct
- forbidigo
- forcetypeassert
- funlen
- gci
- gochecknoglobals
- gochecknoinits
- goconst
- gocritic
- gocyclo
- godot
- godox
- goerr113
- gofumpt
- gomnd
- gomoddirectives
- inamedparam
- interfacebloat
- ireturn
- lll
- maintidx
- makezero
- musttag
- nakedret
- nestif
- nilnil
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosprintfhostport
- perfsprint
- prealloc
- predeclared
- promlinter
- protogetter
- revive
- staticcheck
- stylecheck
- tagalign
- tagliatelle
- thelper
- goimports
- golint
- gosec
- misspell
- structcheck
- typecheck
- unparam
- varnamelen
- wastedassign
- wrapcheck
- wsl
- varcheck
- gofmt
- unused
disable-all: true
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 65
run:
skip-dirs:
- proto/
- ^contracts/
deadline: 10m
output:
print-issued-lines: true
sort-results: true
# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
service:
golangci-lint-version: 1.15.0 # use the fixed version to not introduce new linters unexpectedly
prepare:
- echo "here I can run custom commands, but no preparation needed for this repo"
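To make one of the settings above concrete, govet's `check-shadowing: true` reports declarations like the inner `err` in this hedged, hypothetical example, where the shadowed assignment never reaches the returned variable:

```go
package example

import (
	"errors"
	"log"
)

func loadConfig(path string) error {
	var err error
	if path == "" {
		err := errors.New("empty config path") // shadows the outer err; flagged by the shadow check
		log.Println(err)
	}
	return err // still nil here, which is the bug the check points at
}
```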


@@ -5,22 +5,21 @@ Contact: mailto:security@prysmaticlabs.com
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
Encryption: openpgp4fpr:CD08DE68C60B82D3EE2A3F7D95452A701810FEDB
Encryption: openpgp4fpr:317D6E91058F8F3C2303BA7756313E44581297A6
Encryption: openpgp4fpr:79C59A585E3FD3AFFA00F5C22940A6479DA7C9EC
Preferred-Languages: en
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt
-----BEGIN PGP SIGNATURE-----
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAmGOfiYACgkQcuM+TfGl
A24YwRAAiQk3w6yzqSEggrOlNoNn04iu/rWZdn5ihkQgzACXy8XH2D1gdKLChE/X
7e5bUtgE2aCuHryQjwoKxqZakviBJFstVmHgF64rXv2zKhpqA30Mj4fI+T3zn8I+
+FpFV0TTsxNLDx+AcR1eQ1nSayO7ImUDIfOQNDDnSZZy42Bc+F+QIGKB3aH/8bpG
kT+bDTZrXvX+TE1gZTbAtZG8sH8g/zadoWEHIhfXUuYb0kTz+DRzAxoqU4j4Z4ee
1zSfFAgfJwxJP4kWD7s4xkE1sBbCgGBeD6cW/C2lbcfIei+XSizLpHW3jD9dNqh4
fLkmEspSa/LV/iXFq8nFzu/GLww4q+sQZDzzDKZyws54CrATinRitZMhzoIL0bTn
yFZVOGHosFAMEVZ36dl1Aw2+B2W6tr2CVr9c5zfV+kup5/KZH1EmT5nYY/zFwfg2
jYCFB5wmYeiyWZvuprgJXRArgVZLZaJxwWazlPVk4i/4vPvRgvfHqOwHCBe8DXy0
VHPhpewwb/ECYek1KoaNQflgR8iH2GMHkC5RjhGDAt1S0AQDtite5m4ZYt1kvO9E
k/znkv89dduhL9CKDvZvnI+DICwsTrf//4KJ8PM/qaPAJa4GvtiUU/eS/jKBivtv
OP5dZQtX6KPc9ewqqZgn622uHSezoBidgeTkdZsJ6tw2eIu0lsY=
=V7L0
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl++klgACgkQcuM+TfGl
A27rQw/6A29p1W20J0v+h218p8XWLSUpTIGLnZTxw6KqdyVXMzlsQK0YG4G2s2AB
0LKh7Ae/Di5E0U+Z4AjUW5nc5eaCxK36GMscH9Ah0rgJwNYxEJw7/2o8ZqVT/Ip2
+56rFihRqxFZfaCNKFVuZFaL9jKewV9FKYP38ID6/SnTcrOHiu2AoAlyZGmB03p+
iT57SPRHatygeY4xb/gwcfREFWEv+VHGyBTv8A+6ABZDxyurboCFMERHzFICrbmk
8UdHxxlWZDnHAbAUyAwpERC5znx6IHXQJwF8TMtu6XY6a6axT2XBOyJDF9/mZOz+
kdkz6loX5uxaQBGLtTv6Kqf1yUGANOZ16VhHvWwL209LmHmigIVQ+qSM6c79PsW/
vrsqdz3GBsiMC5Fq2vYgnbgzpfE8Atjn0y7E+j4R7IvwOAE/Ro/b++nqnc4YqhME
P/yTcfGftaCrdSNnQCXeoV9JxpFM5Xy8KV3eexvNKbcgA/9DtgxL5i+s5ZJkUT9A
+qJvoRrRyIym32ghkHgtFJKB3PLCdobeoOVRk6EnMo9zKSiSK2rZEJW8Ccbo515D
W9qUOn3GF7lNVuUFAU/YKEdmDp/AVaViZ7vH+8aq0LC0HBkZ8XlzWnWoArS8sMhw
fX0R9g/HMgrwNte/d0mwim5lJ2Plgv60Bh4grJqwZJeWbU0zi1U=
=uW+X
-----END PGP SIGNATURE-----


@@ -3,8 +3,8 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
load("@prysm//tools/nogo_config:def.bzl", "nogo_config_exclude")
prefix = "github.com/prysmaticlabs/prysm"
@@ -12,12 +12,10 @@ exports_files([
"LICENSE.md",
])
# gazelle:prefix github.com/prysmaticlabs/prysm/v4
# gazelle:prefix github.com/prysmaticlabs/prysm
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
# gazelle:build_tags bazel
# gazelle:exclude tools/analyzers/**/testdata/**
gazelle(
name = "gazelle",
prefix = prefix,
@@ -83,167 +81,57 @@ workspace_binary(
cmd = "@com_github_golang_lint//golint",
)
STATICCHECK_ANALYZERS = [
# Enabled static checks. See https://staticcheck.dev/docs/checks/
# Please keep this list sorted. Don't be a bad person by inserting stuff randomly.
"sa1000",
"sa1001",
"sa1002",
"sa1003",
"sa1004",
"sa1005",
"sa1006",
"sa1007",
"sa1008",
"sa1010",
"sa1011",
"sa1012",
"sa1013",
"sa1014",
"sa1015",
"sa1016",
"sa1017",
"sa1018",
# "sa1019", # TODO: Fix all uses of deprecated things.
"sa1020",
"sa1021",
"sa1023",
"sa1024",
"sa1025",
"sa1026",
"sa1027",
"sa1028",
"sa1029",
"sa1030",
"sa2000",
"sa2001",
"sa2002",
"sa2003",
"sa3000",
"sa3001",
"sa4000",
"sa4001",
"sa4003",
"sa4004",
"sa4005",
"sa4006",
"sa4008",
"sa4009",
"sa4010",
"sa4011",
"sa4012",
"sa4013",
"sa4014",
"sa4015",
"sa4016",
"sa4017",
"sa4018",
"sa4019",
"sa4020",
"sa4021",
"sa4022",
"sa4023",
"sa4024",
"sa4025",
"sa4026",
"sa4027",
"sa4028",
"sa4029",
"sa4030",
"sa4031",
"sa4032",
"sa5000",
"sa5001",
"sa5002",
"sa5003",
"sa5004",
"sa5005",
"sa5007",
"sa5008",
"sa5009",
"sa5010",
"sa5011",
"sa5012",
"sa6000",
"sa6001",
"sa6002",
"sa6003",
"sa6005",
"sa6006",
"sa9001",
"sa9002",
#"sa9003", # Doesn't build. See https://github.com/dominikh/go-tools/pull/1483
"sa9004",
"sa9005",
"sa9006",
"sa9007",
"sa9008",
]
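As a point of reference for the analyzers enabled above, a hedged, hypothetical example of the kind of issue SA4006 (a value assigned to a variable is never read before being overwritten) reports:

```go
package example

import "strconv"

func parsePort(s string) int {
	port := 8080 // SA4006: this value is overwritten below before ever being read
	port, err := strconv.Atoi(s)
	if err != nil {
		return 8080
	}
	return port
}
```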
nogo_config_exclude(
name = "nogo_config_with_excludes",
checks = [sa.upper() for sa in STATICCHECK_ANALYZERS],
exclude_files = [
"external/.*",
],
input = "nogo_config.json",
)
nogo(
name = "nogo",
config = ":nogo_config_with_excludes",
config = "nogo_config.json",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library",
"@org_golang_x_tools//go/analysis/passes/tests:go_default_library",
"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library",
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library",
"@org_golang_x_tools//go/analysis/passes/shift:go_default_library",
# "@org_golang_x_tools//go/analysis/passes/shadow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/printf:go_default_library",
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library",
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library",
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library",
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library",
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library",
"@org_golang_x_tools//go/analysis/passes/copylock:go_default_library",
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library",
"@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
"@org_golang_x_tools//go/analysis/passes/bools:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/atomic:go_default_library",
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/cryptorand:go_default_library",
"//tools/analyzers/errcheck:go_default_library",
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
"@org_golang_x_tools//go/analysis/passes/unsafeptr:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/unreachable:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/unmarshal:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/tests:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/structtag:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/stdmethods:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/shift:go_tool_library",
# "@org_golang_x_tools//go/analysis/passes/shadow:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/printf:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/pkgfact:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/nilness:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/nilfunc:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/findcall:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/ctrlflow:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/copylock:go_tool_library",
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/buildtag:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/buildssa:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/bools:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/atomicalign:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/atomic:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/assign:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
"//tools/analyzers/maligned:go_tool_library",
"//tools/analyzers/cryptorand:go_tool_library",
"//tools/analyzers/errcheck:go_tool_library",
"//tools/analyzers/featureconfig:go_tool_library",
"//tools/analyzers/comparesame:go_tool_library",
"//tools/analyzers/shadowpredecl:go_tool_library",
"//tools/analyzers/nop:go_tool_library",
"//tools/analyzers/slicedirect:go_tool_library",
"//tools/analyzers/interfacechecker:go_tool_library",
"//tools/analyzers/ineffassign:go_tool_library",
"//tools/analyzers/properpermissions:go_tool_library",
] + select({
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],
"//conditions:default": [
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_tool_library",
],
}) + ["@co_honnef_go_tools//staticcheck/%s:go_default_library" % c for c in STATICCHECK_ANALYZERS],
}),
)
config_setting(
@@ -251,16 +139,26 @@ config_setting(
values = {"define": "coverage_enabled=1"},
)
config_setting(
name = "pgo_enabled",
values = {"define": "pgo_enabled=1"},
)
common_files = {
"//:LICENSE.md": "LICENSE.md",
"//:README.md": "README.md",
}
toolchain(
name = "built_cmake_toolchain",
toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",
toolchain_type = "@rules_foreign_cc//tools/build_defs:cmake_toolchain",
)
string_setting(
name = "gotags",
build_setting_default = "",
values = [
"",
"libfuzzer",
],
)
sh_binary(
name = "prysm_sh",
srcs = ["prysm.sh"],


@@ -1,6 +1,6 @@
# Contribution Guidelines
Note: The latest and most up-to-date documentation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Note: The latest and most up to date documenation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?
@@ -10,9 +10,9 @@ You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues)
**1. Set up Prysm following the instructions in README.md.**
**2. Fork the Prysm repo.**
**2. Fork the prysm repo.**
Sign in to your GitHub account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.
Sign in to your Github account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.
**3. Create a local clone of Prysm.**
@@ -23,7 +23,7 @@ $ git clone https://github.com/prysmaticlabs/prysm.git
$ cd $GOPATH/src/github.com/prysmaticlabs/prysm
```
**4. Link your local clone to the fork on your GitHub repo.**
**4. Link your local clone to the fork on your Github repo.**
```
$ git remote add myprysmrepo https://github.com/<your_github_user_name>/prysm.git
@@ -68,7 +68,7 @@ $ go test <file_you_are_working_on>
$ git add --all
```
This command stages all the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.
This command stages all of the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.
**11. Commit the file or files.**
@@ -96,7 +96,8 @@ If there are conflicts between your edits and those made by others since you sta
$ git status
```
Open those files one at a time, and you will see lines inserted by Git that identify the conflicts:
Open those files one at a time and you
will see lines inserted by Git that identify the conflicts:
```
<<<<<< HEAD
@@ -118,7 +119,7 @@ $ git push myrepo feature-in-progress-branch
**15. Check to be sure your fork of the Prysm repo contains your feature branch with the latest edits.**
Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
Navigate to your fork of the repo on Github. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
**16. Create a pull request.**
@@ -150,7 +151,7 @@ pick hash fix a bug
pick hash add a feature
```
Replace the word pick with the word “squash” for every line but the first, so you end with ….
Replace the word pick with the word “squash” for every line but the first so you end with ….
```
pick hash do some work
@@ -177,7 +178,7 @@ We consider two types of contributions to our repo and categorize them as follow
Anyone can become a part-time contributor and help out on implementing Ethereum consensus. The responsibilities of a part-time contributor include:
- Engaging in Gitter conversations, asking the questions on how to begin contributing to the project
- Opening up GitHub issues to express interest in code to implement
- Opening up github issues to express interest in code to implement
- Opening up PRs referencing any open issue in the repo. PRs should include:
- Detailed context of what would be required for merge
- Tests that are consistent with how other tests are written in our implementation
@@ -187,12 +188,12 @@ Anyone can become a part-time contributor and help out on implementing Ethereum
### Core Contributors
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all the responsibilities of part-time contributors plus the majority of the following:
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
- Stay up to date on the latest beacon chain specification
- Monitor GitHub issues and PRs to make sure owner, labels, descriptions are correct
- Stay up to date on the latest beacon chain sepcification
- Monitor github issues and PRs to make sure owner, labels, descriptions are correct
- Formulate independent ideas, suggest new work to do, point out improvements to existing approaches
- Participate in code review, ensure code quality is excellent, and ensure high code coverage
- Participate in code review, ensure code quality is excellent, and have ensure high code coverage
- Help with social media presence, write bi-weekly development update
- Represent Prysmatic Labs at events to help spread the word on scalability research and solutions


@@ -1,4 +1,4 @@
# Dependency Management in Prysm
# Dependency Managagement in Prysm
Prysm is a go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,13 +28,13 @@ including complicated c++ dependencies.
One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
pb.go files at build time when file changes are present in any protobuf definition file or after
any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
the following scripts often to ensure their generated files are up to date. Further more, Prysm
generates SSZ marshal related code based on defined data structures. These generated files must
also be updated and checked in as frequently.
```bash
./hack/update-go-pbs.sh
./hack/update-go-ssz.sh
./scripts/update-go-pbs.sh
./scripts/update-go-ssz.sh
```
*Recommendation: Use go build only for local development and use bazel build for production.*
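As a rough illustration of what the generated SSZ marshaling provides (the type here is hypothetical and simplified, not actual generated Prysm code), a fixed-size container serializes to the concatenation of its fields, with integers encoded little-endian:

```go
package example

import "encoding/binary"

// Checkpoint is a simplified, hypothetical fixed-size container: a uint64
// epoch followed by a 32-byte root, serializing to exactly 40 bytes.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// MarshalSSZ sketches what the generated code produces for such a type.
func (c *Checkpoint) MarshalSSZ() []byte {
	buf := make([]byte, 40)
	binary.LittleEndian.PutUint64(buf[:8], c.Epoch)
	copy(buf[8:], c.Root[:])
	return buf
}
```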


@@ -26,18 +26,15 @@ You can use `bazel run //tools/genesis-state-gen` to create a deterministic gene
### Usage
- **--genesis-time** uint: Unix timestamp used as the genesis time in the generated genesis state (defaults to now)
- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
- **--num-validators** int: Number of validators to deterministically include in the generated genesis state
- **--output-ssz** string: Output filename of the SSZ marshaling of the generated genesis state
- **--config-name=interop** string: name of the beacon chain config to use when generating the state. ex mainnet|minimal|interop
**deprecated flag: use --config-name instead**
- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
The example below creates 64 validator keys, instantiates a genesis state with those 64 validators and with genesis unix timestamp 1567542540,
and finally writes a ssz encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section. When using the `--interop-*` flags, the beacon node will assume the `interop` config should be used, unless a different config is specified on the command line.
and finally writes a ssz encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section.
```
bazel run //tools/genesis-state-gen -- --config-name interop --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
bazel run //tools/genesis-state-gen -- --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
```
## Launching a Beacon Node + Validator Client
@@ -49,10 +46,8 @@ Open up two terminal windows, run:
```
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--min-sync-peers=0 \
--interop-num-validators 64 \
--interop-eth1data-votes
```
@@ -74,10 +69,8 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term
```
bazel run //beacon-chain -- \
--bootstrap-node= \
--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
--datadir=/tmp/beacon-chain-interop \
--deposit-contract $(curl -s https://prylabs.net/contract) \
--force-clear-db \
--min-sync-peers=0 \
--interop-genesis-state /path/to/genesis.ssz \
--interop-eth1data-votes
```


@@ -2,16 +2,14 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
[![ETH2.0_Spec_Version 1.0.0](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v1.0.0-blue.svg)](https://github.com/ethereum/eth2.0-specs/tree/v1.0.0)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
### Staking on Mainnet


@@ -1,11 +0,0 @@
# Security Policy
## Supported Versions
[Releases](https://github.com/prysmaticlabs/prysm/releases/) contains all available releases. We recommend using the [most recently released version](https://github.com/prysmaticlabs/prysm/releases/latest).
## Reporting a Vulnerability
Please see our signed [security.txt](https://github.com/prysmaticlabs/prysm/blob/develop/.well-known/security.txt) for preferred encryption and reporting destination.
**Please do not file a public ticket** mentioning the vulnerability, as doing so could increase the likelihood of the vulnerability being used before a fix has been created, released and installed on the network.


@@ -1,53 +1,45 @@
# Terms of Use
Effective as of November 2, 2023
## Terms of Use
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Offchain Labs, Inc. (as successor in interest to Prysmatic Labs LLC) (referenced herein as “Offchain Labs”, “we” or “us”). If you do not agree to the Terms, do not download or use Prysm. Additionally, the Terms of Use available at https://arbitrum.io/tos (or any successor site, the “OCL Terms of Use”) are hereby incorporated by reference into these Terms. In the event of any conflict between provisions set forth herein and those set forth in the OCL Terms of Use, the provisions set forth herein shall control.
Effective as of Oct 14, 2020
## About Prysm
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.
Prysm is a client implementation for the Ethereum blockchain's consensus protocol. To participate in the network, a user must send ETH from the Ethereum mainnet blockchain to a validator deposit smart contract on Ethereum mainnet. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
### About Prysm
Prysm is a client implementation for Ethereum consensus protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
## Licensing Terms
Prysm is an open-source software program licensed pursuant to the GNU General Public License v3.0.
The Offchain Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Offchain Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
PLEASE READ THESE TERMS CAREFULLY, AS THE OCL TERMS OF USE INCORPORATED BY REFERENCE HEREIN CONTAIN AN AGREEMENT TO ARBITRATE AND OTHER IMPORTANT INFORMATION REGARDING YOUR LEGAL RIGHTS, REMEDIES, AND OBLIGATIONS. THE AGREEMENT TO ARBITRATE REQUIRES (WITH LIMITED EXCEPTION) THAT YOU SUBMIT CLAIMS YOU HAVE AGAINST US TO BINDING AND FINAL ARBITRATION, AND FURTHER (1) YOU WILL ONLY BE PERMITTED TO PURSUE CLAIMS AGAINST OFFCHAIN LABS ON AN INDIVIDUAL BASIS, NOT AS A PLAINTIFF OR CLASS MEMBER IN ANY CLASS OR REPRESENTATIVE ACTION OR PROCEEDING, (2) YOU WILL ONLY BE PERMITTED TO SEEK RELIEF (INCLUDING MONETARY, INJUNCTIVE, AND DECLARATORY RELIEF) ON AN INDIVIDUAL BASIS, AND (3) YOU MAY NOT BE ABLE TO HAVE ANY CLAIMS YOU HAVE AGAINST US RESOLVED BY A JURY OR IN A COURT OF LAW.
### Licensing Terms
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
## Risks of Operating Prysm
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money, tokens and value. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm, including the security of your ETH and meeting any applicable minimum system requirements.
### Risks of Operating Prysm
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
YOU ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY RISKS ASSOCIATED WITH YOUR USE OF PRYSM, AND CANNOT BE HELD LIABLE FOR ANY RESULTING LOSSES THAT YOU EXPERIENCE WHILE ACCESSING OR USING PRYSM.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with local laws.
BY ACCESSING AND USING PRYSM, YOU REPRESENT AND WARRANT THAT YOU UNDERSTAND THE INHERENT RISKS ASSOCIATED WITH USING CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS, AND THAT YOU HAVE A WORKING KNOWLEDGE OF THE USAGE AND INTRICACIES OF DIGITAL ASSETS, SUCH AS THOSE FOLLOWING THE ETHEREUM TOKEN STANDARD (ERC-20). YOU FURTHER UNDERSTAND THAT THE MARKETS FOR DIGITAL ASSETS ARE HIGHLY VOLATILE DUE TO VARIOUS FACTORS, INCLUDING ADOPTION, SPECULATION, TECHNOLOGY, SECURITY, AND REGULATION. YOU ACKNOWLEDGE AND ACCEPT THAT THE COST AND SPEED OF TRANSACTING WITH CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS SUCH AS ETHEREUM ARE VARIABLE AND MAY INCREASE DRAMATICALLY AT ANY TIME. YOU UNDERSTAND THAT ANYONE CAN CREATE A TOKEN, INCLUDING FAKE VERSIONS OF EXISTING TOKENS AND TOKENS THAT FALSELY CLAIM TO REPRESENT PROJECTS, AND ACKNOWLEDGE AND ACCEPT THE RISK THAT YOU MAY MISTAKENLY INTERACT WITH THOSE OR OTHER TOKENS. YOU FURTHER ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY OF THE VARIABLES OR RISKS DESCRIBED IN THESE TERMS. YOU UNDERSTAND AND AGREE TO ASSUME FULL RESPONSIBILITY FOR ALL OF THE RISKS OF ACCESSING AND USING PRYSM. YOU ARE SOLELY RESPONSIBLE FOR YOUR WALLETS, FOR SAFEGUARDING THE ASSOCIATED PRIVATE KEY AND FOR ANY ACTIVITY THAT OCCURS USING YOUR WALLET. WITHOUT LIMITING THE FOREGOING, YOU ALSO UNDERSTAND THAT THERE MAY BE TAX AND REGULATORY RISKS RELATED TO USING PRYSM. IT IS YOUR SOLE RESPONSIBILITY TO DETERMINE WHETHER, AND TO WHAT EXTENT, ANY TAXES APPLY TO ANY TRANSACTIONS YOU CONDUCT IN CONNECTION WITH YOUR USE OF PRYSM, AND TO WITHHOLD, COLLECT, REPORT AND REMIT THE CORRECT AMOUNTS OF TAXES TO THE APPROPRIATE TAX AUTHORITIES. DIGITAL ASSETS, BLOCKCHAIN TECHNOLOGY, AND ANY RELATED SOFTWARE AND SERVICES ARE ALSO SUBJECT TO LEGAL AND REGULATORY UNCERTAINTY IN THE UNITED STATES AND OTHER JURISDICTIONS. YOU UNDERSTAND THAT LEGISLATIVE AND REGULATORY CHANGES OR ACTIONS MAY ADVERSELY AFFECT THE USAGE, TRANSFERABILITY, TRANSACTABILITY AND ACCESSIBILITY RELATED TO PRYSM.
Some Internet plans will charge an additional amount for any excess upload bandwidth used that isn't included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to such limitations and monitor your bandwidth use so that you can stop Prysm before you reach your upload limit.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with all Applicable Law (as defined below), including, without limitation, for the avoidance of doubt, local laws.
### Warranty Disclaimer
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. PRYSMATIC LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
Some Internet plans will charge additional amounts for bandwidth or any excess upload bandwidth used that isn't included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to any such limitations and monitor your bandwidth use and upload volumes.
### Limitation of Liability
In no event will Prysmatic Labs or any of its contributors be liable, whether in contract, warranty, tort (including negligence, whether active, passive or imputed), product liability, strict liability or other theory, breach of statutory duty or otherwise arising out of, or in connection with, your use of Prysm, for any direct, indirect, incidental, special or consequential damages (including any loss of profits or data, business interruption or other pecuniary loss, or damage, loss or other compromise of data, in each case whether direct, indirect, incidental, special or consequential) arising out of use Prysm, even if we or other users have been advised of the possibility of such damages. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by applicable law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
## Warranty Disclaimer
### Indemnification
To the maximum extent permitted by law, you will defend, indemnify and hold Prysmatic Labs and its contributors harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction on Prysm), as well as any and all losses, liabilities,
damages, costs, and expenses (including reasonable attorneys' fees) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. WITHOUT LIMITING ANYTHING SET FORTH ELSEWHERE IN THESE TERMS, OFFCHAIN LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE OR OTHER GOVERNMENTAL AUTHORITY. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
### Compliance with Laws and Tax Obligations
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal,
regional, state, county, municipal or other governmental authority and you agree to comply with all such laws in your use of Prysm. The users of Prysm are solely responsible to determine what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
## Limitation of Liability
### Miscellaneous
These Terms will be construed and enforced in accordance with the laws of the state of Illinois as applied to agreements entered into and completely performed in Illinois. You agree to the personal jurisdiction by and venue in Illinois and waive any objection to such jurisdiction or venue.
IN NO EVENT WILL OFFCHAIN LABS OR ANY OF ITS AFFILIATES OR ITS OR ANY SUCH AFFILIATES DIRECTORS, OFFICERS, EMPLOYEES, AGENTS, OR REPRESENTATIVES OR ANY CONTRIBUTORS (COLLECTIVELY, THE “OCL PARTIES”) BE LIABLE, WHETHER IN CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE, WHETHER ACTIVE, PASSIVE OR IMPUTED), PRODUCT LIABILITY, STRICT LIABILITY OR OTHER THEORY, BREACH OF STATUTORY DUTY OR OTHERWISE ARISING OUT OF, OR IN CONNECTION WITH, YOUR USE OF PRYSM, FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES (INCLUDING ANY LOSS OF PROFITS OR DATA, BUSINESS INTERRUPTION OR OTHER PECUNIARY LOSS, OR DAMAGE, LOSS OR OTHER COMPROMISE OF DATA, IN EACH CASE WHETHER DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL) ARISING OUT OF USE PRYSM, EVEN IF WE OR OTHER USERS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by Applicable Law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your continued use of Prysm constitutes acceptance of such revised Terms.
## Indemnification
To the maximum extent permitted by Applicable Law, you will defend, indemnify and hold each OCL Party harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction or other activity on Prysm), as well as any and all losses, liabilities, damages, costs, and expenses (including reasonable attorneys' fees and costs) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.
## Compliance with Laws
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal, regional, state, county, municipal or other governmental authority (collectively, “Applicable Law”) and you agree to comply with all such Applicable Law in your use of Prysm. The users of Prysm are solely responsible to determine what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
## Miscellaneous
These Terms will be governed by the laws of the State of Delaware without regard to its conflict of law provisions. With respect to any disputes or claims not subject to arbitration, as set forth in the OCL Terms of Use, you and Offchain Labs submit to the personal and exclusive jurisdiction of the state and federal courts located within New York, New York and waive any objection to such jurisdiction and venue. The failure of Offchain Labs to exercise or enforce any right or provision of these Terms will not constitute a waiver of such right or provision.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your use of Prysm following any such revision to these Terms constitutes acceptance of such revised Terms.
These Terms constitute the entire agreement between you and Offchain Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party's right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
These Terms constitute the entire agreement between you and Prysmatic Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party's right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.

396
WORKSPACE

@@ -4,68 +4,47 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
http_archive(
name = "rules_pkg",
sha256 = "8c20f74bca25d2d442b327ae26768c02cf3c99e93fad0381f32be9aab1967675",
name = "bazel_toolchains",
sha256 = "8e0633dfb59f704594f19ae996a35650747adc621ada5e8b9fb588f808c89cb0",
strip_prefix = "bazel-toolchains-3.7.0",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
"https://github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
],
)
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
urls = [
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
],
name = "com_grail_bazel_toolchain",
sha256 = "040b9d00b8a03e8a28e38159ad0f2d0e0de625d93f453a9f226971a8c47e757b",
strip_prefix = "bazel-toolchain-5f82830f9d6a1941c3eb29683c1864ccf2862454",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/5f82830f9d6a1941c3eb29683c1864ccf2862454.tar.gz"],
)
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
host_platform_sha256 = {
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
},
url_formats = [
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
],
version = "0.12.0-dev.1349+fa022d1ec",
bazel_toolchain_dependencies()
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "10.0.0",
)
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
register_toolchains(
"@zig_sdk//toolchain:linux_amd64_gnu.2.31",
"@zig_sdk//toolchain:linux_arm64_gnu.2.31",
# Hermetic cc toolchain is not yet supported on darwin. Sysroot needs to be provided.
# See https://github.com/uber/hermetic_cc_toolchain#osx-sysroot
# "@zig_sdk//toolchain:darwin_amd64",
# "@zig_sdk//toolchain:darwin_arm64",
# Windows builds are not supported yet.
# "@zig_sdk//toolchain:windows_amd64",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
load("@prysm//tools/cross-toolchain:darwin_cc_hack.bzl", "configure_nonhermetic_darwin")
configure_nonhermetic_darwin()
llvm_register_toolchains()
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
configure_prysm_toolchains()
load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")
rbe_toolchains_config()
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "bazel_skylib",
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
@@ -81,10 +60,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
],
)
@@ -96,51 +75,26 @@ http_archive(
)
http_archive(
name = "rules_distroless",
sha256 = "e64f06e452cd153aeab81f752ccf4642955b3af319e64f7bc7a7c9252f76b10e",
strip_prefix = "rules_distroless-f5e678217b57ce3ad2f1c0204bd4e9d416255773",
url = "https://github.com/GoogleContainerTools/rules_distroless/archive/f5e678217b57ce3ad2f1c0204bd4e9d416255773.tar.gz",
)
load("@rules_distroless//distroless:dependencies.bzl", "rules_distroless_dependencies")
rules_distroless_dependencies()
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
aspect_bazel_lib_dependencies()
aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
rules_oci_dependencies()
load("@rules_oci//oci:repositories.bzl", "LATEST_CRANE_VERSION", "oci_register_toolchains")
oci_register_toolchains(
name = "oci",
crane_version = LATEST_CRANE_VERSION,
name = "io_bazel_rules_docker",
sha256 = "59d5b42ac315e7eadffa944e86e90c2990110a1c8075f1cd145f487e999d22b3",
strip_prefix = "rules_docker-0.17.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.17.0/rules_docker-v0.17.0.tar.gz"],
)
http_archive(
name = "io_bazel_rules_go",
patch_args = ["-p1"],
patches = [
# Required until https://github.com/bazelbuild/rules_go/pull/2450 merges; otherwise the nilness
# nogo check fails for certain third_party dependencies.
"//third_party:io_bazel_rules_go.patch",
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
sha256 = "7c10271940c6bce577d51a075ae77728964db285dac0a46614a7934dc34303e6",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
],
)
@@ -159,33 +113,68 @@ git_repository(
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
load("@rules_oci//oci:pull.bzl", "oci_pull")
# A multi-arch base image
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
image = "gcr.io/distroless/cc-debian11",
platforms = [
"linux/amd64",
"linux/arm64/v8",
],
reproducible = True,
http_archive(
name = "fuzzit_linux",
build_file_content = "exports_files([\"fuzzit\"])",
sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774",
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
load("@prysm//tools:image_deps.bzl", "prysm_image_deps")
git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1569509514 +0300",
)
prysm_image_deps()
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
)
container_repositories()
load(
"@io_bazel_rules_docker//container:container.bzl",
"container_pull",
)
container_pull(
name = "alpine_cc_linux_amd64",
digest = "sha256:752aa0c9a88461ffc50c5267bb7497ef03a303e38b2c8f7f2ded9bebe5f1f00e",
registry = "index.docker.io",
repository = "pinglamb/alpine-glibc",
)
container_pull(
name = "fuzzit_base",
digest = "sha256:24a39a4360b07b8f0121eb55674a2e757ab09f0baff5569332fefd227ee4338f",
registry = "gcr.io",
repository = "fuzzit-public/stretch-llvm8",
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
go_register_toolchains(
go_version = "1.21.5",
go_version = "1.16.4",
nogo = "@//:nogo",
)
http_archive(
name = "prysm_testnet_site",
build_file_content = """
proto_library(
name = "faucet_proto",
srcs = ["src/proto/faucet.proto"],
visibility = ["//visibility:public"],
)""",
sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
)
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -204,31 +193,14 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "516d551cfb3e50e4ac2f42db0992f4ceb573a7cb1616d727a725c8161485329f",
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/refs/tags/v5.3.0.tar.gz",
sha256 = "91434d5fd5e1c6eb7b0174fed2afe25e09bddf00e1e4c431db931b2cee4e7773",
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
eth2_spec_version = "v1.1.0-beta.1"
http_archive(
name = "eip4881_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "89cb659498c0d196fc9f957f8b849b2e1a5c041c3b2b3ae5432ac5c26944297e",
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-beta.5"
bls_test_version = "v0.1.1"
http_archive(
name = "consensus_spec_tests_general",
name = "eth2_spec_tests_general",
build_file_content = """
filegroup(
name = "test_data",
@@ -239,12 +211,12 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9017ffff84d64a7c4c9e6ff9f421f9479f71d3b463b738f54e02158dbb4f50f0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
sha256 = "e9b4cc60a3e676c6b4a9348424e44cff1ebada603ffb31b0df600dbd70e7fbf6",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/general.tar.gz" % eth2_spec_version,
)
http_archive(
name = "consensus_spec_tests_minimal",
name = "eth2_spec_tests_minimal",
build_file_content = """
filegroup(
name = "test_data",
@@ -255,12 +227,12 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "f08711682553fe7c9362f1400ed8c56b2fa9576df08581fcad4c508ba8ad4788",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
sha256 = "cf82dc729ffe7b924f852e57d1973e1a6377c5b52acc903c953277fa9b4e6de8",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/minimal.tar.gz" % eth2_spec_version,
)
http_archive(
name = "consensus_spec_tests_mainnet",
name = "eth2_spec_tests_mainnet",
build_file_content = """
filegroup(
name = "test_data",
@@ -271,12 +243,12 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "7ea3189e3879f2ac62467cbf2945c00b6c94d30cdefb2d645c630b1018c50e10",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
sha256 = "6c6792375b81858037014e282d28a64b0cf12e12daf16054265c85403b8b329f",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/mainnet.tar.gz" % eth2_spec_version,
)
http_archive(
name = "consensus_spec",
name = "eth2_spec",
build_file_content = """
filegroup(
name = "spec_data",
@@ -286,98 +258,74 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4119992a2efc79e5cb2bdc07ed08c0b1fa32332cbd0d88e6467f34938df97026",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
sha256 = "16094dad1bab4e8ab3adb60c10e311cd1e294cd7bbf5a89505f24bebd3d0e513",
strip_prefix = "eth2.0-specs-" + eth2_spec_version[1:],
url = "https://github.com/ethereum/eth2.0-specs/archive/refs/tags/%s.tar.gz" % eth2_spec_version,
)
http_archive(
name = "bls_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "93c7d006e7c5b882cbd11dc9ec6c5d0e07f4a8c6b27a32f964eb17cf2db9763a",
url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
name = "com_github_bazelbuild_buildtools",
sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)
http_archive(
name = "eth2_networks",
build_file_content = """
filegroup(
name = "configs",
srcs = glob([
"shared/**/config.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"custom_config_data/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "9f66d8d5644982d3d0d2e3d2b9ebe77a5f96638a5d7fcd715599c32818195cb3",
strip_prefix = "holesky-ea39b9006210848e13f28d92e12a30548cecd41d",
url = "https://github.com/eth-clients/holesky/archive/ea39b9006210848e13f28d92e12a30548cecd41d.tar.gz", # 2023-09-21
)
http_archive(
git_repository(
name = "com_google_protobuf",
sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",
strip_prefix = "protobuf-23.3",
commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1617835118 -0700",
)
# Group the sources of the library so that the CMake rule has access to them
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
http_archive(
name = "rules_foreign_cc",
sha256 = "b85ce66a3410f7370d1a9a61dfe3a29c7532b7637caeb2877d8d0dfd41d77abb",
strip_prefix = "rules_foreign_cc-3515b20a2417c4dd51c8a4a8cac1f6ecf3c6d934",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/3515b20a2417c4dd51c8a4a8cac1f6ecf3c6d934.zip",
)
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
rules_foreign_cc_dependencies([
"@prysm//:built_cmake_toolchain",
])
http_archive(
name = "librdkafka",
build_file_content = all_content,
sha256 = "3b99a36c082a67ef6295eabd4fb3e32ab0bff7c6b0d397d6352697335f4e57eb",
strip_prefix = "librdkafka-1.4.2",
urls = ["https://github.com/edenhill/librdkafka/archive/v1.4.2.tar.gz"],
)
http_archive(
name = "sigp_beacon_fuzz_corpora",
build_file = "//third_party:beacon-fuzz/corpora.BUILD",
sha256 = "42993d0901a316afda45b4ba6d53c7c21f30c551dcec290a4ca131c24453d1ef",
strip_prefix = "beacon-fuzz-corpora-bac24ad78d45cc3664c0172241feac969c1ac29b",
urls = [
"https://github.com/protocolbuffers/protobuf/archive/v23.3.tar.gz",
"https://github.com/sigp/beacon-fuzz-corpora/archive/bac24ad78d45cc3664c0172241feac969c1ac29b.tar.gz",
],
)
# External dependencies
http_archive(
name = "googleapis",
sha256 = "9d1a930e767c93c825398b8f8692eca3fe353b9aaadedfbcf1fca2282c85df88",
strip_prefix = "googleapis-64926d52febbf298cb82a8f472ade4a3969ba922",
urls = [
"https://github.com/googleapis/googleapis/archive/64926d52febbf298cb82a8f472ade4a3969ba922.zip",
],
name = "prysm_web_ui",
build_file_content = """
filegroup(
name = "site",
srcs = glob(["**/*"]),
visibility = ["//visibility:public"],
)
load("@googleapis//:repository_rules.bzl", "switched_rules_by_language")
switched_rules_by_language(
name = "com_google_googleapis_imports",
go = True,
""",
sha256 = "54ce527b83d092da01127f2e3816f4d5cfbab69354caba8537f1ea55889b6d7c",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.4/prysm-web-ui.tar.gz",
],
)
load("//:deps.bzl", "prysm_deps")
@@ -389,9 +337,31 @@ load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
bls_dependencies()
load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")
load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",
)
e2e_deps()
# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()
# CC images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//cc:image.bzl",
_cc_image_repos = "repositories",
)
_cc_image_repos()
load("@com_github_ethereum_go_ethereum//:deps.bzl", "geth_dependencies")
geth_dependencies()
load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
go_embed_data_dependencies()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
@@ -401,6 +371,10 @@ load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies()
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()

View File

@@ -1,11 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"constants.go",
"headers.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api",
visibility = ["//visibility:public"],
)

View File

@@ -1,20 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"errors.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -1,60 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"client.go",
"doc.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/server:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/config:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/prysm/beacon:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_mod//semver:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"checkpoint_test.go",
"client_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -1,271 +0,0 @@
package beacon
import (
"context"
"fmt"
"path"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
sb []byte
bb []byte
st state.BeaconState
b interfaces.ReadOnlySignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
}
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
return blockPath, file.WriteFile(blockPath, o.BlockBytes())
}
// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
return statePath, file.WriteFile(statePath, o.StateBytes())
}
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (o *OriginData) StateBytes() []byte {
return o.sb
}
// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
return o.bb
}
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}
// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
// This pair can be used to initialize a new beacon node via checkpoint sync.
func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
sb, err := client.GetState(ctx, IdFinalized)
if err != nil {
return nil, err
}
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
}
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
slot := s.LatestBlockHeader().Slot
bb, err := client.GetBlock(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
bodyRoot, err := b.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
}
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
if sbr != bodyRoot {
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", hexutil.Encode(sr[:])).
WithField("block_root", hexutil.Encode(br[:])).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
b: b,
sb: sb,
bb: bb,
vu: vu,
br: br,
sr: sr,
}, nil
}
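As a rough usage sketch (not part of the original file; the helper name saveOrigin and the dir argument are placeholders), the snippet below shows how DownloadFinalizedData pairs with SaveState and SaveBlock to write checkpoint-sync origin files:
func saveOrigin(ctx context.Context, c *Client, dir string) error {
	od, err := DownloadFinalizedData(ctx, c)
	if err != nil {
		return err
	}
	// File names encode config name, fork, slot and root, so repeated runs will not collide.
	if _, err := od.SaveState(dir); err != nil {
		return err
	}
	if _, err := od.SaveBlock(dir); err != nil {
		return err
	}
	return nil
}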
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch primitives.Epoch
}
// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}
// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
// to obtain the current weak_subjectivity checkpoint.
// For non-prysm nodes, the same computation will be performed with extra steps,
// using the head state downloaded from the beacon node api.
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, base.ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return computeBackwardsCompatible(ctx, client)
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
return ws, nil
}
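A minimal sketch (illustrative only; printCheckpointFlag is a hypothetical in-package helper) of turning the result into the colon-separated checkpoint string described above:
func printCheckpointFlag(ctx context.Context, c *Client) error {
	wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
	if err != nil {
		return err
	}
	// CheckpointString renders "<block root>:<epoch>", e.g. "0x1c35...c49a:74888".
	fmt.Println(wsd.CheckpointString())
	return nil
}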
const (
prysmMinimumVersion = "v2.0.7"
prysmImplementationName = "Prysm"
)
// errUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
nv, err := client.GetNodeVersion(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
}
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
}
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
}
// use first slot of the epoch for the state slot
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
}
log.Printf("requesting checkpoint state at slot %d", slot)
// get the state at the first slot of the epoch
sb, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
// The VersionedUnmarshaler detected from the state bytes is used to unmarshal the BeaconState so we can read the block root in latest_block_header
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
// compute state and block roots
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
}
h := s.LatestBlockHeader()
h.StateRoot = sr[:]
br, err := h.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %d", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err = b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
}
return &WeakSubjectivityData{
Epoch: epoch,
BlockRoot: br,
StateRoot: sr,
}, nil
}
// getWeakSubjectivityEpochFromHead downloads the head state, detects the correct chain config from it,
// and uses prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
if err != nil {
return 0, err
}
vu, err := detect.FromState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
headState, err := vu.UnmarshalBeaconState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
}
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
if err != nil {
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
}
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
}

View File

@@ -1,483 +0,0 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
type testRT struct {
rt func(*http.Request) (*http.Response, error)
}
func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
if rt.rt != nil {
return rt.rt(req)
}
return nil, errors.New("RoundTripper not implemented")
}
var _ http.RoundTripper = &testRT{}
func marshalToEnvelope(val interface{}) ([]byte, error) {
raw, err := json.Marshal(val)
if err != nil {
return nil, errors.Wrap(err, "error marshaling value to place in data envelope")
}
env := struct {
Data json.RawMessage `json:"data"`
}{
Data: raw,
}
return json.Marshal(env)
}
func TestMarshalToEnvelope(t *testing.T) {
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
expected := `{"data":{"version":"Prysm/v2.0.5 (linux amd64)"}}`
require.Equal(t, expected, string(encoded))
}
func TestFallbackVersionCheck(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
func TestFname(t *testing.T) {
vu := &detect.VersionedUnmarshaler{
Config: params.MainnetConfig(),
Fork: version.Phase0,
}
slot := primitives.Slot(23)
prefix := "block"
var root [32]byte
copy(root[:], []byte{0x23, 0x23, 0x23})
expected := "block_mainnet_phase0_23-0x2323230000000000000000000000000000000000000000000000000000000000.ssz"
actual := fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
vu.Config = params.MinimalSpecConfig()
vu.Fork = version.Altair
slot = 17
prefix = "state"
copy(root[29:], []byte{0x17, 0x17, 0x17})
expected = "state_minimal_altair_17-0x2323230000000000000000000000000000000000000000000000000000171717.ssz"
actual = fname(prefix, vu, slot, root)
require.Equal(t, expected, actual)
}
func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(epoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, wSlot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
b, err = blocktest.SetBlockStateRoot(b, wRoot)
require.NoError(t, err)
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
expectedWSD := WeakSubjectivityData{
BlockRoot: bRoot,
StateRoot: wRoot,
Epoch: epoch,
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
require.Equal(t, expectedWSD.Epoch, wsd.Epoch)
require.Equal(t, expectedWSD.StateRoot, wsd.StateRoot)
require.Equal(t, expectedWSD.BlockRoot, wsd.BlockRoot)
}
// runs computeBackwardsCompatible directly
// and via ComputeWeakSubjectivityCheckpoint with a round tripper that triggers the backwards compatible code path
func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
st, expectedEpoch := defaultTestHeadState(t, cfg)
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
wSlot, err := slots.EpochStart(expectedEpoch)
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, wSlot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, wst.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
wRoot, err := wst.HashTreeRoot(ctx)
require.NoError(t, err)
b, err = blocktest.SetBlockStateRoot(b, wRoot)
require.NoError(t, err)
serBlock, err := b.MarshalSSZ()
require.NoError(t, err)
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
wsPriv, err := computeBackwardsCompatible(ctx, c)
require.NoError(t, err)
require.DeepEqual(t, wsPriv, wsPub)
}
func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == renderGetStatePath(IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
}
func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb.Fork, error) {
os := forks.NewOrderedSchedule(cfg)
currentVersion, err := os.VersionForEpoch(epoch)
if err != nil {
return nil, err
}
prevVersion, err := os.Previous(currentVersion)
if err != nil {
if !errors.Is(err, forks.ErrNoPreviousVersion) {
return nil, err
}
// use same version for both in the case of genesis
prevVersion = currentVersion
}
forkEpoch := cfg.ForkVersionSchedule[currentVersion]
return &ethpb.Fork{
PreviousVersion: prevVersion[:],
CurrentVersion: currentVersion[:],
Epoch: forkEpoch,
}, nil
}
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, primitives.Epoch) {
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
slot, err := slots.EpochStart(cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetSlot(slot))
var validatorCount, avgBalance uint64 = 100, 35
require.NoError(t, populateValidators(cfg, st, validatorCount, avgBalance))
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: fork.Epoch - 10,
Root: make([]byte, 32),
}))
// to see the math for this, look at helpers.LatestWeakSubjectivityEpoch
// and plug in the mainnet config values, the validatorCount and avgBalance above, and the altair fork epoch
expectedEpoch := slots.ToEpoch(st.Slot()) - 224
return st, expectedEpoch
}
// TODO(10429): refactor beacon state options in testing/util to take a state.BeaconState so this can become an option
func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, valCount, avgBalance uint64) error {
validators := make([]*ethpb.Validator, valCount)
balances := make([]uint64, len(validators))
for i := uint64(0); i < valCount; i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, cfg.BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: avgBalance * 1e9,
ExitEpoch: cfg.FarFutureEpoch,
}
balances[i] = validators[i].EffectiveBalance
}
if err := st.SetValidators(validators); err != nil {
return err
}
return st.SetBalances(balances)
}
func TestDownloadFinalizedData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()
// avoid the altair zone because genesis tests are easier to set up
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
slot, err := slots.EpochStart(epoch)
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))
// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, slot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, st.SetLatestBlockHeader(header.Header))
// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
sr, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
b, err = blocktest.SetBlockStateRoot(b, sr)
require.NoError(t, err)
mb, err := b.MarshalSSZ()
require.NoError(t, err)
br, err := b.Block().HashTreeRoot()
require.NoError(t, err)
ms, err := st.MarshalSSZ()
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(sb, ms))
vu, err := detect.FromState(sb)
require.NoError(t, err)
us, err := vu.UnmarshalBeaconState(sb)
require.NoError(t, err)
ushtr, err := us.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, sr, ushtr)
expected := &OriginData{
sb: ms,
bb: mb,
br: br,
sr: sr,
}
od, err := DownloadFinalizedData(ctx, c)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expected.sb, od.sb))
require.Equal(t, true, bytes.Equal(expected.bb, od.bb))
require.Equal(t, expected.br, od.br)
require.Equal(t, expected.sr, od.sr)
}

View File

@@ -1,366 +0,0 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"regexp"
"sort"
"strconv"
"text/template"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/api/server"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
apibeacon "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/beacon"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getConfigSpecPath = "/eth/v1/config/spec"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
)
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
// StateOrBlockId constants are defined for named identifiers, and helper methods are provided
// for slot and root identifiers. Example text from the Eth Beacon Node API documentation:
//
// "Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>."
type StateOrBlockId string
const (
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
IdFinalized StateOrBlockId = "finalized"
)
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
return StateOrBlockId(fmt.Sprintf("%#x", r))
}
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromSlot(s primitives.Slot) StateOrBlockId {
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
}
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
func idTemplate(ts string) func(StateOrBlockId) string {
t := template.Must(template.New("").Parse(ts))
f := func(id StateOrBlockId) string {
b := bytes.NewBuffer(nil)
err := t.Execute(b, struct{ Id string }{Id: string(id)})
if err != nil {
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
}
return b.String()
}
// run the template to ensure that it is valid
// this should happen at load time (using package-scoped vars) so that runtime errors aren't possible
_ = f(IdGenesis)
return f
}
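To make the interpolation concrete, a throwaway in-package sketch (the slot value 4096 is arbitrary) of how a rendered path looks:
func exampleRenderedPath() {
	// idTemplate compiles the path template once; the returned closure substitutes the id.
	renderBlockRoot := idTemplate(getBlockRootPath)
	p := renderBlockRoot(IdFromSlot(4096))
	// p == "/eth/v1/beacon/blocks/4096/root"
	_ = p
}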
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
*client.Client
}
// NewClient returns a new Client that includes functions for rest calls to Beacon API.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
return &Client{c}, nil
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
return b, nil
}
var getBlockRootTpl = idTemplate(getBlockRootPath)
// GetBlockRoot retrieves the hash_tree_root of the BeaconBlock for the given block id.
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.Get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
jsonr := &struct{ Data struct{ Root string } }{}
err = json.Unmarshal(b, jsonr)
if err != nil {
return [32]byte{}, errors.Wrap(err, "error decoding json data from get block root response")
}
rs, err := hexutil.Decode(jsonr.Data.Root)
if err != nil {
return [32]byte{}, errors.Wrap(err, fmt.Sprintf("error decoding hex-encoded value %s", jsonr.Data.Root))
}
return bytesutil.ToBytes32(rs), nil
}
var getForkTpl = idTemplate(getForkForStatePath)
// GetFork queries the Beacon Node API for the Fork from the state identified by stateId.
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.Get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
fr := &shared.Fork{}
dataWrapper := &struct{ Data *shared.Fork }{Data: fr}
err = json.Unmarshal(body, dataWrapper)
if err != nil {
return nil, errors.Wrap(err, "error decoding json response in GetFork")
}
return fr.ToConsensus()
}
// GetForkSchedule retrieves all forks, past, present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
// GetConfigSpec retrieves the current configuration of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*config.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
fsr := &config.GetSpecResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
return fsr, nil
}
type NodeVersion struct {
implementation string
semver string
systemInfo string
}
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(client.ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
semver: groups[2],
systemInfo: groups[3],
}, nil
}
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to an HTTP User-Agent field, e.g. Lighthouse/v0.1.5 (Linux x86_64).
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.Get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
d := struct {
Data struct {
Version string `json:"version"`
} `json:"data"`
}{}
err = json.Unmarshal(b, &d)
if err != nil {
return nil, errors.Wrapf(err, "error unmarshaling response body: %s", string(b))
}
return parseNodeVersion(d.Data.Version)
}
func renderGetStatePath(id StateOrBlockId) string {
return path.Join(getStatePath, string(id))
}
// GetState retrieves the BeaconState for the given state id.
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.Get(ctx, statePath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
return b, nil
}
// GetWeakSubjectivity calls a proposed API endpoint that is unique to Prysm.
// This API method does the following:
// - computes the weak subjectivity epoch
// - finds the highest non-skipped block preceding that epoch
// - returns the hash_tree_root of the found block along with the value of state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.Get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
v := &apibeacon.GetWeakSubjectivityResponse{}
err = json.Unmarshal(body, v)
if err != nil {
return nil, err
}
epoch, err := strconv.ParseUint(v.Data.WsCheckpoint.Epoch, 10, 64)
if err != nil {
return nil, err
}
blockRoot, err := hexutil.Decode(v.Data.WsCheckpoint.Root)
if err != nil {
return nil, err
}
stateRoot, err := hexutil.Decode(v.Data.StateRoot)
if err != nil {
return nil, err
}
return &WeakSubjectivityData{
Epoch: primitives.Epoch(epoch),
BlockRoot: bytesutil.ToBytes32(blockRoot),
StateRoot: bytesutil.ToBytes32(stateRoot),
}, nil
}
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK, there will be failure messages associated with the corresponding request messages.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shared.SignedBLSToExecutionChange) error {
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewBuffer(body))
if err != nil {
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.Do(req)
if err != nil {
return err
}
defer func() {
err = resp.Body.Close()
}()
if resp.StatusCode != http.StatusOK {
decoder := json.NewDecoder(resp.Body)
decoder.DisallowUnknownFields()
errorJson := &server.IndexedVerificationFailureError{}
if err := decoder.Decode(errorJson); err != nil {
return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
}
return nil
}
// GetBLStoExecutionChanges gets all the BLS-to-execution change messages currently in the node's operations pool.
// Returns a struct representation of the JSON response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExecutionChangesPoolResponse, error) {
body, err := c.Get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
poolResponse := &beacon.BLSToExecutionChangesPoolResponse{}
err = json.Unmarshal(body, poolResponse)
if err != nil {
return nil, err
}
return poolResponse, nil
}
type forkScheduleResponse struct {
Data []shared.Fork
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.Atoi(d.Epoch)
if err != nil {
return nil, err
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
})
}
sort.Sort(ofs)
return ofs, nil
}

View File

@@ -1,139 +0,0 @@
package beacon
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestParseNodeVersion(t *testing.T) {
cases := []struct {
name string
v string
err error
nv *NodeVersion
}{
{
name: "empty string",
v: "",
err: client.ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: client.ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: client.ErrInvalidNodeVersion,
},
{
name: "complete version",
v: "Prysm/v2.0.6 (linux amd64)",
nv: &NodeVersion{
implementation: "Prysm",
semver: "v2.0.6",
systemInfo: "(linux amd64)",
},
},
{
name: "nimbus version",
v: "Nimbus/v22.4.0-039bec-stateofus",
nv: &NodeVersion{
implementation: "Nimbus",
semver: "v22.4.0-039bec-stateofus",
systemInfo: "",
},
},
{
name: "teku version",
v: "teku/v22.3.2/linux-x86_64/oracle-java-11",
nv: &NodeVersion{
implementation: "teku",
semver: "v22.3.2",
systemInfo: "linux-x86_64/oracle-java-11",
},
},
{
name: "lighthouse version",
v: "Lighthouse/v2.1.1-5f628a7/x86_64-linux",
nv: &NodeVersion{
implementation: "Lighthouse",
semver: "v2.1.1-5f628a7",
systemInfo: "x86_64-linux",
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
nv, err := parseNodeVersion(c.v)
if c.err != nil {
require.ErrorIs(t, err, c.err)
} else {
require.NoError(t, err)
require.DeepEqual(t, c.nv, nv)
}
})
}
}
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: client.ErrMalformedHostname,
},
{
name: "hostname with port",
hostArg: "mydomain.org:3500",
path: getNodeVersionPath,
joined: "http://mydomain.org:3500/eth/v1/node/version",
},
{
name: "https scheme, hostname with port",
hostArg: "https://mydomain.org:3500",
path: getNodeVersionPath,
joined: "https://mydomain.org:3500/eth/v1/node/version",
},
{
name: "http scheme, hostname without port",
hostArg: "http://mydomain.org",
path: getNodeVersionPath,
joined: "http://mydomain.org/eth/v1/node/version",
},
{
name: "http scheme, trailing slash, hostname without port",
hostArg: "http://mydomain.org/",
path: getNodeVersionPath,
joined: "http://mydomain.org/eth/v1/node/version",
},
{
name: "http scheme, hostname with basic auth creds and no port",
hostArg: "http://username:pass@mydomain.org/",
path: getNodeVersionPath,
joined: "http://username:pass@mydomain.org/eth/v1/node/version",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}


@@ -1,5 +0,0 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
*/
package beacon


@@ -1,58 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"bid.go",
"client.go",
"errors.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"client_test.go",
"types_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)


@@ -1,291 +0,0 @@
package builder
import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
// SignedBid is an interface describing the method set of a signed builder bid.
type SignedBid interface {
Message() (Bid, error)
Signature() []byte
Version() int
IsNil() bool
}
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlobKzgCommitments() ([][]byte, error)
Value() []byte
Pubkey() []byte
Version() int
IsNil() bool
HashTreeRoot() ([32]byte, error)
HashTreeRootWith(hh *ssz.Hasher) error
}
type signedBuilderBid struct {
p *ethpb.SignedBuilderBid
}
// WrappedSignedBuilderBid is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBid(p *ethpb.SignedBuilderBid) (SignedBid, error) {
w := signedBuilderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBid) Message() (Bid, error) {
return WrappedBuilderBid(b.p.Message)
}
// Signature --
func (b signedBuilderBid) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBid) Version() int {
return version.Bellatrix
}
// IsNil --
func (b signedBuilderBid) IsNil() bool {
return b.p == nil
}
type signedBuilderBidCapella struct {
p *ethpb.SignedBuilderBidCapella
}
// WrappedSignedBuilderBidCapella is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidCapella(p *ethpb.SignedBuilderBidCapella) (SignedBid, error) {
w := signedBuilderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidCapella) Message() (Bid, error) {
return WrappedBuilderBidCapella(b.p.Message)
}
// Signature --
func (b signedBuilderBidCapella) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidCapella) Version() int {
return version.Capella
}
// IsNil --
func (b signedBuilderBidCapella) IsNil() bool {
return b.p == nil
}
type builderBid struct {
p *ethpb.BuilderBid
}
// WrappedBuilderBid is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBid(p *ethpb.BuilderBid) (Bid, error) {
w := builderBid{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Header --
func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// BlobKzgCommitments --
func (b builderBid) BlobKzgCommitments() ([][]byte, error) {
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
}
// Value --
func (b builderBid) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBid) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBid) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBid) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBid) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
type builderBidCapella struct {
p *ethpb.BuilderBidCapella
}
// WrappedBuilderBidCapella is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
w := builderBidCapella{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlobKzgCommitments --
func (b builderBidCapella) BlobKzgCommitments() ([][]byte, error) {
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
}
// Version --
func (b builderBidCapella) Version() int {
return version.Capella
}
// Value --
func (b builderBidCapella) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBidCapella) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidCapella) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
type builderBidDeneb struct {
p *ethpb.BuilderBidDeneb
}
// WrappedBuilderBidDeneb is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidDeneb(p *ethpb.BuilderBidDeneb) (Bid, error) {
w := builderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Version --
func (b builderBidDeneb) Version() int {
return version.Deneb
}
// Value --
func (b builderBidDeneb) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBidDeneb) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidDeneb) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidDeneb) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlobKzgCommitments --
func (b builderBidDeneb) BlobKzgCommitments() ([][]byte, error) {
return b.p.BlobKzgCommitments, nil
}
type signedBuilderBidDeneb struct {
p *ethpb.SignedBuilderBidDeneb
}
// WrappedSignedBuilderBidDeneb is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidDeneb(p *ethpb.SignedBuilderBidDeneb) (SignedBid, error) {
w := signedBuilderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidDeneb) Message() (Bid, error) {
return WrappedBuilderBidDeneb(b.p.Message)
}
// Signature --
func (b signedBuilderBidDeneb) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidDeneb) Version() int {
return version.Deneb
}
// IsNil --
func (b signedBuilderBidDeneb) IsNil() bool {
return b.p == nil
}


@@ -1,451 +0,0 @@
package builder
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strings"
"text/template"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
const (
getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
getStatus = "/eth/v1/builder/status"
postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
postRegisterValidatorPath = "/eth/v1/builder/validators"
)
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
type observer interface {
observe(r *http.Request) error
}
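// WithObserver adds an observer to the client; each observer is given a chance to inspect
// (for example, log) every outgoing request before it is sent.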
func WithObserver(m observer) ClientOpt {
return func(c *Client) {
c.obvs = append(c.obvs, m)
}
}
type requestLogger struct{}
func (*requestLogger) observe(r *http.Request) (e error) {
b := bytes.NewBuffer(nil)
if r.Body == nil {
log.WithFields(log.Fields{
"body-base64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
t := io.TeeReader(r.Body, b)
defer func() {
if r.Body != nil {
e = r.Body.Close()
}
}()
body, err := io.ReadAll(t)
if err != nil {
return err
}
r.Body = io.NopCloser(b)
log.WithFields(log.Fields{
"body-base64": string(body),
"url": r.URL.String(),
}).Info("builder http request")
return nil
}
var _ observer = &requestLogger{}
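// Illustrative sketch (not part of the original file): wiring the request logger into a
// client so every builder API request is logged before it is sent. "localhost:18550" is
// only a placeholder endpoint; any host:port or full URL works.
func exampleLoggingClient() (*Client, error) {
	return NewClient("localhost:18550", WithObserver(&requestLogger{}))
}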
// BuilderClient provides a collection of helper methods for calling Builder API endpoints.
type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
Status(ctx context.Context) error
}
// Client provides a collection of helper methods for calling Builder API endpoints.
type Client struct {
hc *http.Client
baseURL *url.URL
obvs []observer
}
// NewClient constructs a new client with the provided options (e.g. WithObserver).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
if u, err := url.Parse(h); err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, errMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
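// Illustrative sketch (not part of the original file): urlForHost accepts either a full
// URL (used as-is) or a bare host:port (given an http scheme); a hostname without a port
// is rejected with errMalformedHostname. "relay.example.org" is only a placeholder.
func exampleURLForHost() {
	full, _ := urlForHost("https://relay.example.org") // parsed as a URL, scheme preserved
	bare, _ := urlForHost("relay.example.org:18550")   // becomes http://relay.example.org:18550
	_, err := urlForHost("relay.example.org")          // no port -> errMalformedHostname
	log.WithError(err).Debugf("parsed %s and %s", full, bare)
}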
// NodeURL returns a human-readable string representation of the builder node's base URL.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
// do is a generic, opinionated request function that reduces boilerplate among the methods in this package (api/client/builder).
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
ctx, span := trace.StartSpan(ctx, "builder.client.do")
defer func() {
tracing.AnnotateError(span, err)
span.End()
}()
u := c.baseURL.ResolveReference(&url.URL{Path: path})
span.AddAttributes(trace.StringAttribute("url", u.String()),
trace.StringAttribute("method", method))
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return
}
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
}
for _, o := range c.obvs {
if err = o.observe(req); err != nil {
return
}
}
r, err := c.hc.Do(req)
if err != nil {
return
}
defer func() {
closeErr := r.Body.Close()
if closeErr != nil {
log.WithError(closeErr).Error("Failed to close response body")
}
}()
if r.StatusCode != http.StatusOK {
err = non200Err(r)
return
}
res, err = io.ReadAll(r.Body)
if err != nil {
err = errors.Wrap(err, "error reading http response body from builder server")
return
}
return
}
var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
func execHeaderPath(slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
v := struct {
Slot primitives.Slot
ParentHash string
Pubkey string
}{
Slot: slot,
ParentHash: fmt.Sprintf("%#x", parentHash),
Pubkey: fmt.Sprintf("%#x", pubkey),
}
b := bytes.NewBuffer(nil)
err := execHeaderTemplate.Execute(b, v)
if err != nil {
return "", errors.Wrapf(err, "error rendering exec header template with slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
return b.String(), nil
}
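// Illustrative sketch (not part of the original file): execHeaderPath renders getExecHeaderPath
// with the slot and 0x-prefixed hex encodings of the parent hash and pubkey, e.g. slot 23
// yields "/eth/v1/builder/header/23/0x<64 hex chars>/0x<96 hex chars>".
func examplePath(parentHash [32]byte, pubkey [48]byte) (string, error) {
	return execHeaderPath(23, parentHash, pubkey)
}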
// GetHeader is used by a proposing validator to request an execution payload header from the Builder node.
func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error) {
path, err := execHeaderPath(slot, parentHash, pubkey)
if err != nil {
return nil, err
}
hb, err := c.do(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
}
v := &VersionResponse{}
if err := json.Unmarshal(hb, v); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Deneb)):
hr := &ExecHeaderResponseDeneb{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidDeneb(p)
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidCapella(p)
case strings.ToLower(version.String(version.Bellatrix)):
hr := &ExecHeaderResponse{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrap(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBid(p)
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
}
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
if len(svr) == 0 {
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
tracing.AnnotateError(span, err)
return err
}
vs := make([]*shared.SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
vs[i] = shared.SignedValidatorRegistrationFromConsensus(svr[i])
}
body, err := json.Marshal(vs)
if err != nil {
err := errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
tracing.AnnotateError(span, err)
return err
}
_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
return err
}
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
if !sb.IsBlinded() {
return nil, nil, errNotBlinded
}
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := shared.SignedBlindedBeaconBlockBellatrixFromConsensus(&ethpb.SignedBlindedBeaconBlockBellatrix{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockBellatrix to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayload(p)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := shared.SignedBlindedBeaconBlockCapellaFromConsensus(&ethpb.SignedBlindedBeaconBlockCapella{Block: psb.Block, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockCapella to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Deneb:
psb, err := sb.PbBlindedDenebBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b, err := shared.SignedBlindedBeaconBlockDenebFromConsensus(&ethpb.SignedBlindedBeaconBlockDeneb{Message: psb.Message, Signature: bytesutil.SafeCopyBytes(psb.Signature)})
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert SignedBlindedBeaconBlockDeneb to json marshalable type")
}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
}
ep := &ExecPayloadResponseDeneb{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
}
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
return nil, nil, errors.New("not a deneb payload")
}
p, blobBundle, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, blobBundle, nil
default:
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
}
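// Illustrative sketch (not part of the original file): the caller passes a signed blinded
// block of any supported fork and receives the unblinded execution payload; the blobs
// bundle return value is only populated for Deneb and later blocks.
func exampleSubmit(ctx context.Context, c *Client, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
	payload, blobs, err := c.SubmitBlindedBlock(ctx, sb)
	if err != nil {
		return nil, err
	}
	if blobs != nil {
		log.WithField("numBlobs", len(blobs.Blobs)).Info("received blobs bundle from builder")
	}
	return payload, nil
}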
// Status asks the remote builder server for a health check. A 200 response with an empty body
// indicates the builder is healthy. The method returns nil in the happy path and, for any
// non-200 response, an error that includes information from the server's response body.
func (c *Client) Status(ctx context.Context) error {
_, err := c.do(ctx, http.MethodGet, getStatus, nil)
return err
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var errMessage ErrorMessage
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case http.StatusNoContent:
log.WithError(ErrNoContent).Debug(msg)
return ErrNoContent
case http.StatusBadRequest:
log.WithError(ErrBadRequest).Debug(msg)
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
return errors.Wrap(ErrBadRequest, errMessage.Message)
case http.StatusNotFound:
log.WithError(ErrNotFound).Debug(msg)
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
return errors.Wrap(ErrNotFound, errMessage.Message)
case http.StatusInternalServerError:
log.WithError(ErrNotOK).Debug(msg)
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
return errors.Wrap(ErrNotOK, errMessage.Message)
default:
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
}
}
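// Illustrative sketch (not part of the original file): callers branch on the sentinel
// errors produced by non200Err; for example, a 204 from the header endpoint surfaces as
// ErrNoContent and typically means the builder has no bid for the requested slot.
func exampleHandleNoBid(ctx context.Context, c *Client, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, bool, error) {
	sb, err := c.GetHeader(ctx, slot, parentHash, pubkey)
	if errors.Is(err, ErrNoContent) {
		return nil, false, nil // no bid available; fall back to a locally built payload
	}
	if err != nil {
		return nil, false, err
	}
	return sb, true, nil
}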


@@ -1,892 +0,0 @@
package builder
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"math/big"
"net/http"
"net/url"
"strconv"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
log "github.com/sirupsen/logrus"
)
type roundtrip func(*http.Request) (*http.Response, error)
func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) {
return fn(r)
}
func TestClient_Status(t *testing.T) {
ctx := context.Background()
statusPath := "/eth/v1/builder/status"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
defer func() {
if r.Body == nil {
return
}
require.NoError(t, r.Body.Close())
}()
require.Equal(t, statusPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBuffer(nil)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
require.NoError(t, c.Status(ctx))
hc = &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
defer func() {
if r.Body == nil {
return
}
require.NoError(t, r.Body.Close())
}()
require.Equal(t, statusPath, r.URL.Path)
message := ErrorMessage{
Code: 500,
Message: "Internal server error",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusInternalServerError,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c = &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
require.ErrorIs(t, c.Status(ctx), ErrNotOK)
}
func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
body, err := io.ReadAll(r.Body)
defer func() {
require.NoError(t, r.Body.Close())
}()
require.NoError(t, err)
require.Equal(t, expectedBody, string(body))
require.Equal(t, expectedPath, r.URL.Path)
require.Equal(t, http.MethodPost, r.Method)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBuffer(nil)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
reg := &eth.SignedValidatorRegistrationV1{
Message: &eth.ValidatorRegistrationV1{
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
GasLimit: 23,
Timestamp: 42,
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}
func TestClient_GetHeader(t *testing.T) {
ctx := context.Background()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot types.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
message := ErrorMessage{
Code: 500,
Message: "Internal server error",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusInternalServerError,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNotOK)
})
t.Run("header not available", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusNoContent,
Body: io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNoContent)
})
t.Run("bellatrix", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature()))
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
txRoot, err := bidHeader.TransactionsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedTxRoot, txRoot))
require.Equal(t, uint64(1), bidHeader.GasUsed())
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseCapella)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
kzgCommitments, err := bid.BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, true, len(kzgCommitments) > 0)
for i := range kzgCommitments {
require.Equal(t, true, len(kzgCommitments[i]) == 48)
}
})
t.Run("deneb, too many kzg commitments", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDenebTooManyBlobs)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseUnknownVersion)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorContains(t, "unsupported header version", err)
})
}
func TestSubmitBlindedBlock(t *testing.T) {
ctx := context.Background()
t.Run("bellatrix", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas()))
require.Equal(t, uint64(1), ep.GasLimit())
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
require.NoError(t, err)
ep, _, err := c.SubmitBlindedBlock(ctx, sbb)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("deneb", func(t *testing.T) {
test := testSignedBlindedBeaconBlockDeneb(t)
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
var req shared.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)
block, err := req.ToConsensus()
require.NoError(t, err)
require.DeepEqual(t, block, test)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbb, err := blocks.NewSignedBeaconBlock(test)
require.NoError(t, err)
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
require.NotNil(t, blobBundle)
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)), // send a Capella payload
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, _, err = c.SubmitBlindedBlock(ctx, sbbb)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{ExecutionPayload: &v1.ExecutionPayload{}}}})
require.NoError(t, err)
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
require.ErrorIs(t, err, errNotBlinded)
})
}
func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaconBlockBellatrix {
return &eth.SignedBlindedBeaconBlockBellatrix{
Block: &eth.BlindedBeaconBlockBellatrix{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyBellatrix{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeader{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
}
func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconBlockCapella {
return &eth.SignedBlindedBeaconBlockCapella{
Block: &eth.BlindedBeaconBlockCapella{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyCapella{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderCapella{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
}
func testSignedBlindedBeaconBlockDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockDeneb {
basebytes, err := bytesutil.Uint256ToSSZBytes("14074904626401341155369551180448584754667373453244490859944217516317499064576")
if err != nil {
log.Error(err)
}
return &eth.SignedBlindedBeaconBlockDeneb{
Message: &eth.BlindedBeaconBlockDeneb{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyDeneb{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 96),
SyncCommitteeBits: ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: basebytes,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlobGasUsed: 1,
ExcessBlobGas: 2,
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)
require.NoError(t, err)
ctx := context.Background()
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, getStatus, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
Request: r.Clone(ctx),
}, nil
}),
}
c.hc = hc
err = c.Status(ctx)
require.NoError(t, err)
}


@@ -1,17 +0,0 @@
package builder
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 200 response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrBadRequest specifically means that a '400 - BAD REQUEST' response was received from the API.
var ErrBadRequest = errors.Wrap(ErrNotOK, "recv 400 BadRequest response from API")
// ErrNoContent specifically means that a '204 - No Content' response was received from the API.
// Typically a 204 is a success, but for the Header API it means that no header is available.
var ErrNoContent = errors.New("recv 204 no content response from API, No header is available")
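Because the more specific sentinels above are created with errors.Wrap, a caller can match either the specific or the generic error with errors.Is. A minimal sketch of that behavior (not part of this diff; the variables are local stand-ins for the sentinels above, and pkg/errors v0.9+ is assumed so its wrappers implement Unwrap):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

// Local stand-ins mirroring ErrNotOK and ErrNotFound above.
var errNotOK = errors.New("did not receive 200 response from API")
var errNotFound = errors.Wrap(errNotOK, "recv 404 NotFound response from API")

func main() {
	// A caller typically wraps the error once more with its own context.
	err := errors.Wrap(errNotFound, "requesting builder header")
	fmt.Println(stderrors.Is(err, errNotFound)) // true
	fmt.Println(stderrors.Is(err, errNotOK))    // true: errNotFound wraps errNotOK
}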

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1 +0,0 @@
{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"14074904626401341155369551180448584754667373453244490859944217516317499064576","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}


@@ -1 +0,0 @@
{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"14074904626401341155369551180448584754667373453244490859944217516317499064576","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}


@@ -1,16 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing",
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)


@@ -1,51 +0,0 @@
package testing
import (
"context"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// MockClient is a mock implementation of BuilderClient.
type MockClient struct {
RegisteredVals map[[48]byte]bool
}
// NewClient creates a new, correctly initialized mock.
func NewClient() MockClient {
return MockClient{RegisteredVals: map[[48]byte]bool{}}
}
// NodeURL --
func (MockClient) NodeURL() string {
return ""
}
// GetHeader --
func (MockClient) GetHeader(_ context.Context, _ primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
return nil, nil
}
// RegisterValidator --
func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
for _, r := range svr {
b := bytesutil.ToBytes48(r.Message.Pubkey)
m.RegisteredVals[b] = true
}
return nil
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
return nil, nil, nil
}
// Status --
func (MockClient) Status(_ context.Context) error {
return nil
}
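A hypothetical test (not part of this diff) exercising the mock above; it assumes the v1alpha1 ValidatorRegistrationV1 message with a Pubkey field, and shows that RegisterValidator records each pubkey in RegisteredVals:

package testing

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestMockClientRegistersValidator(t *testing.T) {
	m := NewClient()
	reg := &ethpb.SignedValidatorRegistrationV1{
		Message: &ethpb.ValidatorRegistrationV1{Pubkey: make([]byte, 48)},
	}
	require.NoError(t, m.RegisterValidator(context.Background(), []*ethpb.SignedValidatorRegistrationV1{reg}))
	// The registration is visible to the test through the shared RegisteredVals map.
	require.Equal(t, true, m.RegisteredVals[bytesutil.ToBytes48(reg.Message.Pubkey)])
}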

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,97 +0,0 @@
package client
import (
"context"
"io"
"net"
"net/http"
"net/url"
"github.com/pkg/errors"
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
// Token returns the bearer token used for JWT authentication.
func (c *Client) Token() string {
return c.token
}
// BaseURL returns the base URL of the client.
func (c *Client) BaseURL() *url.URL {
return c.baseURL
}
// Do executes the request against the underlying HTTP client.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
return c.hc.Do(req)
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
return b, nil
}
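A minimal usage sketch (not part of this diff) of the Client above: construct it from a host:port string with functional options, issue a Get, and branch on the sentinel errors that Non200Err (defined in this package's errors.go, shown further below) wraps. The endpoint path and port are illustrative only:

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	c, err := client.NewClient("localhost:3500", client.WithTimeout(10*time.Second))
	if err != nil {
		log.Fatal(err) // e.g. ErrMalformedHostname when the port is missing
	}
	body, err := c.Get(context.Background(), "/eth/v1/node/version")
	switch {
	case err == nil:
		log.Printf("got %d bytes from %s", len(body), c.NodeURL())
	case errors.Is(err, client.ErrNotFound):
		log.Print("endpoint not found on this node")
	case errors.Is(err, client.ErrNotOK):
		log.Printf("non-2xx response: %v", err)
	default:
		log.Printf("transport error: %v", err)
	}
}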


@@ -1,48 +0,0 @@
package client
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}
func TestWithAuthenticationToken(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500", WithAuthenticationToken("my token"))
require.NoError(t, err)
require.Equal(t, cl.Token(), "my token")
}
func TestBaseURL(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500")
require.NoError(t, err)
require.Equal(t, "www.offchainlabs.com", cl.BaseURL().Hostname())
require.Equal(t, "3500", cl.BaseURL().Port())
}


@@ -1,40 +0,0 @@
package client
import (
"fmt"
"io"
"net/http"
"github.com/pkg/errors"
)
// ErrMalformedHostname is used to indicate if a host name's format is incorrect.
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// Non200Err parses an HTTP response and returns a formatted error for any response that is not a 200.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case http.StatusNotFound:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}


@@ -1,48 +0,0 @@
package client
import (
"fmt"
"net/http"
"time"
)
// ReqOption is a request functional option.
type ReqOption func(*http.Request)
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
func WithSSZEncoding() ReqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// WithAuthorizationToken is a request functional option that adds header for authorization token.
func WithAuthorizationToken(token string) ReqOption {
return func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// WithRoundTripper replaces the underlying HTTP's transport with a custom one.
func WithRoundTripper(t http.RoundTripper) ClientOpt {
return func(c *Client) {
c.hc.Transport = t
}
}
// WithAuthenticationToken sets an oauth token to be used.
func WithAuthenticationToken(token string) ClientOpt {
return func(c *Client) {
c.token = token
}
}
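The ClientOpt and ReqOption types above are plain functional options, so new ones can be added without touching the Client. A hypothetical sketch (not part of this diff) of a custom option; it would have to live in this package because it touches the unexported hc field, and WithUserAgent/roundTripperFunc are invented names:

// WithUserAgent is a hypothetical ClientOpt that stamps a User-Agent header on
// every outgoing request by wrapping the client's transport.
func WithUserAgent(agent string) ClientOpt {
	return func(c *Client) {
		base := c.hc.Transport
		if base == nil {
			base = http.DefaultTransport
		}
		c.hc.Transport = roundTripperFunc(func(r *http.Request) (*http.Response, error) {
			r = r.Clone(r.Context())
			r.Header.Set("User-Agent", agent)
			return base.RoundTrip(r)
		})
	}
}

// roundTripperFunc adapts an ordinary function to http.RoundTripper.
type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }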


@@ -1,13 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/validator",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//validator/rpc:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)


@@ -1,121 +0,0 @@
package validator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc"
)
const (
localKeysPath = "/eth/v1/keystores"
remoteKeysPath = "/eth/v1/remotekeys"
feeRecipientPath = "/eth/v1/validator/{pubkey}/feerecipient"
)
// Client provides a collection of helper methods for calling the Keymanager API endpoints.
type Client struct {
*client.Client
}
// NewClient returns a new Client that includes functions for REST calls to keymanager APIs.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
return &Client{c}, nil
}
// GetValidatorPubKeys gets the current list of local and web3signer (remote) validator public keys in hex format.
func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
jsonlocal, err := c.GetLocalValidatorKeys(ctx)
if err != nil {
return nil, err
}
jsonremote, err := c.GetRemoteValidatorKeys(ctx)
if err != nil {
return nil, err
}
if len(jsonlocal.Data) == 0 && len(jsonremote.Data) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}
hexKeys := make(map[string]bool)
for index := range jsonlocal.Data {
hexKeys[jsonlocal.Data[index].ValidatingPubkey] = true
}
for index := range jsonremote.Data {
hexKeys[jsonremote.Data[index].Pubkey] = true
}
keys := make([]string, 0)
for k := range hexKeys {
keys = append(keys, k)
}
return keys, nil
}
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*rpc.ListKeystoresResponse, error) {
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
jsonlocal := &rpc.ListKeystoresResponse{}
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
return nil, errors.Wrap(err, "failed to parse local keystore list")
}
return jsonlocal, nil
}
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*rpc.ListRemoteKeysResponse, error) {
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
return nil, err
}
}
jsonremote := &rpc.ListRemoteKeysResponse{}
if len(remoteBytes) != 0 {
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
return nil, errors.Wrap(err, "failed to parse remote keystore list")
}
}
return jsonremote, nil
}
// GetFeeRecipientAddresses takes a list of validators in hex format and returns an equal length list of fee recipients in hex format.
func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []string) ([]string, error) {
feeRecipients := make([]string, len(validators))
for index, validator := range validators {
feejson, err := c.GetFeeRecipientAddress(ctx, validator)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("keymanager API failed to retrieve fee recipient for validator %s", validators[index]))
}
if feejson.Data == nil {
continue
}
feeRecipients[index] = feejson.Data.Ethaddress
}
return feeRecipients, nil
}
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*rpc.GetFeeRecipientByPubkeyResponse, error) {
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
feejson := &rpc.GetFeeRecipientByPubkeyResponse{}
if err := json.Unmarshal(b, feejson); err != nil {
return nil, errors.Wrap(err, "failed to parse fee recipient")
}
return feejson, nil
}
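A hypothetical end-to-end walkthrough (not part of this diff) of the keymanager client above: list every local and remote validator pubkey, then resolve a fee recipient for each. The host, port, and API token are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/validator"
)

func main() {
	vc, err := validator.NewClient("localhost:7500", client.WithAuthenticationToken("<keymanager-api-token>"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	keys, err := vc.GetValidatorPubKeys(ctx)
	if err != nil {
		log.Fatal(err)
	}
	recipients, err := vc.GetFeeRecipientAddresses(ctx, keys)
	if err != nil {
		log.Fatal(err)
	}
	// GetFeeRecipientAddresses returns one entry per input key, so the two
	// slices can be zipped together directly.
	for i, k := range keys {
		fmt.Printf("%s -> %s\n", k, recipients[i])
	}
}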


@@ -1,3 +0,0 @@
package api
const WebUrlPrefix = "/v2/validator/"


@@ -1,43 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"gateway.go",
"log.go",
"modifiers.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway",
visibility = [
"//beacon-chain:__subpackages__",
"//validator:__subpackages__",
],
deps = [
"//api/server:go_default_library",
"//runtime:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//connectivity:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_grpc//credentials/insecure:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["gateway_test.go"],
embed = [":go_default_library"],
deps = [
"//cmd/beacon-chain/flags:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)


@@ -1,212 +0,0 @@
// Package gateway defines a grpc-gateway server that serves HTTP-JSON traffic and acts as a proxy between HTTP and gRPC.
package gateway
import (
"context"
"fmt"
"net"
"net/http"
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/server"
"github.com/prysmaticlabs/prysm/v4/runtime"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
var _ runtime.Service = (*Gateway)(nil)
// PbMux serves grpc-gateway requests for selected patterns using registered protobuf handlers.
type PbMux struct {
Registrations []PbHandlerRegistration // Protobuf registrations to be registered in Mux.
Patterns []string // URL patterns that will be handled by Mux.
Mux *gwruntime.ServeMux // The router that will be used for grpc-gateway requests.
}
// PbHandlerRegistration is a function that registers a protobuf handler.
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
// MuxHandler is a function that implements the mux handler functionality.
type MuxHandler func(
h http.HandlerFunc,
w http.ResponseWriter,
req *http.Request,
)
// Config parameters for setting up the gateway service.
type config struct {
maxCallRecvMsgSize uint64
remoteCert string
gatewayAddr string
remoteAddr string
allowedOrigins []string
muxHandler MuxHandler
pbHandlers []*PbMux
router *mux.Router
timeout time.Duration
}
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
type Gateway struct {
cfg *config
conn *grpc.ClientConn
server *http.Server
cancel context.CancelFunc
ctx context.Context
startFailure error
}
// New returns a new instance of the Gateway.
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
g := &Gateway{
ctx: ctx,
cfg: &config{},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
g.cfg.router = mux.NewRouter()
}
return g, nil
}
// Start the gateway service.
func (g *Gateway) Start() {
ctx, cancel := context.WithCancel(g.ctx)
g.cancel = cancel
conn, err := g.dial(ctx, "tcp", g.cfg.remoteAddr)
if err != nil {
log.WithError(err).Error("Failed to connect to gRPC server")
g.startFailure = err
return
}
g.conn = conn
for _, h := range g.cfg.pbHandlers {
for _, r := range h.Registrations {
if err := r(ctx, h.Mux, g.conn); err != nil {
log.WithError(err).Error("Failed to register handler")
g.startFailure = err
return
}
}
for _, p := range h.Patterns {
g.cfg.router.PathPrefix(p).Handler(h.Mux)
}
}
corsMux := server.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
if g.cfg.muxHandler != nil {
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.cfg.muxHandler(corsMux.ServeHTTP, w, r)
})
}
g.server = &http.Server{
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
ReadHeaderTimeout: time.Second,
}
go func() {
log.WithField("address", g.cfg.gatewayAddr).Info("Starting gRPC gateway")
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to start gRPC gateway")
g.startFailure = err
return
}
}()
}
// Status of grpc gateway. Returns an error if this service is unhealthy.
func (g *Gateway) Status() error {
if g.startFailure != nil {
return g.startFailure
}
if s := g.conn.GetState(); s != connectivity.Ready {
return fmt.Errorf("grpc server is %s", s)
}
return nil
}
// Stop the gateway with a graceful shutdown.
func (g *Gateway) Stop() error {
if g.server != nil {
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
defer shutdownCancel()
if err := g.server.Shutdown(shutdownCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
log.Warn("Existing connections terminated")
} else {
log.WithError(err).Error("Failed to gracefully shut down server")
}
}
}
if g.cancel != nil {
g.cancel()
}
return nil
}
// dial the gRPC server.
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
switch network {
case "tcp":
return g.dialTCP(ctx, addr)
case "unix":
return g.dialUnix(ctx, addr)
default:
return nil, fmt.Errorf("unsupported network type %q", network)
}
}
// dialTCP creates a client connection via TCP.
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
var security grpc.DialOption
if len(g.cfg.remoteCert) > 0 {
creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
if err != nil {
return nil, err
}
security = grpc.WithTransportCredentials(creds)
} else {
// Use insecure credentials when there's no remote cert provided.
security = grpc.WithTransportCredentials(insecure.NewCredentials())
}
opts := []grpc.DialOption{
security,
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
// dialUnix creates a client connection via a unix domain socket.
// "addr" must be a valid path to the socket.
func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn, error) {
d := func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)
}
f := func(ctx context.Context, addr string) (net.Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
return d(addr, time.Until(deadline))
}
return d(addr, 0)
}
opts := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(f),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}


@@ -1,107 +0,0 @@
package gateway
import (
"context"
"flag"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
func TestGateway_Customized(t *testing.T) {
r := mux.NewRouter()
cert := "cert"
origins := []string{"origin"}
size := uint64(100)
opts := []Option{
WithRouter(r),
WithRemoteCert(cert),
WithAllowedOrigins(origins),
WithMaxCallRecvMsgSize(size),
WithMuxHandler(func(
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
assert.Equal(t, r, g.cfg.router)
assert.Equal(t, cert, g.cfg.remoteCert)
require.Equal(t, 1, len(g.cfg.allowedOrigins))
assert.Equal(t, origins[0], g.cfg.allowedOrigins[0])
assert.Equal(t, size, g.cfg.maxCallRecvMsgSize)
}
func TestGateway_StartStop(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
WithMuxHandler(func(
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
g.Start()
go func() {
require.LogsContain(t, hook, "Starting gRPC gateway")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
require.NoError(t, err)
}
func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
writer := httptest.NewRecorder()
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}


@@ -1,30 +0,0 @@
package gateway
import (
"context"
"net/http"
"strconv"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/protobuf/proto"
)
func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
md, ok := gwruntime.ServerMetadataFromContext(ctx)
if !ok {
return nil
}
// set http status code
if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
code, err := strconv.Atoi(vals[0])
if err != nil {
return err
}
// delete the headers to not expose any grpc-metadata in http response
delete(md.HeaderMD, "x-http-code")
delete(w.Header(), "Grpc-Metadata-X-Http-Code")
w.WriteHeader(code)
}
return nil
}
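For context, the metadata consumed above is set on the gRPC side of the gateway. A hypothetical handler fragment (not part of this diff; the server and pb types are placeholders) showing how a service would request a non-default HTTP status:

// Somewhere in a gRPC service implementation (placeholder types):
func (s *server) DoSomething(ctx context.Context, req *pb.Request) (*pb.Response, error) {
	// Ask the gateway to respond with 202; HttpResponseModifier reads the
	// "x-http-code" header, writes the status, and strips the metadata so it
	// never leaks into the HTTP response headers.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", "202")); err != nil {
		return nil, err
	}
	return &pb.Response{}, nil
}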


@@ -1,79 +0,0 @@
package gateway
import (
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)
type Option func(g *Gateway) error
func WithPbHandlers(handlers []*PbMux) Option {
return func(g *Gateway) error {
g.cfg.pbHandlers = handlers
return nil
}
}
func WithMuxHandler(m MuxHandler) Option {
return func(g *Gateway) error {
g.cfg.muxHandler = m
return nil
}
}
func WithGatewayAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.gatewayAddr = addr
return nil
}
}
func WithRemoteAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.remoteAddr = addr
return nil
}
}
// WithRouter allows adding a custom mux router to the gateway.
func WithRouter(r *mux.Router) Option {
return func(g *Gateway) error {
g.cfg.router = r
return nil
}
}
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
func WithAllowedOrigins(origins []string) Option {
return func(g *Gateway) error {
g.cfg.allowedOrigins = origins
return nil
}
}
// WithRemoteCert allows adding a custom certificate to the gateway.
func WithRemoteCert(cert string) Option {
return func(g *Gateway) error {
g.cfg.remoteCert = cert
return nil
}
}
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
func WithMaxCallRecvMsgSize(size uint64) Option {
return func(g *Gateway) error {
g.cfg.maxCallRecvMsgSize = size
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(seconds uint64) Option {
return func(g *Gateway) error {
g.cfg.timeout = time.Second * time.Duration(seconds)
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
return nil
}
}
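A hypothetical assembly sketch (not part of this diff) tying the pieces of this package together: a grpc-gateway ServeMux configured with HttpResponseModifier is wrapped in a PbMux and handed to New via the options above. buildGateway and the reg argument (a generated grpc-gateway registration function) are placeholders:

package main

import (
	"context"
	"net/http"

	gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/prysmaticlabs/prysm/v4/api/gateway"
)

func buildGateway(ctx context.Context, reg gateway.PbHandlerRegistration, remoteAddr, gatewayAddr string) (*gateway.Gateway, error) {
	gwmux := gwruntime.NewServeMux(
		gwruntime.WithForwardResponseOption(gateway.HttpResponseModifier),
	)
	pbMux := &gateway.PbMux{
		Registrations: []gateway.PbHandlerRegistration{reg},
		Patterns:      []string{"/eth/v1/", "/eth/v2/"},
		Mux:           gwmux,
	}
	return gateway.New(ctx,
		gateway.WithRemoteAddr(remoteAddr),   // gRPC server the gateway proxies to
		gateway.WithGatewayAddr(gatewayAddr), // HTTP listen address
		gateway.WithPbHandlers([]*gateway.PbMux{pbMux}),
		gateway.WithMuxHandler(func(h http.HandlerFunc, w http.ResponseWriter, r *http.Request) {
			h(w, r)
		}),
	)
}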


@@ -1,30 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"grpcutils.go",
"parameters.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/grpc",
visibility = ["//visibility:public"],
deps = [
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["grpcutils_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
],
)


@@ -1,16 +0,0 @@
package grpc
// CustomErrorMetadataKey is the name of the metadata key storing additional error information.
// Metadata value is expected to be a byte-encoded JSON object.
const CustomErrorMetadataKey = "Custom-Error"
// HttpCodeMetadataKey is the key to use when setting custom HTTP status codes in gRPC metadata.
const HttpCodeMetadataKey = "X-Http-Code"
// MetadataPrefix is the prefix applied to gRPC metadata headers.
const MetadataPrefix = "Grpc-Metadata"
// WithPrefix prepends the gRPC metadata prefix to the given value.
func WithPrefix(value string) string {
return MetadataPrefix + "-" + value
}
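A small hypothetical illustration (not part of this diff) of how these constants relate to the gateway code above: the prefixed form produced by WithPrefix is exactly the header that HttpResponseModifier deletes from the HTTP response:

// Assuming this package is imported as prysmgrpc to avoid clashing with
// google.golang.org/grpc:
header := prysmgrpc.WithPrefix(prysmgrpc.HttpCodeMetadataKey) // "Grpc-Metadata-X-Http-Code"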


@@ -1,11 +0,0 @@
package api
const (
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
EventStreamMediaType = "text/event-stream"
)


@@ -1,22 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["pagination.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/pagination",
visibility = ["//visibility:public"],
deps = [
"//config/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["pagination_test.go"],
deps = [
":go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)


@@ -1,103 +0,0 @@
package pagination_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/api/pagination"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestStartAndEndPage(t *testing.T) {
tests := []struct {
token string
pageSize int
totalSize int
nextToken string
start int
end int
}{
{
token: "0",
pageSize: 9,
totalSize: 100,
nextToken: "1",
start: 0,
end: 9,
},
{
token: "10",
pageSize: 4,
totalSize: 100,
nextToken: "11",
start: 40,
end: 44,
},
{
token: "100",
pageSize: 5,
totalSize: 1000,
nextToken: "101",
start: 500,
end: 505,
},
{
token: "3",
pageSize: 33,
totalSize: 100,
nextToken: "",
start: 99,
end: 100,
},
{
token: "34",
pageSize: 500,
totalSize: 17500,
nextToken: "",
start: 17000,
end: 17500,
},
}
for _, test := range tests {
start, end, next, err := pagination.StartAndEndPage(test.token, test.pageSize, test.totalSize)
require.NoError(t, err)
if test.start != start {
t.Errorf("expected start and computed start are not equal %d, %d", test.start, start)
}
if test.end != end {
t.Errorf("expected end and computed end are not equal %d, %d", test.end, end)
}
if test.nextToken != next {
t.Errorf("expected next token and computed next token are not equal %v, %v", test.nextToken, next)
}
}
}
func TestStartAndEndPage_CannotConvertPage(t *testing.T) {
wanted := "could not convert page token: strconv.Atoi: parsing"
_, _, _, err := pagination.StartAndEndPage("bad", 0, 0)
assert.ErrorContains(t, wanted, err)
}
func TestStartAndEndPage_ExceedsMaxPage(t *testing.T) {
wanted := "page start 0 >= list 0"
_, _, _, err := pagination.StartAndEndPage("", 0, 0)
assert.ErrorContains(t, wanted, err)
}
func TestStartAndEndPage_InvalidPageValues(t *testing.T) {
_, _, _, err := pagination.StartAndEndPage("10", -1, 10)
assert.ErrorContains(t, "invalid page and total sizes provided", err)
_, _, _, err = pagination.StartAndEndPage("12", 10, -10)
assert.ErrorContains(t, "invalid page and total sizes provided", err)
_, _, _, err = pagination.StartAndEndPage("12", -50, -60)
assert.ErrorContains(t, "invalid page and total sizes provided", err)
}
func TestStartAndEndPage_InvalidTokenValue(t *testing.T) {
_, _, _, err := pagination.StartAndEndPage("-12", 50, 60)
assert.ErrorContains(t, "invalid token value provided", err)
}
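A hypothetical paging loop (not part of this diff) built on StartAndEndPage, matching the cases in the test table above; forEachPage and visit are invented names, and the loop assumes the next token comes back empty on the final page, as in the last two test cases:

package main

import "github.com/prysmaticlabs/prysm/v4/api/pagination"

func forEachPage(items []string, pageSize int, visit func(page []string)) error {
	token := "0"
	for {
		start, end, next, err := pagination.StartAndEndPage(token, pageSize, len(items))
		if err != nil {
			return err
		}
		visit(items[start:end]) // e.g. token "0" with pageSize 9 covers items[0:9]
		if next == "" {
			return nil
		}
		token = next
	}
}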


@@ -1,30 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"error.go",
"middleware.go",
"util.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/server",
visibility = ["//visibility:public"],
deps = [
"@com_github_gorilla_mux//:go_default_library",
"@com_github_rs_cors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"error_test.go",
"middleware_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)


@@ -1,45 +0,0 @@
package server
import (
"fmt"
"strings"
)
// DecodeError represents an error resulting from trying to decode an HTTP request.
// It tracks the full field name for which decoding failed.
type DecodeError struct {
path []string
err error
}
// NewDecodeError wraps an error (either the initial decoding error or another DecodeError).
// The current field that failed decoding must be passed in.
func NewDecodeError(err error, field string) *DecodeError {
de, ok := err.(*DecodeError)
if ok {
return &DecodeError{path: append([]string{field}, de.path...), err: de.err}
}
return &DecodeError{path: []string{field}, err: err}
}
// Error returns the formatted error message which contains the full field name and the actual decoding error.
func (e *DecodeError) Error() string {
return fmt.Sprintf("could not decode %s: %s", strings.Join(e.path, "."), e.err.Error())
}
// IndexedVerificationFailureError wraps a collection of verification failures.
type IndexedVerificationFailureError struct {
Message string `json:"message"`
Code int `json:"code"`
Failures []*IndexedVerificationFailure `json:"failures"`
}
func (e *IndexedVerificationFailureError) StatusCode() int {
return e.Code
}
// IndexedVerificationFailure represents an issue when verifying a single indexed object e.g. an item in an array.
type IndexedVerificationFailure struct {
Index int `json:"index"`
Message string `json:"message"`
}
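A hypothetical decoding helper (not part of this diff; decodeAmount and the field names are invented) showing how nested NewDecodeError calls accumulate the full field path, as the test below also demonstrates:

func decodeAmount(raw string) (uint64, error) {
	v, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		// The innermost call records the leaf field name.
		return 0, NewDecodeError(err, "amount")
	}
	return v, nil
}

// At the call site the enclosing object name is prepended, so a failure
// surfaces as "could not decode deposit.amount: ...":
//
//	if _, err := decodeAmount(req.Deposit.Amount); err != nil {
//		return NewDecodeError(err, "deposit")
//	}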


@@ -1,16 +0,0 @@
package server
import (
"errors"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
)
func TestDecodeError(t *testing.T) {
e := errors.New("not a number")
de := NewDecodeError(e, "Z")
de = NewDecodeError(de, "Y")
de = NewDecodeError(de, "X")
assert.Equal(t, "could not decode X.Y.Z: not a number", de.Error())
}


@@ -1,32 +0,0 @@
package server
import (
"net/http"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
NormalizeQueryValues(query)
r.URL.RawQuery = query.Encode()
next.ServeHTTP(w, r)
})
}
// CorsHandler sets the cors settings on api endpoints
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
c := cors.New(cors.Options{
AllowedOrigins: allowOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
AllowCredentials: true,
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
return c.Handler
}
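A hypothetical wiring sketch (not part of this diff; newAPIHandler is an invented name and the api/server package is assumed to be imported as server) showing how the two middlewares above compose around a gorilla/mux router, mirroring the CorsHandler(...).Middleware(...) call used by the gateway earlier in this change:

func newAPIHandler(allowedOrigins []string) http.Handler {
	r := mux.NewRouter()
	// Split comma-separated query values before any route handler runs.
	r.Use(server.NormalizeQueryValuesHandler)
	// Apply the CORS policy around the whole router.
	return server.CorsHandler(allowedOrigins).Middleware(r)
}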


@@ -1,54 +0,0 @@
package server
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestNormalizeQueryValuesHandler(t *testing.T) {
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
require.NoError(t, err)
})
handler := NormalizeQueryValuesHandler(nextHandler)
tests := []struct {
name string
inputQuery string
expectedQuery string
}{
{
name: "3 values",
inputQuery: "key=value1,value2,value3",
expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if rr.Code != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
}
if req.URL.RawQuery != test.expectedQuery {
t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
}
if rr.Body.String() != "next handler" {
t.Errorf("next handler was not executed")
}
})
}
}


@@ -1,17 +0,0 @@
package server
import (
"net/url"
"strings"
)
// NormalizeQueryValues replaces comma-separated values with individual values
func NormalizeQueryValues(queryParams url.Values) {
for key, vals := range queryParams {
splitVals := make([]string, 0)
for _, v := range vals {
splitVals = append(splitVals, strings.Split(v, ",")...)
}
queryParams[key] = splitVals
}
}


@@ -1,21 +0,0 @@
package server
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestNormalizeQueryValues(t *testing.T) {
input := make(map[string][]string)
input["key"] = []string{"value1", "value2,value3,value4", "value5"}
NormalizeQueryValues(input)
require.Equal(t, 5, len(input["key"]))
assert.Equal(t, "value1", input["key"][0])
assert.Equal(t, "value2", input["key"][1])
assert.Equal(t, "value3", input["key"][2])
assert.Equal(t, "value4", input["key"][3])
assert.Equal(t, "value5", input["key"][4])
}


@@ -1,33 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"debounce.go",
"every.go",
"multilock.go",
"scatter.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/async",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = [
"benchmark_test.go",
"debounce_test.go",
"every_test.go",
"multilock_test.go",
"scatter_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_stretchr_testify//assert:go_default_library",
],
)


@@ -1,14 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["abool.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/async/abool",
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["abool_test.go"],
embed = [":go_default_library"],
)


@@ -1,63 +0,0 @@
package async_test
import (
"crypto/rand"
"crypto/sha256"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/require"
log "github.com/sirupsen/logrus"
)
var input [][]byte
const (
benchmarkElements = 65536
benchmarkElementSize = 32
benchmarkHashRuns = 128
)
func init() {
input = make([][]byte, benchmarkElements)
for i := 0; i < benchmarkElements; i++ {
input[i] = make([]byte, benchmarkElementSize)
_, err := rand.Read(input[i])
if err != nil {
log.WithError(err).Debug("Cannot read from rand")
}
}
}
// hash repeatedly hashes the data passed to it
func hash(input [][]byte) [][]byte {
output := make([][]byte, len(input))
for i := range input {
copy(output, input)
for j := 0; j < benchmarkHashRuns; j++ {
hash := sha256.Sum256(output[i])
output[i] = hash[:]
}
}
return output
}
func BenchmarkHash(b *testing.B) {
for i := 0; i < b.N; i++ {
hash(input)
}
}
func BenchmarkHashMP(b *testing.B) {
output := make([][]byte, len(input))
for i := 0; i < b.N; i++ {
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
return hash(input[offset : offset+entries]), nil
})
require.NoError(b, err)
for _, result := range workerResults {
copy(output[result.Offset:], result.Extent.([][]byte))
}
}
}


@@ -1,117 +0,0 @@
package async_test
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
time.AfterFunc(interval, func() {
cancel()
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}
func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
ticker := time.NewTicker(time.Millisecond * 100)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
eventsChan <- struct{}{}
}
}
}()
go func() {
time.AfterFunc(interval, func() {
cancel()
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
// We should expect 100 rapid fire changes to only have caused
// 1 handler to trigger after the debouncing period.
time.Sleep(interval * 2)
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
cancel()
}
func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")
// By this time the first event should be triggered.
time.Sleep(2 * time.Second)
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
// Second event.
eventsChan <- struct{}{}
time.Sleep(2 * time.Second)
assert.Equal(t, int32(2), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
cancel()
}
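A hypothetical caller (not part of this diff; events, ctx, and reloadConfig are placeholders) of the Debounce helper exercised above, where bursts of events collapse into a single handler call once the interval has elapsed:

events := make(chan interface{}, 64)
go async.Debounce(ctx, 5*time.Second, events, func(_ interface{}) {
	reloadConfig() // the expensive operation being debounced
})
// Producers just send into the channel; rapid sends within one interval
// result in a single reloadConfig call.
events <- struct{}{}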


@@ -1,29 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"feed.go",
"subscription.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/async/event",
visibility = ["//visibility:public"],
deps = ["//time/mclock:go_default_library"],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"example_feed_test.go",
"example_scope_test.go",
"example_subscription_test.go",
"feed_test.go",
"subscription_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)


@@ -1,40 +0,0 @@
package async_test
import (
"context"
"sync/atomic"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/async"
)
func TestEveryRuns(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
i := int32(0)
async.RunEvery(ctx, 100*time.Millisecond, func() {
atomic.AddInt32(&i, 1)
})
// Sleep for a bit and ensure the value has increased.
time.Sleep(200 * time.Millisecond)
if atomic.LoadInt32(&i) == 0 {
t.Error("Counter failed to increment with ticker")
}
cancel()
// Sleep for a bit to let the cancel take place.
time.Sleep(100 * time.Millisecond)
last := atomic.LoadInt32(&i)
// Sleep for a bit and ensure the value has not increased.
time.Sleep(200 * time.Millisecond)
if atomic.LoadInt32(&i) != last {
t.Error("Counter incremented after stop")
}
}

beacon-chain/BUILD.bazel (new file)

@@ -0,0 +1,19 @@
load("//tools:target_migration.bzl", "moved_targets")
moved_targets(
[
":push_images_debug",
":push_images_alpine",
":push_images",
":image_bundle_debug",
":image_debug",
":image_bundle_alpine",
":image_bundle",
":image_with_creation_time",
":image_alpine",
":image",
":go_default_test",
":beacon-chain",
],
"//cmd/beacon-chain",
)


@@ -6,4 +6,4 @@ You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
Also, read the official beacon chain [specification](https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.
Also, read the official beacon chain [specification](https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.


@@ -4,185 +4,176 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"chain_info_forkchoice.go",
"currently_syncing_block.go",
"error.go",
"execution_engine.go",
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",
"info.go",
"init_sync_process_block.go",
"lightclient.go",
"log.go",
"merge_ascii_art.go",
"metrics.go",
"options.go",
"pow_block.go",
"process_attestation.go",
"process_attestation_helpers.go",
"process_block.go",
"process_block_helpers.go",
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain",
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//testing/slasher/simulator:__pkg__",
"//testing/spectest:__subpackages__",
"//fuzz:__pkg__",
],
deps = [
"//async:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/copyutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/mputil:go_default_library",
"//shared/params:go_default_library",
"//shared/slotutil:go_default_library",
"//shared/timeutils:go_default_library",
"//shared/traceutil:go_default_library",
"//shared/version:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
test_suite(
name = "go_default_test",
tests = [
":go_raceoff_test",
":go_raceon_test",
],
)
go_test(
name = "go_default_test",
name = "go_raceoff_test",
size = "medium",
srcs = [
"blockchain_test.go",
"chain_info_norace_test.go",
"chain_info_test.go",
"checktags_test.go",
"error_test.go",
"execution_engine_test.go",
"forkchoice_update_execution_test.go",
"head_sync_committee_info_test.go",
"head_test.go",
"init_sync_process_block_test.go",
"info_test.go",
"init_test.go",
"lightclient_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
"pow_block_test.go",
"process_attestation_test.go",
"process_block_test.go",
"receive_attestation_test.go",
"receive_block_test.go",
"service_norace_test.go",
"service_test.go",
"setup_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
gotags = ["develop"],
tags = ["CI_race_detection"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_x_net//context:go_default_library",
],
)
go_test(
name = "go_raceon_test",
srcs = [
"chain_info_norace_test.go",
"checktags_test.go",
"init_test.go",
"receive_block_test.go",
"service_norace_test.go",
],
embed = [":go_default_library"],
gc_goopts = [
# Go 1.14 enables checkptr by default when building with -race or -msan. There is a pointer
# issue in boltdb, so must disable checkptr at compile time. This flag can be removed once
# the project is migrated to etcd's version of boltdb and the issue has been fixed.
# See: https://github.com/etcd-io/bbolt/issues/187.
"-d=checkptr=0",
],
gotags = ["develop"],
race = "on",
tags = ["race_on"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_x_net//context:go_default_library",
],
)

View File

@@ -1,7 +1,7 @@
package blockchain
import (
"io"
"io/ioutil"
"testing"
"github.com/sirupsen/logrus"
@@ -9,7 +9,7 @@ import (
func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
logrus.SetOutput(ioutil.Discard)
m.Run()
}

View File

@@ -1,81 +1,59 @@
package blockchain
import (
"bytes"
"context"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/copyutil"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
// ChainInfoFetcher defines a common interface for methods in blockchain service which
// directly retrieve chain info related data.
// directly retrieves chain info related data.
type ChainInfoFetcher interface {
HeadFetcher
FinalizationFetcher
GenesisFetcher
CanonicalFetcher
ForkFetcher
TimeFetcher
HeadDomainFetcher
ForkchoiceFetcher
}
// ForkchoiceFetcher defines a common interface for methods that directly access
// forkchoice information. These typically require a lock, so external callers
// are requested to call the methods within this blockchain package, which take care
// of locking forkchoice.
type ForkchoiceFetcher interface {
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(uint64)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
CurrentSlot() primitives.Slot
CurrentSlot() types.Slot
}
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
type GenesisFetcher interface {
GenesisValidatorsRoot() [32]byte
GenesisValidatorRoot() [32]byte
}
// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieve head related data.
// directly retrieves head related data.
type HeadFetcher interface {
HeadSlot() primitives.Slot
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error)
HeadState(ctx context.Context) (state.BeaconState, error)
HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error)
HeadGenesisValidatorsRoot() [32]byte
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
HeadGenesisValidatorRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []primitives.Slot)
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
ProtoArrayStore() *protoarray.Store
ChainHeads() ([][32]byte, []types.Slot)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
@@ -83,65 +61,51 @@ type HeadFetcher interface {
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
CurrentFork() *ethpb.Fork
GenesisFetcher
TimeFetcher
}
// TemporalOracle is like ForkFetcher minus CurrentFork()
type TemporalOracle interface {
GenesisFetcher
TimeFetcher
}
// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
VerifyBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}
// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieve finalization and justification related data.
// directly retrieves finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
UnrealizedJustifiedPayloadBlockHash() [32]byte
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}
// OptimisticModeFetcher retrieves information about optimistic status of the node.
type OptimisticModeFetcher interface {
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if s.finalizedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return copyutil.CopyCheckpoint(s.finalizedCheckpt)
}
// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
cp := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if s.justifiedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return copyutil.CopyCheckpoint(s.justifiedCheckpt)
}
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
if s.prevJustifiedCheckpt == nil {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}
return copyutil.CopyCheckpoint(s.prevJustifiedCheckpt)
}
// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() primitives.Slot {
func (s *Service) HeadSlot() types.Slot {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -157,8 +121,9 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if s.head != nil && s.head.root != params.BeaconConfig().ZeroHash {
return bytesutil.SafeCopyBytes(s.head.root[:]), nil
if s.headRoot() != params.BeaconConfig().ZeroHash {
r := s.headRoot()
return r[:], nil
}
b, err := s.cfg.BeaconDB.HeadBlock(ctx)
@@ -180,12 +145,12 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
// HeadBlock returns the head block of the chain.
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if s.hasHeadState() {
return s.headBlock()
return s.headBlock(), nil
}
return s.cfg.BeaconDB.HeadBlock(ctx)
@@ -210,41 +175,31 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
}
// HeadStateReadOnly returns the read only head state of the chain.
// If the head is nil from service struct, it will attempt to get the
// head state from DB. Any callers of this method MUST only use the
// state instance to read fields from the state. Any type assertions back
// to the concrete type and subsequent use of it could lead to corruption
// of the state.
func (s *Service) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.HeadStateReadOnly")
defer span.End()
s.headLock.RLock()
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headStateReadOnly(ctx), nil
}
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
}
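The read-only contract in the comment above is easy to violate by type-asserting the returned state. A minimal usage sketch follows; the caller and helper name are hypothetical, and it relies on the state and primitives imports already present in this file.
// headSlotReadOnly is a hypothetical caller illustrating the intended
// read-only usage of HeadStateReadOnly: read fields, never mutate.
func headSlotReadOnly(ctx context.Context, chain interface {
	HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error)
}) (primitives.Slot, error) {
	st, err := chain.HeadStateReadOnly(ctx)
	if err != nil {
		return 0, err
	}
	// Reading is safe; asserting st back to a mutable BeaconState and
	// writing through it risks corrupting the cached head state.
	return st.Slot(), nil
}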
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return []primitives.ValidatorIndex{}, nil
return []types.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
return helpers.ActiveValidatorIndices(s.headState(ctx), epoch)
}
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
// HeadSeed returns the seed from the head view of a given epoch.
func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}, nil
}
return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
}
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -252,7 +207,7 @@ func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
return s.headGenesisValidatorsRoot()
return s.headGenesisValidatorRoot()
}
// HeadETH1Data returns the eth1data of the current head state.
@@ -266,21 +221,26 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
return s.head.state.Eth1Data()
}
// ProtoArrayStore returns the proto array store object.
func (s *Service) ProtoArrayStore() *protoarray.Store {
return s.cfg.ForkChoiceStore.Store()
}
// GenesisTime returns the genesis time of beacon chain.
func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}
// GenesisValidatorsRoot returns the genesis validators
// GenesisValidatorRoot returns the genesis validator
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
func (s *Service) GenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}
}
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// CurrentFork retrieves the latest fork information of the beacon chain.
@@ -299,249 +259,53 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
// If the block has been finalized, the block will always be part of the canonical chain.
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
return true, nil
}
// If the block has been finalized, the block will always be part of the canonical chain.
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
// If the block has not been finalized, check fork choice store to see if the block is canonical
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
nodes := s.ProtoArrayStore().Nodes()
// Deliberate choice not to preallocate space for the slices below.
// Heads can't number more than 2-3 in the worst case, whereas pre-allocation would start at 64.
headsRoots := make([][32]byte, 0)
headsSlots := make([]types.Slot, 0)
nonExistentNode := ^uint64(0)
for _, node := range nodes {
// Possible heads have no children.
if node.BestDescendant() == nonExistentNode && node.BestChild() == nonExistentNode {
headsRoots = append(headsRoots, node.Root())
headsSlots = append(headsSlots, node.Slot())
}
}
return headsRoots, headsSlots
}
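The loop above identifies heads as nodes with neither a best child nor a best descendant. As a hedged, store-agnostic illustration of the same idea, the leaves of a block tree can be computed from a plain parent map; the helper below is hypothetical and not part of the service.
// leafRoots returns every root that is not the parent of any other block,
// i.e. the leaves of the block tree. Result order is unspecified.
func leafRoots(parents map[[32]byte][32]byte) [][32]byte {
	hasChild := make(map[[32]byte]bool, len(parents))
	for _, parent := range parents {
		hasChild[parent] = true
	}
	heads := make([][32]byte, 0)
	for root := range parents {
		if !hasChild[root] {
			heads = append(heads, root)
		}
	}
	return heads
}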
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
func (s *Service) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return 0, false
}
return s.headValidatorIndexAtPubkey(pubKey)
return s.headState(ctx).ValidatorIndexByPubkey(pubKey)
}
// HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
func (s *Service) HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [fieldparams.BLSPubkeyLength]byte{}, nil
}
v, err := s.headValidatorAtIndex(index)
v, err := s.headState(ctx).ValidatorAtIndexReadOnly(index)
if err != nil {
return [fieldparams.BLSPubkeyLength]byte{}, err
return [48]byte{}, err
}
return v.PublicKey(), nil
}
// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
s.headLock.RLock()
headRoot := s.head.root
headSlot := s.head.slot
headOptimistic := s.head.optimistic
s.headLock.RUnlock()
// we trust the head package for recent head slots, otherwise fallback to forkchoice
if headSlot+2 >= s.CurrentSlot() {
return headOptimistic, nil
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
return true, err
}
// If forkchoice does not have the head root, then the node is considered
// optimistic.
return true, nil
}
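The "recent head" shortcut above boils down to a two-slot trust window; spelled out as a hypothetical helper for clarity:
// withinHeadTrustWindow reports whether the cached head is recent enough
// (within two slots of the wall-clock slot) to trust its optimistic flag
// without consulting forkchoice.
func withinHeadTrustWindow(headSlot, currentSlot primitives.Slot) bool {
	return headSlot+2 >= currentSlot
}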
// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if s.cfg.ForkChoiceStore.FinalizedCheckpoint().Root == root {
return true
}
// If node exists in our store, then it is not
// finalized.
if s.cfg.ForkChoiceStore.HasNode(root) {
return false
}
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
}
// InForkchoice returns true if the given root is found in forkchoice.
// This in particular means that the block root is a descendant of the
// finalized checkpoint.
func (s *Service) InForkchoice(root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
s.cfg.ForkChoiceStore.RUnlock()
if err == nil {
return optimistic, nil
}
if !errors.Is(err, doublylinkedtree.ErrNilNode) {
return false, err
}
// if the requested root is the headroot and the root is not found in
// forkchoice, the node should respond that it is optimistic
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return true, err
}
if bytes.Equal(headRoot, root[:]) {
return true, nil
}
ss, err := s.cfg.BeaconDB.StateSummary(ctx, root)
if err != nil {
return false, err
}
if ss == nil {
ss, err = s.recoverStateSummary(ctx, root)
if err != nil {
return true, err
}
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
return false, err
}
if slots.ToEpoch(ss.Slot) > validatedCheckpoint.Epoch {
return true, nil
}
// Historical non-canonical blocks here are returned as optimistic for safety.
isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}
if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
return !isCanonical, nil
}
// Checkpoint root could be zeros before the first finalized epoch. Use the genesis root if that is the case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err
}
if lastValidated == nil {
lastValidated, err = s.recoverStateSummary(ctx, root)
if err != nil {
return false, err
}
}
if ss.Slot > lastValidated.Slot {
return true, nil
}
return !isCanonical, nil
}
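One defensive way a caller might consume this method is to treat any error as optimistic, since several of the fallback paths above already err on that side. The helper below is a hypothetical sketch, not taken from the source; it only assumes the OptimisticModeFetcher interface defined earlier in this file.
// optimisticOrUnknown reports a root as optimistic whenever the status
// cannot be determined, which is the conservative choice for API callers.
func optimisticOrUnknown(ctx context.Context, f OptimisticModeFetcher, root [32]byte) bool {
	opt, err := f.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return true
	}
	return opt
}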
// TargetRootForEpoch wraps the corresponding method in forkchoice
func (s *Service) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.TargetRootForEpoch(root, epoch)
}
// Ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
//
// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
// block = store.blocks[root]
// if block.slot > slot:
// return get_ancestor(store, block.parent_root, slot)
// elif block.slot == slot:
// return root
// else:
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
// return root
func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()
r := bytesutil.ToBytes32(root)
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
// This is the optimal outcome.
s.cfg.ForkChoiceStore.RLock()
ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
s.cfg.ForkChoiceStore.RUnlock()
if err != nil {
// Try getting ancestor root from DB when failed to retrieve from fork choice store.
// This is the second line of defense for retrieving ancestor root.
ar, err = s.ancestorByDB(ctx, r, slot)
if err != nil {
return nil, err
}
}
return ar[:], nil
}
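The spec pseudocode above walks parent links until the queried slot is reached. An iterative, hedged Go rendering of the same rule over a hypothetical in-memory block index (types and names are illustrative, using the primitives import already in this file) could read:
// blockMeta is a stand-in for the spec's store.blocks entries.
type blockMeta struct {
	slot       primitives.Slot
	parentRoot [32]byte
}

// getAncestor mirrors get_ancestor: follow parents while the block is
// newer than the queried slot; on an exact match, a skip slot, or an
// unknown root, return the most recent root at or before that slot.
func getAncestor(blocks map[[32]byte]blockMeta, root [32]byte, slot primitives.Slot) [32]byte {
	for {
		blk, ok := blocks[root]
		if !ok || blk.slot <= slot {
			return root
		}
		root = blk.parentRoot
	}
}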
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}
func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
if s.cfg.BeaconDB.HasBlock(ctx, blockRoot) {
b, err := s.cfg.BeaconDB.Block(ctx, blockRoot)
if err != nil {
return nil, err
}
summary := &ethpb.StateSummary{Slot: b.Block().Slot(), Root: blockRoot[:]}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, summary); err != nil {
return nil, err
}
return summary, nil
}
return nil, errBlockDoesNotExist
}
// BlockBeingSynced returns whether the block with the given root is currently being synced
func (s *Service) BlockBeingSynced(root [32]byte) bool {
return s.blockBeingSynced.isSyncing(root)
}
// RecentBlockSlot returns the block slot from the fork choice store.
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.Slot(root)
}

View File

@@ -1,101 +0,0 @@
package blockchain
import (
"context"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)
// CachedHeadRoot returns the corresponding value from Forkchoice
func (s *Service) CachedHeadRoot() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.CachedHeadRoot()
}
// GetProposerHead returns the corresponding value from forkchoice
func (s *Service) GetProposerHead() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.GetProposerHead()
}
// ShouldOverrideFCU returns the corresponding value from forkchoice
func (s *Service) ShouldOverrideFCU() bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}
// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ReceivedBlocksLastEpoch()
}
// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
}
// NewSlot returns the corresponding value from forkchoice
func (s *Service) NewSlot(ctx context.Context, slot primitives.Slot) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.NewSlot(ctx, slot)
}
// ProposerBoost wraps the corresponding method from forkchoice
func (s *Service) ProposerBoost() [32]byte {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.ProposerBoost()
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}
// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}
// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
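Each wrapper above pairs a forkchoice call with the matching lock: read-style accessors mostly take RLock, while anything that mutates the store takes the full Lock. A minimal, self-contained sketch of that pattern, with a hypothetical store guarded by the standard sync package rather than the real forkchoice type:
// guardedStore sketches the locking discipline used by the wrappers above.
type guardedStore struct {
	mu   sync.RWMutex
	head [32]byte
}

// CachedHead is a read accessor: RLock permits concurrent readers.
func (g *guardedStore) CachedHead() [32]byte {
	g.mu.RLock()
	defer g.mu.RUnlock()
	return g.head
}

// SetHead mutates the store, so it takes the exclusive write lock.
func (g *guardedStore) SetHead(root [32]byte) {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.head = root
}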

View File

@@ -4,93 +4,58 @@ import (
"context"
"testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestHeadSlot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
cfg: &Config{BeaconDB: beaconDB},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
s.HeadSlot()
<-wait
}
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadRoot(context.Background())
_, err := s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
head: &head{block: wsb},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadBlock(context.Background())
_, err := s.HeadBlock(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root))
require.NoError(t, beaconDB.SaveState(context.Background(), st, root))
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err = s.HeadState(context.Background())
_, err := s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}

View File

@@ -5,21 +5,18 @@ import (
"testing"
"time"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"google.golang.org/protobuf/proto"
)
@@ -28,38 +25,10 @@ var _ ChainInfoFetcher = (*Service)(nil)
var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)
// prepareForkchoiceState prepares a beacon state with the given data to mock
// insert into forkchoice
func prepareForkchoiceState(
_ context.Context,
slot primitives.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
executionHeader := &enginev1.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}
base := &ethpb.BeaconStateBellatrix{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, 1),
CurrentJustifiedCheckpoint: justified,
FinalizedCheckpoint: finalized,
LatestExecutionPayloadHeader: executionHeader,
LatestBlockHeader: blockHeader,
}
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
func TestFinalizedCheckpt_Nil(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], c.FinalizedCheckpt().Root, "Incorrect pre chain start value")
}
func TestHeadRoot_Nil(t *testing.T) {
@@ -70,135 +39,121 @@ func TestHeadRoot_Nil(t *testing.T) {
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
beaconDB := testDB.SetupDB(t)
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
c.finalizedCheckpt = cp
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
cp := service.FinalizedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
cp = service.CurrentJustifiedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
// check that forkchoice has the right genesis root as the node root
root, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, service.originBlockRoot, root)
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.finalizedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.FinalizedCheckpt().Root)
}
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
beaconDB := testDB.SetupDB(t)
jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, jroot))
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, cp))
jp := service.CurrentJustifiedCheckpt()
assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.justifiedCheckpt = cp
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
}
func TestFinalizedBlockHash(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
beaconDB := testDB.SetupDB(t)
r := [32]byte{'f'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, r))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(cp))
h := service.FinalizedBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, r, fcs.FinalizedCheckpoint().Root)
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.justifiedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.CurrentJustifiedCheckpt().Root)
}
func TestUnrealizedJustifiedBlockHash(t *testing.T) {
ctx := context.Background()
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
beaconDB := testDB.SetupDB(t)
h := service.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, params.BeaconConfig().ZeroHash, h)
require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
cp := &ethpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
c.prevJustifiedCheckpt = cp
assert.Equal(t, cp.Epoch, c.PreviousJustifiedCheckpt().Epoch, "Unexpected previous justified epoch")
}
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisRoot := [32]byte{'C'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.prevJustifiedCheckpt = cp
c.genesisRoot = genesisRoot
assert.DeepEqual(t, c.genesisRoot[:], c.PreviousJustifiedCheckpt().Root)
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetSlot(100)
c.head = &head{block: b, state: s}
assert.Equal(t, primitives.Slot(100), c.HeadSlot())
c.head = &head{slot: 100, state: s}
assert.Equal(t, types.Slot(100), c.HeadSlot())
}
func TestHeadRoot_CanRetrieve(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
r, err := service.HeadRoot(ctx)
c := &Service{}
c.head = &head{root: [32]byte{'A'}}
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, service.originBlockRoot, bytesutil.ToBytes32(r))
assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
}
func TestHeadRoot_UseDB(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
service.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
beaconDB := testDB.SetupDB(t)
c := &Service{cfg: &Config{BeaconDB: beaconDB}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := testutil.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, br))
r, err := service.HeadRoot(ctx)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, br, bytesutil.ToBytes32(r))
}
func TestHeadBlock_CanRetrieve(t *testing.T) {
b := util.NewBeaconBlock()
b := testutil.NewBeaconBlock()
b.Block.Slot = 1
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
c := &Service{}
c.head = &head{block: wsb, state: s}
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
received, err := c.HeadBlock(context.Background())
recevied, err := c.HeadBlock(context.Background())
require.NoError(t, err)
pb, err := received.Proto()
require.NoError(t, err)
assert.DeepEqual(t, b, pb, "Incorrect head block received")
assert.DeepEqual(t, b, recevied.Proto(), "Incorrect head block received")
}
func TestHeadState_CanRetrieve(t *testing.T) {
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Incorrect head state received")
}
func TestGenesisTime_CanRetrieve(t *testing.T) {
@@ -209,7 +164,7 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &ethpb.Fork{Epoch: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Fork: f})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -229,15 +184,15 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
}
}
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
// Should not panic if head state is nil.
c := &Service{}
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
require.NoError(t, err)
c.head = &head{state: s}
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
}
func TestHeadETH1Data_Nil(t *testing.T) {
@@ -248,7 +203,7 @@ func TestHeadETH1Data_Nil(t *testing.T) {
func TestHeadETH1Data_CanRetrieve(t *testing.T) {
d := &ethpb.Eth1Data{DepositCount: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Eth1Data: d})
require.NoError(t, err)
c := &Service{}
c.head = &head{state: s}
@@ -262,11 +217,11 @@ func TestIsCanonical_Ok(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
blk := util.NewBeaconBlock()
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 0
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, blk)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
can, err := c.IsCanonical(ctx, root)
require.NoError(t, err)
@@ -278,7 +233,7 @@ func TestIsCanonical_Ok(t *testing.T) {
}
func TestService_HeadValidatorsIndices(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
s, _ := testutil.DeterministicGenesisState(t, 10)
c := &Service{}
c.head = &head{}
@@ -292,91 +247,74 @@ func TestService_HeadValidatorsIndices(t *testing.T) {
require.Equal(t, 10, len(indices))
}
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
func TestService_HeadSeed(t *testing.T) {
s, _ := testutil.DeterministicGenesisState(t, 1)
c := &Service{}
seed, err := helpers.Seed(s, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
c.head = &head{}
root := c.HeadGenesisValidatorsRoot()
root, err := c.HeadSeed(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [32]byte{}, root)
c.head = &head{state: s}
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
root, err = c.HeadSeed(context.Background(), 0)
require.NoError(t, err)
require.DeepEqual(t, seed, root)
}
//
//  A <- B <- C
//   \    \
//    \    ---------- E
//     ---------- D
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
s, _ := testutil.DeterministicGenesisState(t, 1)
c := &Service{}
c.head = &head{}
root := c.HeadGenesisValidatorRoot()
require.Equal(t, [32]byte{}, root)
c.head = &head{state: s}
root = c.HeadGenesisValidatorRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorRoot())
}
func TestService_ProtoArrayStore(t *testing.T) {
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
p := c.ProtoArrayStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
}
func TestService_ChainHeads(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{}, 0, 0))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
rootMap := map[[32]byte]primitives.Slot{{'c'}: 102, {'d'}: 103, {'e'}: 104}
for i, root := range roots {
slot, ok := rootMap[root]
require.Equal(t, true, ok)
require.Equal(t, slot, slots[i])
}
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
s, _ := testutil.DeterministicGenesisState(t, 10)
c := &Service{}
c.head = &head{state: s}
_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
_, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
require.Equal(t, false, e)
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
i, e := c.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(v.PublicKey))
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), bytesutil.ToBytes48(v.PublicKey))
require.Equal(t, true, e)
require.Equal(t, primitives.ValidatorIndex(0), i)
}
func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
c := &Service{}
c.head = nil
idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
require.Equal(t, false, e)
require.Equal(t, primitives.ValidatorIndex(0), idx)
c.head = &head{state: nil}
i, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
require.Equal(t, false, e)
require.Equal(t, primitives.ValidatorIndex(0), i)
require.Equal(t, types.ValidatorIndex(0), i)
}
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
s, _ := testutil.DeterministicGenesisState(t, 10)
c := &Service{}
c.head = &head{state: s}
@@ -388,203 +326,3 @@ func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
require.Equal(t, bytesutil.ToBytes48(v.PublicKey), p)
}
func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
c := &Service{}
c.head = nil
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
c.head = &head{state: nil}
p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}
func TestService_IsOptimistic(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, primitives.Slot(0), c.CurrentSlot())
require.Equal(t, false, opt)
c.SetGenesisTime(time.Now().Add(-time.Second * time.Duration(4*params.BeaconConfig().SecondsPerSlot)))
opt, err = c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := context.Background()
c := &Service{genesisTime: time.Now()}
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, opt)
}
func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: validatedRoot[:],
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, true, validated)
}
func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
_, err = c.IsOptimisticForRoot(ctx, br)
assert.NoError(t, err)
summ, err := beaconDB.StateSummary(ctx, br)
assert.NoError(t, err)
assert.NotNil(t, summ)
assert.Equal(t, 10, int(summ.Slot))
assert.DeepEqual(t, br[:], summ.Root)
}
func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
r1 := [32]byte{'a'}
require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Root: r1,
}))
b := util.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:], Slot: 10}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Root: br[:],
}))
require.Equal(t, true, c.IsFinalized(ctx, r1))
require.Equal(t, true, c.IsFinalized(ctx, br))
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}

View File

@@ -1,4 +1,4 @@
//go:build !develop
// +build !develop
package blockchain

View File

@@ -1,27 +0,0 @@
package blockchain
import "sync"
type currentlySyncingBlock struct {
sync.Mutex
roots map[[32]byte]struct{}
}
func (b *currentlySyncingBlock) set(root [32]byte) {
b.Lock()
defer b.Unlock()
b.roots[root] = struct{}{}
}
func (b *currentlySyncingBlock) unset(root [32]byte) {
b.Lock()
defer b.Unlock()
delete(b.roots, root)
}
func (b *currentlySyncingBlock) isSyncing(root [32]byte) bool {
b.Lock()
defer b.Unlock()
_, ok := b.roots[root]
return ok
}
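// A minimal, hypothetical usage sketch (not part of the original file): the zero
// value's roots map must be initialized before use, after which set/isSyncing/unset
// guard concurrent processing of the same block root.
func exampleCurrentlySyncingBlockUsage() {
	tracker := &currentlySyncingBlock{roots: make(map[[32]byte]struct{})}
	root := [32]byte{'a'}
	tracker.set(root)
	if tracker.isSyncing(root) {
		// A concurrent goroutine would skip re-processing this root here.
	}
	tracker.unset(root)
}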

View File

@@ -1,118 +0,0 @@
package blockchain
import "github.com/pkg/errors"
var (
// ErrInvalidPayload is returned when the payload is invalid
ErrInvalidPayload = invalidBlock{error: errors.New("received an INVALID payload from execution engine")}
// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
ErrUndefinedExecutionEngineError = errors.New("received an undefined execution engine error")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errNilFinalizedCheckpoint is returned when a nil finalized checkpt is returned from a state.
errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
errBlockDoesNotExist = errors.New("could not find block in DB")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
// An invalid block is a block that fails the state transition based on the core protocol rules.
// The beacon node shall neither accept nor build blocks that branch off from an invalid block.
// Some examples of invalid blocks are:
// The block violates state transition rules.
// The block is deemed invalid according to execution layer client.
// The block violates certain fork choice rules (before finalized slot, not finalized ancestor)
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
lastValidHash [32]byte
}
type invalidBlockError interface {
Error() string
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
LastValidHash() [32]byte
}
// BlockRoot returns the invalid block root.
func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}
// LastValidHash returns the last valid hash root.
func (e invalidBlock) LastValidHash() [32]byte {
return e.lastValidHash
}
// InvalidAncestorRoots returns an optional list of invalid roots of the invalid block which lead up to the last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
}
// IsInvalidBlock returns true if the error wraps an `invalidBlock`.
func IsInvalidBlock(e error) bool {
if e == nil {
return false
}
var d invalidBlockError
return errors.As(e, &d)
}
// InvalidBlockLVH returns the invalid block last valid hash root. If the error
// doesn't have a last valid hash, [32]byte{} is returned.
func InvalidBlockLVH(e error) [32]byte {
if e == nil {
return [32]byte{}
}
var d invalidBlockError
ok := errors.As(e, &d)
if !ok {
return [32]byte{}
}
return d.LastValidHash()
}
// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid block root, [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {
if e == nil {
return [32]byte{}
}
var d invalidBlockError
ok := errors.As(e, &d)
if !ok {
return [32]byte{}
}
return d.BlockRoot()
}
// InvalidAncestorRoots returns a list of invalid roots up to the last valid root.
func InvalidAncestorRoots(e error) [][32]byte {
if e == nil {
return [][32]byte{}
}
var d invalidBlockError
ok := errors.As(e, &d)
if !ok {
return [][32]byte{}
}
return d.InvalidAncestorRoots()
}
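// A hypothetical caller-side sketch (not part of the original file) showing how the
// exported helpers above would typically be combined after block processing fails;
// handleBlockProcessingError is an illustrative name, not an existing function.
func handleBlockProcessingError(err error) {
	if !IsInvalidBlock(err) {
		return // nil error or a non-consensus failure; nothing to prune
	}
	root := InvalidBlockRoot(err)          // root of the rejected block, or zeros
	lvh := InvalidBlockLVH(err)            // last valid execution hash, or zeros
	ancestors := InvalidAncestorRoots(err) // optional chain of invalid ancestors
	_, _, _ = root, lvh, ancestors
}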

View File

@@ -1,44 +0,0 @@
package blockchain
import (
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestIsInvalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(ErrInvalidPayload)) // Already wrapped.
err := invalidBlock{error: ErrInvalidPayload}
require.Equal(t, true, IsInvalidBlock(err))
newErr := errors.Wrap(err, "wrap me")
require.Equal(t, true, IsInvalidBlock(newErr))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
}
func TestInvalidBlockRoot(t *testing.T) {
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrUndefinedExecutionEngineError))
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrInvalidPayload))
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
newErr := errors.Wrap(err, "wrap me")
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
}
func TestInvalidRoots(t *testing.T) {
roots := [][32]byte{{'d'}, {'b'}, {'c'}}
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}, invalidAncestorRoots: roots}
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, roots, InvalidAncestorRoots(err))
newErr := errors.Wrap(err, "wrap me")
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
require.DeepEqual(t, roots, InvalidAncestorRoots(newErr))
}

View File

@@ -1,415 +0,0 @@
package blockchain
import (
"context"
"crypto/sha256"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
const blobCommitmentVersionKZG uint8 = 0x01
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// notifyForkchoiceUpdate signals the execution engine about fork choice updates. The execution engine should:
// 1. Re-organize the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Apply finality to the execution state: irreversibly persist the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
if arg.headBlock.IsNil() {
log.Error("Head block is nil")
return nil, nil
}
headBlk := arg.headBlock.Block()
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
log.Error("Head block is nil")
return nil, nil
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
if err != nil {
log.WithError(err).Error("Could not determine if head block is execution block")
return nil, nil
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch err {
case execution.ErrAcceptedSyncingPayloadStatus:
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
case execution.ErrInvalidPayloadStatus:
forkchoiceUpdatedInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
}
r, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks, could not update head root")
return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
}
b, err := s.getBlock(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head block")
return nil, nil
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head state")
return nil, nil
}
pid, err := s.notifyForkchoiceUpdate(ctx, &fcuConfig{
headState: st,
headRoot: r,
headBlock: b,
attributes: arg.attributes,
})
if err != nil {
return nil, err // Returning err because it's recursive here.
}
if err := s.saveHead(ctx, r, b, st); err != nil {
log.WithError(err).Error("could not save head after pruning invalid blocks")
}
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
}).Warn("Pruned invalid blocks")
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
default:
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
return nil, nil
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
// If the forkchoice update call has an attribute, update the payload ID cache.
hasAttr := arg.attributes != nil && !arg.attributes.IsEmpty()
nextSlot := s.CurrentSlot() + 1
if hasAttr && payloadID != nil {
var pId [8]byte
copy(pId[:], payloadID[:])
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
"headSlot": headBlk.Slot(),
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
}).Error("Received nil payload ID on VALID engine response")
}
return payloadID, nil
}
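// A minimal sketch (not part of the original file) of the forkchoice state shape
// notifyForkchoiceUpdate sends to the engine: head from the head block's execution
// payload, safe from the unrealized justified payload hash, and finalized from the
// finalized payload hash. The hashes here are placeholders supplied by the caller.
func exampleForkchoiceState(head, justified, finalized [32]byte) *enginev1.ForkchoiceState {
	return &enginev1.ForkchoiceState{
		HeadBlockHash:      head[:],
		SafeBlockHash:      justified[:],
		FinalizedBlockHash: finalized[:],
	}
}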
// getPayloadHash returns the payload hash given the block root.
// If the block is before the Bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
if err != nil {
return [32]byte{}, err
}
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := blk.Block().Body().Execution()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash()), nil
}
// notifyNewPayload signals the execution engine about a new payload.
// It returns true if the EL has returned VALID for the block.
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payloads are only supported in Bellatrix and beyond.
// Pre-merge blocks are never optimistic.
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
if !enabled {
return true, nil
}
payload, err := body.Execution()
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}
var lastValidHash []byte
if blk.Version() >= version.Deneb {
var versionedHashes []common.Hash
versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
if err != nil {
return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
}
pr := common.Hash(blk.Block().ParentRoot())
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
} else {
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
}
switch err {
case nil:
newPayloadValidNodeCount.Inc()
return true, nil
case execution.ErrAcceptedSyncingPayloadStatus:
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
case execution.ErrInvalidPayloadStatus:
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
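// A hypothetical sketch (not part of the original file) of how a caller could
// interpret notifyNewPayload's (bool, error) contract: true with a nil error means
// the engine returned VALID, false with a nil error means an optimistic import
// (SYNCING/ACCEPTED), and an invalidBlock error means the payload was rejected.
func classifyNewPayloadResult(isValid bool, err error) string {
	switch {
	case err == nil && isValid:
		return "valid"
	case err == nil:
		return "optimistic"
	case IsInvalidBlock(err):
		return "invalid"
	default:
		return "undefined engine error"
	}
}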
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return err
}
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", root),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
lastValidHash: lvh,
}
}
// getPayloadAttribute returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) payloadattribute.Attributer {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// If the requested slot crosses an epoch boundary, process slots first to get the
// right shuffling before checking whether the proposer is tracked. Otherwise,
// perform the check up front; this is cheap since the NSC (next slot cache) has already been updated.
var val cache.TrackedValidator
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return emptyAttri
}
}
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
// Get previous randao.
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return emptyAttri
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return emptyAttri
}
var attr payloadattribute.Attributer
switch st.Version() {
case version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
Withdrawals: withdrawals,
ParentBeaconBlockRoot: headRoot,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
Withdrawals: withdrawals,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return emptyAttri
}
return attr
}
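// An illustrative sketch (not part of the original file) of the fork-to-attribute
// mapping used above: Bellatrix builds PayloadAttributes, Capella adds withdrawals
// via PayloadAttributesV2, and Deneb adds the parent beacon block root via
// PayloadAttributesV3; any other version falls back to an empty attribute.
func payloadAttributeVersionName(stateVersion int) string {
	switch stateVersion {
	case version.Deneb:
		return "PayloadAttributesV3"
	case version.Capella:
		return "PayloadAttributesV2"
	case version.Bellatrix:
		return "PayloadAttributes"
	default:
		return "empty"
	}
}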
// removeInvalidBlockAndState removes the invalid block, its blob sidecars, and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
for _, root := range blkRoots {
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
return err
}
// Delete block also deletes the state as well.
if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
// TODO(10487): If a caller requests to delete a root that's justified and finalized, we should gracefully shut down.
// This is an irreparable condition: it would mean a justified or finalized block has become invalid.
return err
}
// No op if the sidecar does not exist.
if err := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err != nil {
return err
}
}
return nil
}
func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([]common.Hash, error) {
commitments, err := body.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
}
versionedHashes := make([]common.Hash, len(commitments))
for i, commitment := range commitments {
versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
}
return versionedHashes, nil
}
func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
versionedHash := sha256.Sum256(commitment)
versionedHash[0] = blobCommitmentVersionKZG
return versionedHash
}
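// A small illustrative sketch (not part of the original file): a versioned hash is
// SHA-256 of the KZG commitment with its first byte overwritten by the version
// prefix, so the result always begins with blobCommitmentVersionKZG (0x01).
func exampleVersionedHash() common.Hash {
	commitment := make([]byte, 48) // a zero commitment, purely for illustration
	vh := ConvertKzgCommitmentToVersionedHash(commitment)
	// vh[0] == blobCommitmentVersionKZG regardless of the commitment's contents.
	return vh
}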

File diff suppressed because it is too large

View File

@@ -1,150 +0,0 @@
package blockchain
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (s *Service) isNewHead(r [32]byte) bool {
s.headLock.RLock()
defer s.headLock.RUnlock()
currentHeadRoot := s.originBlockRoot
if s.head != nil {
currentHeadRoot = s.headRoot()
}
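// Note: a zero root always counts as a new head, even when it equals the current head root.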
return r != currentHeadRoot || r == [32]byte{}
}
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
if !s.hasBlockInInitSyncOrDB(ctx, r) {
return nil, nil, errors.New("block does not exist")
}
newHeadBlock, err := s.getBlock(ctx, r)
if err != nil {
return nil, nil, err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, nil, err
}
return headState, newHeadBlock, nil
}
type fcuConfig struct {
headState state.BeaconState
headBlock interfaces.ReadOnlySignedBeaconBlock
headRoot [32]byte
proposingSlot primitives.Slot
attributes payloadattribute.Attributer
}
// sendFCU handles the logic to notify the engine of a forkchoice update
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
}
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
}
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
return errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("could not save head")
}
// Only need to prune attestations from pool if the head has changed.
if err := s.pruneAttsFromPool(args.headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being
// reorged by the next proposer.
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
currentSlot := s.CurrentSlot()
if proposingSlot == currentSlot {
proposerHead := s.cfg.ForkChoiceStore.GetProposerHead()
if proposerHead != newHeadRoot {
return true
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
params.BeaconConfig().SecondsPerSlot)
lateBlockFailedAttemptSecondThreshold.Inc()
} else {
if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
return true
}
secs, err := slots.SecondsSinceSlotStart(currentSlot,
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
}
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
doublylinkedtree.ProcessAttestationsThreshold)
lateBlockFailedAttemptFirstThreshold.Inc()
}
}
return false
}
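// A simplified, hypothetical sketch (not part of the original file) of the timing
// gate used above: once ProcessAttestationsThreshold seconds have elapsed in the
// current slot, a late-block reorg attempt is considered too risky and is aborted.
func reorgAttemptStillAllowed(secondsIntoSlot uint64) bool {
	return secondsIntoSlot < doublylinkedtree.ProcessAttestationsThreshold
}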

View File

@@ -1,191 +0,0 @@
package blockchain
import (
"context"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestService_isNewHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, true, service.isNewHead([32]byte{}))
service.head = &head{root: [32]byte{1}}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{1}))
// Nil head should use origin root
service.head = nil
service.originBlockRoot = [32]byte{3}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{3}))
}
func TestService_getHeadStateAndBlock(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
_, _, err := service.getStateAndBlock(context.Background(), [32]byte{})
require.ErrorContains(t, "block does not exist", err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk))
st, _ := util.DeterministicGenesisState(t, 1)
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r))
gotState, err := service.cfg.BeaconDB.State(context.Background(), r)
require.NoError(t, err)
require.DeepEqual(t, st.ToProto(), gotState.ToProto())
gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r)
require.NoError(t, err)
require.DeepEqual(t, blk, gotBlk)
}
func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.PayloadIDCache = cache.NewPayloadIDCache()
service.cfg.TrackedValidatorsCache = cache.NewTrackedValidatorsCache()
b := util.NewBeaconBlock()
b.Block.Slot = 2
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r1, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
st, _ := util.DeterministicGenesisState(t, 1)
service.head = &head{
root: r1,
block: wsb,
state: st,
}
service.cfg.PayloadIDCache.Set(2, [32]byte{2}, [8]byte{1})
b = util.NewBeaconBlock()
b.Block.Slot = 3
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
r1, err = b.Block.HashTreeRoot()
require.NoError(t, err)
st, _ = util.DeterministicGenesisState(t, 1)
service.head = &head{
root: r1,
block: wsb,
state: st,
}
service.cfg.PayloadIDCache.Set(2, [32]byte{2}, [8]byte{1})
args := &fcuConfig{
headState: st,
headRoot: r1,
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
require.Equal(t, primitives.PayloadID{1}, payloadID)
}
func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{}
require.NoError(t, beaconDB.SaveState(ctx, st, bellatrixBlkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
sb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, sb))
r, err := sb.Block().HashTreeRoot()
require.NoError(t, err)
// Set head to be the same but proposing next slot
service.head.root = r
service.head.block = sb
service.head.state = st
service.cfg.PayloadIDCache.Set(service.CurrentSlot()+1, [32]byte{} /* root */, [8]byte{})
args := &fcuConfig{
headState: st,
headBlock: sb,
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
}
func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.Equal(t, primitives.Slot(2), service.CurrentSlot())
require.Equal(t, true, service.shouldOverrideFCU(headRoot, 2))
require.LogsDoNotContain(t, hook, "12 seconds")
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 2))
require.LogsContain(t, hook, "12 seconds")
head, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, headRoot, head)
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
require.LogsDoNotContain(t, hook, "10 seconds")
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
require.LogsContain(t, hook, "10 seconds")
}

View File

@@ -6,172 +6,158 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balances from cache.
// This function is only used in spec tests; it does save the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
return errors.Wrap(err, "could not update head")
// This defines the current chain service's view of head.
type head struct {
slot types.Slot // current head slot.
root [32]byte // current head root.
block block.SignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
// Determines the head from the fork choice service and saves its new data
// (head root, head block, and head state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
defer span.End()
// To get the proper head update, a node first checks whether its best justified
// checkpoint can become justified. This is designed to prevent a bounce attack and
// ensure the head gets its best justified info.
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
s.justifiedCheckpt = s.bestJustifiedCheckpt
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
return err
}
}
headBlock, err := s.getBlock(ctx, headRoot)
// Get head from the fork choice service.
f := s.finalizedCheckpt
j := s.justifiedCheckpt
// To get head before the first justified epoch, the fork choice will start with genesis root
// instead of zero hashes.
headStartRoot := bytesutil.ToBytes32(j.Root)
if headStartRoot == params.BeaconConfig().ZeroHash {
headStartRoot = s.genesisRoot
}
// In order to process the head, the fork choice store requires justified info.
// If the fork choice store is missing justified block info, a node should
// re-initialize the fork choice store using the latest justified info.
// This recovers from a fatal condition and should not happen at runtime.
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
if err != nil {
return err
}
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
return err
}
}
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
if err != nil {
return err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
return s.saveHead(ctx, headRoot, headBlock, headState)
}
// This defines the current chain service's view of head.
type head struct {
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot primitives.Slot // the head block slot number
optimistic bool // optimistic status when saved head
// Save head to the local service cache.
return s.saveHead(ctx, headRoot)
}
// This saves head info to the local service cache; it also saves the
// new head root to the DB.
// The caller of this method MUST acquire a lock on forkchoice.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Do nothing if head hasn't changed.
if !s.isNewHead(newHeadRoot) {
return nil
}
if err := blocks.BeaconBlockIsNil(headBlock); err != nil {
r, err := s.HeadRoot(ctx)
if err != nil {
return err
}
if headState == nil || headState.IsNil() {
return errors.New("cannot save nil head state")
if headRoot == bytesutil.ToBytes32(r) {
return nil
}
// If the head state is not available, just return nil.
// There's nothing to cache
if !s.cfg.BeaconDB.HasStateSummary(ctx, newHeadRoot) {
if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
return nil
}
s.headLock.RLock()
oldHeadBlock, err := s.headBlock()
// Get the new head block from DB.
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
if err != nil {
s.headLock.RUnlock()
return errors.Wrap(err, "could not get old head block")
return err
}
oldStateRoot := oldHeadBlock.Block().StateRoot()
s.headLock.RUnlock()
if newHeadBlock == nil || newHeadBlock.IsNil() || newHeadBlock.Block().IsNil() {
return errors.New("cannot save nil head block")
}
// Get the new head state from cached state or DB.
newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil || newHeadState.IsNil() {
return errors.New("cannot save nil head state")
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()
r, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get old head root")
}
oldHeadRoot := bytesutil.ToBytes32(r)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
log.WithError(err).Error("could not check if node is optimistically synced")
}
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
}
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
newHeadSlot := newHeadBlock.Block().Slot()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
newStateRoot := newHeadBlock.Block().StateRoot()
if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"newRoot": fmt.Sprintf("%#x", newHeadRoot),
"newWeight": newWeight,
"oldSlot": fmt.Sprintf("%d", headSlot),
"oldRoot": fmt.Sprintf("%#x", oldHeadRoot),
"oldWeight": oldWeight,
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
"distance": dis,
"depth": dep,
}).Info("Chain reorg occurred")
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
absoluteSlotDifference := slotutil.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],
NewHeadState: newStateRoot[:],
Epoch: slots.ToEpoch(newHeadSlot),
ExecutionOptimistic: isOptimistic,
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: helpers.SlotToEpoch(newHeadSlot),
},
})
if err := s.saveOrphanedOperations(ctx, oldHeadRoot, newHeadRoot); err != nil {
return err
}
reorgCount.Inc()
}
// Cache the new head info.
newHead := &head{
root: newHeadRoot,
block: headBlock,
state: headState,
optimistic: isOptimistic,
slot: headBlock.Block().Slot(),
}
if err := s.setHead(newHead); err != nil {
return errors.Wrap(err, "could not set head")
}
s.setHead(headRoot, newHeadBlock, newHeadState)
// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot[:], newHeadRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -182,8 +168,8 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update the canonical root mapping. It does not save the head block
// root in the DB. With the inception of the initial-sync-cache-state flag, it uses the finalized
// checkpoint as an anchor to resume sync, so the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState, optimistic bool) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
func (s *Service) saveHeadNoDB(ctx context.Context, b block.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return err
}
cachedHeadRoot, err := s.HeadRoot(ctx)
@@ -194,64 +180,44 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
return nil
}
bCp, err := b.Copy()
if err != nil {
return err
}
if err := s.setHeadInitialSync(r, bCp, hs, optimistic); err != nil {
return errors.Wrap(err, "could not set head")
}
s.setHeadInitialSync(r, b.Copy(), hs)
return nil
}
// This sets head view object which is used to track the head slot, root, block, state and optimistic status
func (s *Service) setHead(newHead *head) error {
// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block and state.
bCp, err := newHead.block.Copy()
if err != nil {
return err
}
s.head = &head{
root: newHead.root,
block: bCp,
state: newHead.state.Copy(),
optimistic: newHead.optimistic,
slot: newHead.slot,
slot: block.Block().Slot(),
root: root,
block: block.Copy(),
state: state.Copy(),
}
return nil
}
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState, optimistic bool) error {
func (s *Service) setHeadInitialSync(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()
// This does a full copy of the block only.
bCp, err := block.Copy()
if err != nil {
return err
}
s.head = &head{
root: root,
block: bCp,
state: state,
optimistic: optimistic,
slot: block.Block().Slot(),
root: root,
block: block.Copy(),
state: state,
}
return nil
}
// This returns the head slot.
// This is a lock free version.
func (s *Service) headSlot() primitives.Slot {
if s.head == nil || s.head.block == nil || s.head.block.Block() == nil {
return 0
}
return s.head.block.Block().Slot()
func (s *Service) headSlot() types.Slot {
return s.head.slot
}
// This returns the head root.
@@ -268,7 +234,7 @@ func (s *Service) headRoot() [32]byte {
// This returns the head block.
// It does a full copy on head block for immutability.
// This is a lock free version.
func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) headBlock() block.SignedBeaconBlock {
return s.head.block.Copy()
}
@@ -276,40 +242,16 @@ func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) state.BeaconState {
_, span := trace.StartSpan(ctx, "blockChain.headState")
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()
return s.head.state.Copy()
}
// This returns a read only version of the head state.
// It does not perform a copy of the head state.
// This returns the genesis validator root of the head state.
// This is a lock free version.
func (s *Service) headStateReadOnly(ctx context.Context) state.ReadOnlyBeaconState {
_, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
defer span.End()
return s.head.state
}
// This returns the genesis validators root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorsRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
}
// This returns the validator referenced by the provided index in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorAtIndex(index primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
return s.head.state.ValidatorAtIndexReadOnly(index)
}
// This returns the validator index referenced by the provided pubkey in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorIndexAtPubkey(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
return s.head.state.ValidatorIndexByPubkey(pubKey)
func (s *Service) headGenesisValidatorRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// Returns true if head state exists.
@@ -318,28 +260,78 @@ func (s *Service) hasHeadState() bool {
return s.head != nil && s.head.state != nil
}
// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
var justifiedState state.BeaconState
var err error
if justifiedRoot == s.genesisRoot {
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
if err != nil {
return err
}
} else {
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return err
}
}
if justifiedState == nil || justifiedState.IsNil() {
return errors.New("justified state can't be nil")
}
epoch := helpers.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}); err != nil {
return err
}
s.justifiedBalancesLock.Lock()
defer s.justifiedBalancesLock.Unlock()
s.justifiedBalances = justifiedBalances
return nil
}
func (s *Service) getJustifiedBalances() []uint64 {
s.justifiedBalancesLock.RLock()
defer s.justifiedBalancesLock.RUnlock()
return s.justifiedBalances
}
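
The cached balances feed fork choice: head selection weighs branches by the effective balances of validators active at the justified checkpoint, and caching them avoids re-reading the justified state on every head update. A minimal sketch of the intended call pattern, assuming the file's existing imports and an `updateHead(ctx, balances)` method like the one exercised in TestUpdateHead_MissingJustifiedRoot further down (the wrapper name is hypothetical):

// Hypothetical call site (not part of this diff): refresh the cache when the
// justified checkpoint changes, then hand the balances to fork choice.
func (s *Service) updateHeadWithJustifiedBalances(ctx context.Context, justifiedRoot [32]byte) error {
	if err := s.cacheJustifiedStateBalances(ctx, justifiedRoot); err != nil {
		return err
	}
	return s.updateHead(ctx, s.getJustifiedBalances())
}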
// Notifies a common event feed of a new chain head event. Called right after a new
// chain head is determined, set, and saved to disk.
func (s *Service) notifyNewHeadEvent(
ctx context.Context,
newHeadSlot primitives.Slot,
newHeadSlot types.Slot,
newHeadState state.BeaconState,
newHeadStateRoot,
newHeadRoot []byte,
) error {
previousDutyDependentRoot := s.originBlockRoot[:]
currentDutyDependentRoot := s.originBlockRoot[:]
previousDutyDependentRoot := s.genesisRoot[:]
currentDutyDependentRoot := s.genesisRoot[:]
var previousDutyEpoch primitives.Epoch
currentDutyEpoch := slots.ToEpoch(newHeadSlot)
var previousDutyEpoch types.Epoch
currentDutyEpoch := helpers.SlotToEpoch(newHeadSlot)
if currentDutyEpoch > 0 {
previousDutyEpoch = currentDutyEpoch.Sub(1)
}
currentDutySlot, err := slots.EpochStart(currentDutyEpoch)
currentDutySlot, err := helpers.StartSlot(currentDutyEpoch)
if err != nil {
return errors.Wrap(err, "could not get duty slot")
}
previousDutySlot, err := slots.EpochStart(previousDutyEpoch)
previousDutySlot, err := helpers.StartSlot(previousDutyEpoch)
if err != nil {
return errors.Wrap(err, "could not get duty slot")
}
@@ -355,90 +347,16 @@ func (s *Service) notifyNewHeadEvent(
return errors.Wrap(err, "could not get duty dependent root")
}
}
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.NewHead,
Data: &ethpbv1.EventHead{
Slot: newHeadSlot,
Block: newHeadRoot,
State: newHeadStateRoot,
EpochTransition: slots.IsEpochStart(newHeadSlot),
EpochTransition: helpers.IsEpochEnd(newHeadSlot),
PreviousDutyDependentRoot: previousDutyDependentRoot,
CurrentDutyDependentRoot: currentDutyDependentRoot,
ExecutionOptimistic: isOptimistic,
},
})
return nil
}
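
Consumers pick these head notifications up from the state feed. A hedged sketch of a subscriber, assuming the feed, statefeed, and ethpbv1 packages already referenced by this file; the channel size and logging are illustrative only:

// Illustrative subscriber draining NewHead events from the state feed.
func watchNewHeads(ctx context.Context, notifier statefeed.Notifier) {
	ch := make(chan *feed.Event, 1)
	sub := notifier.StateFeed().Subscribe(ch)
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-ch:
			if ev.Type != statefeed.NewHead {
				continue
			}
			if head, ok := ev.Data.(*ethpbv1.EventHead); ok {
				log.Infof("New head: slot=%d block=%#x", head.Slot, head.Block)
			}
		case <-ctx.Done():
			return
		}
	}
}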
// This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out attestations that are one epoch older as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, _, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor (the root doesn't exist in fork choice); there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
return nil
case err != nil:
return err
}
for orphanedRoot != commonAncestorRoot {
if ctx.Err() != nil {
return ctx.Err()
}
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}
// If the block is an epoch older, break out of the loop since we can't include its atts anyway.
// This prevents being stuck in this loop longer than necessary.
if orphanedBlk.Block().Slot()+params.BeaconConfig().SlotsPerEpoch <= s.CurrentSlot() {
break
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// If the attestation is one epoch older, it wouldn't be useful to save it.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
for _, as := range orphanedBlk.Block().Body().AttesterSlashings() {
if err := s.cfg.SlashingPool.InsertAttesterSlashing(ctx, s.headStateReadOnly(ctx), as); err != nil {
log.WithError(err).Error("Could not insert reorg attester slashing")
}
}
for _, vs := range orphanedBlk.Block().Body().ProposerSlashings() {
if err := s.cfg.SlashingPool.InsertProposerSlashing(ctx, s.headStateReadOnly(ctx), vs); err != nil {
log.WithError(err).Error("Could not insert reorg proposer slashing")
}
}
for _, v := range orphanedBlk.Block().Body().VoluntaryExits() {
s.cfg.ExitPool.InsertVoluntaryExit(v)
}
if orphanedBlk.Version() >= version.Capella {
changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, c := range changes {
s.cfg.BLSToExecPool.InsertBLSToExecChange(c)
}
}
parentRoot := orphanedBlk.Block().ParentRoot()
orphanedRoot = bytesutil.ToBytes32(parentRoot[:])
}
return nil
}
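
For context, a hedged sketch of where a routine like this would typically be called: when the incoming head does not descend from the current head (a reorg), the orphaned branch's operations are recovered into the pools before the head switches. The `isDescendant` helper below is purely illustrative and not part of this diff:

// Illustrative only: recover operations from the branch being orphaned.
if oldHeadRoot != newHeadRoot && !isDescendant(oldHeadRoot, newHeadRoot) {
	if err := s.saveOrphanedOperations(ctx, oldHeadRoot, newHeadRoot); err != nil {
		return err
	}
}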


@@ -2,84 +2,65 @@ package blockchain
import (
"context"
"fmt"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
lru "github.com/hashicorp/golang-lru"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/params"
)
// Initialize the state cache for sync committees.
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
type HeadSyncCommitteeFetcher interface {
HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error)
HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
}
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
// The head sync committee domain functions return callers domain data with respect to slot and head state.
type HeadDomainFetcher interface {
HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
}
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
}
// HeadSyncSelectionProofDomain returns the head sync committee selection proof domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
}
// HeadSyncContributionProofDomain returns the head sync contribution proof domain using current head state advanced up to `slot`.
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
}
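
These domains are what a sync committee participant folds into its signing root before producing a BLS signature. A hedged sketch of a caller, assuming the file's existing ethpb import plus `blockRoot`, `slot`, and a BLS `secretKey` supplied by the caller; the SigningData wrapper mirrors the spec's compute_signing_root and its field names are an assumption:

// Illustrative signing flow for a sync committee message over `blockRoot`.
domain, err := s.HeadSyncCommitteeDomain(ctx, slot)
if err != nil {
	return err
}
signingData := &ethpb.SigningData{ObjectRoot: blockRoot[:], Domain: domain}
signingRoot, err := signingData.HashTreeRoot() // compute_signing_root(blockRoot, domain)
if err != nil {
	return err
}
sig := secretKey.Sign(signingRoot[:]).Marshal()
_ = sig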
// HeadSyncCommitteeIndices returns the sync committee index positions using the head state. Input `slot` is taken into consideration
// because a validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at the epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD, the validator is considered to be using the next period's sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
// This means that when assigned to an epoch sync committee signatures must be produced and broadcast for slots on range
// [compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)
// rather than for the range
// [compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
nextSlotEpoch := slots.ToEpoch(slot + 1)
currentEpoch := slots.ToEpoch(slot)
switch {
case slots.SyncCommitteePeriod(nextSlotEpoch) == slots.SyncCommitteePeriod(currentEpoch):
return s.headCurrentSyncCommitteeIndices(ctx, index, slot)
// At sync committee period boundary, validator should sample the next epoch sync committee.
case slots.SyncCommitteePeriod(nextSlotEpoch) == slots.SyncCommitteePeriod(currentEpoch)+1:
return s.headNextSyncCommitteeIndices(ctx, index, slot)
default:
// Impossible condition.
return nil, errors.New("could get calculate sync subcommittee based on the period")
}
}
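
A standalone, hedged illustration of the boundary rule with mainnet constants (32 slots per epoch, 256 epochs per sync committee period): only the final slot of a period samples the next committee, matching the test cases further down.

package main

import "fmt"

// Standalone illustration of the period-boundary rule above, using mainnet
// constants. Period of a slot = epoch/256 = slot/8192.
func main() {
	const slotsPerPeriod = 32 * 256 // 8192

	samplesNext := func(slot uint64) bool {
		// Mirrors the switch above: compare the period of slot+1 with the period of slot.
		return (slot+1)/slotsPerPeriod != slot/slotsPerPeriod
	}

	fmt.Println(samplesNext(8191)) // true: last slot of a period uses the next committee
	fmt.Println(samplesNext(5000)) // false: every other slot uses the current committee
}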
// headCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
// HeadCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -87,9 +68,12 @@ func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index pri
return helpers.CurrentPeriodSyncSubcommitteeIndices(headState, index)
}
// headNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
// HeadNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
func (s *Service) HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
@@ -99,17 +83,20 @@ func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index primit
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
// Head state advanced up to `slot` is used for calculation.
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error) {
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
nextSlotEpoch := slots.ToEpoch(headState.Slot() + 1)
currEpoch := slots.ToEpoch(headState.Slot())
nextSlotEpoch := helpers.SlotToEpoch(headState.Slot() + 1)
currEpoch := helpers.SlotToEpoch(headState.Slot())
var syncCommittee *ethpb.SyncCommittee
if currEpoch == nextSlotEpoch || slots.SyncCommitteePeriod(currEpoch) == slots.SyncCommitteePeriod(nextSlotEpoch) {
if helpers.SyncCommitteePeriod(currEpoch) == helpers.SyncCommitteePeriod(nextSlotEpoch) {
syncCommittee, err = headState.CurrentSyncCommittee()
if err != nil {
return nil, err
@@ -125,52 +112,77 @@ func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.
}
// returns calculated domain using input `domain` and `slot`.
func (s *Service) domainWithHeadState(ctx context.Context, slot primitives.Slot, domain [4]byte) ([]byte, error) {
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
if err != nil {
return nil, err
}
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorsRoot())
return helpers.Domain(headState.Fork(), helpers.SlotToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
}
// returns the head state advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving with `slot` as the key.
// On a cache miss, it processes the head state up to `slot` and fills the cache using `slot` as the key.
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives.Slot) (state.BeaconState, error) {
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
var headState state.BeaconState
var err error
mLock := async.NewMultilock(fmt.Sprintf("%s-%d", "syncHeadState", slot))
mLock.Lock()
defer mLock.Unlock()
// If a head state already exists for the requested slot, we don't need to process slots.
cachedState, err := syncCommitteeHeadStateCache.Get(slot)
switch {
case err == nil:
cachedState := syncCommitteeHeadStateCache.get(slot)
if cachedState != nil && !cachedState.IsNil() {
syncHeadStateHit.Inc()
headState = cachedState
return headState, nil
case errors.Is(err, cache.ErrNotFound):
} else {
headState, err = s.HeadState(ctx)
if err != nil {
return nil, err
}
if headState == nil || headState.IsNil() {
return nil, errors.New("nil state")
}
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, err
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, slot)
if err != nil {
return nil, err
if slot > headState.Slot() {
headState, err = core.ProcessSlots(ctx, headState, slot)
if err != nil {
return nil, err
}
}
syncHeadStateMiss.Inc()
err = syncCommitteeHeadStateCache.Put(slot, headState)
return headState, err
default:
// In the event we encounter any other error, we return it.
return nil, err
syncCommitteeHeadStateCache.add(slot, headState)
}
return headState, nil
}
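
The per-slot multilock plus the size-1 cache means that, for a burst of requests at the same slot, only the first caller pays for copying the head state and running slot processing; the rest hit the cache. A rough stand-in for that pattern in isolation (a single mutex instead of the keyed multilock, so this is a simplification, not the async package's behavior):

// Simplified sketch: serialize per-slot work and reuse the computed state.
type singleSlotCache struct {
	mu    sync.Mutex
	slot  types.Slot
	state state.BeaconState
}

func (c *singleSlotCache) getOrCompute(slot types.Slot, compute func() (state.BeaconState, error)) (state.BeaconState, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.state != nil && c.slot == slot {
		return c.state, nil // hit: no copy, no slot processing
	}
	st, err := compute()
	if err != nil {
		return nil, err
	}
	c.slot, c.state = slot, st
	return st, nil
}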
var syncCommitteeHeadStateCache = newSyncCommitteeHeadState()
// syncCommitteeHeadState caches the latest head state requested by sync committee participants.
type syncCommitteeHeadState struct {
cache *lru.Cache
lock sync.RWMutex
}
// newSyncCommitteeHeadState initializes the lru cache for `syncCommitteeHeadState` with size of 1.
func newSyncCommitteeHeadState() *syncCommitteeHeadState {
c, err := lru.New(1) // only need size of 1 to avoid redundant state copy, HTR, and process slots.
if err != nil {
panic(err)
}
return &syncCommitteeHeadState{cache: c}
}
// add `slot` as key and `state` as value onto the lru cache.
func (c *syncCommitteeHeadState) add(slot types.Slot, state state.BeaconState) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(slot, state)
}
// get `state` using `slot` as key. Return nil if nothing is found.
func (c *syncCommitteeHeadState) get(slot types.Slot) state.BeaconState {
c.lock.RLock()
defer c.lock.RUnlock()
val, exists := c.cache.Get(slot)
if !exists {
return nil
}
if val == nil {
return nil
}
return val.(*stateAltair.BeaconState)
}


@@ -4,61 +4,35 @@ import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
// Current period
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
// Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
require.DeepEqual(t, a, b)
// Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
require.DeepNotEqual(t, a, b)
}
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
func TestService_HeadCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.HeadCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
require.Equal(t, 0, len(indices))
}
func TestService_headNextSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
func TestService_HeadNextSyncCommitteeIndices(t *testing.T) {
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.HeadNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
@@ -66,13 +40,13 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
}
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0)
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
require.NoError(t, err)
// Any subcommittee should match the subcommittee size.
@@ -81,11 +55,11 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
}
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
@@ -95,11 +69,11 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
}
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
@@ -109,11 +83,11 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
}
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
@@ -123,17 +97,12 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
}
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
c := syncCommitteeHeadStateCache
t.Cleanup(func() {
syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
})
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
c := newSyncCommitteeHeadState()
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
require.NoError(t, beaconState.SetSlot(100))
cachedState, err := c.Get(101)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
cachedState := c.get(101)
require.Equal(t, nil, cachedState)
require.NoError(t, c.Put(101, beaconState))
cachedState, err = c.Get(101)
require.NoError(t, err)
c.add(101, beaconState)
cachedState = c.get(101)
require.DeepEqual(t, beaconState, cachedState)
}


@@ -3,24 +3,18 @@ package blockchain
import (
"bytes"
"context"
"sort"
"testing"
"time"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -29,12 +23,10 @@ func TestSaveHead_Same(t *testing.T) {
service := setupBeaconChain(t, beaconDB)
r := [32]byte{'A'}
service.head = &head{root: r}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same")
service.head = &head{slot: 0, root: r}
require.NoError(t, service.saveHead(context.Background(), r))
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
@@ -43,51 +35,41 @@ func TestSaveHead_Different(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
oldRoot := [32]byte{'A'}
service.head = &head{
root: oldRoot,
block: oldBlock,
slot: 0,
root: oldRoot,
block: wrapper.WrappedPhase0SignedBeaconBlock(
&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 0,
StateRoot: make([]byte, 32),
},
},
),
}
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock := testutil.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
headBlock, err := service.headBlock()
require.NoError(t, err)
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
}
func TestSaveHead_Different_Reorg(t *testing.T) {
@@ -96,73 +78,89 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
oldRoot := [32]byte{'A'}
service.head = &head{
root: oldRoot,
block: oldBlock,
slot: 0,
root: oldRoot,
block: wrapper.WrappedPhase0SignedBeaconBlock(
&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 0,
StateRoot: make([]byte, 32),
},
},
),
}
reorgChainParent := [32]byte{'B'}
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock := testutil.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.saveHead(context.Background(), newRoot))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
}
headBlock, err := service.headBlock()
require.NoError(t, err)
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
require.LogsContain(t, hook, "Chain reorg occurred")
require.LogsContain(t, hook, "distance=1")
require.LogsContain(t, hook, "depth=1")
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
state, _ := testutil.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
b := testutil.NewBeaconBlock()
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{}
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
}
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
bState, _ := testutil.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
srv := &Service{
cfg: &config{
cfg: &Config{
StateNotifier: notifier,
},
originBlockRoot: [32]byte{1},
genesisRoot: [32]byte{1},
}
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err := srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])
err := srv.notifyNewHeadEvent(1, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -174,30 +172,30 @@ func Test_notifyNewHeadEvent(t *testing.T) {
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: false,
PreviousDutyDependentRoot: srv.originBlockRoot[:],
CurrentDutyDependentRoot: srv.originBlockRoot[:],
PreviousDutyDependentRoot: srv.genesisRoot[:],
CurrentDutyDependentRoot: srv.genesisRoot[:],
}
require.DeepSSZEqual(t, wanted, eventHead)
})
t.Run("non_genesis_values", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
bState, _ := testutil.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
genesisRoot := [32]byte{1}
srv := &Service{
cfg: &config{
cfg: &Config{
StateNotifier: notifier,
},
originBlockRoot: genesisRoot,
genesisRoot: genesisRoot,
}
epoch1Start, err := slots.EpochStart(1)
epoch1Start, err := helpers.StartSlot(1)
require.NoError(t, err)
epoch2Start, err := slots.EpochStart(1)
epoch2Start, err := helpers.StartSlot(1)
require.NoError(t, err)
require.NoError(t, bState.SetSlot(epoch1Start))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
err = srv.notifyNewHeadEvent(epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -208,408 +206,10 @@ func Test_notifyNewHeadEvent(t *testing.T) {
Slot: epoch2Start,
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: true,
EpochTransition: false,
PreviousDutyDependentRoot: genesisRoot[:],
CurrentDutyDependentRoot: make([]byte, 32),
}
require.DeepSSZEqual(t, wanted, eventHead)
})
}
func TestRetrieveHead_ReadOnly(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
service.head = &head{
root: oldRoot,
block: oldBlock,
}
newHeadSignedBlock := util.NewBeaconBlock()
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
rOnlyState, err := service.HeadStateReadOnly(ctx)
require.NoError(t, err)
assert.Equal(t, rOnlyState, service.head.state, "Head is not the same object")
}
func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
}
func TestSaveOrphanedOps(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ShardCommitteePeriod = 0
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
service.head = &head{state: st}
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkConfig.NumProposerSlashings = 1
blkConfig.NumAttesterSlashings = 1
blkConfig.NumVoluntaryExits = 1
blk3, err := util.GenerateFullBlock(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
require.Equal(t, 1, len(service.cfg.SlashingPool.PendingAttesterSlashings(ctx, st, false)))
exits, err := service.cfg.ExitPool.PendingExits()
require.NoError(t, err)
require.Equal(t, 1, len(exits))
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.cfg.BLSToExecPool = blstoexec.NewPool()
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisStateCapella(t, 64)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkG, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 1)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig.NumBLSChanges = 10
blk1, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 2)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blkConfig.NumBLSChanges = 15
blk2, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 3)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlockCapella()
blkConfig.NumBLSChanges = 0
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlockCapella{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
pending, err := service.cfg.BLSToExecPool.PendingBLSToExecChanges()
require.NoError(t, err)
require.Equal(t, 15, len(pending))
}
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
}
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}
require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestUpdateHead_noSavedChanges(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcp := &ethpb.Checkpoint{
Root: bellatrixBlkRoot[:],
Epoch: 0,
}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
require.NoError(t, beaconDB.SaveState(ctx, bellatrixState, bellatrixBlkRoot))
service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState)
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)
st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{1, 2}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
newRoot, err := service.cfg.ForkChoiceStore.Head(ctx)
require.NoError(t, err)
require.NotEqual(t, headRoot, newRoot)
require.Equal(t, headRoot, service.headRoot())
}
