Compare commits


75 Commits

Author SHA1 Message Date
terencechain
86a883aa19 Add capella fork epoch for Goerli (#12073)
* Add capella fork epoch for Goerli

* update pr

---------

Co-authored-by: nisdas <nishdas93@gmail.com>
2023-03-04 09:43:20 +00:00
Preston Van Loon
c000e8fde5 Raise the max grpc message size to a very large value by default (#12072)
* Raise the max grpc message size to a very large value by default

* unused import

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-02 19:04:58 +00:00
dependabot[bot]
75e5887f07 Bump golang.org/x/net from 0.6.0 to 0.7.0 (#12063)
* Bump golang.org/x/net from 0.6.0 to 0.7.0

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.6.0 to 0.7.0.
- [Release notes](https://github.com/golang/net/releases)
- [Commits](https://github.com/golang/net/compare/v0.6.0...v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

* gazelle

* go mod tidy

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-02 17:37:16 +00:00
Radosław Kapka
4ca3c5b058 Add slot to proposal error logs (#12071)
* Add slot to proposal error logs

* remove one field
2023-03-02 16:33:39 +00:00
Radosław Kapka
25d06d41be Fix type name in field_trie_helpers error message (#12070)
(cherry picked from commit bc9330c329)
2023-03-02 14:33:22 +00:00
Potuz
0a87210514 Forkchoice external locks v2 (#12036)
* write locks

* fix forkchoice tests

* blockchain locks

* lock on IsOptimistic

* use forkchoice instead of chaininfo within savehead

* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* fix tests

* remove VerifyFinalizedBlkDescendant

* don't write lock wrappers

* fix validateBeaconBlock

* Terence's review and more missing locks

* add lock for InForkChoice

* lock head on fillMissingBlockPayload

* fix lock on IsOptimisticForRoot

* fix lock in fillMissingBlockPayloadId

* extra comments

* lock proposerBoost on spectests

* nishant's review

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-03-02 09:10:52 -03:00
Nishant Das
196798eacc Update Deps For Capella (#12067)
* update

* gzl

* Using zstd workaround from @tals, per github.com/bazelbuild/rules_go/issues/3411

* gaz

* hacky patch

---------

Co-authored-by: rkapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-02 03:31:41 +00:00
Radosław Kapka
17fe935343 Deprecate --interop-genesis-state (#12008)
* Deprecate `--interop-genesis-state`

* better pattern

* fix errors

* test fix

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-03-01 19:39:17 +00:00
Radosław Kapka
ac4483417d Redesign voluntary exits pool (#11898)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-03-01 17:44:00 +01:00
Potuz
0d3fb0a32b lock head on fillMissingBlockPayload (#12068) 2023-03-01 13:09:10 +00:00
Nishant Das
3d337b07e1 Remove Ropsten Testnet Config (#12058)
* remove support for ropsten testnet

* add deprecated flag for ropsten

---------

Co-authored-by: P <1674920+peterbitfly@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-03-01 13:26:32 +08:00
Raul Jordan
11b90e1f63 Store Blinded Beacon Blocks by Default for New Prysm Databases (#11591)
* fresh database checks

* gaz

* fix up tests

* fix test

* build

* bellatrix pass tests

* fix flags

* gaz

* fix up rpc test

* gaz

* building

* refactor, remove boolean blindness

* kv blocks

* fix up ensure coverage

* e2e default

* add missing case

* nishant feedback:

* log

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-03-01 00:07:23 +00:00
Preston Van Loon
3c73bac798 Update protoc-gen-go-cast to suppress tool output (#12062)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-28 23:04:22 +00:00
Preston Van Loon
91fee5db17 Update distroless base images (#12061)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-28 22:50:37 +00:00
Preston Van Loon
155b0c161e Bazel: cleanup .bazelrc file (#12059)
* Reorganize bazelrc and intro new flags

* move cross compilation toolchain into its own bazelrc

* Restore build_tests_only
2023-02-28 21:36:43 +00:00
Nishant Das
a7010d817d Fix Scenario Test Failures (#12056)
* fix scenario failures

* fix up

* continue fixing
2023-02-28 23:21:15 +08:00
james-prysm
c0dd233a1c E2E: beacon api post attester duties (#11899)
* adding new post request util and attester duties test

* adding in status checks

* fixing error

* increasing size of request

* fixing gofmt
2023-02-28 13:35:02 +08:00
Preston Van Loon
c391fad258 Update rules docker to v0.25.0 (#12054)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-02-27 23:46:10 +00:00
Radosław Kapka
e92b546a36 Return proposer reward from ProcessSyncAggregate (#12047) 2023-02-27 19:04:23 +01:00
Patrice Vignola
765345ac3a Remove the gRPC fallback client from the validator REST API (#12051) 2023-02-27 12:46:34 +00:00
Potuz
ec13d52f03 Remove VerifyFinalizedBlkDescendant (#12046)
* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* fix tests

* remove VerifyFinalizedBlkDescendant

* fix validateBeaconBlock
2023-02-25 13:39:13 -03:00
Manu NALEPA
08ebc99bc3 Add (lack of) REST implementation for GetFeeRecipientByPubKey (#11991)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-02-24 21:43:52 +00:00
james-prysm
8918e8c274 prysmctl: check capella fork for blstoexecutionchange (#12039)
* adding in a check for current fork to disable submission prior

* fixing linting

* fixing error return

* updating error contains in test

* fixing unit test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-24 16:28:32 +00:00
Patrice Vignola
0e4185b40c Remove the StreamDuties endpoint (#12044)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-24 15:56:31 +00:00
Nishant Das
383edb3553 Fix Validator Participation Evaluator (#12045)
* fix bug

* remove proto changes
2023-02-24 10:39:06 +00:00
james-prysm
40589905bc Prysm V4: Web3signer changes for capella (#12001)
* adding in changes for capella

* fixing metrics

* updating the web3signer version
2023-02-24 06:21:39 +00:00
Potuz
ae76240f83 Use forkchoice HasNode to check if a block is consistent with finality (#12040)
* Use forkchoice HasNode to check if a block is consistent with finality

* interface fix

* fix tests
2023-02-23 21:21:34 +00:00
Radosław Kapka
096cf7b8c1 Test invalid change inclusion in blstoexec pool (#12037)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-23 17:18:37 +00:00
james-prysm
7c3027801b Prysm v4 - mark old endpoints for deprecation (#11997)
* updating some protos

* updating proto endpoints

* updating generated code

* fixing linting

* updating protos

* updating based on feedback. also removing unused storage protos

* adding in deprecation notice on the server functions

* Update proto/prysm/v1alpha1/beacon_chain.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/beacon_chain.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/health.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/validator.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/debug.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/eth/v1/beacon_chain.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/prysm/v1alpha1/health.proto

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update proto/eth/service/beacon_chain_service.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/eth/service/beacon_chain_service.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update proto/eth/service/beacon_chain_service.proto

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* addressing feedback

* rewording comment

* updating comments

---------

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-23 16:32:03 +00:00
Raul Jordan
8f6d5ff075 Web-ui-version-v2.0.3-commit (#12038)
Co-authored-by: james-prysm <james-prysm@users.noreply.github.com>
2023-02-23 10:11:42 -06:00
james-prysm
c379c9ea47 Prysm V4 - Deprecate web UI endpoints (#12025)
* wip adding deprecation to web api endpoints

* wip adding more deprecation notices

* adding in a few more deprecation markers

* updating order of comments
2023-02-23 15:24:06 +00:00
Radosław Kapka
75e8f85ba5 Handle EL offline/unavailable in getSyncingStatus (#12031)
* Handle EL offline/unavailable in `getSyncingStatus`

* fix type

* better name

* test fix
2023-02-23 12:09:56 +00:00
Nishant Das
8d82ca08ab Allow Testing Of Withdrawals On E2E Mainnet (#12027) 2023-02-23 12:04:53 +01:00
Radosław Kapka
29f645f0cc Add Eth-Consensus-Version header to SubmitBlindedBlock (#12029)
* Add `Eth-Consensus-Version` header to `SubmitBlindedBlock`

* bring back function

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-22 17:42:05 +00:00
Radosław Kapka
15016555f5 Remove SubmitBlindedBlockCapella (#12030)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-22 17:06:52 +00:00
Potuz
98d2d5f324 implement get_proposer_head (#12022)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-22 16:40:01 +00:00
Nishant Das
a4579398cb Add Clarifying Comments for Withdrawal Functions (#12028) 2023-02-22 11:00:36 +00:00
Pedro Gomes
094ff877f6 Fix NPE, ensure the listener is available (#11946)
* Fix NPE, ensure the listener is available

* Added test

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-02-22 06:53:09 +00:00
terencechain
9ccfe17227 Check unblinded payload root before proposal (#12026)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-22 02:28:59 +00:00
Manuel Caspari
bf25b34ac1 Fix error handling when using BatchCall (#11960)
* fix error handling when using BatchCall

* fix typo

* simplify code

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-02-22 01:42:43 +00:00
terencechain
da69af9b6e Fix first bellatrix block when using mev-boost (#12019)
* Update head before block proposal

* Fix first bellatrix block when using mev-boost

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-22 00:42:51 +00:00
terencechain
3c8f5deb66 Add v1.3.0-rc.3 spec tests (#12024)
* Add v1.3.0-rc.3 spec tests

* Replace 4844 with deneb

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-21 23:01:32 +00:00
Potuz
dc5cf88243 Do not check signatures when packing BLS changes (#12021)
* Do not check signatures when packing BLS changes

* gaz

* fix test

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-02-21 22:18:54 +00:00
terencechain
0c52064d90 Engine API: fix blockValue endianness conversion (#12018) 2023-02-21 13:42:32 -08:00
Preston Van Loon
032f9b7450 Update general spectest tar.gz SHA value (#12023)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-02-21 15:45:20 +00:00
Potuz
166f7119ec Implement should_override_forkchoice_update (#11981)
* Implement should_override_forkchoice_update

* add tests

* remove unused exported function

* go mod tidy

* fix go mod

* Fix go mod tidy

* Fix context import

* mod tidy

* fix test

* Terence's review

* fix test

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2023-02-18 07:37:03 -03:00
Potuz
b687d29d02 Forkchoice interface cleanup (#12010)
* remove AllTipsAreInvalid

* rename InsertOptimisticChain

* remove proposer boost interface

* remove HasParent
2023-02-17 17:36:52 +00:00
terencechain
2839f2c124 Proposer compare withdrawal roots (#11977)
* Proposer use highest value payload vs header

* Default to interface for changes

* Clean up

* Clean up

* Clean up

* Capella condition

* Proposer compare withdrawal roots

* Add unit test

* Fix test

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-17 15:20:12 +00:00
Potuz
3d0ecdff3f Fix forkchoice endpoint path (#11986)
* fix endpoint path

* update protos

* fix double import

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-17 13:45:25 +00:00
terencechain
481d24cb1b Update head before block proposal (#11992)
* Update head before block proposal

* Move before opt sync

* Fix test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-17 08:41:26 +00:00
Nishant Das
7d7c91c17f Fix Failing E2E Runs (#11993)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-17 08:08:54 +00:00
terencechain
a6dd561b9d Process blind withdrawals (#11995)
* Update head before block proposal

* fix e2e runs

* Can process blind withdrawals

* Rm bad change

* Should be in process payload header

* Version check

* Compare roots

* Typo

* Remove redundant checks

* Add tests

* Rm errors

---------

Co-authored-by: nisdas <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-17 15:49:30 +08:00
Potuz
c7f0a94b19 Remove state balance cache (#11964)
* Remove state balance cache

* remove unused metrics

* add missing balance update

* add comment about locks

* update balance only when updating the justified checkpoint on unrealization

* update checkpoint balances at launch

* fix unit tests

* fix remaining test

* review

* fix build file

* fix test

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-15 14:19:46 +00:00
terencechain
f49720209e Utilize head only state on rpc endpoints (#11950)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-02-14 19:57:11 +00:00
Nishant Das
2d03c233b8 Add Support For Multiclient E2E in Capella (#11989)
* add support for it

* fix it up
2023-02-14 13:05:54 -06:00
Dhruv Bodani
bc643f8872 Add REST implementation for SubmitAggregateSelectionProof (#11980)
* add implementation of submit aggregate selection proof

* add tests

* handle head optimistic case

* fix imports

* address deepsource errors

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-14 11:25:21 +00:00
Potuz
fa4c9beb13 Only pack attestations and deposits if no err (#11987)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-14 06:12:59 +00:00
Nishant Das
449d767294 Fix Bandwidth Limiter Panic (#11988) 2023-02-14 05:44:20 +00:00
Manu NALEPA
93298dfc56 Fix #11948 and call GetFeeRecipientByPubKey in ListFeeRecipientByPubkey only when nothing is defined in the VC (#11970)
* Fix #11948 and call `GetFeeRecipientByPubKey` in `ListFeeRecipientByPubkey` only when nothing is defined in the VC

* Fix typo
2023-02-14 04:53:54 +00:00
Patrice Vignola
791110f795 Add REST implementation for Validator's StreamBlocksAltair (#11974)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-13 14:22:11 +01:00
Nishant Das
63186c8b0f Fix Batch Requesting Of Headers (#11982)
* fix bug

* remove
2023-02-11 01:38:13 +00:00
terencechain
e136e10ebb Proposer use highest value payload vs header (#11967) 2023-02-10 11:36:19 -08:00
Sammy Rosso
81b29ea2d8 Update produce block funcs to support Capella (#11959)
Co-authored-by: rkapka <rkapka@wp.pl>
2023-02-10 17:20:38 +01:00
terencechain
71b4468be6 Add Capella fork epoch for Sepolia (#11979)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-02-10 10:46:39 +00:00
Nishant Das
76ed634f73 Capella E2E (#11951)
* save changes

* fix build

* fix build

* fix signatures

* fix

* fix

* finally fix it

* mainnet

* deps

* make it 10

* back to phase0

* fix build

* gofmt

* etherbase

* rm logs

* build

* james review

* fix yaml

* Update testing/endtoend/evaluators/fork.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek's review

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-10 14:19:15 +08:00
Dhruv Bodani
5092738be6 add REST implementation for GetSyncSubcommitteeIndex (#11971)
* add implementation for GetSyncSubcommitteeIndex

* fix goimports

* update error parsing for validator sync committee index

Co-authored-by: Radosław Kapka <radek@prysmaticlabs.com>

* fix imports

---------

Co-authored-by: Radosław Kapka <radek@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-09 10:03:32 +00:00
terencechain
d4f3257f60 Rename block interfaces (#11975) 2023-02-09 10:23:32 +01:00
terencechain
e1f7c52e6b Add and use SignedBeaconBlockWriteOnly (#11968) 2023-02-08 08:39:14 -08:00
Potuz
0d6e8718fd Aditya's PR 18 (#11945)
* Aditya's PR 18

This PR implements PR 18. One piece is still missing: consistency of head
with the finalized checkpoint. I will think of ways to enforce this.

* prune finalized incompatible

* don't check finalization on viable for head

* unit tests fixes

* gazelle

* remove finalized epoch from viableForHead

* remove finalized epoch from leadsToViableHead

* use non-slashed indices

* function rename

* lint fixes

* lint fixes

* lint fixes
2023-02-05 09:40:07 -03:00
a
4f6cb3209d Generic map (#11953)
* generic-map

* add tx bench

* fix keys

* rename functions to hopefully make static lint happy

* reduce a line :)

* remove generic map

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-02-02 17:59:51 +00:00
Radosław Kapka
dc4440abe7 Small cleanup (#11963) 2023-02-02 17:24:09 +00:00
terencechain
23c4cc0249 Prune canonical attestations when head changes (#11771)
* Only prune canonical attestations

* Feedback from potuz

* Merge conflict

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-02 16:53:01 +00:00
terencechain
9e15316351 Enhance block arrival delay metrics by adding a gauge (#11897)
* Enhance block arrival delay metrics

* Remove histograms

* Revert histogram

* Rm new line

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-02-02 15:43:25 +00:00
Potuz
0d01debe9a reuse code from stategen in the justified balance cache (#11961)
* add balance cache tests to testsuite

* reuse code from stategen in the justified balance cache

* remove state getters from the balance cache

* fix unit tests

* rebase on 11962

* unused error

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-02-02 14:53:08 +00:00
Potuz
1b11a9672a add balance cache tests to testsuite (#11962) 2023-02-02 13:41:44 +00:00
441 changed files with 14938 additions and 8858 deletions

.bazelrc

@@ -1,9 +1,9 @@
# Print warnings for tests with inappropriate test size or timeout.
test --test_verbose_timeout_warnings
# Only build test targets when running bazel test //...
test --build_tests_only
test --test_output=errors
# Import bazelrc presets
import %workspace%/build/bazelrc/convenience.bazelrc
import %workspace%/build/bazelrc/correctness.bazelrc
import %workspace%/build/bazelrc/cross.bazelrc
import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# E2E run with debug gotag
test:e2e --define gotags=debug
@@ -11,21 +11,9 @@ test:e2e --define gotags=debug
# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1
# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
build --host_force_python=PY2
run --host_force_python=PY2
# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. Network sandboxing only works on linux.
build --sandbox_default_allow_network=false
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
run --incompatible_strict_action_env
build --define blst_disabled=false
run --define blst_disabled=false
@@ -68,42 +56,6 @@ build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
build:cgo_symbolizer -c dbg
build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled
# multi-arch cross-compiling toolchain configs:
#-----------------------------------------------
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
# osx_amd64 config for cross compiler toolchain
build:osx_amd64 --config=cross
build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
build:osx_amd64 --compiler=osxcross
# osx_arm64 config for cross compiler toolchain
build:osx_arm64 --config=cross
build:osx_arm64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_arm64_cgo
build:osx_arm64 --compiler=osxcross
build:osx_arm64 --cpu=aarch64
# windows
build:windows_amd64 --config=cross
build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
build:windows_amd64 --compiler=mingw-w64
# linux_arm64 config for cross compiler toolchain
build:linux_arm64 --config=cross
build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
build:linux_arm64 --copt=-funsafe-math-optimizations
build:linux_arm64 --copt=-ftree-vectorize
build:linux_arm64 --copt=-fomit-frame-pointer
build:linux_arm64 --cpu=aarch64
build:linux_arm64 --compiler=clang
build:linux_arm64 --copt=-march=armv8-a
# toolchain build debug configs
#------------------------------
build:debug --sandbox_debug
@@ -111,123 +63,5 @@ build:debug --toolchain_resolution_debug
build:debug --verbose_failures
build:debug -s
# windows debug
build:windows_amd64_debug --config=windows_amd64
build:windows_amd64_debug --config=debug
# osx_amd64 debug config
build:osx_amd64_debug --config=debug
build:osx_amd64_debug --config=osx_amd64
# osx_arm64 debug config
build:osx_arm64_debug --config=debug
build:osx_arm64_debug --config=osx_arm64
# linux_arm64_debug
build:linux_arm64_debug --config=linux_arm64
build:linux_arm64_debug --config=debug
# linux_amd64_debug
build:linux_amd64_debug --config=linux_amd64
build:linux_amd64_debug --config=debug
# Docker Sandbox Configs
#-----------------------
# Note all docker sandbox configs must run from a linux x86_64 host
# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
build:docker-sandbox --define=EXECUTOR=remote
build:docker-sandbox --experimental_docker_verbose
build:docker-sandbox --experimental_enable_docker_sandbox
build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
# windows_amd64 docker sandbox build config
build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
# osx_amd64 docker sandbox build config
build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
# osx_arm64 docker sandbox build config
build:osx_arm64_docker --config=docker-sandbox --config=osx_arm64
build:osx_arm64_docker_debug --config=osx_arm64_docker --config=debug
# linux_arm64 docker sandbox build config
build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
# linux_amd64 docker sandbox build config
build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
# Remote Build Execution
#-----------------------
# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
#
# Depending on how many machines are in the remote execution instance, setting
# this higher can make builds faster by allowing more jobs to run in parallel.
# Setting it too high can result in jobs that timeout, however, while waiting
# for a remote machine to execute them.
build:remote --jobs=50
# Set several flags related to specifying the platform, toolchain and java
# properties.
# These flags should only be used as is for the rbe-ubuntu16-04 container
# and need to be adapted to work with other toolchain containers.
build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
build:remote --javabase=@rbe_ubuntu_clang//java:jdk
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
build:remote --host_platform=@rbe_ubuntu_clang//config:platform
build:remote --platforms=@rbe_ubuntu_clang//config:platform
# Starting with Bazel 0.27.0 strategies do not need to be explicitly
# defined. See https://github.com/bazelbuild/bazel/issues/7480
build:remote --define=EXECUTOR=remote
# Enable remote execution so actions are performed on the remote systems.
# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote --incompatible_strict_action_env=true
# Set a higher timeout value, just in case.
build:remote --remote_timeout=3600
# Enable authentication. This will pick up application default credentials by
# default. You can use --google_credentials=some_file.json to use a service
# account credential instead.
# build:remote --google_default_credentials=true
# Enable build without the bytes
# See: https://github.com/bazelbuild/bazel/issues/6862
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel


@@ -76,9 +76,8 @@ http_archive(
http_archive(
name = "io_bazel_rules_docker",
sha256 = "1f4e59843b61981a96835dc4ac377ad4da9f8c334ebe5e0bb3f58f80c09735f4",
strip_prefix = "rules_docker-0.19.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.19.0/rules_docker-v0.19.0.tar.gz"],
sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
)
http_archive(
@@ -122,32 +121,36 @@ load(
"container_pull",
)
# Pulled gcr.io/distroless/cc-debian11:latest on 2022-02-23
container_pull(
name = "cc_image_base",
digest = "sha256:41036fc7ed8df0f6addc18484cef0c94a85867508967789f947e11ffd5ff0cc8",
name = "cc_image_base_amd64",
digest = "sha256:2a0daf90a7deb78465bfca3ef2eee6e91ce0a5706059f05d79d799a51d339523",
registry = "gcr.io",
repository = "distroless/cc",
repository = "distroless/cc-debian11",
)
# Pulled gcr.io/distroless/cc-debian11:debug on 2022-02-23
container_pull(
name = "cc_debug_image_base",
digest = "sha256:6865ad48467c89c3c3524d4c426f52ad12d9ab7dec31fad31fae69da40eb6445",
name = "cc_debug_image_base_amd64",
digest = "sha256:7bd596f5f200588f13a69c268eea6ce428b222b67cd7428d6a7fef95e75c052a",
registry = "gcr.io",
repository = "distroless/cc",
repository = "distroless/cc-debian11",
)
# Pulled from gcr.io/distroless/base-debian11:latest on 2022-02-23
container_pull(
name = "go_image_base",
digest = "sha256:b9b124f955961599e72630654107a0cf04e08e6fa777fa250b8f840728abd770",
name = "go_image_base_amd64",
digest = "sha256:34e682800774ecbd0954b1663d90238505f1ba5543692dbc75feef7dd4839e90",
registry = "gcr.io",
repository = "distroless/base",
repository = "distroless/base-debian11",
)
# Pulled from gcr.io/distroless/base-debian11:debug on 2022-02-23
container_pull(
name = "go_debug_image_base",
digest = "sha256:65668d2b78d25df3d8ccf5a778d017fcaba513b52078c700083eaeef212b9979",
name = "go_debug_image_base_amd64",
digest = "sha256:0f503c6bfd207793bc416f20a35bf6b75d769a903c48f180ad73f60f7b60d7bd",
registry = "gcr.io",
repository = "distroless/base",
repository = "distroless/base-debian11",
)
container_pull(
@@ -188,9 +191,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.3.0-rc.2"
consensus_spec_test_version = "v1.3.0-rc.2-hotfix"
consensus_spec_version = "v1.3.0-rc.3"
bls_test_version = "v0.1.1"
@@ -206,8 +207,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5c90f42ff30857def5ae0952904c5cad6dbc16310a86d3a0914f37b557920779",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
sha256 = "c56ea4e8fb2fea4b0b23d2191a87bc7e676738dd8a623b44ac847bfeaae5fe64",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -222,8 +223,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "8fbec5f1fa30fb1194b42230bf75f05690a72cf3c56af5b5ac8aafb94c60fc19",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
sha256 = "c97730b372b81e9ee698c4f09eafaec3fb4be177fab323b32e5ef78d0873fb5c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -238,8 +239,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "cb22875dbb67b12f577d12a5aa8074de4c8bd31d74fff45cc110be515c1df847",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
sha256 = "bdd8255c8a536fa83366144aa32fcc3df92a4997401f88f7cda934d13e90d11c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -253,7 +254,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c728947ccf8b628691c76cc92f2f59c02f1c7882a463f26365d32c098bb946b8",
sha256 = "f6539e19b8e4e45f45b80da39c87dfe7c61d9d7cb51fa7a3a36bdaca11a89693",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -284,9 +285,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "82b01a48b143fe0f2fb7fb5f5dd385c1f934335a12d7954f08b1d45d77427b5e",
strip_prefix = "eth2-networks-674f7a1d01d9c18345456eab76e3871b3df2126b",
url = "https://github.com/eth-clients/eth2-networks/archive/674f7a1d01d9c18345456eab76e3871b3df2126b.tar.gz",
sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
)
http_archive(
@@ -317,9 +318,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "b2226874526805d64c29e5053fa28e511b57c0860585d6d59777ee81ff4859ca",
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.2/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
],
)


@@ -18,13 +18,13 @@ import (
"golang.org/x/mod/semver"
)
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
sb []byte
bb []byte
st state.BeaconState
b interfaces.SignedBeaconBlock
b interfaces.ReadOnlySignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
@@ -49,7 +49,7 @@ func (o *OriginData) StateBytes() []byte {
return o.sb
}
// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
return o.bb
}
@@ -116,7 +116,7 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}, nil
}
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
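A quick sketch of what that comment describes: the WeakSubjectivityData values are reduced to a "<block root>:<epoch>" string that can be handed to a beacon node as its weak subjectivity checkpoint flag. The exact flag syntax below is an illustrative assumption, not taken from this diff.

package main

import "fmt"

// checkpointFlag is a hypothetical helper that renders a weak subjectivity
// checkpoint in the "<0xblockRoot>:<epoch>" shape such flags generally take.
func checkpointFlag(blockRoot [32]byte, epoch uint64) string {
	return fmt.Sprintf("%#x:%d", blockRoot[:], epoch)
}

func main() {
	// Prints 0xabcd00...00:162304 for a toy root and epoch.
	fmt.Println(checkpointFlag([32]byte{0xab, 0xcd}, 162304))
}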


@@ -1,9 +1,12 @@
package builder
import (
"math/big"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
)
@@ -154,9 +157,11 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
return w, nil
}
// Header --
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := bytesutil.ReverseByteOrder(b.p.Value)
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, big.NewInt(0).SetBytes(v))
}
// Version --
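Byte-order mistakes around this value are easy to make, so here is a self-contained sketch of the conversion the new lines perform, under the stated assumption that the bid value arrives as a little-endian uint256 (the SSZ convention) while big.Int.SetBytes expects big-endian. reverseByteOrder is a local stand-in for Prysm's bytesutil.ReverseByteOrder.

package main

import (
	"fmt"
	"math/big"
)

// reverseByteOrder returns a copy of b with the byte order flipped,
// converting between little-endian and big-endian representations.
func reverseByteOrder(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// 1 ETH = 10^18 wei = 0x0de0b6b3a7640000, laid out little-endian:
	le := []byte{0x00, 0x00, 0x64, 0xa7, 0xb3, 0xb6, 0xe0, 0x0d}
	// big.Int.SetBytes interprets bytes as big-endian, so reverse first.
	v := new(big.Int).SetBytes(reverseByteOrder(le))
	fmt.Println(v) // 1000000000000000000
}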


@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@@ -20,7 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v3/network"
"github.com/prysmaticlabs/prysm/v3/network/authorization"
v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
log "github.com/sirupsen/logrus"
@@ -89,7 +89,7 @@ type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.SignedBeaconBlock) (interfaces.ExecutionData, error)
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
Status(ctx context.Context) error
}
@@ -277,7 +277,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.SignedBeaconBlock) (interfaces.ExecutionData, error) {
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if !sb.IsBlinded() {
return nil, errNotBlinded
}
@@ -295,7 +295,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.SignedBea
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
@@ -322,7 +325,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.SignedBea
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
@@ -335,35 +341,12 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.SignedBea
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p)
return blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
}
// SubmitBlindedBlockCapella calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full ExecutionPayloadCapella used to create the blinded block.
func (c *Client) SubmitBlindedBlockCapella(ctx context.Context, sb *ethpb.SignedBlindedBeaconBlockCapella) (*v1.ExecutionPayloadCapella, error) {
v := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: sb}
body, err := json.Marshal(v)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
return ep.ToProto()
}
// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
// response, and an error response may have an error message. This method will return a nil value for error in the
// happy path, and an error with information about the server response body for a non-200 response.
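The versionOpt closures above are an instance of the functional request-option pattern: each option mutates the outgoing *http.Request before it is sent, which is how the Eth-Consensus-Version header gets attached per fork. A condensed sketch of that pattern, with stand-in names rather than Prysm's actual client internals:

package main

import (
	"fmt"
	"net/http"
)

// reqOption mutates a request before it is sent.
type reqOption func(*http.Request)

// withConsensusVersion attaches the Eth-Consensus-Version header.
func withConsensusVersion(v string) reqOption {
	return func(r *http.Request) {
		r.Header.Add("Eth-Consensus-Version", v)
	}
}

// newRequest builds a POST request and applies each option in order.
func newRequest(url string, opts ...reqOption) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		return nil, err
	}
	for _, o := range opts {
		o(req)
	}
	return req, nil
}

func main() {
	req, err := newRequest("http://localhost/eth/v1/builder/blinded_blocks",
		withConsensusVersion("capella"))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Eth-Consensus-Version")) // capella
}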


@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net/http"
"net/url"
"strconv"
@@ -199,6 +200,9 @@ func TestClient_GetHeader(t *testing.T) {
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
@@ -225,6 +229,12 @@ func TestClient_GetHeader(t *testing.T) {
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
@@ -253,6 +263,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -278,6 +289,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),


@@ -40,7 +40,7 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.SignedBeaconBlock) (interfaces.ExecutionData, error) {
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
return nil, nil
}


@@ -22,7 +22,6 @@ go_library(
"receive_attestation.go",
"receive_block.go",
"service.go",
"state_balance_cache.go",
"weak_subjectivity_checks.go",
],
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain",


@@ -52,7 +52,7 @@ type GenesisFetcher interface {
type HeadFetcher interface {
HeadSlot() primitives.Slot
HeadRoot(ctx context.Context) ([]byte, error)
HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadState(ctx context.Context) (state.BeaconState, error)
HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error)
@@ -84,7 +84,7 @@ type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}
@@ -96,24 +96,32 @@ type OptimisticModeFetcher interface {
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
// BestJustifiedCheckpt returns the best justified checkpoint from store.
func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
cp := s.ForkChoicer().BestJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}
@@ -158,7 +166,7 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
// HeadBlock returns the head block of the chain.
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
func (s *Service) HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error) {
func (s *Service) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -277,6 +285,8 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
@@ -289,6 +299,8 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
return s.cfg.ForkChoiceStore.Tips()
}
@@ -330,9 +342,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
headRoot := s.head.root
s.headLock.RUnlock()
if s.cfg.ForkChoiceStore.AllTipsAreInvalid() {
return true, nil
}
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
if err == nil {
return optimistic, nil
@@ -348,6 +359,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
if s.ForkChoicer().FinalizedCheckpoint().Root == root {
return true
}
@@ -359,10 +372,21 @@ func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
}
// InForkchoice returns true if the given root is found in forkchoice
// This in particular means that the blockroot is a descendant of the
// finalized checkpoint
func (s *Service) InForkchoice(root [32]byte) bool {
s.ForkChoicer().RLock()
defer s.ForkChoicer().RUnlock()
return s.ForkChoicer().HasNode(root)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
s.ForkChoicer().RLock()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
s.ForkChoicer().RUnlock()
if err == nil {
return optimistic, nil
}
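Every accessor in this hunk follows the same discipline: take the forkchoice read lock, read, release. A minimal sketch of that pattern, assuming a toy store rather than Prysm's real forkchoice types:

package main

import (
	"fmt"
	"sync"
)

type forkchoiceStore struct {
	mu    sync.RWMutex
	nodes map[[32]byte]bool
}

// hasNode is the raw read; callers must already hold mu.
func (f *forkchoiceStore) hasNode(root [32]byte) bool {
	return f.nodes[root]
}

// InForkchoice owns the locking, mirroring the shape of Service.InForkchoice
// above: the read lock is held for exactly the duration of the read.
func (f *forkchoiceStore) InForkchoice(root [32]byte) bool {
	f.mu.RLock()
	defer f.mu.RUnlock()
	return f.hasNode(root)
}

func main() {
	f := &forkchoiceStore{nodes: map[[32]byte]bool{{1}: true}}
	fmt.Println(f.InForkchoice([32]byte{1})) // true
	fmt.Println(f.InForkchoice([32]byte{2})) // false
}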


@@ -96,7 +96,7 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
cp = service.CurrentJustifiedCheckpt()
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
// check that forkchoice has the right genesis root as the node root
root, err := fcs.Head(ctx, []uint64{})
root, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, service.originBlockRoot, root)
@@ -114,8 +114,12 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(cp))
jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, jroot))
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, cp))
jp := service.CurrentJustifiedCheckpt()
assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
@@ -127,7 +131,7 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
require.NoError(t, err)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.Block().SetSlot(100)
b.SetSlot(100)
c.head = &head{block: b, state: s}
assert.Equal(t, primitives.Slot(100), c.HeadSlot())
}


@@ -21,14 +21,12 @@ var (
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errNilStateFromStategen is returned when a nil state is returned from the state generator.
errNilStateFromStategen = errors.New("justified state can't be nil")
// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
)
// An invalid block is the block that fails state transition based on the core protocol rules.
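The rename from errNotDescendantOfFinalized to ErrNotDescendantOfFinalized exports the sentinel so other packages can match it. A small sketch of the pattern, with invalidBlock as a stand-in for the unexported wrapper type in this package:

package main

import (
	"errors"
	"fmt"
)

// invalidBlock marks an error as a core-protocol block rejection.
type invalidBlock struct{ error }

var ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}

func main() {
	err := fmt.Errorf("processing block: %w", ErrNotDescendantOfFinalized)
	// errors.Is matches the exported sentinel through the wrap chain.
	fmt.Println(errors.Is(err, ErrNotDescendantOfFinalized)) // true
	// errors.As recovers the invalidBlock marker type itself.
	var ib invalidBlock
	fmt.Println(errors.As(err, &ib)) // true
}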


@@ -31,7 +31,7 @@ var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
headBlock interfaces.ReadOnlyBeaconBlock
}
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -98,7 +98,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
return nil, nil
}
r, err := s.cfg.ForkChoiceStore.Head(ctx, s.justifiedBalances.balances)
r, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
@@ -182,7 +182,7 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) (bool, error) {
postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
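This hunk shows Head dropping its balances argument; elsewhere in the diff the tests call fcs.SetBalancesByRooter to give the store a callback instead. A simplified sketch of that inversion, with stand-in types rather than the real forkchoice package:

package main

import (
	"context"
	"fmt"
)

// balancesByRooter fetches validator balances for a given block root.
type balancesByRooter func(ctx context.Context, root [32]byte) ([]uint64, error)

type store struct {
	justifiedRoot [32]byte
	balancesFn    balancesByRooter
}

func (s *store) SetBalancesByRooter(fn balancesByRooter) { s.balancesFn = fn }

// Head fetches the balances it needs through the callback, instead of trusting
// every caller to pass a consistent snapshot as an argument.
func (s *store) Head(ctx context.Context) ([32]byte, error) {
	balances, err := s.balancesFn(ctx, s.justifiedRoot)
	if err != nil {
		return [32]byte{}, err
	}
	_ = balances // the real store would weigh fork choice branches with these
	return s.justifiedRoot, nil
}

func main() {
	s := &store{justifiedRoot: [32]byte{'j'}}
	s.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) {
		return []uint64{50, 100, 200}, nil
	})
	head, err := s.Head(context.Background())
	fmt.Println(head[0] == 'j', err) // true <nil>
}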


@@ -131,7 +131,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
tests := []struct {
name string
blk interfaces.BeaconBlock
blk interfaces.ReadOnlyBeaconBlock
headRoot [32]byte
finalizedRoot [32]byte
justifiedRoot [32]byte
@@ -140,7 +140,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}{
{
name: "phase0 block",
blk: func() interfaces.BeaconBlock {
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
require.NoError(t, err)
return b
@@ -148,7 +148,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "altair block",
blk: func() interfaces.BeaconBlock {
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}})
require.NoError(t, err)
return b
@@ -156,7 +156,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "not execution block",
blk: func() interfaces.BeaconBlock {
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
@@ -177,7 +177,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "happy case: finalized root is altair block",
blk: func() interfaces.BeaconBlock {
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
@@ -191,7 +191,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "happy case: finalized root is bellatrix block",
blk: func() interfaces.BeaconBlock {
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
@@ -205,7 +205,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "forkchoice updated with optimistic block",
blk: func() interfaces.BeaconBlock {
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
@@ -220,7 +220,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "forkchoice updated with invalid block",
blk: func() interfaces.BeaconBlock {
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
@@ -302,8 +302,8 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
@@ -419,12 +419,15 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
require.NoError(t, err)
bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
require.NoError(t, err)
@@ -450,9 +453,12 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
fcs.SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(jc))
headRoot, err := fcs.Head(ctx, []uint64{50, 100, 200})
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, jc))
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
headRoot, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, brg, headRoot)
@@ -476,7 +482,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.Equal(t, brf, InvalidBlockRoot(err))
// Ensure Head is D
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
headRoot, err = fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, brd, headRoot)
@@ -548,7 +554,7 @@ func Test_NotifyNewPayload(t *testing.T) {
postState bstate.BeaconState
invalidBlock bool
isValidPayload bool
blk interfaces.SignedBeaconBlock
blk interfaces.ReadOnlySignedBeaconBlock
newPayloadErr error
errString string
name string
@@ -594,7 +600,7 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "altair pre state, happy case",
postState: bellatrixState,
blk: func() interfaces.SignedBeaconBlock {
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
@@ -613,7 +619,7 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "not at merge transition",
postState: bellatrixState,
blk: func() interfaces.SignedBeaconBlock {
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
@@ -639,7 +645,7 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "happy case",
postState: bellatrixState,
blk: func() interfaces.SignedBeaconBlock {
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
@@ -658,7 +664,7 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "undefined error from ee",
postState: bellatrixState,
blk: func() interfaces.SignedBeaconBlock {
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
@@ -678,7 +684,7 @@ func Test_NotifyNewPayload(t *testing.T) {
{
name: "invalid block hash error from ee",
postState: bellatrixState,
blk: func() interfaces.SignedBeaconBlock {
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
@@ -886,7 +892,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
fjc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(fjc))
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, fjc))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(fjc))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
@@ -953,6 +959,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
twentyfc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, genesisRoot, params.BeaconConfig().ZeroHash, twentyjc, twentyfc)
require.NoError(t, err)
fcs.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validRoot))

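A pattern repeats through these test updates: forkchoice's Head no longer takes a balance slice, and tests instead register a balance provider via SetBalancesByRooter before calling Head(ctx). The following is a minimal, self-contained sketch of that wiring, assuming the store pulls balances through the callback when computing head; miniForkchoice and BalancesByRooter are illustrative names, not Prysm's actual types.

package main

import (
	"context"
	"fmt"
)

// BalancesByRooter mirrors the callback shape used in the hunks above: given
// a block root, return the validator balances used to weight forkchoice votes.
type BalancesByRooter func(ctx context.Context, root [32]byte) ([]uint64, error)

// miniForkchoice is a toy stand-in for the real store, reduced to the
// balance-provider wiring exercised by these tests.
type miniForkchoice struct {
	balancesFn BalancesByRooter
	justified  [32]byte
}

func (f *miniForkchoice) SetBalancesByRooter(fn BalancesByRooter) { f.balancesFn = fn }

// Head computes a head root; balances are pulled through the registered
// callback instead of being passed in by the caller.
func (f *miniForkchoice) Head(ctx context.Context) ([32]byte, error) {
	if f.balancesFn == nil {
		return [32]byte{}, fmt.Errorf("no balances provider registered")
	}
	balances, err := f.balancesFn(ctx, f.justified)
	if err != nil {
		return [32]byte{}, err
	}
	_ = balances // a real store weights LMD votes with these balances
	return f.justified, nil
}

func main() {
	fc := &miniForkchoice{justified: [32]byte{'j'}}
	// Tests stub the provider with fixed balances, as in the hunks above.
	fc.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) {
		return []uint64{50, 100, 200}, nil
	})
	head, err := fc.Head(context.Background())
	fmt.Printf("head=%x err=%v\n", head, err)
}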

@@ -25,7 +25,7 @@ func (s *Service) isNewHead(r [32]byte) bool {
return r != currentHeadRoot || r == [32]byte{}
}
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.SignedBeaconBlock, error) {
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
if !s.hasBlockInInitSyncOrDB(ctx, r) {
return nil, nil, errors.New("block does not exist")
}
@@ -65,6 +65,11 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
// Only need to prune attestations from pool if the head has changed.
if err := s.pruneAttsFromPool(headBlock); err != nil {
return err
}
}
return nil

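The hunk above moves attestation pruning into the head-change path: once a new head is saved, the head block's attestations are dropped from the pool unconditionally, replacing the old canonical-root check. A rough sketch of the pruning step itself, under stand-in types (attestation, pool, and the delete helpers are placeholders, not the real mempool API):

package main

import "fmt"

// attestation and pool are toy stand-ins for the real attestation mempool;
// the real pool keeps aggregated and unaggregated attestations separately.
type attestation struct {
	aggregated bool
	id         int
}

type pool struct{ atts map[int]attestation }

func (p *pool) deleteAggregated(a attestation)   { delete(p.atts, a.id) }
func (p *pool) deleteUnaggregated(a attestation) { delete(p.atts, a.id) }

// pruneAttsFromPool drops every attestation carried by the new head block,
// mirroring the simplified helper: no canonicality check is needed, since
// this now only runs when the head has actually changed.
func pruneAttsFromPool(p *pool, blockAtts []attestation) {
	for _, att := range blockAtts {
		if att.aggregated {
			p.deleteAggregated(att)
		} else {
			p.deleteUnaggregated(att)
		}
	}
}

func main() {
	p := &pool{atts: map[int]attestation{1: {aggregated: true, id: 1}}}
	pruneAttsFromPool(p, []attestation{{aggregated: true, id: 1}})
	fmt.Println("remaining:", len(p.atts)) // remaining: 0
}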

@@ -28,14 +28,7 @@ import (
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
// This function is only used in spec tests; it saves the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
jp := s.CurrentJustifiedCheckpt()
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(jp.Root))
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", jp.Root)
return errors.Wrap(err, msg)
}
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
return errors.Wrap(err, "could not update head")
}
@@ -52,14 +45,15 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
// This defines the current chain service's view of head.
type head struct {
root [32]byte // current head root.
block interfaces.SignedBeaconBlock // current head block.
state state.BeaconState // current head state.
root [32]byte // current head root.
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
// This saves head info to the local service cache; it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
// Caller of the method MUST acquire a lock on forkchoice.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
@@ -129,7 +123,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
reorgDistance.Observe(float64(dis))
reorgDepth.Observe(float64(dep))
isOptimistic, err := s.IsOptimistic(ctx)
isOptimistic, err := s.ForkChoicer().IsOptimistic(newHeadRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
@@ -177,7 +171,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update the canonical root mapping. It does not save the head block
// root in DB. With the inception of the initial-sync-cache-state flag, it uses the finalized
// checkpoint as an anchor to resume sync, so the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}
@@ -200,7 +194,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.SignedBeaconBlo
}
// This sets the head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -220,7 +214,7 @@ func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, sta
// This sets the head view object which is used to track the head slot, root, block and state. The method
// assumes that the state being passed into it will not be modified by any other
// caller holding the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.SignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -260,7 +254,7 @@ func (s *Service) headRoot() [32]byte {
// This returns the head block.
// It does a full copy of the head block for immutability.
// This is a lock-free version.
func (s *Service) headBlock() (interfaces.SignedBeaconBlock, error) {
func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
return s.head.block.Copy()
}

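saveHead's new contract ("Caller of the method MUST acquire a lock on forkchoice") reflects the external-locking scheme this change set introduces. Below is a toy sketch of the two-lock discipline, assuming one coarse forkchoice lock taken by callers and an internal headLock around the cached head; all names are illustrative, not Prysm's.

package main

import (
	"context"
	"fmt"
	"sync"
)

// service is a toy model of the chain service's two locks: forkchoiceLock
// guards forkchoice reads/writes and is taken by callers; headLock guards
// the cached head and is taken internally.
type service struct {
	forkchoiceLock sync.RWMutex
	headLock       sync.RWMutex
	headRoot       [32]byte
}

// saveHead assumes the caller already holds forkchoiceLock and only takes
// headLock itself, around the head mutation.
func (s *service) saveHead(_ context.Context, newHeadRoot [32]byte) {
	s.headLock.Lock()
	s.headRoot = newHeadRoot
	s.headLock.Unlock()
}

// updateHead is the caller-side shape: lock forkchoice externally, then call
// saveHead, which no longer locks forkchoice on its own.
func (s *service) updateHead(ctx context.Context, newHeadRoot [32]byte) {
	s.forkchoiceLock.Lock()
	defer s.forkchoiceLock.Unlock()
	s.saveHead(ctx, newHeadRoot)
}

func main() {
	s := &service{}
	s.updateHead(context.Background(), [32]byte{'a'})
	fmt.Printf("head=%x\n", s.headRoot)
}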

@@ -10,6 +10,7 @@ import (
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/features"
@@ -152,20 +153,6 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.LogsContain(t, hook, "depth=1")
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
ctx := context.Background()
state, _ := util.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
balances, err := service.justifiedBalances.get(ctx, r)
require.NoError(t, err)
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
}
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
@@ -559,7 +546,9 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
newRoot, err := service.cfg.ForkChoiceStore.Head(ctx, []uint64{1, 2})
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{1, 2}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
newRoot, err := service.cfg.ForkChoiceStore.Head(ctx)
require.NoError(t, err)
require.NotEqual(t, headRoot, newRoot)
require.Equal(t, headRoot, service.headRoot())


@@ -10,7 +10,7 @@ import (
// This saves a beacon block to the initial sync blocks cache. It rate-limits how many blocks
// the cache keeps in memory (2 epochs' worth of blocks) and saves them to the DB when it hits this limit.
func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock) error {
s.initSyncBlocksLock.Lock()
s.initSyncBlocks[r] = b
numBlocks := len(s.initSyncBlocks)
@@ -43,7 +43,7 @@ func (s *Service) hasBlockInInitSyncOrDB(ctx context.Context, r [32]byte) bool {
// Returns the block for a given root `r` from either the initial sync blocks cache or the DB.
// An error is returned if the block is not found in either the cache or the DB.
func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.SignedBeaconBlock, error) {
func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
s.initSyncBlocksLock.RLock()
// Check cache first because it's faster.
@@ -64,11 +64,11 @@ func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.SignedBe
// This retrieves all the beacon blocks from the initial sync blocks cache; the returned
// blocks are unordered.
func (s *Service) getInitSyncBlocks() []interfaces.SignedBeaconBlock {
func (s *Service) getInitSyncBlocks() []interfaces.ReadOnlySignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
blks := make([]interfaces.SignedBeaconBlock, 0, len(s.initSyncBlocks))
blks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, len(s.initSyncBlocks))
for _, b := range s.initSyncBlocks {
blks = append(blks, b)
}
@@ -79,5 +79,5 @@ func (s *Service) getInitSyncBlocks() []interfaces.SignedBeaconBlock {
func (s *Service) clearInitSyncBlocks() {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks = make(map[[32]byte]interfaces.SignedBeaconBlock)
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
}

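The init-sync cache changes above are a pure type rename, but the cache's documented behavior (hold roughly two epochs of blocks in memory, flush to the DB at the limit) is worth a sketch. A minimal version under stand-in types; block, the 64-block limit, and the flush hook are placeholders, not the real implementation:

package main

import "sync"

// initSyncCache is a toy version of the init-sync block cache: blocks are
// held in memory and flushed once the cache exceeds a limit of roughly two
// epochs of blocks. block and the flush hook stand in for the real types.
type block struct{ slot uint64 }

type initSyncCache struct {
	mu     sync.RWMutex
	blocks map[[32]byte]block
	limit  int
	flush  func(map[[32]byte]block) // e.g. batch-save to the DB
}

func (c *initSyncCache) save(root [32]byte, b block) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.blocks[root] = b
	if len(c.blocks) >= c.limit {
		c.flush(c.blocks) // hit the limit: persist and reset
		c.blocks = make(map[[32]byte]block)
	}
}

func (c *initSyncCache) get(root [32]byte) (block, bool) {
	c.mu.RLock() // check the cache first because it's faster than the DB
	defer c.mu.RUnlock()
	b, ok := c.blocks[root]
	return b, ok
}

func main() {
	c := &initSyncCache{
		blocks: make(map[[32]byte]block),
		limit:  64, // 2 epochs * 32 slots on mainnet
		flush:  func(map[[32]byte]block) { /* persist to DB */ },
	}
	c.save([32]byte{'r'}, block{slot: 1})
	_, _ = c.get([32]byte{'r'})
}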

@@ -21,7 +21,7 @@ import (
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b interfaces.BeaconBlock) error {
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
@@ -66,7 +66,7 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
return nil
}
func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
startTime, err := slots.ToTime(genesisTime, block.Slot())
if err != nil {
return err
@@ -102,7 +102,7 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
}
// logs payload related data every slot.
func logPayload(block interfaces.BeaconBlock) error {
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")


@@ -25,11 +25,11 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
tests := []struct {
name string
b func() interfaces.BeaconBlock
b func() interfaces.ReadOnlyBeaconBlock
want string
}{
{name: "empty block body",
b: func() interfaces.BeaconBlock {
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
require.NoError(t, err)
return wb
@@ -37,7 +37,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
},
{name: "has attestation",
b: func() interfaces.BeaconBlock {
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}})
require.NoError(t, err)
return wb
@@ -45,7 +45,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has deposit",
b: func() interfaces.BeaconBlock {
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(
&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
@@ -56,7 +56,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: func() interfaces.BeaconBlock {
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}})
require.NoError(t, err)
@@ -65,7 +65,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
},
{name: "has proposer slashing",
b: func() interfaces.BeaconBlock {
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}})
require.NoError(t, err)
@@ -74,7 +74,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
},
{name: "has exit",
b: func() interfaces.BeaconBlock {
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}})
require.NoError(t, err)
@@ -83,7 +83,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
},
{name: "has everything",
b: func() interfaces.BeaconBlock {
b: func() interfaces.ReadOnlyBeaconBlock {
wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}},
@@ -96,7 +96,7 @@ func Test_logStateTransitionData(t *testing.T) {
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: func() interfaces.BeaconBlock { return wrappedPayloadBlk },
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
},
}


@@ -130,14 +130,6 @@ var (
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_hit",
Help: "Count the number of state balance cache hits.",
})
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_valid_node_count",
Help: "Count the number of valid nodes after newPayload EE call",
@@ -353,7 +345,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
return nil
}
func reportAttestationInclusion(blk interfaces.BeaconBlock) {
func reportAttestationInclusion(blk interfaces.ReadOnlyBeaconBlock) {
for _, att := range blk.Body().Attestations() {
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
}


@@ -1,13 +1,10 @@
package blockchain
import (
"context"
"errors"
"testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
)
@@ -25,26 +22,5 @@ func testServiceOptsWithDB(t *testing.T) []Option {
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
return []Option{
withStateBalanceCache(satisfactoryStateBalanceCache()),
}
}
type mockStateByRooter struct {
state state.BeaconState
err error
}
var _ stateByRooter = &mockStateByRooter{}
func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return m.state, m.err
}
// returns an instance of the state balance cache that can be used
// to satisfy the requirement for one in NewService, but which will
// always return an error if used.
func satisfactoryStateBalanceCache() *stateBalanceCache {
err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
return []Option{}
}


@@ -156,13 +156,6 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
}
}
func withStateBalanceCache(c *stateBalanceCache) Option {
return func(s *Service) error {
s.justifiedBalances = c
return nil
}
}
// WithFinalizedStateAtStartUp to store finalized state at start up.
func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return func(s *Service) error {


@@ -22,7 +22,7 @@ import (
// validateMergeBlock validates the terminal block hash in the event of manual overrides before checking the total difficulty.
//
// def validate_merge_block(block: BeaconBlock) -> None:
// def validate_merge_block(block: ReadOnlyBeaconBlock) -> None:
//
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
@@ -38,7 +38,7 @@ import (
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBeaconBlock) error {
func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}

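In the common (no-override) case, the spec pseudocode in the comment above reduces to checking that the PoW block crosses terminal total difficulty while its parent does not. A hedged sketch of just that check; powBlock and the TTD constant here are illustrative stand-ins (mainnet's value is far larger), and the TERMINAL_BLOCK_HASH override path is omitted:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// powBlock stands in for the execution-layer block data the real check
// fetches from the engine; only the field used here is modeled.
type powBlock struct {
	totalDifficulty *big.Int
}

// Illustrative stand-in only; not mainnet's terminal total difficulty.
var terminalTotalDifficulty = big.NewInt(1_000_000)

// isValidTerminalPowBlock: the block reaches TTD and its parent does not,
// i.e. it is the first block at or past the terminal difficulty.
func isValidTerminalPowBlock(block, parent *powBlock) bool {
	reached := block.totalDifficulty.Cmp(terminalTotalDifficulty) >= 0
	parentBelow := parent.totalDifficulty.Cmp(terminalTotalDifficulty) < 0
	return reached && parentBelow
}

func validateMergeBlock(block, parent *powBlock) error {
	if block == nil || parent == nil {
		return errors.New("pow block or parent not found") // spec: assert not None
	}
	if !isValidTerminalPowBlock(block, parent) {
		return errors.New("invalid terminal pow block")
	}
	return nil
}

func main() {
	parent := &powBlock{totalDifficulty: big.NewInt(999_999)}
	blk := &powBlock{totalDifficulty: big.NewInt(1_000_001)}
	fmt.Println(validateMergeBlock(blk, parent)) // <nil>
}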

@@ -249,7 +249,7 @@ func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.Block().SetSlot(1)
require.NoError(t, blk.Block().Body().SetExecution(wrapped))
blk.SetSlot(1)
require.NoError(t, blk.SetExecution(wrapped))
require.NoError(t, service.validateMergeBlock(ctx, blk))
}


@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -324,94 +323,3 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.ErrorContains(t, "Root and finalized store are not consistent", err)
}
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: r32}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
}
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r32}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc))
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.NoError(t, err)
}


@@ -47,7 +47,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
//
// Spec pseudocode definition:
//
// def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
// def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
// block = signed_block.message
// # Parent block must be known
// assert block.parent_root in store.block_states
@@ -93,7 +93,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
// store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
@@ -107,6 +107,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
// Verify that the parent block is in forkchoice
if !s.ForkChoicer().HasNode(b.ParentRoot()) {
return ErrNotDescendantOfFinalized
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
@@ -186,14 +191,8 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
}
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
@@ -219,10 +218,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
@@ -317,7 +312,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock,
blockRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -453,7 +448,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
}
// Insert all nodes but the last one to forkchoice
if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
@@ -537,7 +532,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds the block into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState) error {
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
defer span.End()
@@ -554,7 +549,7 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfac
// This feeds the attestations included in the block into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.BeaconBlock, st state.BeaconState) error {
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
@@ -577,6 +572,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Be
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it is the caller's responsibility to ensure the slashing object is valid.
// This function requires a write lock on forkchoice.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
@@ -588,7 +584,7 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
@@ -600,17 +596,8 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
return nil
}
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
canonical, err := s.IsCanonical(ctx, r)
if err != nil {
return err
}
if !canonical {
return nil
}
// This removes the attestations in block `b` from the attestation mem pool.
func (s *Service) pruneAttsFromPool(b interfaces.ReadOnlySignedBeaconBlock) error {
atts := b.Block().Body().Attestations()
for _, att := range atts {
if helpers.IsAggregated(att) {
@@ -627,7 +614,7 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}
// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) error {
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) error {
// Skip validation if block is older than Bellatrix.
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil
@@ -706,26 +693,31 @@ func (s *Service) fillMissingBlockPayloadId(ctx context.Context, ti time.Time) e
if !atHalfSlot(ti) {
return nil
}
if s.CurrentSlot() == s.cfg.ForkChoiceStore.HighestReceivedBlockSlot() {
s.ForkChoicer().RLock()
highestReceivedSlot := s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
s.ForkChoicer().RUnlock()
if s.CurrentSlot() == highestReceivedSlot {
return nil
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists a proposer for the next slot, but we haven't called fcu w/ payload attribute yet.
if has && id == [8]byte{} {
missedPayloadIDFilledCount.Inc()
headBlock, err := s.headBlock()
if err != nil {
return err
} else {
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headRoot: s.headRoot(),
headBlock: headBlock.Block(),
}); err != nil {
return err
}
}
if !has || id != [8]byte{} {
return nil
}
return nil
missedPayloadIDFilledCount.Inc()
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
return err
}
headState := s.headState(ctx)
headRoot := s.headRoot()
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: headRoot,
headBlock: headBlock.Block(),
})
return err
}
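The rewrite of fillMissingBlockPayloadId above flattens the nested if/else into guard clauses and brackets each shared read with a short read lock. A toy rendering of that control flow; the field and method names are placeholders for Prysm's, and the proposer payload-ID cache is reduced to two fields:

package main

import (
	"context"
	"fmt"
	"sync"
)

// service models only what the restructured function touches: shared state
// read under short read locks, followed by early-return guard clauses.
type service struct {
	forkchoiceLock      sync.RWMutex
	headLock            sync.RWMutex
	currentSlot         uint64
	highestReceivedSlot uint64
	headRoot            [32]byte
	hasProposer         bool    // a proposer is registered for the next slot
	payloadID           [8]byte // zero value: no FCU with payload attributes sent yet
}

func (s *service) fillMissingBlockPayloadID(ctx context.Context) error {
	s.forkchoiceLock.RLock()
	highest := s.highestReceivedSlot
	s.forkchoiceLock.RUnlock()
	if s.currentSlot == highest {
		return nil // a block already arrived this slot; nothing to fill
	}
	if !s.hasProposer || s.payloadID != [8]byte{} {
		return nil // no proposer next slot, or the payload ID is already set
	}
	s.headLock.RLock()
	root := s.headRoot
	s.headLock.RUnlock()
	return s.notifyForkchoiceUpdate(ctx, root)
}

func (s *service) notifyForkchoiceUpdate(_ context.Context, root [32]byte) error {
	fmt.Printf("fcu with head %x\n", root)
	return nil
}

func main() {
	s := &service{currentSlot: 2, highestReceivedSlot: 1, hasProposer: true}
	_ = s.fillMissingBlockPayloadID(context.Background())
}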

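Earlier in the same file, onBlock gains a guard: a block whose parent is not a forkchoice node is rejected with ErrNotDescendantOfFinalized. Since forkchoice is pruned at finalization, parent membership is a cheap proxy for "descends from the finalized checkpoint". A self-contained sketch of the guard, with illustrative types:

package main

import (
	"errors"
	"fmt"
)

var errNotDescendantOfFinalized = errors.New("not descendant of finalized checkpoint")

// forkchoice is a toy store: after finalization, pruning leaves only
// descendants of the finalized block as nodes.
type forkchoice struct{ nodes map[[32]byte]bool }

func (f *forkchoice) HasNode(root [32]byte) bool { return f.nodes[root] }

// onBlock accepts a block only if its parent is already in forkchoice,
// mirroring the guard added above.
func onBlock(f *forkchoice, parentRoot [32]byte) error {
	if !f.HasNode(parentRoot) {
		return errNotDescendantOfFinalized
	}
	// ...state transition, forkchoice insertion, head update...
	return nil
}

func main() {
	f := &forkchoice{nodes: map[[32]byte]bool{{'p'}: true}}
	fmt.Println(onBlock(f, [32]byte{'p'})) // <nil>
	fmt.Println(onBlock(f, [32]byte{'x'})) // not descendant of finalized checkpoint
}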

@@ -1,7 +1,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
@@ -14,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v3/math"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
@@ -28,7 +26,7 @@ func (s *Service) CurrentSlot() primitives.Slot {
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state from the DB. It verifies the pre state's validity and that the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b interfaces.BeaconBlock) (state.BeaconState, error) {
func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
defer span.End()
@@ -59,7 +57,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.BeaconBlock
}
// verifyBlkPreState validates that the input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBlock) error {
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
defer span.End()
@@ -71,10 +69,6 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBloc
return errors.New("could not reconstruct parent state")
}
if err := s.VerifyFinalizedBlkDescendant(ctx, parentRoot); err != nil {
return err
}
has, err := s.cfg.StateGen.HasState(ctx, parentRoot)
if err != nil {
return err
@@ -88,38 +82,9 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBloc
return nil
}
// VerifyFinalizedBlkDescendant validates if input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
defer span.End()
finalized := s.ForkChoicer().FinalizedCheckpoint()
fRoot := s.ensureRootNotZeros(finalized.Root)
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
}
bFinalizedRoot, err := s.ancestor(ctx, root[:], fSlot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
if bFinalizedRoot == nil {
return fmt.Errorf("no finalized block known for block %#x", bytesutil.Trunc(root[:]))
}
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
return invalidBlock{error: err}
}
return nil
}
// verifyBlkFinalizedSlot validates that the input block's slot is not less than or equal
// to the current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
func (s *Service) verifyBlkFinalizedSlot(b interfaces.ReadOnlyBeaconBlock) error {
finalized := s.ForkChoicer().FinalizedCheckpoint()
finalizedSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
@@ -201,7 +166,7 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot primitives.Slo
r := bytesutil.ToBytes32(root)
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
// This is the optimal outcome.
ar, err := s.ancestorByForkChoiceStore(ctx, r, slot)
ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
if err != nil {
// Try getting ancestor root from DB when failed to retrieve from fork choice store.
// This is the second line of defense for retrieving ancestor root.
@@ -211,38 +176,28 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot primitives.Slo
}
}
return ar, nil
}
// This retrieves an ancestor root using fork choice store. The look up is looping through the a flat array structure.
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
defer span.End()
if !s.cfg.ForkChoiceStore.HasParent(r) {
return nil, errors.New("could not find root in fork choice store")
}
root, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
return root[:], err
return ar[:], nil
}
// This retrieves an ancestor root using the DB. The lookup recursively walks the DB and is slower than the fork choice store lookup.
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.Slot) ([]byte, error) {
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.Slot) (root [32]byte, err error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
defer span.End()
root = [32]byte{}
// Stop recursive ancestry lookup if context is cancelled.
if ctx.Err() != nil {
return nil, ctx.Err()
err = ctx.Err()
return
}
signed, err := s.getBlock(ctx, r)
if err != nil {
return nil, err
return root, err
}
b := signed.Block()
if b.Slot() == slot || b.Slot() < slot {
return r[:], nil
return r, nil
}
return s.ancestorByDB(ctx, b.ParentRoot(), slot)
@@ -250,7 +205,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
// This retrieves missing blocks from the DB (i.e. blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.BeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
@@ -282,9 +237,9 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
return nil
}
if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
return errNotDescendantOfFinalized
return ErrNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
}
// inserts finalized deposits into our finalized deposit trie.

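ancestorByDB above now returns a fixed-size [32]byte instead of a slice while keeping the recursive walk. A runnable miniature of the same shape, with getBlock stubbed in place of the real cache/DB lookup:

package main

import (
	"context"
	"errors"
	"fmt"
)

// blk stands in for a signed beacon block; only slot and parent are modeled.
type blk struct {
	slot   uint64
	parent [32]byte
}

// chain is a stub for the block cache/DB.
var chain = map[[32]byte]blk{
	{'b'}: {slot: 2, parent: [32]byte{'a'}},
	{'a'}: {slot: 1},
}

func getBlock(r [32]byte) (blk, error) {
	b, ok := chain[r]
	if !ok {
		return blk{}, errors.New("block not found in cache or db")
	}
	return b, nil
}

// ancestorByDB walks parents recursively until it reaches a block at or
// below the target slot, returning a fixed-size root as in the new version.
func ancestorByDB(ctx context.Context, r [32]byte, slot uint64) (root [32]byte, err error) {
	if ctx.Err() != nil { // stop the recursive lookup if the context is cancelled
		err = ctx.Err()
		return
	}
	b, err := getBlock(r)
	if err != nil {
		return root, err
	}
	if b.slot <= slot {
		return r, nil
	}
	return ancestorByDB(ctx, b.parent, slot)
}

func main() {
	r, err := ancestorByDB(context.Background(), [32]byte{'b'}, 1)
	fmt.Printf("ancestor=%x err=%v\n", r, err)
}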

@@ -65,6 +65,10 @@ func TestStore_OnBlock(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
ojc := &ethpb.Checkpoint{}
stfcs, root, err := prepareForkchoiceState(ctx, 0, validGenesisRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
@@ -73,11 +77,16 @@ func TestStore_OnBlock(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, random)
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
stfcs, root, err = prepareForkchoiceState(ctx, 2, bytesutil.ToBytes32(randomParentRoot2),
validGenesisRoot, [32]byte{'r'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, stfcs, root))
tests := []struct {
name string
@@ -108,10 +117,11 @@ func TestStore_OnBlock(t *testing.T) {
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot[:]
b.Block.Slot = 2
return b
}(),
s: st.Copy(),
wantErrString: "is not a descendant of the current finalized block",
wantErrString: "not descendant of finalized checkpoint",
},
{
name: "same slot as finalized block",
@@ -157,7 +167,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.SignedBeaconBlock
var blks []interfaces.ReadOnlySignedBeaconBlock
var blkRoots [][32]byte
for i := 0; i < 97; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -200,7 +210,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.SignedBeaconBlock
var blks []interfaces.ReadOnlySignedBeaconBlock
var blkRoots [][32]byte
blkCount := 4
for i := 0; i <= blkCount; i++ {
@@ -441,7 +451,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.Equal(t, errNotDescendantOfFinalized.Error(), err.Error())
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}
// blockTree1 constructs the following tree:
@@ -707,87 +717,6 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
assert.Equal(t, root, r, "Did not get wanted justified root")
}
func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
b := util.NewBeaconBlock()
b.Block.Slot = 32
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b)
b1 := util.NewBeaconBlock()
b1.Block.Slot = 32
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b1)
type args struct {
parentRoot [32]byte
finalizedRoot [32]byte
}
tests := []struct {
name string
args args
wantedErr string
invalidBlockRoot bool
}{
{
name: "could not get finalized block in block service cache",
args: args{
finalizedRoot: [32]byte{'a'},
},
wantedErr: "block not found in cache or db",
},
{
name: "could not get finalized block root in DB",
args: args{
finalizedRoot: r,
parentRoot: [32]byte{'a'},
},
wantedErr: "could not get finalized block root",
},
{
name: "is not descendant",
args: args{
finalizedRoot: r1,
parentRoot: r,
},
wantedErr: "is not a descendant of the current finalized block slot",
invalidBlockRoot: true,
},
{
name: "is descendant",
args: args{
finalizedRoot: r,
parentRoot: r,
},
},
}
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: tt.args.finalizedRoot, Epoch: 1}))
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
if tt.invalidBlockRoot {
require.Equal(t, true, IsInvalidBlock(err))
}
} else if err != nil {
assert.NoError(t, err)
}
}
}
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsNoDB()
@@ -1072,7 +1001,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}
}
func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
func TestRemoveBlockAttestationsInPool(t *testing.T) {
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
@@ -1089,29 +1018,10 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wsb))
require.NoError(t, service.pruneAttsFromPool(wsb))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wsb))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
}
func Test_getStateVersionAndPayload(t *testing.T) {
tests := []struct {
name string
@@ -2011,10 +1921,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
newfc := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, newfc)
newfc.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, newfc)),
WithStateGen(newStateGen),
WithForkChoiceStore(newfc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
@@ -2124,6 +2036,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.Equal(t, blk, nil)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
justified, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
require.NoError(t, err)
jroot := bytesutil.ToBytes32(justified.Root)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, jroot))
service.cfg.ForkChoiceStore.SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
require.NoError(t, service.StartFromSavedState(genesisState))
// Forkchoice has the genesisRoot loaded at startup
@@ -2135,7 +2053,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
// Check that the node's justified checkpoint does not agree with the
// last valid state's justified checkpoint
@@ -2152,68 +2069,17 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
// Check that the head is still INVALID and the node is optimistic
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.Equal(t, true, service.ForkChoicer().AllTipsAreInvalid())
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
// Import blocks 21--23
for i := 21; i < 24; i++ {
driftGenesisTime(service, int64(i), 0)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
}
// Head should still be INVALID and the node is optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.Equal(t, true, service.ForkChoicer().AllTipsAreInvalid())
// Import block 24, it should justify Epoch 3 and become HEAD, the node
// recovers
driftGenesisTime(service, 24, 0)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 24)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
err = service.onBlock(ctx, wsb, root)
require.NoError(t, err)
require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
headRoot, err = service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, root, bytesutil.ToBytes32(headRoot))
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, primitives.Epoch(3), sjc.Epoch)
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {


@@ -29,7 +29,7 @@ type AttestationStateFetcher interface {
type AttestationReceiver interface {
AttestationStateFetcher
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
InForkchoice([32]byte) bool
}
// AttestationTargetState returns the pre state of attestation.
@@ -60,33 +60,6 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
return nil
}
// VerifyFinalizedConsistency verifies input root is consistent with finalized store.
// When the input root is not be consistent with finalized store then we know it is not
// on the finalized check point that leads to current canonical chain and should be rejected accordingly.
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
// In this case, we could exit early to save on additional computation.
blockRoot := bytesutil.ToBytes32(root)
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
return nil
}
f := s.FinalizedCheckpt()
ss, err := slots.EpochStart(f.Epoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, root, ss)
if err != nil {
return err
}
if !bytes.Equal(f.Root, r) {
return errors.New("Root and finalized store are not consistent")
}
return nil
}
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// Wait for state to be initialized.
@@ -135,21 +108,14 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
// Only one process can process attestations and update head at a time.
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
start := time.Now()
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
s.processAttestations(ctx)
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
return err
}
start = time.Now()
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithError(err).Error("Could not compute head from new attestations")
}

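The interface swap above (VerifyFinalizedConsistency out, InForkchoice in) works for the same reason as the onBlock guard: with the store pruned at finalization, membership in forkchoice already implies consistency with the finalized checkpoint, so the ancestor walk is unnecessary. A one-screen sketch of the replacement check; store is an illustrative stand-in:

package main

import "fmt"

// store is a toy forkchoice whose node set, by construction, contains only
// descendants of the finalized checkpoint (pruning removes the rest).
type store struct{ nodes map[[32]byte]bool }

// InForkchoice: membership serves as the consistency check, replacing the
// deleted ancestor-walking VerifyFinalizedConsistency.
func (s *store) InForkchoice(root [32]byte) bool { return s.nodes[root] }

func main() {
	s := &store{nodes: map[[32]byte]bool{{'d'}: true}}
	fmt.Println(s.InForkchoice([32]byte{'d'})) // true: attestation target is viable
	fmt.Println(s.InForkchoice([32]byte{'x'})) // false: reject the attestation
}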

@@ -9,7 +9,9 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -125,19 +127,25 @@ func TestProcessAttestations_Ok(t *testing.T) {
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts = append(opts,
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(newStateGen),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fcs),
)
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 0, Root: service.originBlockRoot}))
copied := genesisState.Copy()
// Generate a new block for attesters to attest
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
@@ -158,9 +166,10 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
// Verify the target is in forkchoice
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
// Insert a new block to forkchoice
ojc := &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}
b, err := util.GenerateFullBlock(genesisState, pks, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
b.Block.ParentRoot = service.originBlockRoot[:]
@@ -182,19 +191,24 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
func TestService_UpdateHead_NoAtts(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts = append(opts,
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithStateGen(newStateGen),
WithForkChoiceStore(fcs),
)
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 0, Root: service.originBlockRoot}))
copied := genesisState.Copy()
// Generate a new block
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)

View File

@@ -8,6 +8,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
@@ -21,8 +22,8 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.SignedBeaconBlock, blkRoots [][32]byte) error
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
HasBlock(ctx context.Context, root [32]byte) bool
}
@@ -36,7 +37,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
receivedTime := time.Now()
@@ -45,6 +46,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
return err
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
@@ -63,11 +67,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
}
// Reports on block and fork choice metrics.
finalized := s.FinalizedCheckpt()
cp := s.ForkChoicer().FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
justified := s.CurrentJustifiedCheckpt()
cp = s.ForkChoicer().JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
@@ -86,10 +92,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.SignedBeaconBlock, blkRoots [][32]byte) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
// Apply the state transition to the incoming block batch, one block at a time.
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
err := errors.Wrap(err, "could not process block in batch")
@@ -114,14 +123,15 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
})
// Reports on blockCopy and fork choice metrics.
finalized := s.FinalizedCheckpt()
cp := s.ForkChoicer().FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
finalized := s.FinalizedCheckpt()
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}
@@ -142,10 +152,12 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}
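This pattern repeats throughout the diff: each public entry point that touches the store (`ReceiveBlock`, `ReceiveBlockBatch`, `UpdateHead`, `ReceiveAttesterSlashing`) now takes the forkchoice lock itself instead of relying on internal locking. A hypothetical helper capturing the convention:

// withForkchoice is a hypothetical illustration of the external-locking
// convention: one Lock/Unlock pair per top-level operation, held across
// every store access inside fn.
func withForkchoice(fc interface{ Lock(); Unlock() }, fn func() error) error {
	fc.Lock()
	defer fc.Unlock()
	return fn()
}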
func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
func (s *Service) handlePostBlockOperations(b interfaces.ReadOnlyBeaconBlock) error {
// Mark block exits as seen so we don't include same ones in future blocks.
for _, e := range b.Body().VoluntaryExits() {
s.cfg.ExitPool.MarkIncluded(e)
@@ -163,7 +175,7 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
return nil
}
func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
func (s *Service) handleBlockBLSToExecChanges(blk interfaces.ReadOnlyBeaconBlock) error {
if blk.Version() < version.Capella {
return nil
}
@@ -179,11 +191,12 @@ func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error
// This checks whether it's time to start saving hot state to DB.
// It's time when there have been `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
// Requires a read lock on forkchoice
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` from underflowing.
var sinceFinality primitives.Epoch
finalized := s.FinalizedCheckpt()
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}
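Since `primitives.Epoch` is unsigned, subtracting the finalized epoch from an earlier current epoch would wrap around, hence the explicit guard above. A minimal sketch of the check this function performs, using the 100-epoch threshold declared earlier in this file:

// Sketch of the non-finality check with the underflow guard made explicit.
func shouldSaveHotStates(currentEpoch, finalizedEpoch primitives.Epoch) bool {
	var sinceFinality primitives.Epoch
	if currentEpoch > finalizedEpoch { // unsigned subtraction would wrap otherwise
		sinceFinality = currentEpoch - finalizedEpoch
	}
	return sinceFinality >= epochsSinceFinalitySaveHotStateDB
}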

View File

@@ -97,7 +97,8 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
pending, err := s.cfg.ExitPool.PendingExits()
require.NoError(t, err)
if len(pending) != 0 {
t.Errorf(
"Did not mark the correct number of exits. Got %d pending but wanted %d",
@@ -261,7 +262,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
blks := []interfaces.SignedBeaconBlock{wsb}
blks := []interfaces.ReadOnlySignedBeaconBlock{wsb}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
if tt.wantedErr != "" {

View File

@@ -44,21 +44,19 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
processAttestationsLock sync.Mutex
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
}
// config options for the service.
@@ -94,7 +92,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
cfg: &config{ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache()},
}
for _, opt := range opts {
@@ -103,12 +101,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}
var err error
if srv.justifiedBalances == nil {
srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen)
if err != nil {
return nil, err
}
}
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err
@@ -207,7 +199,9 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
@@ -433,6 +427,8 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
s.originBlockRoot = genesisBlkRoot
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")
}
@@ -453,6 +449,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
// 1.) Check fork choice store.
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
// This function requires a lock on forkchoice.
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
if s.cfg.ForkChoiceStore.HasNode(root) {
return true

View File

@@ -443,7 +443,7 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
}
bb := util.NewBeaconBlock()
r, err := bb.Block.HashTreeRoot()

View File

@@ -1,81 +0,0 @@
package blockchain
import (
"context"
"errors"
"sync"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
)
type stateBalanceCache struct {
sync.Mutex
balances []uint64
root [32]byte
stateGen stateByRooter
}
type stateByRooter interface {
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
}
// newStateBalanceCache exists to remind us that stateBalanceCache needs a state gen
// to avoid nil pointer bugs when updating the cache in the read path (get())
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
if sg == nil {
return nil, errors.New("can't initialize state balance cache without stategen")
}
return &stateBalanceCache{stateGen: sg}, nil
}
// update is called by get() when the requested root doesn't match
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
if err != nil {
return nil, err
}
if justifiedState == nil || justifiedState.IsNil() {
return nil, errNilStateFromStategen
}
epoch := time.CurrentEpoch(justifiedState)
justifiedBalances := make([]uint64, justifiedState.NumValidators())
var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {
justifiedBalances[idx] = 0
}
return nil
}
if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil {
return nil, err
}
c.balances = justifiedBalances
c.root = justifiedRoot
return c.balances, nil
}
// getBalances takes an explicit justifiedRoot so it can invalidate the singleton cache key
// when the justified root changes, and takes a context so that the long-running stategen
// read path can connect to the upstream cancellation/timeout chain.
func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
c.Lock()
defer c.Unlock()
if justifiedRoot != [32]byte{} && justifiedRoot == c.root {
stateBalanceCacheHit.Inc()
return c.balances, nil
}
return c.update(ctx, justifiedRoot)
}
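The deleted type is a single-entry cache: hold the lock, return the cached balances when the requested root matches, otherwise recompute through stategen and replace the entry. The same shape as a generic sketch (illustrative only; the production replacement is the `SetBalancesByRooter` wiring shown earlier):

// Generic single-entry cache sketch matching the shape of the deleted
// stateBalanceCache: one cached key, recompute under the lock on a miss.
type singleEntryCache[K comparable, V any] struct {
	mu   sync.Mutex
	key  K
	val  V
	load func(context.Context, K) (V, error)
}

func (c *singleEntryCache[K, V]) get(ctx context.Context, k K) (V, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	var zero K
	if k != zero && k == c.key {
		return c.val, nil // hit: same key as the last request
	}
	v, err := c.load(ctx, k) // miss: recompute and replace the entry
	if err != nil {
		return v, err
	}
	c.key, c.val = k, v
	return v, nil
}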

View File

@@ -1,227 +0,0 @@
package blockchain
import (
"context"
"encoding/binary"
"errors"
"testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
type testStateOpt func(*ethpb.BeaconStateAltair)
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Validators = v
}
}
func testStateWithSlot(slot primitives.Slot) testStateOpt {
return func(a *ethpb.BeaconStateAltair) {
a.Slot = slot
}
}
func testStateFixture(opts ...testStateOpt) state.BeaconState {
a := &ethpb.BeaconStateAltair{}
for _, o := range opts {
o(a)
}
s, _ := state_native.InitializeFromProtoUnsafeAltair(a)
return s
}
func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator {
vs := make([]*ethpb.Validator, count)
var i uint32 = 0
for ; i < uint32(count); i++ {
pk := make([]byte, 48)
binary.LittleEndian.PutUint32(pk, i)
v := &ethpb.Validator{PublicKey: pk}
for _, o := range opts {
o(v)
}
vs[i] = v
}
return vs
}
func oddValidatorsExpired(currentSlot primitives.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ExitEpoch = primitives.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
} else {
v.ExitEpoch = primitives.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
}
}
}
func oddValidatorsQueued(currentSlot primitives.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ExitEpoch = primitives.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
pki := binary.LittleEndian.Uint64(v.PublicKey)
if pki%2 == 0 {
v.ActivationEpoch = primitives.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
} else {
v.ActivationEpoch = primitives.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}
}
func allValidatorsValid(currentSlot primitives.Slot) func(*ethpb.Validator) {
return func(v *ethpb.Validator) {
v.ActivationEpoch = primitives.Epoch(int(slots.ToEpoch(currentSlot)) - 1)
v.ExitEpoch = primitives.Epoch(int(slots.ToEpoch(currentSlot)) + 1)
}
}
func balanceIsKeyTimes2(v *ethpb.Validator) {
pki := binary.LittleEndian.Uint64(v.PublicKey)
v.EffectiveBalance = uint64(pki) * 2
}
func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsExpired(primitives.Slot(99)),
balanceIsKeyTimes2), balances
}
func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0}
return generateTestValidators(10,
oddValidatorsQueued(primitives.Slot(99)),
balanceIsKeyTimes2), balances
}
func testAllValidValidators() ([]*ethpb.Validator, []uint64) {
balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
return generateTestValidators(10,
allValidatorsValid(primitives.Slot(99)),
balanceIsKeyTimes2), balances
}
func TestStateBalanceCache(t *testing.T) {
type sbcTestCase struct {
err error
root [32]byte
sbc *stateBalanceCache
balances []uint64
name string
}
sentinelCacheMiss := errors.New("cache missed, as expected")
sentinelBalances := []uint64{1, 2, 3, 4, 5}
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
allValidValidators, allValidBalances := testAllValidValidators()
cases := []sbcTestCase{
{
root: bytesutil.ToBytes32([]byte{'A'}),
balances: sentinelBalances,
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
err: sentinelCacheMiss,
},
root: bytesutil.ToBytes32([]byte{'A'}),
balances: sentinelBalances,
},
name: "cache hit",
},
// this works by using a stateByRooter that returns a known error
// so really we're testing the miss by making sure stategen got called
// this also tells us stategen errors are propagated
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
err: sentinelCacheMiss,
},
root: bytesutil.ToBytes32([]byte{'B'}),
},
err: sentinelCacheMiss,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "cache miss",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{},
root: bytesutil.ToBytes32([]byte{'B'}),
},
err: errNilStateFromStategen,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "error for nil state upon cache miss",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(halfExpiredValidators)),
},
},
balances: halfExpiredBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "test filtering by exit epoch",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(halfQueuedValidators)),
},
},
balances: halfQueuedBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "test filtering by activation epoch",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(allValidValidators)),
},
},
balances: allValidBalances,
root: bytesutil.ToBytes32([]byte{'A'}),
name: "happy path",
},
{
sbc: &stateBalanceCache{
stateGen: &mockStateByRooter{
state: testStateFixture(
testStateWithSlot(99),
testStateWithValidators(allValidValidators)),
},
},
balances: allValidBalances,
root: [32]byte{},
name: "zero root",
},
}
ctx := context.Background()
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cache := c.sbc
cacheRootStart := cache.root
b, err := cache.get(ctx, c.root)
require.ErrorIs(t, err, c.err)
require.DeepEqual(t, c.balances, b)
if c.err != nil {
// if there was an error somewhere, the root should not have changed (unless it already matched)
require.Equal(t, cacheRootStart, cache.root)
} else {
// when successful, the cache should always end with a root matching the request
require.Equal(t, c.root, cache.root)
}
})
}
}

View File

@@ -32,6 +32,7 @@ var ErrNilState = errors.New("nil state")
// ChainService defines the mock interface for testing
type ChainService struct {
NotFinalized bool
Optimistic bool
ValidAttestation bool
ValidatorsRoot [32]byte
@@ -47,10 +48,10 @@ type ChainService struct {
InitSyncBlockRoots map[[32]byte]bool
DB db.Database
State state.BeaconState
Block interfaces.SignedBeaconBlock
Block interfaces.ReadOnlySignedBeaconBlock
VerifyBlkDescendantErr error
stateNotifier statefeed.Notifier
BlocksReceived []interfaces.SignedBeaconBlock
BlocksReceived []interfaces.ReadOnlySignedBeaconBlock
SyncCommitteeIndices []primitives.CommitteeIndex
blockNotifier blockfeed.Notifier
opNotifier opfeed.Notifier
@@ -170,7 +171,7 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
}
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interfaces.SignedBeaconBlock, _ [32]byte) error {
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte) error {
if s.State == nil {
return ErrNilState
}
@@ -198,7 +199,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock, _ [][32]byte) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock, _ [][32]byte) error {
if s.State == nil {
return ErrNilState
}
@@ -228,7 +229,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []interfaces.
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, _ [32]byte) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}
@@ -275,7 +276,7 @@ func (s *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
}
// HeadBlock mocks HeadBlock method in chain service.
func (s *ChainService) HeadBlock(context.Context) (interfaces.SignedBeaconBlock, error) {
func (s *ChainService) HeadBlock(context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
return s.Block, nil
}
@@ -389,11 +390,6 @@ func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
// VerifyFinalizedBlkDescendant mocks VerifyBlkDescendant and always returns nil.
func (s *ChainService) VerifyFinalizedBlkDescendant(_ context.Context, _ [32]byte) error {
return s.VerifyBlkDescendantErr
}
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
@@ -402,14 +398,6 @@ func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attes
return nil
}
// VerifyFinalizedConsistency mocks VerifyFinalizedConsistency and always returns nil.
func (s *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
if !bytes.Equal(r, s.FinalizedCheckPoint.Root) {
return errors.New("Root and finalized store are not consistent")
}
return nil
}
// ChainHeads mocks ChainHeads and always returns static values.
func (_ *ChainService) ChainHeads() ([][32]byte, []primitives.Slot) {
return [][32]byte{
@@ -459,6 +447,11 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
return s.Optimistic, nil
}
// InForkchoice mocks the same method in the chain service
func (s *ChainService) InForkchoice(_ [32]byte) bool {
return !s.NotFinalized
}
// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
s.OptimisticCheckRootReceived = root

View File

@@ -22,7 +22,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.ExecutionData, error)
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
Configured() bool
@@ -82,7 +82,7 @@ func (*Service) Stop() error {
}
// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.ExecutionData, error) {
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()
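For orientation, the two interface methods compose into the blinded-block round trip: fetch a header bid, build and sign a blinded block from it, then submit the signed blinded block so the relay reveals the full payload. A hedged sketch, where `buildAndSignBlinded` is a hypothetical stand-in for the validator-side steps:

// Hedged sketch of the round trip through BlockBuilder; buildAndSignBlinded
// is hypothetical and stands in for constructing and signing a blinded block
// from the returned bid.
func proposeViaBuilder(ctx context.Context, bb BlockBuilder, slot primitives.Slot,
	parentHash [32]byte, pubKey [48]byte) (interfaces.ExecutionData, error) {
	bid, err := bb.GetHeader(ctx, slot, parentHash, pubKey)
	if err != nil {
		return nil, err
	}
	blk, err := buildAndSignBlinded(bid) // hypothetical helper
	if err != nil {
		return nil, err
	}
	return bb.SubmitBlindedBlock(ctx, blk) // relay responds with the full payload
}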

View File

@@ -8,11 +8,13 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -2,14 +2,17 @@ package testing
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/api/client/builder"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
// MockBuilderService to mock builder.
@@ -19,6 +22,7 @@ type MockBuilderService struct {
PayloadCapella *v1.ExecutionPayloadCapella
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
ErrGetHeader error
ErrRegisterValidator error
}
@@ -29,7 +33,7 @@ func (s *MockBuilderService) Configured() bool {
}
// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.SignedBeaconBlock) (interfaces.ExecutionData, error) {
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
if s.Payload != nil {
w, err := blocks.WrappedExecutionPayload(s.Payload)
if err != nil {
@@ -37,7 +41,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
}
return w, s.ErrSubmitBlindedBlock
}
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella)
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
if err != nil {
return nil, errors.Wrap(err, "could not wrap capella payload")
}
@@ -45,10 +49,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
}
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(context.Context, primitives.Slot, [32]byte, [48]byte) (builder.SignedBid, error) {
func (s *MockBuilderService) GetHeader(ctx context.Context, slot primitives.Slot, hr [32]byte, pb [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
}
w, err := builder.WrappedSignedBuilderBid(s.Bid)
if err != nil {
return nil, errors.Wrap(err, "could not wrap bid")
return nil, errors.Wrap(err, "could not wrap capella bid")
}
return w, s.ErrGetHeader
}

View File

@@ -24,7 +24,7 @@ import (
func ProcessAttestationsNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
b interfaces.SignedBeaconBlock,
b interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, err

View File

@@ -44,49 +44,51 @@ import (
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
// else:
// decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, error) {
s, votedKeys, err := processSyncAggregate(ctx, s, sync)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, uint64, error) {
s, votedKeys, reward, err := processSyncAggregate(ctx, s, sync)
if err != nil {
return nil, errors.Wrap(err, "could not filter sync committee votes")
return nil, 0, errors.Wrap(err, "could not filter sync committee votes")
}
if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, errors.Wrap(err, "could not verify sync committee signature")
return nil, 0, errors.Wrap(err, "could not verify sync committee signature")
}
return s, nil
return s, reward, nil
}
// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
// verifying the BLS signatures. It returns the modified beacon state and the list of validators'
// public keys that voted, for future signature verification.
// verifying the BLS signatures. It returns the modified beacon state, the list of validators'
// public keys that voted (for future signature verification) and the proposer reward for including
// sync aggregate messages.
func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (
state.BeaconState,
[]bls.PublicKey,
uint64,
error) {
currentSyncCommittee, err := s.CurrentSyncCommittee()
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
if currentSyncCommittee == nil {
return nil, nil, errors.New("nil current sync committee in state")
return nil, nil, 0, errors.New("nil current sync committee in state")
}
committeeKeys := currentSyncCommittee.Pubkeys
if sync.SyncCommitteeBits.Len() > uint64(len(committeeKeys)) {
return nil, nil, errors.New("bits length exceeds committee length")
return nil, nil, 0, errors.New("bits length exceeds committee length")
}
votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
earnedProposerReward := uint64(0)
@@ -94,29 +96,29 @@ func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
vIdx, exists := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
// Impossible scenario.
if !exists {
return nil, nil, errors.New("validator public key does not exist in state")
return nil, nil, 0, errors.New("validator public key does not exist in state")
}
if sync.SyncCommitteeBits.BitAt(i) {
pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
votedKeys = append(votedKeys, pubKey)
if err := helpers.IncreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
return nil, nil, 0, err
}
earnedProposerReward += proposerReward
} else {
if err := helpers.DecreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
return nil, nil, 0, err
}
}
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, nil, err
return nil, nil, 0, err
}
return s, votedKeys, err
return s, votedKeys, earnedProposerReward, err
}
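Call sites now receive the proposer's inclusion reward alongside the updated state, as the test changes below show (the perfect-participation fixture asserts a reward of 72192). A minimal call-site sketch:

// Minimal call-site sketch for the new three-value return; the reward can
// feed block-reward APIs or metrics.
func applySyncAggregate(ctx context.Context, st state.BeaconState, agg *ethpb.SyncAggregate) (state.BeaconState, uint64, error) {
	st, reward, err := altair.ProcessSyncAggregate(ctx, st, agg)
	if err != nil {
		return nil, 0, err
	}
	return st, reward, nil
}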
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -53,8 +54,10 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
beaconState, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
var reward uint64
beaconState, reward, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
assert.Equal(t, uint64(72192), reward)
// Use a non-sync committee index to compare profitability.
syncCommittee := make(map[primitives.ValidatorIndex]bool)
@@ -127,7 +130,7 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.ErrorContains(t, "invalid sync committee signature", err)
}
@@ -164,7 +167,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
_, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
}
@@ -189,7 +192,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
SyncCommitteeBits: syncBits,
}
require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
require.Equal(t, 511, len(votedKeys))
require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
@@ -212,7 +215,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
SyncCommitteeBits: syncBits,
}
st, votedKeys, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
for _, key := range votedKeys {

View File

@@ -24,7 +24,7 @@ import (
func ProcessAttestationsNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
b interfaces.SignedBeaconBlock,
b interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return nil, err

View File

@@ -6,8 +6,3 @@ var errNilSignedWithdrawalMessage = errors.New("nil SignedBLSToExecutionChange m
var errNilWithdrawalMessage = errors.New("nil BLSToExecutionChange message")
var errInvalidBLSPrefix = errors.New("withdrawal credential prefix is not a BLS prefix")
var errInvalidWithdrawalCredentials = errors.New("withdrawal credentials do not match")
var errInvalidWithdrawalIndex = errors.New("invalid withdrawal index")
var errInvalidValidatorIndex = errors.New("invalid validator index")
var errInvalidWithdrawalAmount = errors.New("invalid withdrawal amount")
var errInvalidExecutionAddress = errors.New("invalid execution address")
var errInvalidWithdrawalNumber = errors.New("invalid number of withdrawals")

View File

@@ -116,7 +116,7 @@ func VerifyExitAndSignature(
return nil
}
// verifyExitConditions implements the spec defined validation for voluntary exits(excluding signatures).
// verifyExitConditions implements the spec defined validation for voluntary exits (excluding signatures).
//
// Spec pseudocode definition:
//

View File

@@ -39,7 +39,7 @@ func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
var ErrUnrecognizedState = errors.New("unknown underlying type for state.BeaconState value")
func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfaces.SignedBeaconBlock, error) {
func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfaces.ReadOnlySignedBeaconBlock, error) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return nil, err

View File

@@ -18,7 +18,7 @@ import (
//
// Spec pseudocode definition:
//
// def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
// def process_block_header(state: BeaconState, block: ReadOnlyBeaconBlock) -> None:
// # Verify that the slots match
// assert block.slot == state.slot
// # Verify that the block is newer than latest block header
@@ -42,7 +42,7 @@ import (
func ProcessBlockHeader(
ctx context.Context,
beaconState state.BeaconState,
block interfaces.SignedBeaconBlock,
block interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
if err := blocks.BeaconBlockIsNil(block); err != nil {
return nil, err
@@ -74,7 +74,7 @@ func ProcessBlockHeader(
//
// Spec pseudocode definition:
//
// def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
// def process_block_header(state: BeaconState, block: ReadOnlyBeaconBlock) -> None:
// # Verify that the slots match
// assert block.slot == state.slot
// # Verify that the block is newer than latest block header

View File

@@ -51,10 +51,10 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
// IsExecutionBlock returns whether the block has a non-empty ExecutionPayload.
//
// Spec code:
// def is_execution_block(block: BeaconBlock) -> bool:
// def is_execution_block(block: ReadOnlyBeaconBlock) -> bool:
//
// return block.body.execution_payload != ExecutionPayload()
func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
if body == nil {
return false, errors.New("nil block body")
}
@@ -77,10 +77,10 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
// Meaning the payload header in the beacon state is non-empty or the payload in the block body is non-empty.
//
// Spec code:
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
// def is_execution_enabled(state: BeaconState, body: ReadOnlyBeaconBlockBody) -> bool:
//
// return is_merge_block(state, body) or is_merge_complete(state)
func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (bool, error) {
func IsExecutionEnabled(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
if st == nil || body == nil {
return false, errors.New("nil state or block body")
}
@@ -96,7 +96,7 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
// IsExecutionEnabledUsingHeader returns true if execution is enabled, using the post-processed payload header and block body.
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
func IsExecutionEnabledUsingHeader(header interfaces.ExecutionData, body interfaces.BeaconBlockBody) (bool, error) {
func IsExecutionEnabledUsingHeader(header interfaces.ExecutionData, body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
isEmpty, err := blocks.IsEmptyExecutionData(header)
if err != nil {
return false, err
@@ -200,12 +200,9 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
var err error
if st.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get payload withdrawals")
}
st, err = ProcessWithdrawals(st, withdrawals)
st, err = ProcessWithdrawals(st, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
@@ -267,6 +264,13 @@ func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData
// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
var err error
if st.Version() >= version.Capella {
st, err = ProcessWithdrawals(st, header)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
return nil, err
}
@@ -280,7 +284,7 @@ func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData)
}
// GetBlockPayloadHash returns the hash of the execution payload of the block
func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
func GetBlockPayloadHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
var payloadHash [32]byte
if IsPreBellatrixVersion(blk.Version()) {
return payloadHash, nil
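The helpers above compose into a fork-aware guard: payload processing runs only once execution is enabled for the given state and body. A minimal sketch using the functions shown in this diff:

// Minimal sketch: gate payload processing on IsExecutionEnabled so
// pre-merge blocks skip it entirely.
func maybeProcessPayload(st state.BeaconState, blk interfaces.ReadOnlyBeaconBlock,
	payload interfaces.ExecutionData) (state.BeaconState, error) {
	enabled, err := IsExecutionEnabled(st, blk.Body())
	if err != nil {
		return nil, err
	}
	if !enabled {
		return st, nil // pre-merge: nothing to process
	}
	return ProcessPayload(st, payload)
}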

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
@@ -609,7 +610,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
@@ -873,7 +874,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
})
}, big.NewInt(0))
}
func emptyPayload() *enginev1.ExecutionPayload {

View File

@@ -18,7 +18,7 @@ import (
//
// Spec pseudocode definition:
//
// def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
// def process_randao(state: BeaconState, body: ReadOnlyBeaconBlockBody) -> None:
// epoch = get_current_epoch(state)
// # Verify RANDAO reveal
// proposer = state.validators[get_beacon_proposer_index(state)]
@@ -30,7 +30,7 @@ import (
func ProcessRandao(
ctx context.Context,
beaconState state.BeaconState,
b interfaces.SignedBeaconBlock,
b interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return nil, err

View File

@@ -99,7 +99,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.SignedBeaconBlock) error {
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
if err != nil {

View File

@@ -2,18 +2,20 @@ package blocks
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
@@ -21,9 +23,13 @@ import (
const executionToBLSPadding = 12
// ProcessBLSToExecutionChanges processes a list of BLS changes and validates them. The method
// does not immediately verify the signatures in the changes; instead it extracts a signature
// set from them at the end of the transition and verifies them in one batch.
func ProcessBLSToExecutionChanges(
st state.BeaconState,
signed interfaces.SignedBeaconBlock) (state.BeaconState, error) {
signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
if signed.Version() < version.Capella {
return st, nil
}
@@ -110,39 +116,80 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
return val, nil
}
func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal) (state.BeaconState, error) {
expected, err := st.ExpectedWithdrawals()
// ProcessWithdrawals processes the validator withdrawals from the provided execution payload
// into the beacon state.
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
//
// expected_withdrawals = get_expected_withdrawals(state)
// assert len(payload.withdrawals) == len(expected_withdrawals)
//
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
expectedWithdrawals, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}
if len(expected) != len(withdrawals) {
return nil, errInvalidWithdrawalNumber
var wdRoot [32]byte
if executionData.IsBlinded() {
r, err := executionData.WithdrawalsRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals root")
}
wdRoot = bytesutil.ToBytes32(r)
} else {
wds, err := executionData.Withdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals")
}
wdRoot, err = ssz.WithdrawalSliceRoot(hash.CustomSHA256Hasher(), wds, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals root")
}
}
for i, withdrawal := range withdrawals {
if withdrawal.Index != expected[i].Index {
return nil, errInvalidWithdrawalIndex
}
if withdrawal.ValidatorIndex != expected[i].ValidatorIndex {
return nil, errInvalidValidatorIndex
}
if !bytes.Equal(withdrawal.Address, expected[i].Address) {
return nil, errInvalidExecutionAddress
}
if withdrawal.Amount != expected[i].Amount {
return nil, errInvalidWithdrawalAmount
}
expectedRoot, err := ssz.WithdrawalSliceRoot(hash.CustomSHA256Hasher(), expectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals root")
}
if expectedRoot != wdRoot {
return nil, fmt.Errorf("expected withdrawals root %#x, got %#x", expectedRoot, wdRoot)
}
for _, withdrawal := range expectedWithdrawals {
err := helpers.DecreaseBalance(st, withdrawal.ValidatorIndex, withdrawal.Amount)
if err != nil {
return nil, errors.Wrap(err, "could not decrease balance")
}
}
if len(withdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(withdrawals[len(withdrawals)-1].Index + 1); err != nil {
if len(expectedWithdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(expectedWithdrawals[len(expectedWithdrawals)-1].Index + 1); err != nil {
return nil, errors.Wrap(err, "could not set next withdrawal index")
}
}
var nextValidatorIndex primitives.ValidatorIndex
if uint64(len(withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
if uint64(len(expectedWithdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, errors.Wrap(err, "could not get next withdrawal validator index")
@@ -150,7 +197,7 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = withdrawals[len(withdrawals)-1].ValidatorIndex + 1
nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
}
@@ -161,6 +208,8 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
return st, nil
}
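The sweep-advance arithmetic deserves a worked example: in the first test case below (128 validators, sweep length 80, no withdrawals), the next sweep index is (10 + 80) % 128 = 90, which matches the control value. A sketch of the two branches, treating the constants as fixture assumptions rather than spec values:

// Sketch of the next-sweep-index update; numValidators and maxSweep mirror
// the test fixture below (128 and 80), not mainnet configuration.
func nextSweepIndex(expected []*enginev1.Withdrawal, cur primitives.ValidatorIndex,
	numValidators, maxPerPayload, maxSweep uint64) primitives.ValidatorIndex {
	if uint64(len(expected)) < maxPerPayload {
		// Partial payload: advance by the full sweep length, wrapping around.
		return (cur + primitives.ValidatorIndex(maxSweep)) % primitives.ValidatorIndex(numValidators)
	}
	// Full payload: resume right after the last withdrawn validator.
	next := expected[len(expected)-1].ValidatorIndex + 1
	if next == primitives.ValidatorIndex(numValidators) {
		next = 0
	}
	return next
}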
// BLSChangesSignatureBatch extracts the relevant signatures from the provided execution change
// messages and transforms them into a signature batch object.
func BLSChangesSignatureBatch(
st state.ReadOnlyBeaconState,
changes []*ethpb.SignedBLSToExecutionChange,

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"math/rand"
"testing"
@@ -9,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -236,6 +238,425 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
})
}
func TestProcessBlindWithdrawals(t *testing.T) {
const (
currentEpoch = primitives.Epoch(10)
epochInFuture = primitives.Epoch(12)
epochInPast = primitives.Epoch(8)
numValidators = 128
notWithdrawableIndex = 127
notPartiallyWithdrawable = 126
maxSweep = uint64(80)
)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
type args struct {
Name string
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
FullWithdrawalIndices []primitives.ValidatorIndex
PartialWithdrawalIndices []primitives.ValidatorIndex
Withdrawals []*enginev1.Withdrawal
}
type control struct {
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
ExpectedError bool
Balances map[uint64]uint64
}
type Test struct {
Args args
Control control
}
executionAddress := func(i primitives.ValidatorIndex) []byte {
wc := make([]byte, 20)
wc[19] = byte(i)
return wc
}
withdrawalAmount := func(i primitives.ValidatorIndex) uint64 {
return maxEffectiveBalance + uint64(i)*100000
}
fullWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
Index: idx,
ValidatorIndex: i,
Address: executionAddress(i),
Amount: withdrawalAmount(i),
}
}
partialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
Index: idx,
ValidatorIndex: i,
Address: executionAddress(i),
Amount: withdrawalAmount(i) - maxEffectiveBalance,
}
}
tests := []Test{
{
Args: args{
Name: "success no withdrawals",
NextWithdrawalValidatorIndex: 10,
NextWithdrawalIndex: 3,
},
Control: control{
NextWithdrawalValidatorIndex: 90,
NextWithdrawalIndex: 3,
},
},
{
Args: args{
Name: "success one full withdrawal",
NextWithdrawalIndex: 3,
NextWithdrawalValidatorIndex: 5,
FullWithdrawalIndices: []primitives.ValidatorIndex{70},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(70, 3),
},
},
Control: control{
NextWithdrawalValidatorIndex: 85,
NextWithdrawalIndex: 4,
Balances: map[uint64]uint64{70: 0},
},
},
{
Args: args{
Name: "success one partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 120,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(7, 21),
},
},
Control: control{
NextWithdrawalValidatorIndex: 72,
NextWithdrawalIndex: 22,
Balances: map[uint64]uint64{7: maxEffectiveBalance},
},
},
{
Args: args{
Name: "success many full withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 25,
Balances: map[uint64]uint64{7: 0, 19: 0, 28: 0},
},
},
{
Args: args{
Name: "Less than max sweep at end",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{80, 81, 82, 83},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(80, 22), fullWithdrawal(81, 23), fullWithdrawal(82, 24),
fullWithdrawal(83, 25),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 26,
Balances: map[uint64]uint64{80: 0, 81: 0, 82: 0, 83: 0},
},
},
{
Args: args{
Name: "Less than max sweep and beginning",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{4, 5, 6},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(4, 22), fullWithdrawal(5, 23), fullWithdrawal(6, 24),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 25,
Balances: map[uint64]uint64{4: 0, 5: 0, 6: 0},
},
},
{
Args: args{
Name: "success many partial withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(7, 22), partialWithdrawal(19, 23), partialWithdrawal(28, 24),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 25,
Balances: map[uint64]uint64{
7: maxEffectiveBalance,
19: maxEffectiveBalance,
28: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "success many withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 88,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
PartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(89, 22), partialWithdrawal(1, 23), partialWithdrawal(2, 24),
fullWithdrawal(7, 25), partialWithdrawal(15, 26), fullWithdrawal(19, 27),
fullWithdrawal(28, 28),
},
},
Control: control{
NextWithdrawalValidatorIndex: 40,
NextWithdrawalIndex: 29,
Balances: map[uint64]uint64{
7: 0, 19: 0, 28: 0,
2: maxEffectiveBalance, 1: maxEffectiveBalance, 89: maxEffectiveBalance,
15: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "success more than max fully withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
FullWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(1, 22), fullWithdrawal(2, 23), fullWithdrawal(3, 24),
fullWithdrawal(4, 25), fullWithdrawal(5, 26), fullWithdrawal(6, 27),
fullWithdrawal(7, 28), fullWithdrawal(8, 29), fullWithdrawal(9, 30),
fullWithdrawal(21, 31), fullWithdrawal(22, 32), fullWithdrawal(23, 33),
fullWithdrawal(24, 34), fullWithdrawal(25, 35), fullWithdrawal(26, 36),
fullWithdrawal(27, 37),
},
},
Control: control{
NextWithdrawalValidatorIndex: 28,
NextWithdrawalIndex: 38,
Balances: map[uint64]uint64{
1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,
21: 0, 22: 0, 23: 0, 24: 0, 25: 0, 26: 0, 27: 0,
},
},
},
{
Args: args{
Name: "success more than max partially withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
PartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(1, 22), partialWithdrawal(2, 23), partialWithdrawal(3, 24),
partialWithdrawal(4, 25), partialWithdrawal(5, 26), partialWithdrawal(6, 27),
partialWithdrawal(7, 28), partialWithdrawal(8, 29), partialWithdrawal(9, 30),
partialWithdrawal(21, 31), partialWithdrawal(22, 32), partialWithdrawal(23, 33),
partialWithdrawal(24, 34), partialWithdrawal(25, 35), partialWithdrawal(26, 36),
partialWithdrawal(27, 37),
},
},
Control: control{
NextWithdrawalValidatorIndex: 28,
NextWithdrawalIndex: 38,
Balances: map[uint64]uint64{
1: maxEffectiveBalance,
2: maxEffectiveBalance,
3: maxEffectiveBalance,
4: maxEffectiveBalance,
5: maxEffectiveBalance,
6: maxEffectiveBalance,
7: maxEffectiveBalance,
8: maxEffectiveBalance,
9: maxEffectiveBalance,
21: maxEffectiveBalance,
22: maxEffectiveBalance,
23: maxEffectiveBalance,
24: maxEffectiveBalance,
25: maxEffectiveBalance,
26: maxEffectiveBalance,
27: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "failure wrong number of partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 37,
PartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
partialWithdrawal(7, 21), partialWithdrawal(9, 22),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure invalid withdrawal index",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 25),
fullWithdrawal(1, 25),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure invalid validator index",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(27, 24),
fullWithdrawal(1, 25),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure invalid withdrawal amount",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), partialWithdrawal(28, 24),
fullWithdrawal(1, 25),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure validator not fully withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{notWithdrawableIndex},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(notWithdrawableIndex, 22),
},
},
Control: control{
ExpectedError: true,
},
},
{
Args: args{
Name: "failure validator not partially withdrawable",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PartialWithdrawalIndices: []primitives.ValidatorIndex{notPartiallyWithdrawable},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(notPartiallyWithdrawable, 22),
},
},
Control: control{
ExpectedError: true,
},
},
}
checkPostState := func(t *testing.T, expected control, st state.BeaconState) {
l, err := st.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalValidatorIndex, l)
n, err := st.NextWithdrawalIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalIndex, n)
balances := st.Balances()
for idx, bal := range expected.Balances {
require.Equal(t, bal, balances[idx])
}
}
prepareValidators := func(st *ethpb.BeaconStateCapella, arguments args) (state.BeaconState, error) {
validators := make([]*ethpb.Validator, numValidators)
st.Balances = make([]uint64, numValidators)
for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = epochInFuture
v.WithdrawalCredentials = make([]byte, 32)
v.WithdrawalCredentials[31] = byte(i)
st.Balances[i] = v.EffectiveBalance - uint64(rand.Intn(1000))
validators[i] = v
}
for _, idx := range arguments.FullWithdrawalIndices {
if idx != notWithdrawableIndex {
validators[idx].WithdrawableEpoch = epochInPast
}
st.Balances[idx] = withdrawalAmount(idx)
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
}
for _, idx := range arguments.PartialWithdrawalIndices {
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
st.Balances[idx] = withdrawalAmount(idx)
}
st.Validators = validators
return state_native.InitializeFromProtoCapella(st)
}
for _, test := range tests {
t.Run(test.Args.Name, func(t *testing.T) {
saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = maxSweep
if test.Args.Withdrawals == nil {
test.Args.Withdrawals = make([]*enginev1.Withdrawal, 0)
}
if test.Args.FullWithdrawalIndices == nil {
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
if test.Args.PartialWithdrawalIndices == nil {
test.Args.PartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
slot, err := slots.EpochStart(currentEpoch)
require.NoError(t, err)
spb := &ethpb.BeaconStateCapella{
Slot: slot,
NextWithdrawalValidatorIndex: test.Args.NextWithdrawalValidatorIndex,
NextWithdrawalIndex: test.Args.NextWithdrawalIndex,
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot(hash.CustomSHA256Hasher(), test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, big.NewInt(0))
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
require.NotNil(t, err)
} else {
require.NoError(t, err)
checkPostState(t, test.Control, post)
}
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
})
}
}
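An editorial note on the teardown in the loop above: the config restore on the last line of the t.Run body never executes when a require.* assertion fails, so the mutated MaxValidatorsPerWithdrawalsSweep would leak into later tests. A minimal sketch of a failure-proof variant using t.Cleanup, reusing the same identifiers as this test:

    saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
    params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = maxSweep
    // t.Cleanup runs even when the subtest aborts early, so the shared
    // config value is always restored.
    t.Cleanup(func() {
        params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
    })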
func TestProcessWithdrawals(t *testing.T) {
const (
@@ -641,7 +1062,9 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, test.Args.Withdrawals)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
require.NotNil(t, err)
} else {

View File

@@ -16,25 +16,25 @@ var errNilState = errors.New("nil state")
// UnrealizedCheckpoints returns the justification and finalization checkpoints of the
// given state as if it was progressed with empty slots until the next epoch. It
// also returns the total active balance during the epoch.
func UnrealizedCheckpoints(st state.BeaconState) (uint64, *ethpb.Checkpoint, *ethpb.Checkpoint, error) {
func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
if st == nil || st.IsNil() {
return 0, nil, nil, errNilState
return nil, nil, errNilState
}
if slots.ToEpoch(st.Slot()) <= params.BeaconConfig().GenesisEpoch+1 {
jc := st.CurrentJustifiedCheckpoint()
fc := st.FinalizedCheckpoint()
return 0, jc, fc, nil
return jc, fc, nil
}
activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
if err != nil {
return 0, nil, nil, err
return nil, nil, err
}
justification := processJustificationBits(st, activeBalance, prevTarget, currentTarget)
jc, fc, err := computeCheckpoints(st, justification)
return activeBalance, jc, fc, err
return jc, fc, err
}
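Callers that still need the total active balance can recover it directly from the state, since UnrealizedCheckpointBalances (used above) continues to return it. A minimal sketch; the helper name unrealizedWithBalance is hypothetical:

    func unrealizedWithBalance(st state.BeaconState) (uint64, *ethpb.Checkpoint, *ethpb.Checkpoint, error) {
        jc, fc, err := UnrealizedCheckpoints(st)
        if err != nil {
            return 0, nil, nil, err
        }
        // The active balance is no longer part of UnrealizedCheckpoints'
        // return values; read it from the state instead.
        activeBalance, _, _, err := st.UnrealizedCheckpointBalances()
        if err != nil {
            return 0, nil, nil, err
        }
        return activeBalance, jc, fc, nil
    }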
// ProcessJustificationAndFinalizationPreCompute processes justification and finalization during

View File

@@ -242,12 +242,10 @@ func TestUnrealizedCheckpoints(t *testing.T) {
_, _, err = altair.InitializePrecomputeValidators(context.Background(), state)
require.NoError(t, err)
ab, jc, fc, err := precompute.UnrealizedCheckpoints(state)
jc, fc, err := precompute.UnrealizedCheckpoints(state)
require.NoError(t, err)
require.DeepEqual(t, test.expectedJustified, jc.Epoch)
require.DeepEqual(t, test.expectedFinalized, fc.Epoch)
eb := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
require.Equal(t, eb, ab)
})
}
}

View File

@@ -11,6 +11,6 @@ const (
// ReceivedBlockData is the data sent with ReceivedBlock events.
type ReceivedBlockData struct {
SignedBlock interfaces.SignedBeaconBlock
SignedBlock interfaces.ReadOnlySignedBeaconBlock
IsOptimistic bool
}

View File

@@ -35,7 +35,7 @@ type BlockProcessedData struct {
// BlockRoot of the processed block.
BlockRoot [32]byte
// SignedBlock is the physical processed block.
SignedBlock interfaces.SignedBeaconBlock
SignedBlock interfaces.ReadOnlySignedBeaconBlock
// Verified is true if the block's BLS contents have been verified.
Verified bool
}

View File

@@ -43,6 +43,12 @@ func IsActiveValidatorUsingTrie(validator state.ReadOnlyValidator, epoch primiti
return checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
}
// IsActiveNonSlashedValidatorUsingTrie checks if a read-only validator is active and not slashed.
func IsActiveNonSlashedValidatorUsingTrie(validator state.ReadOnlyValidator, epoch primitives.Epoch) bool {
active := checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
return active && !validator.Slashed()
}
func checkValidatorActiveStatus(activationEpoch, exitEpoch, epoch primitives.Epoch) bool {
return activationEpoch <= epoch && epoch < exitEpoch
}

View File

@@ -56,6 +56,34 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
}
}
func TestIsActiveNonSlashedValidatorUsingTrie_OK(t *testing.T) {
tests := []struct {
a primitives.Epoch
s bool
b bool
}{
{a: 0, s: false, b: false},
{a: 10, s: false, b: true},
{a: 100, s: false, b: false},
{a: 1000, s: false, b: false},
{a: 64, s: false, b: true},
{a: 0, s: true, b: false},
{a: 10, s: true, b: false},
{a: 100, s: true, b: false},
{a: 1000, s: true, b: false},
{a: 64, s: true, b: false},
}
for _, test := range tests {
val := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
val.Slashed = test.s
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{val}})
require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
assert.Equal(t, test.b, IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
}
}
func TestIsSlashableValidator_OK(t *testing.T) {
tests := []struct {
name string

View File

@@ -11,7 +11,7 @@ import (
)
// WriteBlockToDisk as a block ssz. Writes to temp directory. Debug!
func WriteBlockToDisk(block interfaces.SignedBeaconBlock, failed bool) {
func WriteBlockToDisk(block interfaces.ReadOnlySignedBeaconBlock, failed bool) {
if !features.Get().WriteSSZStateTransitions {
return
}

View File

@@ -35,7 +35,7 @@ import (
//
// Spec pseudocode definition:
//
// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None:
// def state_transition(state: BeaconState, signed_block: ReadOnlySignedBeaconBlock, validate_result: bool=True) -> None:
// block = signed_block.message
// # Process slots (including those with no blocks) since block
// process_slots(state, block.slot)
@@ -50,7 +50,7 @@ import (
func ExecuteStateTransition(
ctx context.Context,
state state.BeaconState,
signed interfaces.SignedBeaconBlock,
signed interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
@@ -312,7 +312,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
}
// VerifyOperationLengths verifies that block operation lengths are valid.
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.SignedBeaconBlock) (state.BeaconState, error) {
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return nil, err
}

View File

@@ -29,7 +29,7 @@ import (
//
// Spec pseudocode definition:
//
// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None:
// def state_transition(state: BeaconState, signed_block: ReadOnlySignedBeaconBlock, validate_result: bool=True) -> None:
// block = signed_block.message
// # Process slots (including those with no blocks) since block
// process_slots(state, block.slot)
@@ -44,7 +44,7 @@ import (
func ExecuteStateTransitionNoVerifyAnySig(
ctx context.Context,
st state.BeaconState,
signed interfaces.SignedBeaconBlock,
signed interfaces.ReadOnlySignedBeaconBlock,
) (*bls.SignatureBatch, state.BeaconState, error) {
if ctx.Err() != nil {
return nil, nil, ctx.Err()
@@ -96,7 +96,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
//
// Spec pseudocode definition:
//
// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None:
// def state_transition(state: BeaconState, signed_block: ReadOnlySignedBeaconBlock, validate_result: bool=True) -> None:
// block = signed_block.message
// # Process slots (including those with no blocks) since block
// process_slots(state, block.slot)
@@ -111,7 +111,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
func CalculateStateRoot(
ctx context.Context,
state state.BeaconState,
signed interfaces.SignedBeaconBlock,
signed interfaces.ReadOnlySignedBeaconBlock,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
defer span.End()
@@ -153,7 +153,7 @@ func CalculateStateRoot(
//
// Spec pseudocode definition:
//
// def process_block(state: BeaconState, block: BeaconBlock) -> None:
// def process_block(state: BeaconState, block: ReadOnlyBeaconBlock) -> None:
// process_block_header(state, block)
// process_randao(state, block.body)
// process_eth1_data(state, block.body)
@@ -161,7 +161,7 @@ func CalculateStateRoot(
func ProcessBlockNoVerifyAnySig(
ctx context.Context,
st state.BeaconState,
signed interfaces.SignedBeaconBlock,
signed interfaces.ReadOnlySignedBeaconBlock,
) (*bls.SignatureBatch, state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
defer span.End()
@@ -222,7 +222,7 @@ func ProcessBlockNoVerifyAnySig(
//
// Spec pseudocode definition:
//
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
// def process_operations(state: BeaconState, body: ReadOnlyBeaconBlockBody) -> None:
// # Verify that outstanding deposits are processed up to the maximum number of deposits
// assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
//
@@ -238,7 +238,7 @@ func ProcessBlockNoVerifyAnySig(
func ProcessOperationsNoVerifyAttsSigs(
ctx context.Context,
state state.BeaconState,
signedBeaconBlock interfaces.SignedBeaconBlock) (state.BeaconState, error) {
signedBeaconBlock interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessOperationsNoVerifyAttsSigs")
defer span.End()
if err := blocks.BeaconBlockIsNil(signedBeaconBlock); err != nil {
@@ -272,7 +272,7 @@ func ProcessOperationsNoVerifyAttsSigs(
// and randao signature verifications.
//
// Spec pseudocode definition:
// def process_block(state: BeaconState, block: BeaconBlock) -> None:
// def process_block(state: BeaconState, block: ReadOnlyBeaconBlock) -> None:
//
// process_block_header(state, block)
// if is_execution_enabled(state, block.body):
@@ -284,7 +284,7 @@ func ProcessOperationsNoVerifyAttsSigs(
func ProcessBlockForStateRoot(
ctx context.Context,
state state.BeaconState,
signed interfaces.SignedBeaconBlock,
signed interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockForStateRoot")
defer span.End()
@@ -351,7 +351,7 @@ func ProcessBlockForStateRoot(
if err != nil {
return nil, errors.Wrap(err, "could not get sync aggregate from block")
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}
@@ -363,7 +363,7 @@ func ProcessBlockForStateRoot(
func altairOperations(
ctx context.Context,
st state.BeaconState,
signedBeaconBlock interfaces.SignedBeaconBlock) (state.BeaconState, error) {
signedBeaconBlock interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
@@ -390,7 +390,7 @@ func altairOperations(
func phase0Operations(
ctx context.Context,
st state.BeaconState,
signedBeaconBlock interfaces.SignedBeaconBlock) (state.BeaconState, error) {
signedBeaconBlock interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process block proposer slashings")

View File

@@ -6,6 +6,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/validators",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/endtoend:__subpackages__",
"//testing/spectest:__subpackages__",
],
deps = [

View File

@@ -178,7 +178,6 @@ func SlashValidator(
if err != nil {
return nil, errors.Wrap(err, "could not get proposer idx")
}
// In phase 0, the proposer is the whistleblower.
whistleBlowerIdx := proposerIdx
whistleblowerReward := validator.EffectiveBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
proposerReward := whistleblowerReward / proposerRewardQuotient

View File

@@ -20,16 +20,16 @@ import (
// ReadOnlyDatabase defines a struct which only has read access to database methods.
type ReadOnlyDatabase interface {
// Block related methods.
Block(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.SignedBeaconBlock, [][32]byte, error)
Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.ReadOnlySignedBeaconBlock, [][32]byte, error)
BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.SignedBeaconBlock, error)
BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error)
BlockRootsBySlot(ctx context.Context, slot primitives.Slot) (bool, [][32]byte, error)
HasBlock(ctx context.Context, blockRoot [32]byte) bool
GenesisBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
GenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
// State related methods.
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
@@ -65,8 +65,8 @@ type NoHeadAccessDatabase interface {
// Block related methods.
DeleteBlock(ctx context.Context, root [32]byte) error
SaveBlock(ctx context.Context, block interfaces.SignedBeaconBlock) error
SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeaconBlock) error
SaveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
@@ -97,7 +97,7 @@ type HeadAccessDatabase interface {
NoHeadAccessDatabase
// Block related methods.
HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
// Genesis operations.

View File

@@ -18,7 +18,6 @@ go_library(
"log.go",
"migration.go",
"migration_archived_index.go",
"migration_blinded_beacon_blocks.go",
"migration_block_slot_index.go",
"migration_state_validators.go",
"schema.go",

View File

@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -28,14 +27,14 @@ import (
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
// Block retrieval by root.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error) {
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
defer span.End()
// Return block from cache if it exists.
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
return v.(interfaces.SignedBeaconBlock), nil
return v.(interfaces.ReadOnlySignedBeaconBlock), nil
}
var blk interfaces.SignedBeaconBlock
var blk interfaces.ReadOnlySignedBeaconBlock
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
enc := bkt.Get(blockRoot[:])
@@ -91,10 +90,10 @@ func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
}
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error) {
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
defer span.End()
var headBlock interfaces.SignedBeaconBlock
var headBlock interfaces.ReadOnlySignedBeaconBlock
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
headRoot := bkt.Get(headBlockRootKey)
@@ -113,10 +112,10 @@ func (s *Store) HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, er
}
// Blocks retrieves a list of beacon blocks and their respective roots by filter criteria.
func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.SignedBeaconBlock, [][32]byte, error) {
func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.ReadOnlySignedBeaconBlock, [][32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.Blocks")
defer span.End()
blocks := make([]interfaces.SignedBeaconBlock, 0)
blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0)
blockRoots := make([][32]byte, 0)
err := s.db.View(func(tx *bolt.Tx) error {
@@ -186,11 +185,11 @@ func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
}
// BlocksBySlot retrieves a list of beacon blocks and their respective roots by slot.
func (s *Store) BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.SignedBeaconBlock, error) {
func (s *Store) BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlocksBySlot")
defer span.End()
blocks := make([]interfaces.SignedBeaconBlock, 0)
blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0)
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
roots, err := blockRootsBySlot(ctx, tx, slot)
@@ -259,7 +258,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
}
// SaveBlock to the db.
func (s *Store) SaveBlock(ctx context.Context, signed interfaces.SignedBeaconBlock) error {
func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlock")
defer span.End()
blockRoot, err := signed.Block().HashTreeRoot()
@@ -269,11 +268,27 @@ func (s *Store) SaveBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
return nil
}
return s.SaveBlocks(ctx, []interfaces.SignedBeaconBlock{signed})
return s.SaveBlocks(ctx, []interfaces.ReadOnlySignedBeaconBlock{signed})
}
// This function determines if we should save beacon blocks in the DB in blinded format by checking
// if a `saveBlindedBeaconBlocks` key exists in the database. Otherwise, we check whether the last
// stored block is blinded, and then write that `saveBlindedBeaconBlocks` key
// to the DB for future checks.
func (s *Store) shouldSaveBlinded(ctx context.Context) (bool, error) {
var saveBlinded bool
if err := s.db.View(func(tx *bolt.Tx) error {
metadataBkt := tx.Bucket(chainMetadataBucket)
saveBlinded = len(metadataBkt.Get(saveBlindedBeaconBlocksKey)) > 0
return nil
}); err != nil {
return false, err
}
return saveBlinded, nil
}
// SaveBlocks via bulk updates to the db.
func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBlock) error {
func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlocks")
defer span.End()
@@ -287,7 +302,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBl
if err != nil {
return err
}
enc, err := marshalBlock(ctx, blk)
enc, err := s.marshalBlock(ctx, blk)
if err != nil {
return err
}
@@ -296,6 +311,10 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBl
indicesByBucket := createBlockIndicesFromBlock(ctx, blk.Block())
indicesForBlocks[i] = indicesByBucket
}
saveBlinded, err := s.shouldSaveBlinded(ctx)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
for i, blk := range blks {
@@ -305,7 +324,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBl
if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
return errors.Wrap(err, "could not update DB indices")
}
if features.Get().EnableOnlyBlindedBeaconBlocks {
if saveBlinded {
blindedBlock, err := blk.ToBlinded()
if err != nil {
if !errors.Is(err, blocks.ErrUnsupportedVersion) {
@@ -341,10 +360,10 @@ func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
}
// GenesisBlock retrieves the genesis block of the beacon chain.
func (s *Store) GenesisBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error) {
func (s *Store) GenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlock")
defer span.End()
var blk interfaces.SignedBeaconBlock
var blk interfaces.ReadOnlySignedBeaconBlock
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
root := bkt.Get(genesisBlockRootKey)
@@ -710,7 +729,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([
// createBlockIndicesFromBlock takes in a beacon block and returns
// a map of bolt DB index buckets to their corresponding index keys for the block's
// data, such as (shard indices bucket -> shard 5).
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.BeaconBlock) map[string][]byte {
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyBeaconBlock) map[string][]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
defer span.End()
indicesByBucket := make(map[string][]byte)
@@ -765,7 +784,7 @@ func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter)
}
// unmarshal block from marshaled proto beacon block bytes to versioned beacon block struct type.
func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock, error) {
func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
var err error
enc, err = snappy.Decode(nil, enc)
if err != nil {
@@ -809,50 +828,71 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
return blocks.NewSignedBeaconBlock(rawBlock)
}
// marshal versioned beacon block from struct type down to bytes.
func marshalBlock(_ context.Context, blk interfaces.SignedBeaconBlock) ([]byte, error) {
func (s *Store) marshalBlock(
ctx context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
shouldBlind, err := s.shouldSaveBlinded(ctx)
if err != nil {
return nil, err
}
if shouldBlind {
return marshalBlockBlinded(ctx, blk)
}
return marshalBlockFull(ctx, blk)
}
// Encodes a full beacon block with its associated key for storage in the DB.
func marshalBlockFull(
_ context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
var encodedBlock []byte
var err error
blockToSave := blk
if features.Get().EnableOnlyBlindedBeaconBlocks {
blindedBlock, err := blk.ToBlinded()
switch {
case errors.Is(err, blocks.ErrUnsupportedVersion):
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal non-blinded block")
}
case err != nil:
return nil, errors.Wrap(err, "could not convert block to blinded format")
default:
encodedBlock, err = blindedBlock.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal blinded block")
}
blockToSave = blindedBlock
}
} else {
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, err
}
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, err
}
switch blockToSave.Version() {
switch blk.Version() {
case version.Capella:
if blockToSave.IsBlinded() {
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
}
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
case version.Bellatrix:
if blockToSave.IsBlinded() {
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
}
return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
case version.Altair:
return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
case version.Phase0:
return snappy.Encode(nil, encodedBlock), nil
default:
return nil, errors.New("Unknown block version")
return nil, errors.New("unknown block version")
}
}
// Encodes a blinded beacon block with its associated key.
// If the block does not support blinding, we then encode it as a full
// block with its associated key by calling marshalBlockFull.
func marshalBlockBlinded(
ctx context.Context,
blk interfaces.ReadOnlySignedBeaconBlock,
) ([]byte, error) {
blindedBlock, err := blk.ToBlinded()
if err != nil {
switch {
case errors.Is(err, blocks.ErrUnsupportedVersion):
return marshalBlockFull(ctx, blk)
default:
return nil, errors.Wrap(err, "could not convert block to blinded format")
}
}
encodedBlock, err := blindedBlock.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal blinded block")
}
switch blk.Version() {
case version.Capella:
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
case version.Bellatrix:
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
default:
return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
}
}
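Both encoders produce snappy(forkKey || SSZ), so a reader must decompress and then dispatch on the key prefix. A sketch of that inverse, assuming the key variables from schema.go shown later in this diff and the bytes/snappy imports; detectBlockVersion is a hypothetical helper, not Prysm's actual unmarshalBlock:

    func detectBlockVersion(enc []byte) (string, []byte, error) {
        dec, err := snappy.Decode(nil, enc)
        if err != nil {
            return "", nil, err
        }
        switch {
        case bytes.HasPrefix(dec, capellaBlindKey):
            return "capella blind", dec[len(capellaBlindKey):], nil
        case bytes.HasPrefix(dec, capellaKey):
            return "capella", dec[len(capellaKey):], nil
        case bytes.HasPrefix(dec, bellatrixBlindKey):
            return "bellatrix blind", dec[len(bellatrixBlindKey):], nil
        case bytes.HasPrefix(dec, bellatrixKey):
            return "bellatrix", dec[len(bellatrixKey):], nil
        case bytes.HasPrefix(dec, altairKey):
            return "altair", dec[len(altairKey):], nil
        default:
            // Phase 0 blocks are written without a key prefix.
            return "phase0", dec, nil
        }
    }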

View File

@@ -21,11 +21,11 @@ import (
var blockTests = []struct {
name string
newBlock func(primitives.Slot, []byte) (interfaces.SignedBeaconBlock, error)
newBlock func(primitives.Slot, []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
}{
{
name: "phase0",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.SignedBeaconBlock, error) {
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlock()
b.Block.Slot = slot
if root != nil {
@@ -36,7 +36,7 @@ var blockTests = []struct {
},
{
name: "altair",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.SignedBeaconBlock, error) {
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockAltair()
b.Block.Slot = slot
if root != nil {
@@ -47,7 +47,7 @@ var blockTests = []struct {
},
{
name: "bellatrix",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.SignedBeaconBlock, error) {
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = slot
if root != nil {
@@ -58,7 +58,7 @@ var blockTests = []struct {
},
{
name: "bellatrix blind",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.SignedBeaconBlock, error) {
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = slot
if root != nil {
@@ -69,7 +69,7 @@ var blockTests = []struct {
},
{
name: "capella",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.SignedBeaconBlock, error) {
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockCapella()
b.Block.Slot = slot
if root != nil {
@@ -80,7 +80,7 @@ var blockTests = []struct {
},
{
name: "capella blind",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.SignedBeaconBlock, error) {
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = slot
if root != nil {
@@ -181,7 +181,7 @@ func TestStore_BlocksHandleZeroCase(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
numBlocks := 10
totalBlocks := make([]interfaces.SignedBeaconBlock, numBlocks)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
for i := 0; i < len(totalBlocks); i++ {
b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
@@ -204,7 +204,7 @@ func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
numBlocks := 10
totalBlocks := make([]interfaces.SignedBeaconBlock, numBlocks)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
// Save blocks from slot 1 onwards.
for i := 0; i < len(totalBlocks); i++ {
b, err := tt.newBlock(primitives.Slot(i+1), bytesutil.PadTo([]byte("parent"), 32))
@@ -381,7 +381,7 @@ func TestStore_Blocks_FiltersCorrectly(t *testing.T) {
require.NoError(t, err)
b8, err := tt.newBlock(primitives.Slot(8), bytesutil.PadTo([]byte("parent4"), 32))
require.NoError(t, err)
blocks := []interfaces.SignedBeaconBlock{
blocks := []interfaces.ReadOnlySignedBeaconBlock{
b4,
b5,
b6,
@@ -485,7 +485,7 @@ func TestStore_Blocks_Retrieve_SlotRange(t *testing.T) {
for _, tt := range blockTests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
totalBlocks := make([]interfaces.SignedBeaconBlock, 500)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
@@ -505,7 +505,7 @@ func TestStore_Blocks_Retrieve_Epoch(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
slots := params.BeaconConfig().SlotsPerEpoch.Mul(7)
totalBlocks := make([]interfaces.SignedBeaconBlock, slots)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, slots)
for i := primitives.Slot(0); i < slots; i++ {
b, err := tt.newBlock(i, bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
@@ -529,7 +529,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
for _, tt := range blockTests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
totalBlocks := make([]interfaces.SignedBeaconBlock, 500)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
@@ -722,7 +722,7 @@ func TestStore_SaveBlocks_HasCachedBlocks(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := make([]interfaces.SignedBeaconBlock, 500)
b := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
blk, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)
@@ -746,7 +746,7 @@ func TestStore_SaveBlocks_HasRootsMatched(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
b := make([]interfaces.SignedBeaconBlock, 500)
b := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
for i := 0; i < 500; i++ {
blk, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
require.NoError(t, err)

View File

@@ -190,11 +190,11 @@ func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
// FinalizedChildBlock returns the child block of a provided finalized block. If
// no finalized block or its respective child block exists, we return a nil
// block.
func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error) {
func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.FinalizedChildBlock")
defer span.End()
var blk interfaces.SignedBeaconBlock
var blk interfaces.ReadOnlySignedBeaconBlock
err := s.db.View(func(tx *bolt.Tx) error {
blkBytes := tx.Bucket(finalizedBlockRootsIndexBucket).Get(blockRoot[:])
if blkBytes == nil {

View File

@@ -142,7 +142,7 @@ func TestStore_IsFinalizedChildBlock(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.SignedBeaconBlock) {
eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.ReadOnlySignedBeaconBlock) {
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
@@ -193,15 +193,15 @@ func TestStore_IsFinalizedChildBlock(t *testing.T) {
})
}
func sszRootOrDie(t *testing.T, block interfaces.SignedBeaconBlock) []byte {
func sszRootOrDie(t *testing.T, block interfaces.ReadOnlySignedBeaconBlock) []byte {
root, err := block.Block().HashTreeRoot()
require.NoError(t, err)
return root[:]
}
func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.SignedBeaconBlock {
func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
blocks := make([]*ethpb.SignedBeaconBlock, n)
ifaceBlocks := make([]interfaces.SignedBeaconBlock, n)
ifaceBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, n)
for j := i; j < n+i; j++ {
parentRoot := make([]byte, fieldparams.RootLength)
copy(parentRoot, previousRoot[:])
@@ -217,9 +217,9 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.S
return ifaceBlocks
}
func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) []interfaces.SignedBeaconBlock {
func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
blocks := make([]*ethpb.SignedBeaconBlockAltair, num)
ifaceBlocks := make([]interfaces.SignedBeaconBlock, num)
ifaceBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, num)
for j := startIdx; j < num+startIdx; j++ {
parentRoot := make([]byte, fieldparams.RootLength)
copy(parentRoot, previousRoot[:])

View File

@@ -1,7 +1,6 @@
package kv
import (
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
)
@@ -10,7 +9,4 @@ func init() {
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
features.Init(&features.Flags{
EnableOnlyBlindedBeaconBlocks: true,
})
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/io/file"
bolt "go.etcd.io/bbolt"
)
@@ -194,7 +195,8 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
if err = prometheus.Register(createBoltCollector(kv.db)); err != nil {
return nil, err
}
if err = kv.checkNeedsResync(); err != nil {
// Set up the type of block storage used, depending on whether or not this is a fresh database.
if err := kv.setupBlockStorageType(ctx); err != nil {
return nil, err
}
return kv, nil
@@ -229,21 +231,60 @@ func (s *Store) DatabasePath() string {
return s.databasePath
}
func (s *Store) checkNeedsResync() error {
return s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(migrationsBucket)
hasDisabledFeature := !features.Get().EnableOnlyBlindedBeaconBlocks
if hasDisabledFeature && bkt.Get(migrationBlindedBeaconBlocksKey) != nil {
return fmt.Errorf(
"you have disabled the flag %s, and your node must resync to ensure your "+
"database is compatible. If you do not want to resync, please re-enable the %s flag",
features.EnableOnlyBlindedBeaconBlocks.Name,
features.EnableOnlyBlindedBeaconBlocks.Name,
)
func (s *Store) setupBlockStorageType(ctx context.Context) error {
// We check whether we want to save blinded beacon blocks by looking for a key in the DB;
// otherwise, we check the last stored block and set that key in the DB if it is blinded.
headBlock, err := s.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get head block when setting up block storage type")
}
err = blocks.BeaconBlockIsNil(headBlock)
isNilBlk := err != nil
saveFull := features.Get().SaveFullExecutionPayloads
var saveBlinded bool
if err := s.db.Update(func(tx *bolt.Tx) error {
// If we have a key stating we wish to save blinded beacon blocks, then we set saveBlinded to true.
metadataBkt := tx.Bucket(chainMetadataBucket)
keyExists := len(metadataBkt.Get(saveBlindedBeaconBlocksKey)) > 0
if keyExists {
saveBlinded = true
return nil
}
// If the head block exists and is blinded, we update the key in the DB to
// say we wish to save all blocks as blinded.
if !isNilBlk && headBlock.IsBlinded() {
if err := metadataBkt.Put(saveBlindedBeaconBlocksKey, []byte{1}); err != nil {
return err
}
saveBlinded = true
}
if isNilBlk && !saveFull {
if err := metadataBkt.Put(saveBlindedBeaconBlocksKey, []byte{1}); err != nil {
return err
}
saveBlinded = true
}
return nil
})
}); err != nil {
return err
}
// If the user wants to save full execution payloads but their database is saving blinded blocks only,
// we return an error, as the node should not start.
if saveFull && saveBlinded {
return fmt.Errorf(
"cannot use the %s flag with this existing database, as it has already been initialized to only store "+
"execution payload headers (aka blinded beacon blocks). If you want to use this flag, you must re-sync your node with a fresh "+
"database. We recommend using checkpoint sync https://docs.prylabs.network/docs/prysm-usage/checkpoint-sync/",
features.SaveFullExecutionPayloads.Name,
)
}
if saveFull {
log.Warn("Saving full beacon blocks to the database. For greater disk space savings, we recommend resyncing from an empty database with " +
"checkpoint sync to save only blinded beacon blocks by default")
}
return nil
}
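The branching above reduces to a small decision table: an existing metadata key always wins; otherwise a blinded head block, or a fresh database without the full-payload flag, flips the store to blinded mode, and saveFull combined with an already-blinded database is the one fatal combination. A standalone sketch of those rules (decideBlindedStorage is a hypothetical condensation, not part of the diff):

    func decideBlindedStorage(keyExists, headIsBlinded, isFreshDB, saveFull bool) (saveBlinded, writeKey bool) {
        if keyExists {
            return true, false // key already present, nothing to write
        }
        if !isFreshDB && headIsBlinded {
            return true, true // existing blinded DB: record the mode
        }
        if isFreshDB && !saveFull {
            return true, true // fresh DB defaults to blinded storage
        }
        return false, false // full blocks remain the storage mode
    }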
func createBuckets(tx *bolt.Tx, buckets ...[]byte) error {

View File

@@ -2,10 +2,14 @@ package kv
import (
"context"
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
bolt "go.etcd.io/bbolt"
)
@@ -19,16 +23,169 @@ func setupDB(t testing.TB) *Store {
return db
}
func Test_checkNeedsResync(t *testing.T) {
store := setupDB(t)
resetFn := features.InitWithReset(&features.Flags{
EnableOnlyBlindedBeaconBlocks: false,
func Test_setupBlockStorageType(t *testing.T) {
ctx := context.Background()
t.Run("fresh database with feature enabled to store full blocks should store full blocks", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
store := setupDB(t)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
})
t.Run("fresh database with default settings should store blinded", func(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: false,
})
defer resetFn()
store := setupDB(t)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
wantedBlk, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wantedBlk, retrievedBlk)
})
t.Run("existing database with blinded blocks but no key in metadata bucket should continue storing blinded blocks", func(t *testing.T) {
store := setupDB(t)
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(chainMetadataBucket).Put(saveBlindedBeaconBlocksKey, []byte{1})
}))
blk := util.NewBlindedBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayloadHeader.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
// We then delete the key from the bucket.
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(chainMetadataBucket).Delete(saveBlindedBeaconBlocksKey)
}))
// Not a fresh database, has blinded blocks already and should continue being that way.
err = store.setupBlockStorageType(ctx)
require.NoError(t, err)
var shouldSaveBlinded bool
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(chainMetadataBucket)
shouldSaveBlinded = len(bkt.Get(saveBlindedBeaconBlocksKey)) > 0
return nil
}))
// Should have set the chain metadata bucket to save blinded
require.Equal(t, true, shouldSaveBlinded)
blkFull := util.NewBeaconBlockBellatrix()
blkFull.Block.Body.ExecutionPayload.BlockNumber = 2
wrappedBlock, err = blocks.NewSignedBeaconBlock(blkFull)
require.NoError(t, err)
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
wrappedBlinded, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wrappedBlinded, retrievedBlk)
})
t.Run("existing database with full blocks type should continue storing full blocks", func(t *testing.T) {
store := setupDB(t)
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(chainMetadataBucket).Delete(saveBlindedBeaconBlocksKey)
}))
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
// Not a fresh database, has full blocks already and should continue being that way.
err = store.setupBlockStorageType(ctx)
require.NoError(t, err)
blk = util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 2
wrappedBlock, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
})
t.Run("existing database with blinded blocks type should error if user enables full blocks feature flag", func(t *testing.T) {
store := setupDB(t)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.BlockNumber = 1
wrappedBlock, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
root, err := wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
require.NoError(t, store.SaveStateSummary(ctx, &ethpb.StateSummary{Root: root[:]}))
require.NoError(t, store.SaveHeadBlockRoot(ctx, root))
retrievedBlk, err := store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
wantedBlk, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wantedBlk, retrievedBlk)
// Trying to enable full blocks with a database that is already storing blinded blocks should error.
resetFn := features.InitWithReset(&features.Flags{
SaveFullExecutionPayloads: true,
})
defer resetFn()
err = store.setupBlockStorageType(ctx)
errMsg := "cannot use the %s flag with this existing database, as it has already been initialized"
require.ErrorContains(t, fmt.Sprintf(errMsg, features.SaveFullExecutionPayloads.Name), err)
})
defer resetFn()
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(migrationsBucket)
return bkt.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}))
err := store.checkNeedsResync()
require.ErrorContains(t, "your node must resync", err)
}

View File

@@ -14,7 +14,6 @@ var migrations = []migration{
migrateArchivedIndex,
migrateBlockSlotIndex,
migrateStateValidators,
migrateBlindedBeaconBlocksEnabled,
}
// RunMigrations defined in the migrations array.

View File

@@ -1,27 +0,0 @@
package kv
import (
"bytes"
"context"
"github.com/prysmaticlabs/prysm/v3/config/features"
bolt "go.etcd.io/bbolt"
)
var migrationBlindedBeaconBlocksKey = []byte("blinded-beacon-blocks-enabled")
func migrateBlindedBeaconBlocksEnabled(ctx context.Context, db *bolt.DB) error {
if !features.Get().EnableOnlyBlindedBeaconBlocks {
return nil // Only write to the migrations bucket if the feature is enabled.
}
if updateErr := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if b := mb.Get(migrationBlindedBeaconBlocksKey); bytes.Equal(b, migrationCompleted) {
return nil // Migration already completed.
}
return mb.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}); updateErr != nil {
return updateErr
}
return nil
}

View File

@@ -49,12 +49,12 @@ var (
// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
altairKey = []byte("altair")
bellatrixKey = []byte("merge")
bellatrixBlindKey = []byte("blind-bellatrix")
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
altairKey = []byte("altair")
bellatrixKey = []byte("merge")
bellatrixBlindKey = []byte("blind-bellatrix")
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated

View File

@@ -368,7 +368,7 @@ func TestStore_StatesBatchDelete(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
numBlocks := 100
totalBlocks := make([]interfaces.SignedBeaconBlock, numBlocks)
totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
blockRoots := make([][32]byte, 0)
evenBlockRoots := make([][32]byte, 0)
for i := 0; i < len(totalBlocks); i++ {

View File

@@ -129,9 +129,9 @@ go_test(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
"@com_github_ethereum_go_ethereum//beacon/engine:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/beacon:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",

View File

@@ -240,7 +240,7 @@ func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
ttd := new(uint256.Int)
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
require.NoError(t, err)
require.ErrorContains(t, "missing required field 'parentHash' for Header", err)
require.Equal(t, false, reached)
}

View File

@@ -64,10 +64,10 @@ type ForkchoiceUpdatedResponse struct {
// to an execution client's engine API.
type ExecutionPayloadReconstructor interface {
ReconstructFullBlock(
ctx context.Context, blindedBlock interfaces.SignedBeaconBlock,
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error)
ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.SignedBeaconBlock,
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
}
@@ -211,12 +211,13 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
defer cancel()
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapella{}
result := &pb.ExecutionPayloadCapellaWithValue{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
}
return blocks.WrappedExecutionPayloadCapella(result)
return blocks.WrappedExecutionPayloadCapella(result.Payload, big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value)))
}
result := &pb.ExecutionPayload{}
@@ -390,30 +391,27 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
numOfHashes := len(hashes)
elems := make([]gethRPC.BatchElem, 0, numOfHashes)
execBlks := make([]*pb.ExecutionBlock, 0, numOfHashes)
errs := make([]error, 0, numOfHashes)
if numOfHashes == 0 {
return execBlks, nil
}
for _, h := range hashes {
blk := &pb.ExecutionBlock{}
err := error(nil)
newH := h
elems = append(elems, gethRPC.BatchElem{
Method: ExecutionBlockByHashMethod,
Args: []interface{}{newH, withTxs},
Result: blk,
Error: err,
Error: error(nil),
})
execBlks = append(execBlks, blk)
errs = append(errs, err)
}
ioErr := s.rpcClient.BatchCall(elems)
if ioErr != nil {
return nil, ioErr
}
for _, e := range errs {
if e != nil {
return nil, handleRPCError(e)
for _, e := range elems {
if e.Error != nil {
return nil, handleRPCError(e.Error)
}
}
return execBlks, nil
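This rewrite leans on gethRPC.BatchCall's error model: a transport-level failure is returned by BatchCall itself, while per-request failures are written into each BatchElem's Error field. The removed errs slice could never observe those per-element failures, since only nil error values were ever appended to it. A minimal sketch of the pattern, with a hypothetical function name and method:

func firstBlockNumber(client *gethRPC.Client) (string, error) {
	var num string
	elems := []gethRPC.BatchElem{{Method: "eth_blockNumber", Result: &num}}
	if err := client.BatchCall(elems); err != nil {
		return "", err // I/O or encoding failure for the whole batch
	}
	if elems[0].Error != nil {
		return "", elems[0].Error // server-side failure for this single request
	}
	return num, nil
}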
@@ -442,7 +440,7 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
ctx context.Context, blindedBlock interfaces.SignedBeaconBlock,
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error) {
if err := blocks.BeaconBlockIsNil(blindedBlock); err != nil {
return nil, errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
@@ -493,7 +491,7 @@ func (s *Service) ReconstructFullBlock(
// ReconstructFullBellatrixBlockBatch takes in a batch of blinded beacon blocks and reconstructs
// them with a full execution payload for each block via the engine API.
func (s *Service) ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.SignedBeaconBlock,
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error) {
if len(blindedBlocks) == 0 {
return []interfaces.SignedBeaconBlock{}, nil
@@ -532,6 +530,7 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
// For each valid payload, we reconstruct the full block by combining it with
// the corresponding blinded block.
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
for sliceIdx, realIdx := range validExecPayloads {
b := execBlocks[sliceIdx]
if b == nil {
@@ -549,7 +548,7 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
if err != nil {
return nil, err
}
blindedBlocks[realIdx] = fullBlock
fullBlocks[realIdx] = fullBlock
}
// For blocks that are pre-merge we simply reconstruct them via an empty
// execution payload.
@@ -559,10 +558,10 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
if err != nil {
return nil, err
}
blindedBlocks[realIdx] = fullBlock
fullBlocks[realIdx] = fullBlock
}
reconstructedExecutionPayloadCount.Add(float64(len(blindedBlocks)))
return blindedBlocks, nil
return fullBlocks, nil
}
func fullPayloadFromExecutionBlock(
@@ -623,7 +622,7 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
})
}, big.NewInt(0)) // We can't get the block value here, and this code path doesn't need it
}
// Handles errors received from the RPC server according to the specification.
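A self-contained illustration of the value handling in GetPayload above, assuming (as the code implies) that the proto field carries the block value as little-endian bytes while big.Int.SetBytes expects big-endian; reverse stands in for bytesutil.ReverseByteOrder:

package main

import (
	"fmt"
	"math/big"
)

func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

func main() {
	le := []byte{0xff, 0x11} // little-endian encoding of 0x11ff
	v := new(big.Int).SetBytes(reverse(le))
	fmt.Println(v.Text(16)) // prints 11ff, matching the 0x11ff fixture in the tests below
}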

View File

@@ -11,9 +11,9 @@ import (
"reflect"
"testing"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
@@ -23,10 +23,10 @@ import (
func FuzzForkChoiceResponse(f *testing.F) {
valHash := common.Hash([32]byte{0xFF, 0x01})
payloadID := beacon.PayloadID([8]byte{0x01, 0xFF, 0xAA, 0x00, 0xEE, 0xFE, 0x00, 0x00})
payloadID := engine.PayloadID([8]byte{0x01, 0xFF, 0xAA, 0x00, 0xEE, 0xFE, 0x00, 0x00})
valErr := "asjajshjahsaj"
seed := &beacon.ForkChoiceResponse{
PayloadStatus: beacon.PayloadStatusV1{
seed := &engine.ForkChoiceResponse{
PayloadStatus: engine.PayloadStatusV1{
Status: "INVALID_TERMINAL_BLOCK",
LatestValidHash: &valHash,
ValidationError: &valErr,
@@ -37,7 +37,7 @@ func FuzzForkChoiceResponse(f *testing.F) {
assert.NoError(f, err)
f.Add(output)
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
gethResp := &beacon.ForkChoiceResponse{}
gethResp := &engine.ForkChoiceResponse{}
prysmResp := &execution.ForkchoiceUpdatedResponse{}
gethErr := json.Unmarshal(jsonBlob, gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -49,14 +49,14 @@ func FuzzForkChoiceResponse(f *testing.F) {
gethBlob, gethErr := json.Marshal(gethResp)
prysmBlob, prysmErr := json.Marshal(prysmResp)
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshalers return inconsistent errors")
newGethResp := &beacon.ForkChoiceResponse{}
newGethResp := &engine.ForkChoiceResponse{}
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
assert.NoError(t, newGethErr)
if newGethResp.PayloadStatus.Status == "UNKNOWN" {
return
}
newGethResp2 := &beacon.ForkChoiceResponse{}
newGethResp2 := &engine.ForkChoiceResponse{}
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
assert.NoError(t, newGethErr)
@@ -75,7 +75,7 @@ func FuzzForkChoiceResponse(f *testing.F) {
func FuzzExchangeTransitionConfiguration(f *testing.F) {
valHash := common.Hash([32]byte{0xFF, 0x01})
ttd := hexutil.Big(*big.NewInt(math.MaxInt))
seed := &beacon.TransitionConfigurationV1{
seed := &engine.TransitionConfigurationV1{
TerminalTotalDifficulty: &ttd,
TerminalBlockHash: valHash,
TerminalBlockNumber: hexutil.Uint64(math.MaxUint64),
@@ -85,7 +85,7 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
assert.NoError(f, err)
f.Add(output)
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
gethResp := &beacon.TransitionConfigurationV1{}
gethResp := &engine.TransitionConfigurationV1{}
prysmResp := &pb.TransitionConfiguration{}
gethErr := json.Unmarshal(jsonBlob, gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -103,11 +103,11 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
if gethErr != nil {
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
}
newGethResp := &beacon.TransitionConfigurationV1{}
newGethResp := &engine.TransitionConfigurationV1{}
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
assert.NoError(t, newGethErr)
newGethResp2 := &beacon.TransitionConfigurationV1{}
newGethResp2 := &engine.TransitionConfigurationV1{}
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
assert.NoError(t, newGethErr)
})
@@ -115,7 +115,7 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
func FuzzExecutionPayload(f *testing.F) {
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
execData := &beacon.ExecutableDataV1{
execData := &engine.ExecutableData{
ParentHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
FeeRecipient: common.Address([20]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}),
StateRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
@@ -135,7 +135,7 @@ func FuzzExecutionPayload(f *testing.F) {
assert.NoError(f, err)
f.Add(output)
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
gethResp := &beacon.ExecutableDataV1{}
gethResp := &engine.ExecutableData{}
prysmResp := &pb.ExecutionPayload{}
gethErr := json.Unmarshal(jsonBlob, gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
@@ -147,10 +147,10 @@ func FuzzExecutionPayload(f *testing.F) {
gethBlob, gethErr := json.Marshal(gethResp)
prysmBlob, prysmErr := json.Marshal(prysmResp)
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshalers return inconsistent errors")
newGethResp := &beacon.ExecutableDataV1{}
newGethResp := &engine.ExecutableData{}
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
assert.NoError(t, newGethErr)
newGethResp2 := &beacon.ExecutableDataV1{}
newGethResp2 := &engine.ExecutableData{}
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
assert.NoError(t, newGethErr)

View File

@@ -80,7 +80,7 @@ func TestClient_IPC(t *testing.T) {
require.DeepEqual(t, want, resPb)
})
t.Run(GetPayloadMethodV2, func(t *testing.T) {
want, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
want, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.ExecutionPayloadCapellaWithValue)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
@@ -125,7 +125,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
req, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, big.NewInt(0))
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
@@ -155,8 +155,6 @@ func TestClient_IPC(t *testing.T) {
}
func TestClient_HTTP(t *testing.T) {
t.Skip("Skipping HTTP test to support Capella devnet-3")
ctx := context.Background()
fix := fixtures()
@@ -211,7 +209,7 @@ func TestClient_HTTP(t *testing.T) {
})
t.Run(GetPayloadMethodV2, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
want, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.GetPayloadV2ResponseJson)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
@@ -251,7 +249,17 @@ func TestClient_HTTP(t *testing.T) {
require.NoError(t, err)
pb, err := resp.PbCapella()
require.NoError(t, err)
require.DeepEqual(t, want, pb)
require.DeepEqual(t, want.ExecutionPayload.BlockHash.Bytes(), pb.BlockHash)
require.DeepEqual(t, want.ExecutionPayload.StateRoot.Bytes(), pb.StateRoot)
require.DeepEqual(t, want.ExecutionPayload.ParentHash.Bytes(), pb.ParentHash)
require.DeepEqual(t, want.ExecutionPayload.FeeRecipient.Bytes(), pb.FeeRecipient)
require.DeepEqual(t, want.ExecutionPayload.PrevRandao.Bytes(), pb.PrevRandao)
require.DeepEqual(t, want.ExecutionPayload.ParentHash.Bytes(), pb.ParentHash)
v, err := resp.Value()
require.NoError(t, err)
wantedValue := []byte{17, 255} // 0x11ff
require.DeepEqual(t, wantedValue, v.Bytes())
})
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
@@ -415,7 +423,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.NoError(t, err)
@@ -443,7 +451,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
@@ -471,7 +479,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
@@ -499,7 +507,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
@@ -743,7 +751,7 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) {
t.Run("nil block", func(t *testing.T) {
service := &Service{}
_, err := service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.SignedBeaconBlock{nil})
_, err := service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{nil})
require.ErrorContains(t, "nil data", err)
})
t.Run("only blinded block", func(t *testing.T) {
@@ -752,7 +760,7 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) {
bellatrixBlock := util.NewBeaconBlockBellatrix()
wrapped, err := blocks.NewSignedBeaconBlock(bellatrixBlock)
require.NoError(t, err)
_, err = service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.SignedBeaconBlock{wrapped})
_, err = service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wrapped})
require.ErrorContains(t, want, err)
})
t.Run("pre-merge execution payload", func(t *testing.T) {
@@ -767,7 +775,7 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) {
require.NoError(t, err)
wantedWrapped, err := blocks.NewSignedBeaconBlock(wanted)
require.NoError(t, err)
reconstructed, err := service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.SignedBeaconBlock{wrapped})
reconstructed, err := service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wrapped})
require.NoError(t, err)
require.DeepEqual(t, []interfaces.SignedBeaconBlock{wantedWrapped}, reconstructed)
})
@@ -864,7 +872,7 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) {
copiedWrapped, err := wrapped.Copy()
require.NoError(t, err)
reconstructed, err := service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.SignedBeaconBlock{wrappedEmpty, wrapped, copiedWrapped})
reconstructed, err := service.ReconstructFullBellatrixBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wrappedEmpty, wrapped, copiedWrapped})
require.NoError(t, err)
// Make sure empty blocks are handled correctly
@@ -1297,6 +1305,26 @@ func fixtures() map[string]interface{} {
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
}
hexUint := hexutil.Uint64(1)
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadCapellaJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
},
BlockValue: "0x11ff",
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
@@ -1392,6 +1420,7 @@ func fixtures() map[string]interface{} {
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
"ExecutionPayloadCapellaWithValue": executionPayloadWithValueFixtureCapella,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
@@ -1577,9 +1606,9 @@ func (*testEngineService) GetPayloadV1(
func (*testEngineService) GetPayloadV2(
_ context.Context, _ pb.PayloadIDBytes,
) *pb.ExecutionPayloadCapella {
) *pb.ExecutionPayloadCapellaWithValue {
fix := fixtures()
item, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
item, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.ExecutionPayloadCapellaWithValue)
if !ok {
panic("not found")
}

View File

@@ -115,7 +115,7 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
if err != nil {
return nil, err
}
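The switch to gethRPC.DialOptions threads the caller's context through dialing and supplies the custom HTTP client as a functional option. A hedged usage sketch, assuming imports of context, net/http, time, and gethRPC "github.com/ethereum/go-ethereum/rpc"; the function name, URL, and timeout are illustrative:

func dialEngine(ctx context.Context, url string) (*gethRPC.Client, error) {
	httpClient := &http.Client{Timeout: 10 * time.Second}
	return gethRPC.DialOptions(ctx, url, gethRPC.WithHTTPClient(httpClient))
}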

View File

@@ -413,29 +413,27 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*types.Hea
requestRange := (endBlock - startBlock) + 1
elems := make([]gethRPC.BatchElem, 0, requestRange)
headers := make([]*types.HeaderInfo, 0, requestRange)
errs := make([]error, 0, requestRange)
if requestRange == 0 {
return headers, nil
}
for i := startBlock; i <= endBlock; i++ {
header := &types.HeaderInfo{}
err := error(nil)
elems = append(elems, gethRPC.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
Result: header,
Error: err,
Error: error(nil),
})
headers = append(headers, header)
errs = append(errs, err)
}
ioErr := s.rpcClient.BatchCall(elems)
if ioErr != nil {
return nil, ioErr
}
for _, e := range errs {
if e != nil {
return nil, e
for _, e := range elems {
if e.Error != nil {
return nil, e.Error
}
}
for _, h := range headers {
@@ -667,6 +665,12 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
for i := start; i < end; i += batchSize {
startReq := i
endReq := i + batchSize
if endReq > 0 {
// Reduce the end request by one
// to prevent total batch size from exceeding
// the allotted limit.
endReq -= 1
}
if endReq > end {
endReq = end
}
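A worked example of the off-by-one guard above, extracted into a hypothetical standalone helper: with a batch size of 1000 and start = 1000, the inclusive range must stop at 1999, not 2000.

func batchWindow(start, batchSize, end uint64) (startReq, endReq uint64) {
	endReq = start + batchSize
	if endReq > 0 {
		endReq-- // keep the inclusive range start..endReq within the limit
	}
	if endReq > end {
		endReq = end
	}
	return start, endReq
}

// batchWindow(1000, 1000, 3000) returns (1000, 1999): exactly 1000 headers, not 1001.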

View File

@@ -736,6 +736,9 @@ func TestService_CacheBlockHeaders(t *testing.T) {
assert.Equal(t, 1, rClient.numOfCalls)
// Reset Num of Calls
rClient.numOfCalls = 0
// Increase header request limit to trigger the batch limiting
// code path.
s.cfg.eth1HeaderReqLimit = 1001
assert.NoError(t, s.cacheBlockHeaders(1000, 3000))
// 1000 - 2000 would be 1001 headers which is higher than our request limit, it

View File

@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
clparams "github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
// defaultMinerAddress is used to send deposits and test transactions in the e2e test.
@@ -83,6 +84,15 @@ func GethTestnetGenesis(genesisTime uint64, cfg *clparams.BeaconChainConfig) cor
if !ok {
panic(fmt.Sprintf("unable to parse TerminalTotalDifficulty as an integer = %s", clparams.BeaconConfig().TerminalTotalDifficulty))
}
var shanghaiTime *uint64
if cfg.CapellaForkEpoch != math.MaxUint64 {
startSlot, err := slots.EpochStart(cfg.CapellaForkEpoch)
if err == nil {
startTime := slots.StartTime(genesisTime, startSlot)
newTime := uint64(startTime.Unix())
shanghaiTime = &newTime
}
}
cc := &params.ChainConfig{
ChainID: big.NewInt(defaultTestChainId),
HomesteadBlock: bigz,
@@ -106,6 +116,7 @@ func GethTestnetGenesis(genesisTime uint64, cfg *clparams.BeaconChainConfig) cor
Period: cfg.SecondsPerETH1Block,
Epoch: 20000,
},
ShanghaiTime: shanghaiTime,
}
da := defaultDepositContractAllocation(cfg.DepositContractAddress)
ma := minerAllocation()
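The Shanghai timestamp above reduces to epoch-to-slot-to-time arithmetic; a sketch with mainnet-style constants (32 slots per epoch, 12-second slots; the e2e config may use different values):

func shanghaiTimestamp(genesisTime, capellaForkEpoch uint64) uint64 {
	const slotsPerEpoch, secondsPerSlot = 32, 12
	startSlot := capellaForkEpoch * slotsPerEpoch // as in slots.EpochStart
	return genesisTime + startSlot*secondsPerSlot // as in slots.StartTime, taken as a Unix time
}

// e.g. shanghaiTimestamp(1_600_000_000, 2) = 1_600_000_000 + 64*12 = 1_600_000_768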

View File

@@ -38,6 +38,7 @@ type EngineClient struct {
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
BlockValue *big.Int
}
// NewPayload --
@@ -58,7 +59,7 @@ func (e *EngineClient) ForkchoiceUpdated(
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella)
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, e.BlockValue)
}
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
if err != nil {
@@ -88,7 +89,7 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash, _
// ReconstructFullBlock --
func (e *EngineClient) ReconstructFullBlock(
_ context.Context, blindedBlock interfaces.SignedBeaconBlock,
_ context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error) {
if !blindedBlock.Block().IsBlinded() {
return nil, errors.New("block must be blinded")
@@ -107,7 +108,7 @@ func (e *EngineClient) ReconstructFullBlock(
// ReconstructFullBellatrixBlockBatch --
func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.SignedBeaconBlock,
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error) {
fullBlocks := make([]interfaces.SignedBeaconBlock, 0, len(blindedBlocks))
for _, b := range blindedBlocks {

View File

@@ -11,6 +11,7 @@ go_library(
"on_tick.go",
"optimistic_sync.go",
"proposer_boost.go",
"reorg_late_blocks.go",
"store.go",
"types.go",
"unrealized_justification.go",
@@ -53,6 +54,7 @@ go_test(
"on_tick_test.go",
"optimistic_sync_test.go",
"proposer_boost_test.go",
"reorg_late_blocks_test.go",
"store_test.go",
"unrealized_justification_test.go",
"vote_test.go",

View File

@@ -12,12 +12,12 @@ import (
)
func TestFFGUpdates_OneBranch(t *testing.T) {
balances := []uint64{1, 1}
f := setup(0, 0)
f.justifiedBalances = []uint64{1, 1}
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), balances)
r, err := f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -47,7 +47,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2
// |
// 3 <- head
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 0")
@@ -61,7 +61,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 3
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(1), Epoch: 1}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(0), Epoch: 0}
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -74,17 +74,17 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// |
// 3 <- head
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(3), Epoch: 2}
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2")
}
func TestFFGUpdates_TwoBranches(t *testing.T) {
balances := []uint64{1, 1}
f := setup(0, 0)
f.justifiedBalances = []uint64{1, 1}
ctx := context.Background()
r, err := f.Head(context.Background(), balances)
r, err := f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
@@ -145,7 +145,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
@@ -175,7 +175,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// head -> 9 10
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head with justified epoch at 0")
@@ -205,12 +205,12 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 1")
}
@@ -225,6 +225,7 @@ func setup(justifiedEpoch, finalizedEpoch primitives.Epoch) *ForkChoice {
if err != nil {
return nil
}
f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return f.justifiedBalances, nil })
err = f.InsertNode(ctx, state, blkRoot)
if err != nil {
return nil

View File

@@ -46,8 +46,6 @@ func New() *ForkChoice {
// NodeCount returns the current number of nodes in the Store.
func (f *ForkChoice) NodeCount() int {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
return len(f.store.nodeByRoot)
}
@@ -55,25 +53,17 @@ func (f *ForkChoice) NodeCount() int {
// It first computes the validators' balance changes, then recalculates the block tree from leaves to root.
func (f *ForkChoice) Head(
ctx context.Context,
justifiedStateBalances []uint64,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Head")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
calledHeadCount.Inc()
// Using the write lock here because subsequent calls to `updateBalances`, `applyProposerBoostScore`,
// `applyWeightChanges`, `updateBestDescendant`, and `head` require write operations on nodes.
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
if err := f.updateBalances(justifiedStateBalances); err != nil {
if err := f.updateBalances(); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update balances")
}
if err := f.store.applyProposerBoostScore(justifiedStateBalances); err != nil {
if err := f.applyProposerBoostScore(); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
}
@@ -95,8 +85,6 @@ func (f *ForkChoice) Head(
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch primitives.Epoch) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
for _, index := range validatorIndices {
// Validator indices will grow the vote cache.
@@ -162,87 +150,80 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
// updateCheckpoints updates the checkpoints when inserting a new node.
func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkpoint) error {
f.store.checkpointsLock.Lock()
if jc.Epoch > f.store.justifiedCheckpoint.Epoch {
if jc.Epoch > f.store.bestJustifiedCheckpoint.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
}
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
} else {
currentJcp := f.store.justifiedCheckpoint
currentRoot := currentJcp.Root
if currentRoot == params.BeaconConfig().ZeroHash {
currentRoot = f.store.originRoot
}
jSlot, err := slots.EpochStart(currentJcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
// Release the checkpoints lock here because
// AncestorRoot acquires a lock on nodes, and holding
// both could cause a lock-ordering deadlock.
f.store.checkpointsLock.Unlock()
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
return err
}
f.store.checkpointsLock.Lock()
if root == currentRoot {
if !features.Get().EnableDefensivePull {
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
root := bytesutil.ToBytes32(jc.Root)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: jcRoot}
Root: root}
if err := f.updateJustifiedBalances(ctx, root); err != nil {
return errors.Wrap(err, "could not update justified balances")
}
} else {
currentJcp := f.store.justifiedCheckpoint
currentRoot := currentJcp.Root
if currentRoot == params.BeaconConfig().ZeroHash {
currentRoot = f.store.originRoot
}
jSlot, err := slots.EpochStart(currentJcp.Epoch)
if err != nil {
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
return err
}
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: jcRoot}
if err := f.updateJustifiedBalances(ctx, jcRoot); err != nil {
return errors.Wrap(err, "could not update justified balances")
}
}
}
} else {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
jcRoot := bytesutil.ToBytes32(jc.Root)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jcRoot}
if err := f.updateJustifiedBalances(ctx, jcRoot); err != nil {
return errors.Wrap(err, "could not update justified balances")
}
}
}
// Update finalization
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
f.store.checkpointsLock.Unlock()
return nil
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
Root: bytesutil.ToBytes32(fc.Root)}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
f.store.checkpointsLock.Unlock()
if !features.Get().EnableDefensivePull {
root := bytesutil.ToBytes32(jc.Root)
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: root}
if err := f.updateJustifiedBalances(ctx, root); err != nil {
return errors.Wrap(err, "could not update justified balances")
}
}
return f.store.prune(ctx)
}
// HasNode returns true if the node exists in the fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
_, ok := f.store.nodeByRoot[root]
return ok
}
// HasParent returns true if the node's parent exists in the fork choice store,
// false otherwise.
func (f *ForkChoice) HasParent(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false
}
return node.parent != nil
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false
@@ -262,8 +243,9 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
if f.store.allTipsAreInvalid {
return true, nil
}
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
@@ -278,9 +260,6 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
@@ -302,9 +281,10 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
}
// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes. This function requires a lock in Store.nodesLock
// and votesLock
func (f *ForkChoice) updateBalances(newBalances []uint64) error {
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
newBalances := f.justifiedBalances
for index, vote := range f.votes {
// Skip if validator has been slashed
if f.store.slashedIndices[primitives.ValidatorIndex(index)] {
@@ -347,7 +327,6 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
return errors.Wrap(ErrNilNode, "could not update balances")
}
if currentNode.balance < oldBalance {
f.store.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
"oldBalance": oldBalance,
@@ -357,7 +336,6 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
"previousProposerBoostScore": f.store.previousProposerBoostScore,
}).Warning("node with invalid balance, setting it to zero")
f.store.proposerBoostLock.RUnlock()
currentNode.balance = 0
} else {
currentNode.balance -= oldBalance
@@ -385,8 +363,6 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
// SetOptimisticToValid sets the node with the given root as a fully validated node
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set node to valid")
@@ -396,29 +372,21 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams
// BestJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) BestJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.bestJustifiedCheckpoint
}
// PreviousJustifiedCheckpoint of fork choice store.
func (f *ForkChoice) PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.prevJustifiedCheckpoint
}
// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.justifiedCheckpoint
}
// FinalizedCheckpoint of fork choice store.
func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.finalizedCheckpoint
}
@@ -431,11 +399,6 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.ValidatorIndex) {
f.votesLock.RLock()
defer f.votesLock.RUnlock()
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// return early if the index was already included:
if f.store.slashedIndices[index] {
return
@@ -465,17 +428,15 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
}
// UpdateJustifiedCheckpoint sets the justified checkpoint to the given one
func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) error {
func (f *ForkChoice) UpdateJustifiedCheckpoint(ctx context.Context, jc *forkchoicetypes.Checkpoint) error {
if jc == nil {
return errInvalidNilCheckpoint
}
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = jc
bj := f.store.bestJustifiedCheckpoint
if bj == nil || bj.Root == params.BeaconConfig().ZeroHash || jc.Epoch > bj.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
if err := f.updateJustifiedBalances(ctx, jc.Root); err != nil {
return errors.Wrap(err, "could not update justified balances")
}
return nil
}
@@ -485,8 +446,6 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
if fc == nil {
return errInvalidNilCheckpoint
}
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.finalizedCheckpoint = fc
return nil
}
@@ -496,9 +455,6 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
@@ -545,7 +501,7 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
// number). All blocks are assumed to form a strict chain
// where blocks[i].Parent = blocks[i+1]. We also assume that the parent of the
// last block in this list is already included in the forkchoice store.
func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
if len(chain) == 0 {
return nil
}
@@ -581,8 +537,6 @@ func (f *ForkChoice) SetOriginRoot(root [32]byte) {
// CachedHeadRoot returns the last cached head root
func (f *ForkChoice) CachedHeadRoot() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node := f.store.headNode
if node == nil {
return [32]byte{}
@@ -592,8 +546,6 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.FinalizedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
@@ -605,8 +557,6 @@ func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
root := f.JustifiedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
@@ -672,11 +622,28 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.weight, nil
}
// updateJustifiedBalances updates the validator balances at the justified checkpoint pointed to by root.
func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte) error {
balances, err := f.balancesByRoot(ctx, root)
if err != nil {
return errors.Wrap(err, "could not get justified balances")
}
f.justifiedBalances = balances
f.store.committeeWeight = 0
f.numActiveValidators = 0
for _, val := range balances {
if val > 0 {
f.store.committeeWeight += val
f.numActiveValidators++
}
}
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
return nil
}
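A worked example of the computation above, mirroring TestForkchoice_UpdateJustifiedBalances below: only non-zero balances count as active, and the committee weight is the total active stake divided by slots per epoch.

func committeeWeight(balances []uint64, slotsPerEpoch uint64) (weight, active uint64) {
	for _, b := range balances {
		if b > 0 {
			weight += b
			active++
		}
	}
	return weight / slotsPerEpoch, active // integer division, as in the store
}

// committeeWeight([]uint64{10, 0, 0, 40, 50, 60, 0, 80, 90, 100}, 32)
// returns (13, 7): 430 active stake over 32 slots truncates to 13.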

View File

@@ -82,7 +82,8 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
// Each node gets one unique vote. The weight should look like 103 <- 102 <- 101 because
// they get propagated back.
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
s := f.store
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
@@ -113,7 +114,8 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
{indexToHash(3), indexToHash(3), 0},
}
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
@@ -143,7 +145,8 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
{indexToHash(3), indexToHash(3), 0},
}
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
@@ -202,7 +205,6 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
f.store.nodesLock.Lock()
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
@@ -210,7 +212,6 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
f.store.nodesLock.Unlock()
r1 := [32]byte{'1'}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: r1}
@@ -297,7 +298,7 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
st, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
head, err := f.Head(ctx, []uint64{})
head, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, head)
@@ -308,21 +309,23 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
head, err = f.Head(ctx, []uint64{})
head, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
// Insert two attestations for block b and one for c; b becomes head
f.ProcessAttestation(ctx, []uint64{1, 2}, [32]byte{'b'}, 1)
f.ProcessAttestation(ctx, []uint64{3}, [32]byte{'c'}, 1)
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, head)
// Process b's slashing, c is now head
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
@@ -331,7 +334,8 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
// Process b's slashing again, should be a noop
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
@@ -351,11 +355,12 @@ func indexToHash(i uint64) [32]byte {
func TestForkChoice_UpdateJustifiedAndFinalizedCheckpoints(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
jr := [32]byte{'j'}
fr := [32]byte{'f'}
jc := &forkchoicetypes.Checkpoint{Root: jr, Epoch: 3}
fc := &forkchoicetypes.Checkpoint{Root: fr, Epoch: 2}
require.NoError(t, f.UpdateJustifiedCheckpoint(jc))
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, jc))
require.NoError(t, f.UpdateFinalizedCheckpoint(fc))
require.Equal(t, f.store.justifiedCheckpoint.Epoch, jc.Epoch)
require.Equal(t, f.store.justifiedCheckpoint.Root, jc.Root)
@@ -573,7 +578,7 @@ func TestStore_CommonAncestor(t *testing.T) {
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}
func TestStore_InsertOptimisticChain(t *testing.T) {
func TestStore_InsertChain(t *testing.T) {
f := setup(1, 1)
blks := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
blk := util.NewBeaconBlock()
@@ -606,10 +611,10 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
for i := 0; i < len(blks); i++ {
args[i] = blks[10-i-1]
}
require.NoError(t, f.InsertOptimisticChain(context.Background(), args))
require.NoError(t, f.InsertChain(context.Background(), args))
f = setup(1, 1)
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
require.NoError(t, f.InsertChain(context.Background(), args[2:]))
}
func TestForkChoice_UpdateCheckpoints(t *testing.T) {
@@ -778,3 +783,16 @@ func TestWeight(t *testing.T) {
require.ErrorIs(t, err, ErrNilNode)
require.Equal(t, uint64(0), w)
}
func TestForkchoice_UpdateJustifiedBalances(t *testing.T) {
f := setup(0, 0)
balances := []uint64{10, 0, 0, 40, 50, 60, 0, 80, 90, 100}
f.balancesByRoot = func(context.Context, [32]byte) ([]uint64, error) {
return balances, nil
}
require.NoError(t, f.updateJustifiedBalances(context.Background(), [32]byte{}))
require.Equal(t, uint64(7), f.numActiveValidators)
require.Equal(t, uint64(430)/32, f.store.committeeWeight)
require.DeepEqual(t, balances, f.justifiedBalances)
}

View File

@@ -10,12 +10,12 @@ import (
)
func TestNoVote_CanFindHead(t *testing.T) {
balances := make([]uint64, 16)
f := setup(1, 1)
f.justifiedBalances = make([]uint64, 16)
ctx := context.Background()
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), balances)
r, err := f.Head(context.Background())
require.NoError(t, err)
if r != params.BeaconConfig().ZeroHash {
t.Errorf("Incorrect head with genesis")
@@ -28,7 +28,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -39,7 +39,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -52,7 +52,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -65,7 +65,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head with justified epoch at 1")
@@ -80,7 +80,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head with justified epoch at 2")
@@ -97,7 +97,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
r, err = f.Head(context.Background())
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head with justified epoch at 2")
}

View File

@@ -9,11 +9,19 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
// orphanLateBlockFirstThreshold is the number of seconds after which we
// consider a block to be late, and thus a candidate to being reorged.
const orphanLateBlockFirstThreshold = 4
// processAttestationsThreshold is the number of seconds after which we
// process attestations for the current slot
const processAttestationsThreshold = 10
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node. This function requires a lock
// in Store.nodesLock
// using the current balance stored in each node.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
@@ -34,7 +42,7 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
}
// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
@@ -54,7 +62,7 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch)
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
@@ -89,25 +97,21 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
// viableForHead returns true if the node is viable for head.
// Any node with a different finalized or justified epoch than
// the ones in the fork choice store should not be viable for head.
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) bool {
func (n *Node) viableForHead(justifiedEpoch, currentEpoch primitives.Epoch) bool {
justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
if features.Get().EnableDefensivePull && !justified && justifiedEpoch+1 == currentEpoch {
if n.unrealizedJustifiedEpoch+1 >= currentEpoch {
if n.unrealizedJustifiedEpoch+1 >= currentEpoch && n.justifiedEpoch+2 >= currentEpoch {
justified = true
}
if n.unrealizedFinalizedEpoch >= finalizedEpoch {
finalized = true
}
}
return justified && finalized
return justified
}
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) bool {
func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch) bool {
if n.bestDescendant == nil {
return n.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
return n.viableForHead(justifiedEpoch, currentEpoch)
}
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
@@ -127,6 +131,26 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
return n.parent.setNodeAndParentValidated(ctx)
}
// arrivedEarly returns whether this node was inserted before the first
// threshold to orphan a block.
// Note that genesisTime has seconds granularity, therefore we use a strict
// inequality (<) here. For example, a block that arrives 3.9999 seconds into
// the slot will have secs = 3 below.
func (n *Node) arrivedEarly(genesisTime uint64) (bool, error) {
secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
return secs < orphanLateBlockFirstThreshold, err
}
// arrivedAfterOrphanCheck returns whether this block was inserted after the
// intermediate checkpoint used to decide whether it is a candidate for being
// orphaned.
// Note that genesisTime has seconds granularity, therefore we use an
// inequality (>=) here. For example, a block that arrives 10.00001 seconds
// into the slot will have secs = 10 below.
func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
return secs >= processAttestationsThreshold, err
}
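Taken together, the two helpers partition a slot into three windows. A hypothetical sketch, assuming 12-second slots and a timestamp at or after the slot start (the real code goes through slots.SecondsSinceSlotStart, which also handles the error case):

func classify(slot, genesisTime, timestamp uint64) (early, afterOrphanCheck bool) {
	secs := timestamp - (genesisTime + slot*12) // seconds into the slot, truncated
	return secs < orphanLateBlockFirstThreshold, secs >= processAttestationsThreshold
}

// secs in [0, 4): arrived early, not an orphaning candidate; [4, 10): late;
// [10, 12): past the attestation-processing cutoff.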
// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
if ctx.Err() != nil {
@@ -166,21 +190,3 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
}
return nodes, nil
}
// VotedFraction returns the fraction of the committee that voted directly for
// this node.
func (f *ForkChoice) VotedFraction(root [32]byte) (uint64, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
// Avoid division by zero before a block is inserted.
if f.store.committeeBalance == 0 {
return 0, nil
}
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return 0, ErrNilNode
}
return node.balance * 100 / f.store.committeeBalance, nil
}

View File

@@ -27,8 +27,6 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
@@ -55,8 +53,6 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400
@@ -144,18 +140,16 @@ func TestNode_ViableForHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch primitives.Epoch
finalizedEpoch primitives.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
{&Node{}, 0, true},
{&Node{}, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, true},
}
for _, tc := range tests {
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch, 5)
got := tc.n.viableForHead(tc.justifiedEpoch, 5)
assert.Equal(t, tc.want, got)
}
}
@@ -179,10 +173,10 @@ func TestNode_LeadsToViableHead(t *testing.T) {
     require.NoError(t, err)
     require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-    require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3, 5))
-    require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3, 5))
-    require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3, 5))
-    require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3, 5))
+    require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
+    require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
+    require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
+    require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
 }

 func TestNode_SetFullyValidated(t *testing.T) {
@@ -262,44 +256,71 @@ func TestNode_SetFullyValidated(t *testing.T) {
     }
 }

-func TestStore_VotedFraction(t *testing.T) {
-    f := setup(1, 1)
+func TestNode_TimeStampsChecks(t *testing.T) {
+    f := setup(0, 0)
     ctx := context.Background()
-    state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
+    // early block
+    driftGenesisTime(f, 1, 1)
+    root := [32]byte{'a'}
+    f.justifiedBalances = []uint64{10}
+    state, blkRoot, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
     require.NoError(t, err)
     require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-    state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
+    headRoot, err := f.Head(ctx)
+    require.NoError(t, err)
+    require.Equal(t, root, headRoot)
+    early, err := f.store.headNode.arrivedEarly(f.store.genesisTime)
+    require.NoError(t, err)
+    require.Equal(t, true, early)
+    late, err := f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+    require.NoError(t, err)
+    require.Equal(t, false, late)
+    // late block
+    driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
+    root = [32]byte{'b'}
+    state, blkRoot, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
     require.NoError(t, err)
     require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+    headRoot, err = f.Head(ctx)
+    require.NoError(t, err)
+    require.Equal(t, root, headRoot)
+    early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+    require.NoError(t, err)
+    require.Equal(t, false, early)
+    late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+    require.NoError(t, err)
+    require.Equal(t, false, late)
-    // No division by zero error
-    vote, err := f.VotedFraction([32]byte{'b'})
+    // very late block
+    driftGenesisTime(f, 3, processAttestationsThreshold+1)
+    root = [32]byte{'c'}
+    state, blkRoot, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
     require.NoError(t, err)
-    require.Equal(t, uint64(0), vote)
+    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+    headRoot, err = f.Head(ctx)
+    require.NoError(t, err)
+    require.Equal(t, root, headRoot)
+    early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+    require.NoError(t, err)
+    require.Equal(t, false, early)
+    late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+    require.NoError(t, err)
+    require.Equal(t, true, late)
-    // Zero balance in the node
-    f.store.committeeBalance = 100 * params.BeaconConfig().MaxEffectiveBalance
-    vote, err = f.VotedFraction([32]byte{'b'})
+    // block from the future
+    root = [32]byte{'d'}
+    state, blkRoot, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
     require.NoError(t, err)
-    require.Equal(t, uint64(0), vote)
-    // Attestations are not counted until we process Head
-    balances := []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance}
-    _, err = f.Head(context.Background(), balances)
+    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+    headRoot, err = f.Head(ctx)
     require.NoError(t, err)
-    f.ProcessAttestation(context.Background(), []uint64{0, 1}, [32]byte{'b'}, 2)
-    vote, err = f.VotedFraction([32]byte{'b'})
-    require.NoError(t, err)
-    require.Equal(t, uint64(0), vote)
-    // After we call head the voted fraction is obtained.
-    _, err = f.Head(context.Background(), balances)
-    require.NoError(t, err)
-    vote, err = f.VotedFraction([32]byte{'b'})
-    require.NoError(t, err)
-    require.Equal(t, uint64(2), vote)
-    // Check for non-existent root
-    _, err = f.VotedFraction([32]byte{'c'})
-    require.ErrorIs(t, err, ErrNilNode)
+    require.Equal(t, root, headRoot)
+    early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+    require.ErrorContains(t, "invalid timestamp", err)
+    require.Equal(t, true, early)
+    late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+    require.ErrorContains(t, "invalid timestamp", err)
+    require.Equal(t, false, late)
 }


@@ -31,8 +31,10 @@ import (
 //    if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
 //        store.justified_checkpoint = store.best_justified_checkpoint
 func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
+    f.Lock()
+    defer f.Unlock()
     // Reset proposer boost root
-    if err := f.ResetBoostedProposerRoot(ctx); err != nil {
+    if err := f.resetBoostedProposerRoot(ctx); err != nil {
         return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
     }

@@ -42,11 +44,9 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
     }

     // Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
-    f.store.checkpointsLock.RLock()
     bjcp := f.store.bestJustifiedCheckpoint
     jcp := f.store.justifiedCheckpoint
     fcp := f.store.finalizedCheckpoint
-    f.store.checkpointsLock.RUnlock()
     if bjcp.Epoch > jcp.Epoch {
         finalizedSlot, err := slots.EpochStart(fcp.Epoch)
         if err != nil {
@@ -62,14 +62,17 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
             return err
         }
         if r == fcp.Root {
-            f.store.checkpointsLock.Lock()
             f.store.prevJustifiedCheckpoint = jcp
             f.store.justifiedCheckpoint = bjcp
-            f.store.checkpointsLock.Unlock()
+            if err := f.updateJustifiedBalances(ctx, bjcp.Root); err != nil {
+                log.Error("could not update justified balances")
+            }
         }
     }
     if !features.Get().DisablePullTips {
-        f.updateUnrealizedCheckpoints()
+        if err := f.updateUnrealizedCheckpoints(ctx); err != nil {
+            return errors.Wrap(err, "could not update unrealized checkpoints")
+        }
     }
     return f.store.prune(ctx)
 }
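The promotion in the middle of NewSlot boils down to one condition: best_justified must come from a later epoch than justified, and the block on its chain at the finalized slot must be the finalized root itself. A toy sketch of just that condition, using hypothetical local types rather than the store's real checkpoint structs:

package main

import "fmt"

type checkpoint struct {
	epoch uint64
	root  [32]byte
}

// shouldPromote mirrors the condition in NewSlot: promote best_justified over
// justified only if it is newer and its chain contains the finalized root at
// the finalized slot.
func shouldPromote(bjcp, jcp checkpoint, ancestorAtFinalizedSlot, finalizedRoot [32]byte) bool {
	return bjcp.epoch > jcp.epoch && ancestorAtFinalizedSlot == finalizedRoot
}

func main() {
	fin := [32]byte{'f'}
	fmt.Println(shouldPromote(checkpoint{epoch: 3}, checkpoint{epoch: 2}, fin, fin)) // true
	fmt.Println(shouldPromote(checkpoint{epoch: 2}, checkpoint{epoch: 2}, fin, fin)) // false: not newer
}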


@@ -85,7 +85,7 @@ func TestStore_NewSlot(t *testing.T) {
         require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // bad

         require.NoError(t, f.UpdateFinalizedCheckpoint(test.args.finalized))
-        require.NoError(t, f.UpdateJustifiedCheckpoint(test.args.justified))
+        require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, test.args.justified))
         f.store.bestJustifiedCheckpoint = test.args.bestJustified
         require.NoError(t, f.NewSlot(ctx, test.args.slot))


@@ -8,34 +8,28 @@ import (
 )

 func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [32]byte) ([][32]byte, error) {
-    s.nodesLock.RLock()
     invalidRoots := make([][32]byte, 0)
     node, ok := s.nodeByRoot[root]
     if !ok {
         node, ok = s.nodeByRoot[parentRoot]
         if !ok || node == nil {
-            s.nodesLock.RUnlock()
             return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
         }
         // return early if the parent is LVH
         if node.payloadHash == payloadHash {
-            s.nodesLock.RUnlock()
             return invalidRoots, nil
         }
     } else {
         if node == nil {
-            s.nodesLock.RUnlock()
             return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
         }
         if node.parent.root != parentRoot {
-            s.nodesLock.RUnlock()
             return invalidRoots, errInvalidParentRoot
         }
     }
     firstInvalid := node
     for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
         if ctx.Err() != nil {
-            s.nodesLock.RUnlock()
             return invalidRoots, ctx.Err()
         }
     }
@@ -44,20 +38,16 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
     if firstInvalid.parent == nil {
         // return early if the invalid node was not imported
         if node.root == parentRoot {
-            s.nodesLock.RUnlock()
             return invalidRoots, nil
         }
         firstInvalid = node
     }
-    s.nodesLock.RUnlock()
     return s.removeNode(ctx, firstInvalid)
 }

 // removeNode removes the node with the given root and all of its children
 // from the Fork Choice Store.
 func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
-    s.nodesLock.Lock()
-    defer s.nodesLock.Unlock()
     invalidRoots := make([][32]byte, 0)
     if node == nil {
@@ -96,7 +86,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
         }
     }
     invalidRoots = append(invalidRoots, node.root)
-    s.proposerBoostLock.Lock()
     if node.root == s.proposerBoostRoot {
         s.proposerBoostRoot = [32]byte{}
     }
@@ -104,15 +93,7 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
         s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
         s.previousProposerBoostScore = 0
     }
-    s.proposerBoostLock.Unlock()
     delete(s.nodeByRoot, node.root)
     delete(s.nodeByPayload, node.payloadHash)
     return invalidRoots, nil
 }

 // AllTipsAreInvalid returns true if no forkchoice tip is viable for head
 func (f *ForkChoice) AllTipsAreInvalid() bool {
-    f.store.nodesLock.RLock()
-    defer f.store.nodesLock.RUnlock()
     return f.store.allTipsAreInvalid
 }
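The loop in setOptimisticToInvalid climbs parent pointers until it finds the last descendant of the node carrying the last valid payload hash; that node and everything below it form the invalid subtree to remove. A toy model of the walk, assuming a stripped-down node with only a parent link and a payload hash:

package main

import "fmt"

// node is a simplified stand-in for the forkchoice Node type.
type node struct {
	parent      *node
	payloadHash [32]byte
}

// firstInvalid climbs from n toward the root and stops at the last node whose
// parent's payload hash is the last valid hash (lvh); that node is the highest
// invalid ancestor, mirroring the loop in setOptimisticToInvalid.
func firstInvalid(n *node, lvh [32]byte) *node {
	cur := n
	for cur.parent != nil && cur.parent.payloadHash != lvh {
		cur = cur.parent
	}
	return cur
}

func main() {
	a := &node{payloadHash: [32]byte{'A'}}            // valid (carries the LVH)
	b := &node{parent: a, payloadHash: [32]byte{'B'}} // first invalid ancestor
	c := &node{parent: b, payloadHash: [32]byte{'C'}} // node reported invalid
	fmt.Println(firstInvalid(c, [32]byte{'A'}) == b)  // true
}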


@@ -237,19 +237,15 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
     state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
     require.NoError(t, err)
     require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-    f.store.proposerBoostLock.Lock()
     f.store.proposerBoostRoot = [32]byte{'c'}
     f.store.previousProposerBoostScore = 10
     f.store.previousProposerBoostRoot = [32]byte{'b'}
-    f.store.proposerBoostLock.Unlock()

     _, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
     require.NoError(t, err)
-    f.store.proposerBoostLock.RLock()
     require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
     require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
     require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
-    f.store.proposerBoostLock.RUnlock()
 }

 // This is a regression test (10565)


@@ -2,42 +2,47 @@ package doublylinkedtree

 import (
     "context"
+    "fmt"

-    "github.com/pkg/errors"
     fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
     "github.com/prysmaticlabs/prysm/v3/config/params"
 )

-// ResetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
-func (f *ForkChoice) ResetBoostedProposerRoot(_ context.Context) error {
-    f.store.proposerBoostLock.Lock()
+// resetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
+func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
     f.store.proposerBoostRoot = [32]byte{}
-    f.store.proposerBoostLock.Unlock()
     return nil
 }

-// Given a list of validator balances, we compute the proposer boost score
-// that should be given to a proposer based on their committee weight, derived from
-// the total active balances, the size of a committee, and a boost score constant.
-// IMPORTANT: The caller MUST pass in a list of validator balances where balances > 0 refer to active
-// validators while balances == 0 are for inactive validators.
-func computeProposerBoostScore(validatorBalances []uint64) (score uint64, err error) {
-    totalActiveBalance := uint64(0)
-    numActive := uint64(0)
-    for _, balance := range validatorBalances {
-        // We only consider balances > 0. The input slice should be constructed
-        // as balance > 0 for all active validators and 0 for inactive ones.
-        if balance == 0 {
-            continue
+// applyProposerBoostScore applies the current proposer boost scores to the
+// relevant nodes.
+func (f *ForkChoice) applyProposerBoostScore() error {
+    s := f.store
+    proposerScore := uint64(0)
+    if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
+        previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
+        if !ok || previousNode == nil {
+            log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid prev root %#x", s.previousProposerBoostRoot))
+        } else {
+            previousNode.balance -= s.previousProposerBoostScore
         }
-        totalActiveBalance += balance
-        numActive += 1
     }
-    if numActive == 0 {
-        // Should never happen.
-        err = errors.New("no active validators")
-        return
+    if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
+        currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
+        if !ok || currentNode == nil {
+            log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid current root %#x", s.proposerBoostRoot))
+        } else {
+            proposerScore = (s.committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
+            currentNode.balance += proposerScore
+        }
     }
-    committeeWeight := totalActiveBalance / uint64(params.BeaconConfig().SlotsPerEpoch)
-    score = (committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
-    return
+    s.previousProposerBoostRoot = s.proposerBoostRoot
+    s.previousProposerBoostScore = proposerScore
+    return nil
 }

 // ProposerBoost of fork choice store.
 func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
     return s.proposerBoostRoot
 }
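For the boost magnitude itself, the new code reuses the same formula the removed computeProposerBoostScore implemented: committee weight is the total active balance divided by the slots per epoch, and the boost is a percentage of that weight (PROPOSER_SCORE_BOOST is 40 on mainnet per the consensus spec). A worked example with made-up balances:

package main

import "fmt"

func main() {
	// Hypothetical figures: 320k validators at 32 ETH each, expressed in Gwei.
	totalActiveBalance := uint64(320_000) * 32_000_000_000
	slotsPerEpoch := uint64(32)
	proposerScoreBoost := uint64(40) // mainnet PROPOSER_SCORE_BOOST

	// Average attesting weight per slot, then 40% of it as the boost.
	committeeWeight := totalActiveBalance / slotsPerEpoch
	score := committeeWeight * proposerScoreBoost / 100
	fmt.Println(committeeWeight) // 320000000000000 Gwei per slot
	fmt.Println(score)           // 128000000000000 Gwei of boost
}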
