Compare commits


62 Commits

Author SHA1 Message Date
terence tsao
fd37e6fe13 Clean up and refactor RPC validations 2024-11-27 16:23:33 -08:00
kasey
1707cf3ec7 http response handling improvements (#14673)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-11-27 22:13:45 +00:00
wangjingcun
bdbb850250 chore: fix 404 status URL (#14675)
Signed-off-by: wangjingcun <wangjingcun@aliyun.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-11-27 15:54:00 +00:00
Dhruv Bodani
b28b1ed6ce Add error count prom metric (#14670)
* add error count prom metric

* address review comments

* add comment for response writer

* update changelog
2024-11-27 11:56:07 +00:00
Sammy Rosso
74bb0821a8 Use slot to determine fork version (#14653)
* Use slot to determine version

* gaz

* solve cyclic dependency

* Radek' review

* unit test

* gaz

* use require instead of assert

* fix test

* fix test

* fix TestGetAggregateAttestation

* fix ListAttestations test

* James' review

* Radek' review

* add extra checks to GetAttesterSlashingsV2

* fix matchingAtts

* improve tests + fix

* fix

* stop appending all non electra atts

* more tests

* changelog

* revert TestProduceSyncCommitteeContribution changes

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
2024-11-26 22:52:58 +00:00
terence
8025a483e2 Remove kzg proof check for blob reconstructor (#14671) 2024-11-26 19:36:42 +00:00
Manu NALEPA
0475631543 Improve connection/disconnection logging. (#14665)
* Improve disconnection logs.

* Update beacon-chain/p2p/handshake.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Address Sammy's comment.

* Update beacon-chain/p2p/handshake.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Fix Sammy's comment.

* Fix Sammy's comment.

* `MockPeerManager`: Stop mixing value and pointer receivers (deepsource).

* Remove unused parameters (deepsource)

* Fix receiver names (deepsource)

* Change not after into before (deepsource)

* Update beacon-chain/p2p/handshake.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/p2p/peers/status.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-11-26 17:53:27 +00:00
Potuz
f27092fa91 Check if validator exists when applying pending deposit (#14666)
* Check if validator exists when applying pending deposit

* Add test TestProcessPendingDepositsMultiplesSameDeposits

* keep a map of added pubkeys

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2024-11-25 20:31:02 +00:00
Radosław Kapka
67cef41cbf Better attestation packing for Electra (#14534)
* Better attestation packing for Electra

* changelog <3

* bzl

* sort before constructing on-chain aggregates

* move ctx to top

* extract Electra logic and add comments

* benchmark
2024-11-25 18:41:51 +00:00
Manu NALEPA
258908d50e Diverse log improvements, comment additions and small refactors. (#14658)
* `logProposedBlock`: Fix log.

Before, the value of the pointer to the function were printed for `blockNumber`
instead of the block number itself.

* Add blob prefix before sidecars.

In order to prepare for data columns sidecars.

* Verification: Add log prefix.

* `validate_aggregate_proof.go`: Add comments.

* `blobSubscriber`: Fix error message.

* `registerHandlers`: Rename, add comments and little refactor.

* Remove duplicate `pb` vs. `ethpb` import.

* `rpc_ping.go`: Factorize / Add comments.

* `blobSidecarsByRangeRPCHandler`: Do not write error response if rate limited.

* `sendRecentBeaconBlocksRequest` ==> `sendBeaconBlocksRequest`.

The function itself does not know anything about the age of the beacon block.

* `beaconBlocksByRangeRPCHandler`: Refactor and add logs.

* `retentionSeconds` ==> `retentionDuration`.

* `oneEpoch`: Add documentation.

* `TestProposer_ProposeBlock_OK`: Improve error message.

* `getLocalPayloadFromEngine`: Tiny refactor.

* `eth1DataMajorityVote`: Improve log message.

* Implement `ConvertPeerIDToNodeID`and do note generate random private key if peerDAS is enabled.

* Remove useless `_`.

* `parsePeersEnr`: Fix error mesages.

* `ShouldOverrideFCU`: Fix error message.

* `blocks.go`: Minor comments improvements.

* CI: Upgrade golanci and enable spancheck.

* `ConvertPeerIDToNodeID`: Add godoc comment.

* Update CHANGELOG.md

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/initial-sync/service_test.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/rpc_beacon_blocks_by_range.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/rpc_blob_sidecars_by_range.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update beacon-chain/sync/rpc_ping.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Remove trailing whitespace in godoc.

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-11-25 09:22:33 +00:00
Manu NALEPA
415a42a4aa Add proto for DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarsByRangeRequest and MetadataV2. (#14649)
* Add data column sidecars proto.

* Fix Terence's comment.

* Re-add everything.
2024-11-22 09:50:06 +00:00
kasey
25eae3acda Fix eventstream electra atts (#14655)
* fix handler for electra atts

* same fix for attestation_slashing

* changelog

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-11-22 03:04:00 +00:00
Rupam Dey
956d9d108c Update light-client consensus types (#14652)
* update diff

* deps

* changelog

* remove `SetNextSyncCommitteeBranchElectra`
2024-11-21 12:28:44 +00:00
Sammy Rosso
c285715f9f Add missing Eth-Consensus-Version headers (#14647)
* add missing Eth-Consensus-Version headers

* changelog

* fix header return value
2024-11-20 22:16:33 +00:00
james-prysm
9382ae736d validator REST: attestation v2 (#14633)
* wip

* fixing tests

* adding unit tests

* fixing tests

* adding back v1 usage

* changelog

* rolling back test and adding placeholder

* adding electra tests

* adding attestation nil check based on review

* reduce code duplication

* linting

* fixing tests

* based on sammy review

* radek feedback

* adding fall back for pre electra and updated tests

* fixing api calls and associated tests

* gaz

* Update validator/client/beacon-api/propose_attestation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* review feedback

* add missing fallback

* fixing tests

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-11-20 17:13:57 +00:00
Radosław Kapka
f16ff45a6b Update light client protobufs (#14650)
* Update light client protobufs

* changelog <3
2024-11-20 14:47:54 +00:00
kasey
8d6577be84 defer payload attribute computation (#14644)
* defer payload attribute computation

* fire payload event on skipped slots

* changelog

* fix test and missing version attr

* fix lint

* deepsource

* mv head block lookup for missed slots to streamer

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-11-19 16:49:52 +00:00
james-prysm
9de75b5376 reorganizing p2p and backfill service registration for consistency (#14640)
* reorganizing for consistency

* Update beacon-chain/node/node.go

Co-authored-by: kasey <489222+kasey@users.noreply.github.com>

* kasey's feedback

---------

Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
2024-11-19 16:29:59 +00:00
james-prysm
a7ba11df37 adding nil checks on attestation interface (#14638)
* adding nil checks on interface

* changelog

* add linting

* adding missed checks

* review feedback

* attestation bits should not be in nil check

* fixing nil checks

* simplifying function

* fixing some missed items

* more missed items

* fixing more tests

* reverting some changes and fixing more tests

* adding in source check back in

* missed test

* sammy's review

* radek feedback
2024-11-18 17:51:17 +00:00
Stefano
00aeea3656 feat(issue-12348): add validator index label to validator_statuses me… (#14473)
* feat(issue-12348): add validator index label to validator_statuses metric

* fix: epochDuties added label on emission of metric

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-11-18 16:35:05 +00:00
james-prysm
9dbf979e77 move get data after nil check for attestations (#14642)
* move getData to after validations

* changelog
2024-11-15 18:28:35 +00:00
james-prysm
be60504512 Validator REST api: adding in check for empty keys changed (#14637)
* adding in check for empty keys changed

* changelog

* kasey feedback

* fixing unit tests

* Update CHANGELOG.md

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-11-13 16:09:11 +00:00
james-prysm
1857496159 Electra: unskipping merkle spec tests: (#14635)
* unskipping spec tests

* changelog
2024-11-12 15:41:44 +00:00
Justin Traglia
ccf61e1700 Rename remaining "deposit receipt" to "deposit request" (#14629)
* Rename remaining "deposit receipt" to "deposit request"

* Add changelog entry

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-11-08 21:15:43 +00:00
Justin Traglia
4edbd2f9ef Remove outdated spectest exclusions for EIP-6110 (#14630)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-11-08 20:41:02 +00:00
james-prysm
5179af1438 validator REST API: block v2 and Electra support (#14623)
* adding electra to validator client rest for get and post, also migrates to use the v2 endpoints

* changelog

* fixing test

* fixing linting
2024-11-08 18:24:51 +00:00
Sammy Rosso
c0f9689e30 Add POST /eth/v2/beacon/pool/attestations endpoint (#14621)
* modify v1 and add v2

* test

* changelog

* small fixes

* fix tests

* simplify functions + remove duplication

* Radek' review + group V2 tests

* better errors

* fix tests
2024-11-08 11:33:27 +00:00
Sammy Rosso
ff8240a04f Add /eth/v2/validator/aggregate_attestation (#14481)
* add endpoint

* changelog

* fix tests

* fix endpoint

* remove useless broken code

* review + fix endpoint

* gaz

* fix aggregate selection proof test

* fixes

* new way of aggregating

* nit

* fix part of the tests

* fix tests

* cleanup

* fix AggSelectionProof test

* tests

* v1 tests

* v2 tests

* commiittee bits

---------

Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-11-07 13:34:18 +00:00
Nishant Das
847498c648 Optimize Message ID Computation (#14591)
* Cast to String Without Allocating

* Make it its own method

* Changelog

* Gosec

* Add benchmark, fuzz test, and @kasey's implementation.

* Gosec

* Fix benchmark test names

* Kasey's Suggestion

* Radek's Suggestion

---------

Co-authored-by: Preston Van Loon <preston@pvl.dev>
2024-11-07 12:54:58 +00:00
Jun Song
2633684339 Use GetBlockAttestationV2 at handler (#14624) 2024-11-07 05:52:53 +00:00
terence
ab3f1963e2 Return early blob constructor if not deneb (#14605)
* Return early blob constructor if not deneb

* Update CHANGELOG.md

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Remove test

* Remove space

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-11-06 15:09:20 +00:00
Justin Traglia
b87d02eeb3 Fix various small things in state-native code (#14604)
* Add nil checks in AppendPending*() functions

* Import errors

* Run goimports

* Move PendingDeposit.Amount to right spot

* Rename DequeuePartialWithdrawals to DequeuePendingPartialWithdrawals

* Remove parans from errNotSupported arg

* In electraField, move LatestExecutionPayloadHeader

* Add changelog entry

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-11-05 16:07:40 +00:00
Cam Sweeney
bcb4155523 prevent panic by returning on connection error (#14602)
* prevent panic by returning on connection error

* add test

* don't close eventschannel on error

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-11-04 21:10:58 +00:00
Preston Van Loon
77f10b9e0e Benchmark process slots (#14616)
* Benchmark process slots

* Update changelog
2024-11-04 16:27:07 +00:00
Nishant Das
928b707ef1 Rollback Block With Context Deadline (#14608)
* Rollback deadline

* Fix it

* Preston's Suggestion
2024-11-04 15:47:30 +00:00
Nishant Das
91c15247e5 Allow Protobuf State To Be Created Without Copying (#14613)
* Read raw validator registry

* Changelog

* Radek's Suggestion

* Add Nil Check
2024-11-04 15:09:15 +00:00
Potuz
5ef5b65ffe Blocks after capella are execution (#14614)
* Blocks after capella are execution

* fix test
2024-11-04 13:59:41 +00:00
Nishant Das
9ae97786c5 Build Proto State Object Once (#14612)
* Call it Once

* Changelog
2024-11-04 09:21:34 +00:00
Potuz
66d1bb54f6 Change the signature of ProcessPayload (#14610) 2024-11-03 14:37:52 +00:00
Potuz
4d98049054 Use ROBlock earlier in the pipeline (#14609) 2024-11-03 11:04:12 +00:00
Justin Traglia
d5ff25b59d Fix order & add slashed=false to new validator instances (#14595)
* Fix order & add slashed=false to new validator instances

* Add a line to the changelog

* Update godocs
2024-11-02 15:35:40 +00:00
james-prysm
a265cf08fa adding in a check to make sure duplicates are now allowed (#14601) 2024-11-01 19:11:12 +00:00
james-prysm
f2ade3caff Remove validator count log (#14600)
* remove validator count API call and update logs

* fixing test

* changelog

* removing unused function

* gaz

* casing

* fixing more tests
2024-11-01 16:58:29 +00:00
james-prysm
e6ffc0701e Electra: exclude empty requests in requests list (#14580)
* implementing new decisions around exectuion requests

* fixing test fixture

* adding in more edge case checks and tests

* changelog

* fixing unsafe type cast

* Update beacon-chain/execution/engine_client_test.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update proto/engine/v1/electra.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update proto/engine/v1/electra.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update proto/engine/v1/electra.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update proto/engine/v1/electra.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update proto/engine/v1/electra_test.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update proto/engine/v1/electra_test.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* updating based on preston's feedback and adding more tests for edge cases

* adding more edgecases, and unit tests

* fixing tests

* missed some export changes

* adding more tests

* Update proto/engine/v1/electra.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* reducing complexity of function based on feedback

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2024-10-31 19:12:23 +00:00
james-prysm
61c296e075 eip7251: Bugfix and more withdrawal tests (#14578)
* addressing bug with withdrawals for devnet 5

* changelog

* fixing if statement check

* adding test

* terence's review comments

* attempting to fix weird comment formatting

* moving back more comments

* Update CHANGELOG.md

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-10-31 17:02:22 +00:00
Sammy Rosso
f264680739 Simplify ExitedValidatorIndices (#14587)
* fix

* add to comment

* modify test

* remove unused parameter

* changelog

* exclude ejected from exited

* fix linter

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-10-31 14:49:25 +00:00
Sammy Rosso
8fe024f6a1 Simplify EjectedValidatorIndices (#14588)
* fix

* fix test

* add to comment

* changelog
2024-10-31 11:22:40 +00:00
Nishant Das
6b7dd833a3 Use Read Only Head State When Computing Active Indices (#14592)
* Use Read Only Head State

* Use Read Only Head State
2024-10-31 06:48:56 +00:00
james-prysm
060527032b small improvements to logs (#14405)
* removing redundant log, and poorly worded log

* move log printing message

* changelog

* reverting a log change
2024-10-29 20:42:24 +00:00
Preston Van Loon
a29ecb6bbe Update beacon-chain pgo profile (#14589)
* Update pprof image with a recent one

* Update CHANGELOG.md
2024-10-29 15:18:56 +00:00
james-prysm
54656e172f keymanager API: bug fixes and inconsistencies (#14586)
* fixing error handling and returning empty requests with the wrong wallet type

* changelog

* adding some unit tests

* Update validator/rpc/handlers_keymanager.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update CHANGELOG.md

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update validator/rpc/handlers_keymanager.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update validator/rpc/intercepter.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update validator/rpc/intercepter_test.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update validator/rpc/intercepter_test.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* lint

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-10-29 14:37:14 +00:00
kasey
97c8adb003 fix --backfill-oldest-slot flag handling (#14584)
* fix --backfill-oldest-slot flag handling

* changelog

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-10-28 15:58:30 +00:00
Nishant Das
2fe8614115 Rollback Block During Processing (#14554)
* Change it to rollback

* Spacing

* Reverse order of rollback

* Add Rollback Test

* Potuz's Test Suggestion

* Potuz's Suggestion
2024-10-28 14:06:20 +00:00
Sammy Rosso
09accc7132 Add GET /eth/v2/beacon/pool/attestations endpoint (#14560)
* add ListAttestationsV2 endpoint

* fix endpoint

* changelog

* add endpoint to tests

* add trailing comma

* add version header + lint fix

* all reviews

* modify v1 and comments

* fix linter

* Radek' review
2024-10-28 10:59:27 +00:00
terence
53f1f11c6d Fix length check between kzg commitments and exist (#14581) 2024-10-25 17:59:57 +00:00
Sammy Rosso
48fe9d9c4d Add missing version headers (#14566)
* add version headers

* use headstate and add unit test

* changelog
2024-10-25 16:01:55 +00:00
Preston Van Loon
4386c244e1 Docker fix: Update bazel-lib to latest version (#14579)
* Update bazel-lib

* Update rules_oci

* Compress pkg_tar

* Add a note about 401 download for distroless/cc-debian11

* Update CHANGELOG.md

* Re-upload distroless/cc-debian11 to prysmaticlabs/distroless/cc-debian11 such that the image is public.

Ran the following command:
gcrane cp 'gcr.io/distroless/cc-debian11@sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b' gcr.io/prysmaticlabs/distroless/cc-debian11:latest

* Back out "Update rules_oci"

This backs out commit 64200fb25e.
2024-10-25 02:30:21 +00:00
terence
7ac522d8ff Use engine api get-blobs for block subscriber (#14513)
* Use engine api get-blobs for block subscriber

Debug

changelog

add proto marshal and unmarshal

Kasey's feedback

* Feedback

* Preston's feedback

* Exist argument should not be hardcoded with kzg count
2024-10-24 21:30:14 +00:00
kasey
52cf3a155d Safe StreamEvents write loop (#14557)
* new type for tests where errors are only logged

* StreamHandler waits for write loop exit

* add test case for writer timeout

* add changelog

* add missing file

* logging fix

* fix logging test to allow info logs

* naming/comments; make response controller private

* simplify cancel defers

* fix typo in test file name

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-10-24 19:16:17 +00:00
Potuz
83ed320826 Use ROBlock in block processing pipeline (#14571)
* Use ROBlock in block processing pipeline

* Pass blockchain tests

* passing forkchoice tests

* pass rpc test

* change changelog
2024-10-24 13:27:15 +00:00
Preston Van Loon
616cdc1e8b Update CHANGELOG.md to reflect v5.1.2 hotfix release (#14547) 2024-10-22 15:28:38 +00:00
Radosław Kapka
361712e886 Update the monitor package to Electra (#14562)
* Update the monitor package to Electra

* changelog <3
2024-10-22 03:09:18 +00:00
306 changed files with 11809 additions and 3389 deletions


@@ -54,7 +54,7 @@ jobs:
- name: Golangci-lint
uses: golangci/golangci-lint-action@v5
with:
version: v1.55.2
version: v1.56.1
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:


@@ -73,6 +73,7 @@ linters:
- promlinter
- protogetter
- revive
- spancheck
- staticcheck
- stylecheck
- tagalign


@@ -4,17 +4,32 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...HEAD)
## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...HEAD)
### Added
- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430)
- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430).
- Add Bellatrix tests for light client functions.
- Add Discovery Rebooter Feature.
- Added GetBlockAttestationsV2 endpoint.
- Light client support: Consensus types for Electra
- Light client support: Consensus types for Electra.
- Added SubmitPoolAttesterSlashingV2 endpoint.
- Added SubmitAggregateAndProofsRequestV2 endpoint.
- Updated the `beacon-chain/monitor` package to Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14562)
- Added ListAttestationsV2 endpoint.
- Add ability to rollback node's internal state during processing.
- Change how unsafe protobuf state is created to prevent unnecessary copies.
- Added benchmarks for process slots for Capella, Deneb, Electra.
- Add helper to cast bytes to string without allocating memory.
- Added GetAggregatedAttestationV2 endpoint.
- Added SubmitAttestationsV2 endpoint.
- Validator REST mode Electra block support.
- Added validator index label to `validator_statuses` metric.
- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
- P2P: Add logs when a peer is (dis)connected. Add the reason of the disconnection when we initiate it.
- Added a Prometheus error counter metric for HTTP requests to track beacon node requests.
### Changed
@@ -27,7 +42,31 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Return false from HasBlock if the block is being synced.
- Cleanup forkchoice on failed insertions.
- Use read only validator for core processing to avoid unnecessary copying.
- Fee Recipient log also prints total tracked validators cache size.
- Use ROBlock across block processing pipeline.
- Added missing Eth-Consensus-Version headers to GetBlockAttestationsV2 and GetAttesterSlashingsV2 endpoints.
- When instantiating new validators, explicitly set `Slashed` to false and move `EffectiveBalance` to match struct definition.
- Updated pgo profile for beacon chain with holesky data. This improves the profile guided
optimizations in the go compiler.
- Use read only state when computing the active validator list.
- Simplified `ExitedValidatorIndices`.
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`, `engine_getPayloadV4` are changed due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
- Use ROBlock earlier in block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only Build the Protobuf state once during serialization.
- Capella blocks are execution.
- Fixed panic when http request to subscribe to event stream fails.
- Return early for blob reconstructor during capella fork.
- Updated block endpoint from V1 to V2.
- Rename instances of "deposit receipts" to "deposit requests".
- Non-blocking payload attribute event handling in beacon api [pr](https://github.com/prysmaticlabs/prysm/pull/14644).
- Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.
- Improvements to HTTP response handling. [pr](https://github.com/prysmaticlabs/prysm/pull/14673)
### Deprecated
@@ -36,15 +75,50 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
### Removed
- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed kzg proof check from blob reconstructor.
### Fixed
- Fixed mesh size by appending `gParams.Dhi = gossipSubDhi`
- Fix skipping partial withdrawals count.
- recover from panics when writing the event stream [pr](https://github.com/prysmaticlabs/prysm/pull/14545)
- wait for the async StreamEvent writer to exit before leaving the http handler, avoiding race condition panics [pr](https://github.com/prysmaticlabs/prysm/pull/14557)
- Certain deb files were returning a 404 which made building new docker images without an existing
cache impossible. This has been fixed with updates to rules_oci and bazel-lib.
- Fixed an issue where the length check between block body KZG commitments and the existing cache from the database was incompatible.
- Fix `--backfill-oldest-slot` handling - this flag was totally broken, the code would always backfill to the default slot [pr](https://github.com/prysmaticlabs/prysm/pull/14584)
- Fix keymanager API should return corrected error format for malformed tokens
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log improvement, removing some redundant or duplicate logs
- EIP-7251 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- unskip electra merkle spec test
- Fix panic in validator REST mode when checking status after removing all keys
- Fix panic on attestation interface since we call data before validation
- corrects nil check on some interface attestation types
- temporary solution to handling electra attestation and attester_slashing events. [pr](14655)
- Diverse log improvements and comment additions.
### Security
## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16
This is a hotfix release with one change.
Prysm v5.1.1 contains an updated implementation of the beacon api streaming events endpoint. This
new implementation contains a bug that can cause a panic in certain conditions. The issue is
difficult to reproduce reliably and we are still trying to determine the root cause, but in the
meantime we are issuing a patch that recovers from the panic to prevent the node from crashing.
This only impacts the v5.1.1 release beacon api event stream endpoints. This endpoint is used by the
prysm REST mode validator (a feature which requires the validator to be configured to use the beacon
api instead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
like https://github.com/ethpandaops/ethereum-metrics-exporter
### Fixed
- Recover from panics when writing the event stream [#14545](https://github.com/prysmaticlabs/prysm/pull/14545)
## [v5.1.1](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...v5.1.1) - 2024-10-15
@@ -78,6 +152,7 @@ Updating to this release is recommended at your convenience.
- fastssz version bump (better error messages).
- SSE implementation that sheds stuck clients. [pr](https://github.com/prysmaticlabs/prysm/pull/14413)
- Added GetPoolAttesterSlashingsV2 endpoint.
- Use engine API get-blobs for block subscriber to reduce block import latency and potentially reduce bandwidth.
### Changed
@@ -137,6 +212,7 @@ Updating to this release is recommended at your convenience.
- Light client support: fix light client attested header execution fields' wrong version bug.
- Testing: added custom matcher for better push settings testing.
- Registered `GetDepositSnapshot` Beacon API endpoint.
- Fix rolling back of a block due to a context deadline.
### Security


@@ -101,9 +101,9 @@ http_archive(
http_archive(
name = "aspect_bazel_lib",
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
strip_prefix = "bazel-lib-2.5.0",
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
sha256 = "a272d79bb0ac6b6965aa199b1f84333413452e87f043b53eca7f347a23a478e8",
strip_prefix = "bazel-lib-2.9.3",
url = "https://github.com/bazel-contrib/bazel-lib/releases/download/v2.9.3/bazel-lib-v2.9.3.tar.gz",
)
load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
@@ -165,7 +165,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
image = "gcr.io/distroless/cc-debian11",
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
platforms = [
"linux/amd64",
"linux/arm64/v8",


@@ -12,6 +12,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",


@@ -14,6 +14,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -176,7 +177,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
err = non200Err(r)
return
}
res, err = io.ReadAll(r.Body)
res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
if err != nil {
err = errors.Wrap(err, "error reading http response body from builder server")
return
@@ -358,7 +359,7 @@ func (c *Client) Status(ctx context.Context) error {
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
var errMessage ErrorMessage
var body string
if err != nil {


@@ -10,11 +10,17 @@ import (
"github.com/pkg/errors"
)
const (
MaxBodySize int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
MaxErrBodySize int64 = 1 << 17 // 128KB
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
hc *http.Client
baseURL *url.URL
token string
maxBodySize int64
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
@@ -26,8 +32,9 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
hc: &http.Client{},
baseURL: u,
maxBodySize: MaxBodySize,
}
for _, o := range opts {
o(c)
@@ -72,7 +79,7 @@ func (c *Client) NodeURL() string {
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
if err != nil {
return nil, err
}
@@ -89,7 +96,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}


@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
var ErrConnectionIssue = errors.New("could not connect")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
func Non200Err(r *http.Response) error {
b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
body = "response body:\n" + string(b)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
switch r.StatusCode {
case http.StatusNotFound:
return errors.Wrap(ErrNotFound, msg)
default:


@@ -93,6 +93,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
return
}
defer func() {


@@ -40,7 +40,7 @@ func TestNewEventStream(t *testing.T) {
func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 3; i++ {
@@ -79,3 +79,23 @@ func TestEventStream(t *testing.T) {
}
}
}
func TestEventStreamRequestError(t *testing.T) {
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// use valid url that will result in failed request with nil body
stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
require.NoError(t, err)
// error will happen when request is made, should be received over events channel
go stream.Subscribe(eventsChannel)
event := <-eventsChannel
if event.EventType != EventConnectionError {
t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
}
}


@@ -46,3 +46,10 @@ func WithAuthenticationToken(token string) ClientOpt {
c.token = token
}
}
// WithMaxBodySize overrides the default max body size of 8MB.
func WithMaxBodySize(size int64) ClientOpt {
return func(c *Client) {
c.maxBodySize = size
}
}
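As a usage sketch of the new option (the node URL and request path here are illustrative assumptions, not taken from this diff), a caller of the `api/client` package can override the 8MB default like so:

```go
package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/v5/api/client"
)

func main() {
	// Placeholder beacon node URL; WithMaxBodySize raises the per-response
	// cap from the 8MB default (client.MaxBodySize) to 32MB for this client.
	c, err := client.NewClient("http://localhost:3500", client.WithMaxBodySize(1<<25))
	if err != nil {
		log.Fatal(err)
	}
	// Get reads at most the configured number of bytes from the response body.
	body, err := c.Get(context.Background(), "/eth/v1/node/version")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d bytes", len(body))
}
```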


@@ -21,11 +21,12 @@ type GetCommitteesResponse struct {
}
type ListAttestationsResponse struct {
Data []*Attestation `json:"data"`
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
}
type SubmitAttestationsRequest struct {
Data []*Attestation `json:"data"`
Data json.RawMessage `json:"data"`
}
type ListVoluntaryExitsResponse struct {


@@ -7,7 +7,8 @@ import (
)
type AggregateAttestationResponse struct {
Data *Attestation `json:"data"`
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
}
type SubmitContributionAndProofsRequest struct {

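Because `Data` is now a `json.RawMessage` paired with a `Version` string in both `ListAttestationsResponse` and `AggregateAttestationResponse`, callers decode the attestations themselves based on the fork version. A minimal sketch of that pattern, assuming a fork-specific `AttestationElectra` struct and lowercase version strings (both assumptions for illustration):

```go
package example

import (
	"encoding/json"
	"strings"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
)

// decodeAggregate picks a concrete attestation type from the reported fork
// version before unmarshalling the raw payload. AttestationElectra is a
// hypothetical fork-specific struct; only Attestation appears in this diff.
func decodeAggregate(resp *structs.AggregateAttestationResponse) (any, error) {
	switch strings.ToLower(resp.Version) {
	case "electra":
		att := &structs.AttestationElectra{} // hypothetical type
		return att, json.Unmarshal(resp.Data, att)
	default:
		att := &structs.Attestation{}
		return att, json.Unmarshal(resp.Data, att)
	}
}
```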

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -44,7 +45,7 @@ type ForkchoiceFetcher interface {
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
@@ -242,7 +243,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Ep
if !s.hasHeadState() {
return []primitives.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
return helpers.ActiveValidatorIndices(ctx, s.headStateReadOnly(ctx), epoch)
}
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.


@@ -4,6 +4,7 @@ import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -44,10 +45,10 @@ func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
}
// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block consensus_blocks.ROBlock) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}
// ForkChoiceDump returns the corresponding value from forkchoice

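With `InsertNode` now taking an ROBlock instead of a bare root, callers wrap the signed block and its root first. A rough migration sketch using constructors that appear later in this compare (`NewSignedBeaconBlock`, `NewROBlockWithRoot`); the helper name and parameter wiring are illustrative assumptions:

```go
package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// insertToForkchoice is a hypothetical helper showing the new calling
// convention: build a read-only block carrying both the block and its root,
// then hand that to InsertNode instead of a [32]byte root.
func insertToForkchoice(ctx context.Context, fc blockchain.ForkchoiceFetcher, st state.BeaconState, raw *ethpb.SignedBeaconBlockBellatrix, root [32]byte) error {
	signed, err := consensus_blocks.NewSignedBeaconBlock(raw)
	if err != nil {
		return err
	}
	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, root)
	if err != nil {
		return err
	}
	return fc.InsertNode(ctx, st, roblock)
}
```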

@@ -13,6 +13,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -38,7 +39,7 @@ func prepareForkchoiceState(
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
) (state.BeaconState, consensus_blocks.ROBlock, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
@@ -59,7 +60,26 @@ func prepareForkchoiceState(
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
if err != nil {
return nil, consensus_blocks.ROBlock{}, err
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: payloadHash[:],
},
},
},
}
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, consensus_blocks.ROBlock{}, err
}
roblock, err := consensus_blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
}
func TestHeadRoot_Nil(t *testing.T) {
@@ -122,9 +142,9 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
@@ -316,24 +336,24 @@ func TestService_ChainHeads(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
@@ -413,12 +433,12 @@ func TestService_IsOptimistic(t *testing.T) {
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -449,12 +469,12 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)


@@ -6,8 +6,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -69,6 +72,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch {
@@ -167,6 +171,38 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
return payloadID, nil
}
func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
if err != nil {
log.WithError(err).
WithField("head_root", cfg.headRoot[:]).
Error("Could not get proposer index for PayloadAttributes event")
return
}
evd := payloadattribute.EventData{
ProposerIndex: pidx,
ProposalSlot: cfg.headState.Slot(),
ParentBlockRoot: cfg.headRoot[:],
Attributer: cfg.attributes,
HeadRoot: cfg.headRoot,
HeadState: cfg.headState,
HeadBlock: cfg.headBlock,
}
if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
headPayload, err := cfg.headBlock.Block().Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return
}
evd.ParentBlockHash = headPayload.BlockHash()
evd.ParentBlockNumber = headPayload.BlockNumber()
}
f.Send(&feed.Event{
Type: statefeed.PayloadAttributes,
Data: evd,
})
}
// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {


@@ -1135,9 +1135,14 @@ func TestComputePayloadAttribute(t *testing.T) {
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
blockRoot: [32]byte{'a'},
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,


@@ -32,18 +32,18 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -92,12 +92,12 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
{
name: "process nil attestation",
a: nil,
wantedErr: "attestation can't be nil",
wantedErr: "attestation is nil",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "attestation's data can't be nil",
wantedErr: "attestation is nil",
},
{
name: "process nil field (a.Target) in attestation",
@@ -139,9 +139,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, roblock))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
@@ -318,10 +318,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -337,10 +336,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)


@@ -7,8 +7,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -46,8 +44,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// process the beacon block after validating the state transition function
type postBlockProcessConfig struct {
ctx context.Context
signed interfaces.ReadOnlySignedBeaconBlock
blockRoot [32]byte
roblock consensusblocks.ROBlock
headRoot [32]byte
postState state.BeaconState
isValidPayload bool
@@ -61,7 +58,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock")
defer span.End()
cfg.ctx = ctx
if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil {
if err := consensusblocks.BeaconBlockIsNil(cfg.roblock); err != nil {
return invalidBlock{error: err}
}
startTime := time.Now()
@@ -73,19 +70,22 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
defer s.sendLightClientFeeds(cfg)
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.signed.Block())
defer reportAttestationInclusion(cfg.roblock.Block())
err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot)
err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
if err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot())
// Do not use parent context in the event it deadlined
ctx = trace.NewContext(context.Background(), span)
s.rollbackBlock(ctx, cfg.roblock.Root())
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil {
if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings())
s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
if cfg.isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.roblock.Root()); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
@@ -95,8 +95,8 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
log.WithError(err).Warn("Could not update head")
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if cfg.headRoot != cfg.blockRoot {
s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot)
if cfg.headRoot != cfg.roblock.Root() {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
@@ -154,7 +154,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
}
@@ -234,7 +234,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
@@ -279,7 +279,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
// Set their optimistic status
@@ -618,9 +618,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if !s.inRegularSync() {
return
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.MissedSlot,
})
s.headLock.RLock()
headRoot := s.headRoot()
headState := s.headState(ctx)
@@ -648,6 +645,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: nil,
attributes: attribute,
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
return
}
@@ -688,3 +692,15 @@ func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, bl
}
return err
}
// In the event of an issue processing a block, we roll back the changes made to the DB and our caches
// to ensure that the node's internal state remains consistent.
func (s *Service) rollbackBlock(ctx context.Context, blockRoot [32]byte) {
log.Warnf("Rolling back insertion of block with root %#x due to processing error", blockRoot)
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, blockRoot); err != nil {
log.WithError(err).Errorf("Could not delete state from caches with block root %#x", blockRoot)
}
if err := s.cfg.BeaconDB.DeleteBlock(ctx, blockRoot); err != nil {
log.WithError(err).Errorf("Could not delete block with block root %#x", blockRoot)
}
}
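A minimal sketch of the rollback pattern this hunk introduces: the block and its post-state are persisted before fork choice insertion, and if insertion fails they are deleted again under a fresh context so a deadlined parent context cannot block the cleanup. The store type and its methods below are toy stand-ins, not Prysm's BeaconDB/StateGen APIs.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

// store is a toy stand-in (assumption) for the beacon DB and state cache.
type store struct {
	blocks map[[32]byte]bool
	states map[[32]byte]bool
}

// insertForkChoice simulates a fork choice insertion that always fails.
func (s *store) insertForkChoice(_ context.Context, _ [32]byte) error {
	return errors.New("unknown parent")
}

// rollback deletes the block and its cached state, mirroring rollbackBlock above.
func (s *store) rollback(_ context.Context, root [32]byte) {
	log.Printf("Rolling back insertion of block with root %#x", root)
	delete(s.states, root) // analogous to deleting the state from caches
	delete(s.blocks, root) // analogous to deleting the block from the DB
}

func (s *store) process(ctx context.Context, root [32]byte) error {
	s.blocks[root], s.states[root] = true, true // saved before fork choice runs
	if err := s.insertForkChoice(ctx, root); err != nil {
		// use a fresh context so the cleanup still runs if ctx already deadlined
		s.rollback(context.Background(), root)
		return fmt.Errorf("could not insert block to fork choice store: %w", err)
	}
	return nil
}

func main() {
	st := &store{blocks: map[[32]byte]bool{}, states: map[[32]byte]bool{}}
	root := [32]byte{'a'}
	err := st.process(context.Background(), root)
	fmt.Println(err != nil, st.blocks[root], st.states[root]) // true false false
}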

View File

@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/features"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -42,7 +43,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if !s.inRegularSync() {
return nil
}
slot := cfg.signed.Block().Slot()
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
return nil
}
@@ -50,9 +51,9 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
}
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
if cfg.roblock.Root() == cfg.headRoot {
fcuArgs.headState = cfg.postState
fcuArgs.headBlock = cfg.signed
fcuArgs.headBlock = cfg.roblock
fcuArgs.headRoot = cfg.headRoot
fcuArgs.proposingSlot = s.CurrentSlot() + 1
return nil
@@ -96,7 +97,7 @@ func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs
// sendStateFeedOnBlock sends an event that a new block has been synced
func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.roblock.Root())
if err != nil {
log.WithError(err).Debug("Could not check if block is optimistic")
optimistic = true
@@ -105,9 +106,9 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: cfg.signed.Block().Slot(),
BlockRoot: cfg.blockRoot,
SignedBlock: cfg.signed,
Slot: cfg.roblock.Block().Slot(),
BlockRoot: cfg.roblock.Root(),
SignedBlock: cfg.roblock,
Verified: true,
Optimistic: optimistic,
},
@@ -117,7 +118,7 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
// sendLightClientFeeds sends the light client feeds when feature flag is enabled.
func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
if features.Get().EnableLightClient {
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil {
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to send light client optimistic update")
}
@@ -125,7 +126,7 @@ func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
finalized := s.ForkChoicer().FinalizedCheckpoint()
// LightClientFinalityUpdate needs super majority
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState)
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.roblock, finalized, cfg.postState)
}
}
@@ -252,20 +253,21 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil {
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return nil
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:])
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when we are about to propose the next block and want to defer
// the computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
@@ -281,7 +283,7 @@ func reportProcessingTime(startTime time.Time) {
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
@@ -438,7 +440,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
// This retrieves missing blocks from the DB (i.e. blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
@@ -448,10 +450,15 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
// The first block can have a bogus root since the block is not inserted in forkchoice
roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as the parent node is not in the fork choice store and is present in the DB.
root := blk.ParentRoot()
root := roblock.Block().ParentRoot()
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
@@ -460,8 +467,12 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if b.Block().Slot() <= fSlot {
break
}
roblock, err := consensus_blocks.NewROBlockWithRoot(b, root)
if err != nil {
return err
}
root = b.Block().ParentRoot()
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
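The hunks in this file replace the old (block, root) argument pairs with a consensusblocks.ROBlock that carries both. A minimal sketch of the wrapping step, reusing the same constructors the changes above call; the import paths are assumed to match the v5 module layout shown elsewhere in this diff.

package main

import (
	"fmt"

	consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func main() {
	b := util.NewBeaconBlock() // empty phase0 test block, as in the tests below
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	if err != nil {
		panic(err)
	}
	root, err := b.Block.HashTreeRoot()
	if err != nil {
		panic(err)
	}
	// ROBlock bundles the signed block with its root, so callers such as
	// ForkChoiceStore.InsertNode no longer need a separate root argument.
	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
	if err != nil {
		panic(err)
	}
	fmt.Printf("slot=%d root=%#x\n", roblock.Block().Slot(), roblock.Root())
}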

View File

@@ -145,9 +145,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -190,7 +189,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -246,7 +245,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
// Set finalized epoch to 2.
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 1 node: block 65
@@ -279,7 +278,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
require.NoError(t, err)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}
@@ -566,7 +565,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -614,7 +615,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -640,7 +643,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true})
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -688,7 +693,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1114,7 +1121,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1124,7 +1133,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1134,7 +1145,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1144,7 +1157,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1219,7 +1234,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1237,7 +1254,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1256,7 +1275,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1278,7 +1299,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1306,7 +1329,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
@@ -1335,7 +1360,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1397,7 +1424,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1415,7 +1444,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1435,7 +1466,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1457,7 +1490,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1485,7 +1520,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
@@ -1513,7 +1550,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1578,7 +1617,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1597,7 +1638,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1616,7 +1659,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1643,7 +1688,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, invalidRoots[i-13], [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1669,7 +1716,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1708,7 +1757,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1731,7 +1782,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1757,7 +1810,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1813,7 +1868,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1831,7 +1888,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1850,7 +1909,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1879,7 +1940,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -1905,7 +1968,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that the headroot/state are not in DB and restart the node
@@ -1995,7 +2060,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2059,7 +2126,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2209,11 +2278,11 @@ func Test_getFCUArgs(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
signed: wsb,
blockRoot: [32]byte{'a'},
roblock: roblock,
postState: st,
isValidPayload: true,
}
@@ -2223,11 +2292,143 @@ func Test_getFCUArgs(t *testing.T) {
require.ErrorContains(t, "block does not exist", err)
// canonical branch
cfg.headRoot = cfg.blockRoot
cfg.headRoot = cfg.roblock.Root()
fcuArgs = &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
require.NoError(t, err)
require.Equal(t, cfg.blockRoot, fcuArgs.headRoot)
require.Equal(t, cfg.roblock.Root(), fcuArgs.headRoot)
}
func TestRollbackBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err := service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, true, hasState)
// Set invalid parent root to trigger forkchoice error.
wsb.SetParentRoot([]byte("bad"))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
// Roll back block insertion into the DB and caches.
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err = service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, false, hasState)
}
func TestRollbackBlock_ContextDeadline(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 33)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err := service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, true, hasState)
// Use a canceled context when processing the block
cancCtx, canc := context.WithCancel(context.Background())
canc()
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
parentRoot = roblock.Block().ParentRoot()
cj := &ethpb.Checkpoint{}
cj.Epoch = 1
cj.Root = parentRoot[:]
require.NoError(t, postState.SetCurrentJustifiedCheckpoint(cj))
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
// Roll back block insertion into the DB and caches.
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err = service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, false, hasState)
}
func fakeCommitments(n int) [][]byte {

View File

@@ -10,6 +10,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -46,7 +47,7 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r32))
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32, params.BeaconConfig().ZeroHash, fc, fc)
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, fc, fc)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r33))
@@ -54,10 +55,12 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
a := util.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = []byte{'c'}
a.Data.BeaconBlockRoot = r33[:]
r33Root := r33.Root()
a.Data.BeaconBlockRoot = r33Root[:]
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
a.Data.Target.Root = r32[:]
r32Root := r32.Root()
a.Data.Target.Root = r32Root[:]
err = service.VerifyLmdFfgConsistency(context.Background(), a)
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}
@@ -116,7 +119,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -176,7 +181,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -17,6 +17,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -83,7 +85,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
currentCheckpoints := s.saveCurrentCheckpoints(preState)
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return err
}
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
if err != nil {
return err
}
@@ -102,8 +109,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
args := &postBlockProcessConfig{
ctx: ctx,
signed: blockCopy,
blockRoot: blockRoot,
roblock: roblock,
postState: postState,
isValidPayload: isValidPayload,
}
@@ -184,8 +190,7 @@ func (s *Service) updateCheckpoints(
func (s *Service) validateExecutionAndConsensus(
ctx context.Context,
preState state.BeaconState,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
block consensusblocks.ROBlock,
) (state.BeaconState, bool, error) {
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
@@ -204,7 +209,7 @@ func (s *Service) validateExecutionAndConsensus(
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
@@ -555,16 +560,16 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
}
// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, block consensusblocks.ROBlock) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
if block.Block().Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, block); err != nil {
return isValidPayload, err
}
}

View File

@@ -36,6 +36,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -303,7 +304,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint state")
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, fRoot); err != nil {
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
roblock, err := consensus_blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
if !features.Get().EnableStartOptimistic {
@@ -515,7 +524,11 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
gb, err := consensus_blocks.NewROBlockWithRoot(genesisBlk, genesisBlkRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, gb); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")
}
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)

View File

@@ -376,11 +376,15 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
b := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -453,7 +457,11 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
require.NoError(b, err)
beaconState, err := util.NewBeaconState()
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -567,7 +567,7 @@ func prepareForkchoiceState(
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
) (state.BeaconState, blocks.ROBlock, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
@@ -588,7 +588,26 @@ func prepareForkchoiceState(
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
if err != nil {
return nil, blocks.ROBlock{}, err
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: payloadHash[:],
},
},
},
}
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, blocks.ROBlock{}, err
}
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
}
// CachedHeadRoot mocks the same method in the chain service
@@ -631,9 +650,9 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
}
// InsertNode mocks the same method in the chain service
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, block blocks.ROBlock) error {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.InsertNode(ctx, st, root)
return s.ForkChoiceStore.InsertNode(ctx, st, block)
}
return nil
}

View File

@@ -47,9 +47,3 @@ func (t *TrackedValidatorsCache) Validating() bool {
defer t.Unlock()
return len(t.trackedValidators) > 0
}
func (t *TrackedValidatorsCache) Size() int {
t.Lock()
defer t.Unlock()
return len(t.trackedValidators)
}

View File

@@ -187,11 +187,12 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// return Validator(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// effective_balance=effective_balance,
// slashed=False,
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
// activation_epoch=FAR_FUTURE_EPOCH,
// exit_epoch=FAR_FUTURE_EPOCH,
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// effective_balance=effective_balance,
// )
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
@@ -202,10 +203,11 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
return &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: withdrawalCredentials,
EffectiveBalance: effectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: effectiveBalance,
}
}
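For context on the reordering above: the effective balance is the deposit amount rounded down to EFFECTIVE_BALANCE_INCREMENT and, as in the spec's definition of effective balance, capped at the maximum. A small worked example with the constants hard-coded as assumed mainnet values rather than read from params.BeaconConfig():

package main

import "fmt"

func main() {
	// assumed mainnet values, hard-coded here for illustration
	const (
		effectiveBalanceIncrement uint64 = 1_000_000_000  // 1 ETH in Gwei
		maxEffectiveBalance       uint64 = 32_000_000_000 // 32 ETH in Gwei (pre-Electra cap)
	)
	amount := uint64(33_500_000_000)                        // a 33.5 ETH deposit
	effective := amount - amount%effectiveBalanceIncrement // round down to the increment: 33 ETH
	if effective > maxEffectiveBalance {
		effective = maxEffectiveBalance // cap at the maximum effective balance: 32 ETH
	}
	fmt.Println(effective) // 32000000000
}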

View File

@@ -448,6 +448,7 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
Target: &ethpb.Checkpoint{
Epoch: primitives.Epoch(i),
},
Source: &ethpb.Checkpoint{},
}
}
@@ -489,6 +490,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
Target: &ethpb.Checkpoint{
Root: []byte{},
},
Source: &ethpb.Checkpoint{},
},
Signature: sig.Marshal(),
AggregationBits: list,

View File

@@ -61,6 +61,9 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
if body == nil {
return false, errors.New("nil block body")
}
if body.Version() >= version.Capella {
return true, nil
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
@@ -202,24 +205,24 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
payload, err := body.Execution()
if err != nil {
return nil, err
return err
}
if err := verifyBlobCommitmentCount(body); err != nil {
return nil, err
return err
}
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
return nil, err
return err
}
if err := ValidatePayload(st, payload); err != nil {
return nil, err
return err
}
if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
return nil, err
return err
}
return st, nil
return nil
}
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {

View File

@@ -253,7 +253,8 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
require.NoError(t, err)
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, false, got)
// #14614
require.Equal(t, true, got)
}
func Test_IsExecutionEnabled(t *testing.T) {
@@ -587,8 +588,7 @@ func Test_ProcessPayload(t *testing.T) {
ExecutionPayload: tt.payload,
})
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, body)
if err != nil {
if err := blocks.ProcessPayload(st, body); err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
@@ -619,8 +619,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
ExecutionPayload: payload,
})
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, body)
require.NoError(t, err)
require.NoError(t, blocks.ProcessPayload(st, body))
}
func Test_ProcessPayload_Blinded(t *testing.T) {
@@ -677,8 +676,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
ExecutionPayloadHeader: p,
})
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, body)
if err != nil {
if err := blocks.ProcessPayload(st, body); err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)

View File

@@ -120,35 +120,36 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
//
// assert len(payload.withdrawals) == len(expected_withdrawals)
// expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
//
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
// assert len(payload.withdrawals) == len(expected_withdrawals)
//
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
expectedWithdrawals, processedPartialWithdrawalsCount, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}
@@ -192,7 +193,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
}
if st.Version() >= version.Electra {
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
if err := st.DequeuePendingPartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
}
}
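The spec pseudocode above ends with two pieces of index bookkeeping: the next withdrawal index advances past the last processed withdrawal, and the validator sweep cursor either restarts right after the last withdrawn validator (full payload) or jumps ahead by the maximum sweep length (partial payload). A self-contained sketch of that logic follows, using illustrative stand-in types and the mainnet constant values rather than Prysm's:

package main

import "fmt"

// Mainnet constant values, used here purely for illustration.
const (
	maxWithdrawalsPerPayload         = 16
	maxValidatorsPerWithdrawalsSweep = 16384
)

type withdrawal struct {
	index          uint64
	validatorIndex uint64
}

// nextWithdrawalIndices mirrors the tail of process_withdrawals: it returns the next
// withdrawal index and the validator index at which the next sweep starts.
func nextWithdrawalIndices(expected []withdrawal, curWithdrawalIdx, curValidatorIdx, numValidators uint64) (uint64, uint64) {
	nextWithdrawalIdx := curWithdrawalIdx
	if len(expected) != 0 {
		nextWithdrawalIdx = expected[len(expected)-1].index + 1
	}
	var nextValidatorIdx uint64
	if len(expected) == maxWithdrawalsPerPayload {
		// Full payload: the next sweep starts right after the last withdrawn validator.
		nextValidatorIdx = (expected[len(expected)-1].validatorIndex + 1) % numValidators
	} else {
		// Partial payload: advance the sweep cursor by the maximum sweep length.
		nextValidatorIdx = (curValidatorIdx + maxValidatorsPerWithdrawalsSweep) % numValidators
	}
	return nextWithdrawalIdx, nextValidatorIdx
}

func main() {
	expected := []withdrawal{{index: 41, validatorIndex: 7}, {index: 42, validatorIndex: 9}}
	wIdx, vIdx := nextWithdrawalIndices(expected, 41, 9, 1000)
	fmt.Println(wIdx, vIdx) // 43 393
}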

View File

@@ -386,8 +386,14 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
return errors.Wrap(err, "batch signature verification failed")
}
pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
// Process each deposit individually
for _, pendingDeposit := range pendingDeposits {
_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
if !found {
pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
}
validSignature := allSignaturesVerified
// If batch verification failed, check the individual deposit signature
@@ -405,9 +411,16 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
// Add validator to the registry if the signature is valid
if validSignature {
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
if err != nil {
return errors.Wrap(err, "failed to add validator to registry")
if found {
index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
return errors.Wrap(err, "could not increase balance")
}
} else {
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
if err != nil {
return errors.Wrap(err, "failed to add validator to registry")
}
}
}
}
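The hunk above adds a per-batch pubkey map so that a deposit batch containing the same public key twice registers the validator once and treats later occurrences as balance top-ups. A minimal, self-contained sketch of that deduplication pattern, with an illustrative registry type standing in for Prysm's state:

package main

import "fmt"

type deposit struct {
	pubkey [48]byte
	amount uint64
}

// registry is an illustrative stand-in for the validator registry plus balances.
type registry struct {
	indexByPubkey map[[48]byte]int
	balances      []uint64
}

func (r *registry) apply(deps []deposit) {
	seen := make(map[[48]byte]struct{}, len(deps))
	for _, d := range deps {
		if _, dup := seen[d.pubkey]; dup {
			// The key was already handled in this batch: only top up the balance.
			r.balances[r.indexByPubkey[d.pubkey]] += d.amount
			continue
		}
		seen[d.pubkey] = struct{}{}
		// First occurrence in the batch: add a new validator entry.
		r.indexByPubkey[d.pubkey] = len(r.balances)
		r.balances = append(r.balances, d.amount)
	}
}

func main() {
	var key [48]byte
	key[0] = 0xaa
	r := &registry{indexByPubkey: make(map[[48]byte]int)}
	r.apply([]deposit{{pubkey: key, amount: 32}, {pubkey: key, amount: 32}})
	fmt.Println(len(r.balances), r.balances[0]) // 1 64: one validator, combined balance
}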
@@ -508,11 +521,12 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// validator = Validator(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// effective_balance=Gwei(0),
// slashed=False,
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
// activation_epoch=FAR_FUTURE_EPOCH,
// exit_epoch=FAR_FUTURE_EPOCH,
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// effective_balance=Gwei(0),
// )
//
// # [Modified in Electra:EIP7251]
@@ -524,11 +538,12 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
validator := &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: withdrawalCredentials,
EffectiveBalance: 0,
Slashed: false,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: 0,
}
v, err := state_native.NewValidator(validator)
if err != nil {
@@ -558,7 +573,7 @@ func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState,
return beaconState, nil
}
// processDepositRequest processes the specific deposit receipt
// processDepositRequest processes the specific deposit request
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
// # Set deposit request start index
@@ -588,8 +603,8 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: bytesutil.SafeCopyBytes(request.Pubkey),
Amount: request.Amount,
WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
Amount: request.Amount,
Signature: bytesutil.SafeCopyBytes(request.Signature),
Slot: beaconState.Slot(),
}); err != nil {

View File

@@ -22,6 +22,40 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
st := stateWithActiveBalanceETH(t, 1000)
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
validators := st.Validators()
sk, err := bls.RandKey()
require.NoError(t, err)
for i := 0; i < len(deps); i += 1 {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
validators[i].PublicKey = sk.PublicKey().Marshal()
validators[i].WithdrawalCredentials = wc
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
}
require.NoError(t, st.SetPendingDeposits(deps))
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
require.NoError(t, err)
val := st.Validators()
seenPubkeys := make(map[string]struct{})
for i := 0; i < len(val); i += 1 {
if len(val[i].PublicKey) == 0 {
continue
}
_, ok := seenPubkeys[string(val[i].PublicKey)]
if ok {
t.Fatalf("duplicated pubkeys")
} else {
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
}
}
}
func TestProcessPendingDeposits(t *testing.T) {
tests := []struct {
name string
@@ -285,7 +319,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
invalidDep := &eth.PendingDeposit{}
invalidDep := &eth.PendingDeposit{PublicKey: make([]byte, 48)}
// have a combination of valid and invalid deposits
deps := []*eth.PendingDeposit{validDep, invalidDep}
require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))

View File

@@ -29,7 +29,6 @@ var (
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
AttestationsDelta = altair.AttestationsDelta
ProcessSyncAggregate = altair.ProcessSyncAggregate
)
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.

View File

@@ -84,11 +84,11 @@ func ProcessOperations(
}
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
if err != nil {
return nil, errors.Wrap(err, "could not process deposit receipts")
return nil, errors.Wrap(err, "could not process deposit requests")
}
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
return nil, errors.Wrap(err, "could not process withdrawal requests")
}
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, fmt.Errorf("could not process consolidation requests: %w", err)

View File

@@ -31,6 +31,8 @@ const (
LightClientFinalityUpdate
// LightClientOptimisticUpdate event
LightClientOptimisticUpdate
// PayloadAttributes events are fired upon a missed slot or new head.
PayloadAttributes
)
// BlockProcessedData is the data sent with BlockProcessed events.

View File

@@ -23,11 +23,8 @@ var (
// Access to these nil fields will result in a runtime panic,
// so it is recommended to run these checks as a first line of defense.
func ValidateNilAttestation(attestation ethpb.Att) error {
if attestation == nil {
return errors.New("attestation can't be nil")
}
if attestation.GetData() == nil {
return errors.New("attestation's data can't be nil")
if attestation == nil || attestation.IsNil() {
return errors.New("attestation is nil")
}
if attestation.GetData().Source == nil {
return errors.New("attestation's source can't be nil")

View File

@@ -260,12 +260,12 @@ func TestValidateNilAttestation(t *testing.T) {
{
name: "nil attestation",
attestation: nil,
errString: "attestation can't be nil",
errString: "attestation is nil",
},
{
name: "nil attestation data",
attestation: &ethpb.Attestation{},
errString: "attestation's data can't be nil",
errString: "attestation is nil",
},
{
name: "nil attestation source",

View File

@@ -333,8 +333,7 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
state, err = b.ProcessPayload(state, blk.Body())
if err != nil {
if err = b.ProcessPayload(state, blk.Body()); err != nil {
return nil, errors.Wrap(err, "could not process execution data")
}
}

View File

@@ -698,3 +698,45 @@ func TestProcessSlotsConditionally(t *testing.T) {
assert.Equal(t, primitives.Slot(6), s.Slot())
})
}
func BenchmarkProcessSlots_Capella(b *testing.B) {
st, _ := util.DeterministicGenesisStateCapella(b, params.BeaconConfig().MaxValidatorsPerCommittee)
var err error
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
}
}
}
func BenchmarkProcessSlots_Deneb(b *testing.B) {
st, _ := util.DeterministicGenesisStateDeneb(b, params.BeaconConfig().MaxValidatorsPerCommittee)
var err error
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
}
}
}
func BenchmarkProcessSlots_Electra(b *testing.B) {
st, _ := util.DeterministicGenesisStateElectra(b, params.BeaconConfig().MaxValidatorsPerCommittee)
var err error
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
}
}
}

View File

@@ -241,76 +241,35 @@ func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
return slashed
}
// ExitedValidatorIndices determines the indices exited during the current epoch.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
// ExitedValidatorIndices returns the indices of validators who exited during the specified epoch.
//
// A validator is considered to have exited during an epoch if its ExitEpoch equals that epoch;
// validators that were ejected (effective balance at or below the ejection threshold) are excluded.
// This function simplifies the exit determination by directly checking the validator's ExitEpoch,
// avoiding the complexities and potential inaccuracies of calculating withdrawable epochs.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
exited := make([]primitives.ValidatorIndex, 0)
exitEpochs := make([]primitives.Epoch, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch)
}
}
exitQueueEpoch := primitives.Epoch(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := uint64(0)
for _, val := range validators {
if val.ExitEpoch == exitQueueEpoch {
exitQueueChurn++
}
}
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn < exitQueueChurn {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
if val.ExitEpoch == epoch && val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
exited = append(exited, primitives.ValidatorIndex(i))
}
}
return exited, nil
}
// EjectedValidatorIndices determines the indices ejected during the given epoch.
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
// EjectedValidatorIndices returns the indices of validators who were ejected during the specified epoch.
//
// A validator is considered ejected during an epoch if:
// - Their ExitEpoch equals the epoch.
// - Their EffectiveBalance is less than or equal to the EjectionBalance threshold.
//
// This function simplifies the ejection determination by directly checking the validator's ExitEpoch
// and EffectiveBalance, avoiding the complexities and potential inaccuracies of calculating
// withdrawable epochs.
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
ejected := make([]primitives.ValidatorIndex, 0)
exitEpochs := make([]primitives.Epoch, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch)
}
}
exitQueueEpoch := primitives.Epoch(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := uint64(0)
for _, val := range validators {
if val.ExitEpoch == exitQueueEpoch {
exitQueueChurn++
}
}
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn < exitQueueChurn {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
if val.ExitEpoch == epoch && val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
ejected = append(ejected, primitives.ValidatorIndex(i))
}
}
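With the churn and withdrawable-epoch computation removed, the classification above reduces to a single pass over the validator set: a validator whose ExitEpoch equals the epoch counts as exited when its effective balance is above the ejection threshold, and as ejected otherwise. A standalone sketch of that one-pass split, using an illustrative validator struct and the mainnet ejection balance:

package main

import "fmt"

// Mainnet ejection balance (16 ETH in Gwei), used here purely for illustration.
const ejectionBalanceGwei = uint64(16_000_000_000)

type validator struct {
	exitEpoch        uint64
	effectiveBalance uint64
}

// exitedAndEjected splits validators whose ExitEpoch equals the given epoch into
// voluntarily exited (balance above the ejection threshold) and ejected (at or below it).
func exitedAndEjected(epoch uint64, vals []validator) (exited, ejected []int) {
	for i, v := range vals {
		if v.exitEpoch != epoch {
			continue
		}
		if v.effectiveBalance > ejectionBalanceGwei {
			exited = append(exited, i)
		} else {
			ejected = append(ejected, i)
		}
	}
	return exited, ejected
}

func main() {
	vals := []validator{
		{exitEpoch: 5, effectiveBalance: 32_000_000_000}, // exited at epoch 5
		{exitEpoch: 5, effectiveBalance: 16_000_000_000}, // ejected at epoch 5
		{exitEpoch: 9, effectiveBalance: 32_000_000_000}, // exits later
	}
	exited, ejected := exitedAndEjected(5, vals)
	fmt.Println(exited, ejected) // [0] [1]
}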

View File

@@ -389,19 +389,16 @@ func TestExitedValidatorIndices(t *testing.T) {
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
},
},
},
@@ -433,11 +430,7 @@ func TestExitedValidatorIndices(t *testing.T) {
},
}
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
require.NoError(t, err)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators)
require.NoError(t, err)
assert.DeepEqual(t, tt.wanted, exitedIndices)
}

View File

@@ -23,10 +23,10 @@ import (
bolt "go.etcd.io/bbolt"
)
// used to represent errors for inconsistent slot ranges.
// Used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
// Block retrieval by root.
// Block retrieval by root. Returns nil if the block is not found.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
defer span.End()

View File

@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
bolt "go.etcd.io/bbolt"
@@ -603,14 +604,14 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
// marshal versioned state from struct type down to bytes.
func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, error) {
switch st.ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
switch st.Version() {
case version.Phase0:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconState)
if !ok {
return nil, errors.New("non valid inner state")
}
return encode(ctx, rState)
case *ethpb.BeaconStateAltair:
case version.Altair:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateAltair)
if !ok {
return nil, errors.New("non valid inner state")
@@ -623,7 +624,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(altairKey, rawObj...)), nil
case *ethpb.BeaconStateBellatrix:
case version.Bellatrix:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errors.New("non valid inner state")
@@ -636,7 +637,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(bellatrixKey, rawObj...)), nil
case *ethpb.BeaconStateCapella:
case version.Capella:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateCapella)
if !ok {
return nil, errors.New("non valid inner state")
@@ -649,7 +650,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case *ethpb.BeaconStateDeneb:
case version.Deneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
@@ -662,7 +663,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
case *ethpb.BeaconStateElectra:
case version.Electra:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
if !ok {
return nil, errors.New("non valid inner state")

View File

@@ -688,7 +688,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
// Encode attestation record to bytes.
// The output encoded attestation record consists of the signing root concatenated with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
if att == nil || att.IndexedAttestation == nil || att.IndexedAttestation.IsNil() {
return []byte{}, errors.New("nil proposal record")
}

View File

@@ -37,6 +37,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -105,8 +106,11 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -14,6 +14,7 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -23,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
@@ -79,6 +81,8 @@ const (
GetPayloadBodiesByRangeV1 = "engine_getPayloadBodiesByRangeV1"
// ExchangeCapabilities request string for JSON-RPC.
ExchangeCapabilities = "engine_exchangeCapabilities"
// GetBlobsV1 request string for JSON-RPC.
GetBlobsV1 = "engine_getBlobsV1"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
@@ -93,16 +97,15 @@ type ForkchoiceUpdatedResponse struct {
ValidationError string `json:"validationError"`
}
// PayloadReconstructor defines a service that can reconstruct a full beacon
// block with an execution payload from a signed beacon block and a connection
// to an execution client's engine API.
type PayloadReconstructor interface {
// Reconstructor defines a service responsible for reconstructing full beacon chain objects by utilizing the execution API and making requests through the execution client.
type Reconstructor interface {
ReconstructFullBlock(
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error)
ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, indices []bool) ([]blocks.VerifiedROBlob, error)
}
// EngineCaller defines a client that can interact with an Ethereum
@@ -494,6 +497,20 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
return hdr, err
}
// GetBlobs returns the blob and proof from the execution engine for the given versioned hashes.
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
defer span.End()
// If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
if !s.capabilityCache.has(GetBlobsV1) {
return nil, nil
}
result := make([]*pb.BlobAndProof, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV1, versionedHashes)
return result, handleRPCError(err)
}
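// GetBlobs above is keyed by blob versioned hashes. The following self-contained
// sketch shows how a versioned hash is derived from a KZG commitment under EIP-4844
// (sha256 of the commitment with its first byte replaced by the KZG version byte 0x01);
// it mirrors the conversion used when building the request and is illustrative, not
// Prysm's helper.
package main

import (
	"crypto/sha256"
	"fmt"
)

const versionedHashVersionKZG = 0x01

// kzgCommitmentToVersionedHash derives the EIP-4844 versioned hash for a commitment.
func kzgCommitmentToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = versionedHashVersionKZG
	return h
}

func main() {
	commitment := make([]byte, 48) // a real commitment is a 48-byte BLS12-381 G1 point
	fmt.Printf("versioned hash: %x\n", kzgCommitmentToVersionedHash(commitment))
}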
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -522,6 +539,103 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
return unb, nil
}
// ReconstructBlobSidecars reconstructs the verified blob sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and proofs,
// and constructs the corresponding verified read-only blob sidecars.
//
// The 'exists' argument is a boolean list (must be the same length as body.BlobKzgCommitments), where each element corresponds to whether a
// particular blob sidecar already exists. If exists[i] is true, the blob for the i-th KZG commitment
// has already been retrieved and does not need to be fetched again from the execution layer (EL).
//
// For example:
// - len(block.Body().BlobKzgCommitments()) == 6
// - If exists = [true, false, true, false, true, false], the function will fetch the blobs
// associated with indices 1, 3, and 5 (since those are marked as non-existent).
// - If exists = [false ... x 6], the function will attempt to fetch all blobs.
//
// Only the blobs that do not already exist (where exists[i] is false) are fetched using the KZG commitments from the block body.
func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, exists []bool) ([]blocks.VerifiedROBlob, error) {
blockBody := block.Block().Body()
kzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "could not get blob KZG commitments")
}
if len(kzgCommitments) > len(exists) {
return nil, fmt.Errorf("length of KZG commitments (%d) is greater than length of exists (%d)", len(kzgCommitments), len(exists))
}
// Collect KZG hashes for non-existing blobs
var kzgHashes []common.Hash
for i, commitment := range kzgCommitments {
if !exists[i] {
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
}
}
if len(kzgHashes) == 0 {
return nil, nil
}
// Fetch blobs from EL
blobs, err := s.GetBlobs(ctx, kzgHashes)
if err != nil {
return nil, errors.Wrap(err, "could not get blobs")
}
if len(blobs) == 0 {
return nil, nil
}
header, err := block.Header()
if err != nil {
return nil, errors.Wrap(err, "could not get header")
}
// Reconstruct verified blob sidecars
var verifiedBlobs []blocks.VerifiedROBlob
for i, blobIndex := 0, 0; i < len(kzgCommitments); i++ {
if exists[i] {
continue
}
if blobIndex >= len(blobs) || blobs[blobIndex] == nil {
blobIndex++
continue
}
blob := blobs[blobIndex]
blobIndex++
proof, err := blocks.MerkleProofKZGCommitment(blockBody, i)
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to get Merkle proof for KZG commitment")
continue
}
sidecar := &ethpb.BlobSidecar{
Index: uint64(i),
Blob: blob.Blob,
KzgCommitment: kzgCommitments[i],
KzgProof: blob.KzgProof,
SignedBlockHeader: header,
CommitmentInclusionProof: proof,
}
roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to create RO blob with root")
continue
}
v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
verifiedBlob, err := v.VerifiedROBlob()
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
continue
}
verifiedBlobs = append(verifiedBlobs, verifiedBlob)
}
return verifiedBlobs, nil
}
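// A small sketch of the 'exists' contract documented above: the caller flags which
// blob indices it already has, and only the unflagged indices are requested from the
// execution layer. The helper below is illustrative, not part of Prysm.
package main

import "fmt"

// missingIndices returns the commitment indices that would be fetched from the
// execution layer, i.e. those whose exists flag is false.
func missingIndices(exists []bool) []int {
	var missing []int
	for i, have := range exists {
		if !have {
			missing = append(missing, i)
		}
	}
	return missing
}

func main() {
	// Block with 6 KZG commitments; sidecars 0, 2 and 4 are already in local storage.
	exists := []bool{true, false, true, false, true, false}
	fmt.Println(missingIndices(exists)) // [1 3 5], matching the example in the comment above
}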
func fullPayloadFromPayloadBody(
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
) (interfaces.ExecutionData, error) {

View File

@@ -2,6 +2,7 @@ package execution
import (
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
@@ -20,6 +21,7 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -37,9 +39,9 @@ import (
)
var (
_ = PayloadReconstructor(&Service{})
_ = Reconstructor(&Service{})
_ = EngineCaller(&Service{})
_ = PayloadReconstructor(&Service{})
_ = Reconstructor(&Service{})
_ = EngineCaller(&mocks.EngineClient{})
)
@@ -1767,7 +1769,9 @@ func fixturesStruct() *payloadFixtures {
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
},
ExecutionRequests: []hexutil.Bytes{depositRequestBytes, withdrawalRequestBytes, consolidationRequestBytes},
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
@@ -2390,3 +2394,122 @@ func Test_ExchangeCapabilities(t *testing.T) {
}
})
}
func TestReconstructBlobSidecars(t *testing.T) {
client := &Service{capabilityCache: &capabilityCache{}}
b := util.NewBeaconBlockDeneb()
kzgCommitments := createRandomKzgCommitments(t, 6)
b.Block.Body.BlobKzgCommitments = kzgCommitments
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
ctx := context.Background()
t.Run("all seen", func(t *testing.T) {
exists := []bool{true, true, true, true, true, true}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.NoError(t, err)
require.Equal(t, 0, len(verifiedBlobs))
})
t.Run("get-blobs end point is not supported", func(t *testing.T) {
exists := []bool{true, true, true, true, true, false}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.NoError(t, err)
require.Equal(t, 0, len(verifiedBlobs))
})
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV1: nil}}
t.Run("recovered 6 missing blobs", func(t *testing.T) {
srv := createBlobServer(t, 6)
defer srv.Close()
rpcClient, client := setupRpcClient(t, srv.URL, client)
defer rpcClient.Close()
exists := [6]bool{}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists[:])
require.NoError(t, err)
require.Equal(t, 6, len(verifiedBlobs))
})
t.Run("recovered 3 missing blobs", func(t *testing.T) {
srv := createBlobServer(t, 3)
defer srv.Close()
rpcClient, client := setupRpcClient(t, srv.URL, client)
defer rpcClient.Close()
exists := []bool{true, false, true, false, true, false}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.NoError(t, err)
require.Equal(t, 3, len(verifiedBlobs))
})
t.Run("kzg is longer than exist", func(t *testing.T) {
srv := createBlobServer(t, 3)
defer srv.Close()
rpcClient, client := setupRpcClient(t, srv.URL, client)
defer rpcClient.Close()
exists := []bool{true, false, true, false, true}
_, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.ErrorContains(t, "length of KZG commitments (6) is greater than length of exists (5)", err)
})
}
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
kzgCommitments := make([][]byte, num)
for i := range kzgCommitments {
kzgCommitments[i] = make([]byte, 48)
_, err := rand.Read(kzgCommitments[i])
require.NoError(t, err)
}
return kzgCommitments
}
func createBlobServer(t *testing.T, numBlobs int) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
blobs := make([]pb.BlobAndProofJson, numBlobs)
for i := range blobs {
blobs[i] = pb.BlobAndProofJson{Blob: []byte(fmt.Sprintf("blob%d", i+1)), KzgProof: []byte(fmt.Sprintf("proof%d", i+1))}
}
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": blobs,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
}
func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
rpcClient, err := rpc.DialHTTP(url)
require.NoError(t, err)
client.rpcClient = rpcClient
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV1: nil}}
client.blobVerifier = testNewBlobVerifier()
return rpcClient, client
}
func testNewBlobVerifier() verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return &verification.MockBlobVerifier{
CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
return blocks.VerifiedROBlob{}, nil
},
}
}
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/network"
"github.com/prysmaticlabs/prysm/v5/network/authorization"
)
@@ -115,3 +116,11 @@ func WithJwtId(jwtId string) Option {
return nil
}
}
// WithVerifierWaiter gives the sync package direct access to the verifier waiter.
func WithVerifierWaiter(v *verification.InitializerWaiter) Option {
return func(s *Service) error {
s.verifierWaiter = v
return nil
}
}

View File

@@ -78,6 +78,13 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
currClient.Close()
}
log.WithField("endpoint", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url)).Info("Connected to new endpoint")
c, err := s.ExchangeCapabilities(ctx)
if err != nil {
errorLogger(err, "Could not exchange capabilities with execution client")
}
s.capabilityCache.save(c)
return
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")

View File

@@ -29,7 +29,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/container/trie"
contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -155,6 +157,9 @@ type Service struct {
lastReceivedMerkleIndex int64 // Keeps track of the last received index to prevent log spam.
runError error
preGenesisState state.BeaconState
verifierWaiter *verification.InitializerWaiter
blobVerifier verification.NewBlobVerifier
capabilityCache *capabilityCache
}
// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
@@ -192,6 +197,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
lastReceivedMerkleIndex: -1,
preGenesisState: genState,
eth1HeadTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
capabilityCache: &capabilityCache{},
}
for _, opt := range opts {
@@ -229,6 +235,13 @@ func (s *Service) Start() {
}
}
v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
if err != nil {
log.WithError(err).Error("Could not get verification initializer")
return
}
s.blobVerifier = newBlobVerifierFromInitializer(v)
s.isRunning = true
// Poll the execution client connection and fallback if errors occur.
@@ -886,3 +899,39 @@ func (s *Service) migrateOldDepositTree(eth1DataInDB *ethpb.ETH1ChainData) error
func (s *Service) removeStartupState() {
s.cfg.finalizedStateAtStartup = nil
}
func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return ini.NewBlobVerifier(b, reqs)
}
}
type capabilityCache struct {
capabilities map[string]interface{}
capabilitiesLock sync.RWMutex
}
func (c *capabilityCache) save(cs []string) {
c.capabilitiesLock.Lock()
defer c.capabilitiesLock.Unlock()
if c.capabilities == nil {
c.capabilities = make(map[string]interface{})
}
for _, capability := range cs {
c.capabilities[capability] = struct{}{}
}
}
func (c *capabilityCache) has(capability string) bool {
c.capabilitiesLock.RLock()
defer c.capabilitiesLock.RUnlock()
if c.capabilities == nil {
return false
}
_, ok := c.capabilities[capability]
return ok
}
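The capability cache above gates optional engine endpoints: the list returned by engine_exchangeCapabilities is saved once, and callers such as GetBlobs only issue a request when the cache reports support. A minimal, self-contained sketch of that gating pattern (type and endpoint names here are illustrative):

package main

import (
	"fmt"
	"sync"
)

// capabilityGate is an illustrative equivalent of the capability cache: a thread-safe
// set of endpoint names reported by the execution client.
type capabilityGate struct {
	mu   sync.RWMutex
	caps map[string]struct{}
}

func (c *capabilityGate) save(cs []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.caps == nil {
		c.caps = make(map[string]struct{})
	}
	for _, capability := range cs {
		c.caps[capability] = struct{}{}
	}
}

func (c *capabilityGate) has(capability string) bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	_, ok := c.caps[capability]
	return ok
}

func main() {
	gate := &capabilityGate{}
	// Saved once after a successful engine_exchangeCapabilities round trip.
	gate.save([]string{"engine_getPayloadV4", "engine_getBlobsV1"})
	if gate.has("engine_getBlobsV1") {
		fmt.Println("execution client supports engine_getBlobsV1; blob recovery enabled")
	}
}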

View File

@@ -19,8 +19,11 @@ import (
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/container/trie"
contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
@@ -92,10 +95,16 @@ func TestStart_OK(t *testing.T) {
t.Cleanup(func() {
server.Stop()
})
c := startup.NewClockSynchronizer()
require.NoError(t, c.SetClock(startup.NewClock(time.Unix(0, 0), [32]byte{})))
waiter := verification.NewInitializerWaiter(
c, forkchoice.NewROForkChoice(nil), nil)
web3Service, err := NewService(context.Background(),
WithHttpEndpoint(endpoint),
WithDepositContractAddress(testAcc.ContractAddr),
WithDatabase(beaconDB),
WithVerifierWaiter(waiter),
)
require.NoError(t, err, "unable to setup execution service")
web3Service = setDefaultMocks(web3Service)

View File

@@ -36,6 +36,8 @@ type EngineClient struct {
OverrideValidHash [32]byte
GetPayloadResponse *blocks.GetPayloadResponse
ErrGetPayload error
BlobSidecars []blocks.VerifiedROBlob
ErrorBlobSidecars error
}
// NewPayload --
@@ -106,6 +108,11 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
return fullBlocks, nil
}
// ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, []bool) ([]blocks.VerifiedROBlob, error) {
return e.BlobSidecars, e.ErrorBlobSidecars
}
// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
ttd := new(big.Int)

View File

@@ -18,6 +18,7 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -23,13 +23,13 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -11,4 +11,3 @@ var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")
var errNilBlockHeader = errors.New("invalid nil block header")

View File

@@ -6,18 +6,17 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
forkchoice2 "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)
@@ -105,26 +104,10 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
}
// InsertNode processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, root [32]byte) error {
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, roblock consensus_blocks.ROBlock) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertNode")
defer span.End()
slot := state.Slot()
bh := state.LatestBlockHeader()
if bh == nil {
return errNilBlockHeader
}
parentRoot := bytesutil.ToBytes32(bh.ParentRoot)
var payloadHash [32]byte
if state.Version() >= version.Bellatrix {
ph, err := state.LatestExecutionPayloadHeader()
if err != nil {
return err
}
if ph != nil {
copy(payloadHash[:], ph.BlockHash())
}
}
jc := state.CurrentJustifiedCheckpoint()
if jc == nil {
return errInvalidNilCheckpoint
@@ -135,7 +118,7 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
node, err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}
@@ -490,15 +473,8 @@ func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.B
return nil
}
for i := len(chain) - 1; i > 0; i-- {
b := chain[i].Block
r := chain[i-1].Block.ParentRoot()
parentRoot := b.ParentRoot()
payloadHash, err := blocks.GetBlockPayloadHash(b)
if err != nil {
return err
}
if _, err := f.store.insert(ctx,
b.Slot(), r, parentRoot, payloadHash,
chain[i].Block,
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
return err
}

View File

@@ -32,7 +32,7 @@ func prepareForkchoiceState(
payloadHash [32]byte,
justifiedEpoch primitives.Epoch,
finalizedEpoch primitives.Epoch,
) (state.BeaconState, [32]byte, error) {
) (state.BeaconState, blocks.ROBlock, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
@@ -59,21 +59,40 @@ func prepareForkchoiceState(
}
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
if err != nil {
return nil, blocks.ROBlock{}, err
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: payloadHash[:],
},
},
},
}
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, blocks.ROBlock{}, err
}
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
}
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
@@ -94,15 +113,15 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
@@ -125,15 +144,15 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
@@ -156,24 +175,24 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
@@ -187,24 +206,24 @@ func TestForkChoice_IsCanonical(t *testing.T) {
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
@@ -233,15 +252,15 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
func TestForkChoice_AncestorRoot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
f.store.treeRootNode.parent = nil
@@ -265,12 +284,12 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
require.NoError(t, err)
@@ -280,12 +299,12 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
require.NoError(t, err)
@@ -296,20 +315,20 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
// Insert a block it will be head
st, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
head, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, head)
// Insert two extra blocks
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
head, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
@@ -378,36 +397,36 @@ func TestStore_CommonAncestor(t *testing.T) {
// \-- c -- f
// \-- g
// \ -- h -- i -- j
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
tests := []struct {
name string
@@ -498,18 +517,18 @@ func TestStore_CommonAncestor(t *testing.T) {
// a -- b -- c -- d
f = setup(0, 0)
st, blkRoot, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
tests = []struct {
name string
r1 [32]byte
@@ -590,7 +609,9 @@ func TestStore_InsertChain(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
@@ -599,14 +620,16 @@ func TestStore_InsertChain(t *testing.T) {
blk.Block.Slot = primitives.Slot(i)
copiedRoot := root
blk.Block.ParentRoot = copiedRoot[:]
root, err = blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
root, err = blk.Block.HashTreeRoot()
require.NoError(t, err)
}
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
for i := 0; i < len(blks); i++ {
@@ -670,26 +693,26 @@ func TestForkChoice_UpdateCheckpoints(t *testing.T) {
fcs.store.finalizedCheckpoint = tt.finalized
fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot
st, blkRoot, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
st, roblock, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
[32]byte{'f'}, [32]byte{}, tt.justified.Epoch, tt.finalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
[32]byte{'j'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
[32]byte{'f'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
[32]byte{'f'}, [32]byte{}, tt.newFinalized.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
// restart justifications because insertion messed them up
fcs.store.justifiedCheckpoint = tt.justified
fcs.store.finalizedCheckpoint = tt.finalized
@@ -715,9 +738,9 @@ func TestWeight(t *testing.T) {
f := setup(0, 0)
root := [32]byte{'a'}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 0, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
n, ok := f.store.nodeByRoot[root]
require.Equal(t, true, ok)
@@ -747,9 +770,9 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
ctx := context.Background()
f := setup(0, 0)
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.unrealizedJustifiedCheckpoint.Root = [32]byte{'a'}
got := f.UnrealizedJustifiedPayloadBlockHash()
@@ -760,90 +783,90 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root()})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children
require.NoError(t, f.InsertNode(ctx, st, root))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 0})
require.NoError(t, f.InsertNode(ctx, st, blk))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 0})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 2})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 2})
require.NoError(t, err)
require.Equal(t, true, viable)
st, bRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
st, blk2, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, blk.Root(), [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, bRoot))
require.NoError(t, f.InsertNode(ctx, st, blk2))
// Epoch start
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root()})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children but impossible checkpoint
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
st, cRoot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, bRoot, [32]byte{'C'}, 0, 0)
st, blk3, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, blk2.Root(), [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, cRoot))
require.NoError(t, f.InsertNode(ctx, st, blk3))
// Children in same epoch
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
st, dRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, bRoot, [32]byte{'D'}, 0, 0)
st, blk4, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, blk2.Root(), [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, dRoot))
require.NoError(t, f.InsertNode(ctx, st, blk4))
// Children in next epoch but boundary
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// Boundary block
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk4.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 0})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk4.Root(), Epoch: 0})
require.NoError(t, err)
require.Equal(t, false, viable)
// Children in next epoch
st, eRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, bRoot, [32]byte{'E'}, 0, 0)
st, blk5, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, blk2.Root(), [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, eRoot))
require.NoError(t, f.InsertNode(ctx, st, blk5))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
}
@@ -851,14 +874,14 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
func TestForkChoiceSlot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
_, err = f.Slot(root)
_, err = f.Slot(blk.Root())
require.ErrorIs(t, ErrNilNode, err)
require.NoError(t, f.InsertNode(ctx, st, root))
slot, err := f.Slot(root)
require.NoError(t, f.InsertNode(ctx, st, blk))
slot, err := f.Slot(blk.Root())
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}
@@ -867,16 +890,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
root1 := [32]byte{'a'}
st, root, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
root2 := [32]byte{'b'}
st, root, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
st, blk, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
root, err = f.ParentRoot(root2)
root, err := f.ParentRoot(root2)
require.NoError(t, err)
require.Equal(t, root1, root)
@@ -892,12 +915,12 @@ func TestForkchoiceParentRoot(t *testing.T) {
func TestForkChoice_CleanupInserting(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
return f.justifiedBalances, errors.New("mock err")
})
require.NoError(t, err)
require.NotNil(t, f.InsertNode(ctx, st, blkRoot))
require.Equal(t, false, f.HasNode(blkRoot))
require.NotNil(t, f.InsertNode(ctx, st, roblock))
require.Equal(t, false, f.HasNode(roblock.Root()))
}
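
The hunks above replace the raw blkRoot [32]byte returned by prepareForkchoiceState with a consensus_blocks.ROBlock, so tests now hand the whole read-only block to InsertNode and call .Root() wherever they previously used the bare root. The sketch below is not part of this diff; it only illustrates, under stated assumptions, how the block-building half of such a helper could pack the requested slot, root, parent root and payload hash into an ROBlock.

```go
// A sketch only, not the actual prepareForkchoiceState implementation.
// util.NewBeaconBlockBellatrix is assumed as the block fixture; the real
// helper also constructs the BeaconState, which is omitted here.
package forkchoicetest_sketch

import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func buildROBlock(slot primitives.Slot, root, parentRoot, payloadHash [32]byte) (blocks.ROBlock, error) {
	b := util.NewBeaconBlockBellatrix() // hypothetical choice of fixture
	b.Block.Slot = slot
	b.Block.ParentRoot = parentRoot[:]
	b.Block.Body.ExecutionPayload.BlockHash = payloadHash[:]
	wsb, err := blocks.NewSignedBeaconBlock(b)
	if err != nil {
		return blocks.ROBlock{}, err
	}
	// Pin the caller-supplied root instead of recomputing it, mirroring the
	// NewROBlockWithRoot usage elsewhere in this diff.
	return blocks.NewROBlockWithRoot(wsb, root)
}
```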

View File

@@ -14,15 +14,15 @@ import (
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
// The updated balance of each node is 100
s := f.store
@@ -41,15 +41,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
// The updated balance of each node is 100
s := f.store
@@ -72,9 +72,9 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is not viable.
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
// Verify parent's best child and best descendant are `none`.
s := f.store
@@ -87,9 +87,9 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
@@ -100,12 +100,12 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
@@ -120,12 +120,12 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
@@ -159,21 +159,21 @@ func TestNode_ViableForHead(t *testing.T) {
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
@@ -192,28 +192,28 @@ func TestNode_SetFullyValidated(t *testing.T) {
// \
// -- 5 (true)
//
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[1] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[2] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[3] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[4] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[5] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -266,9 +266,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
driftGenesisTime(f, 1, 1)
root := [32]byte{'a'}
f.justifiedBalances = []uint64{10}
state, blkRoot, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
state, blk, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -283,9 +283,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
// late block
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
root = [32]byte{'b'}
state, blkRoot, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
state, blk, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -299,9 +299,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
// very late block
driftGenesisTime(f, 3, ProcessAttestationsThreshold+1)
root = [32]byte{'c'}
state, blkRoot, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
state, blk, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -314,9 +314,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
// block from the future
root = [32]byte{'d'}
state, blkRoot, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)

View File

@@ -484,23 +484,23 @@ func TestForkChoice_missingProposerBoostRoots(t *testing.T) {
}
f.justifiedBalances = balances
driftGenesisTime(f, 1, 0)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
st, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
f.store.previousProposerBoostRoot = [32]byte{'p'}
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, [32]byte{'r'}, f.store.proposerBoostRoot)
f.store.proposerBoostRoot = [32]byte{'p'}
driftGenesisTime(f, 3, 0)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
st, blk, err = prepareForkchoiceState(ctx, 2, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, [32]byte{'p'}, f.store.proposerBoostRoot)
}

View File

@@ -53,7 +53,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// Only reorg blocks that arrive late
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
log.WithError(err).Error("could not check if block arrived early")
log.WithError(err).Error("Could not check if block arrived early")
return
}
if early {

View File

@@ -19,23 +19,23 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
driftGenesisTime(f, 1, 0)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
st, blk, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, true, f.ShouldOverrideFCU())
@@ -117,23 +117,23 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
ctx := context.Background()
driftGenesisTime(f, 1, 0)
parentRoot := [32]byte{'a'}
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)
driftGenesisTime(f, 3, 1)
childRoot := [32]byte{'b'}
st, root, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
st, blk, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
f.store.headNode.timestamp -= params.BeaconConfig().SecondsPerSlot - orphanLateBlockFirstThreshold
t.Run("head is weak", func(t *testing.T) {

View File

@@ -8,8 +8,10 @@ import (
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -63,12 +65,24 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
slot primitives.Slot,
root, parentRoot, payloadHash [fieldparams.RootLength]byte,
roblock consensus_blocks.ROBlock,
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
root := roblock.Root()
block := roblock.Block()
slot := block.Slot()
parentRoot := block.ParentRoot()
var payloadHash [32]byte
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(payloadHash[:], execution.BlockHash())
}
// Return if the block has been inserted into Store before.
if n, ok := s.nodeByRoot[root]; ok {
return n, nil
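
With the new signature, insert derives the block root, slot, parent root and, for Bellatrix and later blocks, the execution payload hash from the ROBlock itself instead of taking them as separate arguments. Below is a minimal caller-side sketch assuming only helpers that appear elsewhere in this diff (blocks.NewSignedBeaconBlock, blocks.NewROBlockWithRoot); the nodeInserter interface merely restates the InsertNode signature used in the tests and is not a real Prysm type.

```go
// Caller-side sketch: wrap a signed block into an ROBlock so forkchoice can
// derive root, slot, parent root and payload hash itself.
package forkchoice_sketch

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// nodeInserter restates the InsertNode signature from this diff for illustration.
type nodeInserter interface {
	InsertNode(context.Context, state.BeaconState, blocks.ROBlock) error
}

func insertSigned(ctx context.Context, fc nodeInserter, st state.BeaconState, sb *ethpb.SignedBeaconBlock) error {
	root, err := sb.Block.HashTreeRoot() // same root the tests above compute
	if err != nil {
		return err
	}
	wsb, err := blocks.NewSignedBeaconBlock(sb)
	if err != nil {
		return err
	}
	roblock, err := blocks.NewROBlockWithRoot(wsb, root)
	if err != nil {
		return err
	}
	return fc.InsertNode(ctx, st, roblock)
}
```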

View File

@@ -26,9 +26,9 @@ func TestStore_FinalizedEpoch(t *testing.T) {
func TestStore_NodeCount(t *testing.T) {
f := setup(0, 0)
state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
state, blk, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(context.Background(), state, blkRoot))
require.NoError(t, f.InsertNode(context.Background(), state, blk))
require.Equal(t, 2, f.NodeCount())
}
@@ -133,7 +133,10 @@ func TestStore_Insert(t *testing.T) {
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
_, err := s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
ctx := context.Background()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
@@ -327,7 +330,10 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// Make sure it doesn't underflow
s.genesisTime = uint64(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
_, err := s.insert(context.Background(), 1, [32]byte{'a'}, b, b, 1, 1)
ctx := context.Background()
_, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
count, err := f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
@@ -337,7 +343,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 64
// Received block last epoch is 1
_, err = s.insert(context.Background(), 64, [32]byte{'A'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'A'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration((-64*int64(params.BeaconConfig().SecondsPerSlot))-1) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -348,7 +356,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 64 65
// Received block last epoch is 2
_, err = s.insert(context.Background(), 65, [32]byte{'B'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 65, [32]byte{'B'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -359,7 +369,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 64 65 66
// Received block last epoch is 3
_, err = s.insert(context.Background(), 66, [32]byte{'C'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 66, [32]byte{'C'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -370,7 +382,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 64 65 66
// 98
// Received block last epoch is 1
_, err = s.insert(context.Background(), 98, [32]byte{'D'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 98, [32]byte{'D'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-98*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -382,7 +396,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 98
// 132
// Received block last epoch is 1
_, err = s.insert(context.Background(), 132, [32]byte{'E'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 132, [32]byte{'E'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -395,7 +411,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 132
// 99
// Received block last epoch is still 1. 99 is outside the window
_, err = s.insert(context.Background(), 99, [32]byte{'F'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 99, [32]byte{'F'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -408,7 +426,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 132
// 99 100
// Received block last epoch is still 1. 100 is at the same position as 132
_, err = s.insert(context.Background(), 100, [32]byte{'G'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 100, [32]byte{'G'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -421,7 +441,9 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 132
// 99 100 101
// Received block last epoch is 2. 101 is within the window
_, err = s.insert(context.Background(), 101, [32]byte{'H'}, b, b, 1, 1)
_, blk, err = prepareForkchoiceState(ctx, 101, [32]byte{'H'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -443,94 +465,94 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
state, blkRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
target, err := f.TargetRootForEpoch(blkRoot, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
target, err := f.TargetRootForEpoch(blk.Root(), 1)
require.NoError(t, err)
require.Equal(t, target, blkRoot)
require.Equal(t, target, blk.Root())
state, root1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blkRoot, params.BeaconConfig().ZeroHash, 1, 1)
state, blk1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blk.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root1))
target, err = f.TargetRootForEpoch(root1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk1))
target, err = f.TargetRootForEpoch(blk1.Root(), 1)
require.NoError(t, err)
require.Equal(t, target, blkRoot)
require.Equal(t, target, blk.Root())
// Insert a block for the next epoch (missed slot 0)
state, root2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, root1, params.BeaconConfig().ZeroHash, 1, 1)
state, blk2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root2))
target, err = f.TargetRootForEpoch(root2, 2)
require.NoError(t, f.InsertNode(ctx, state, blk2))
target, err = f.TargetRootForEpoch(blk2.Root(), 2)
require.NoError(t, err)
require.Equal(t, target, root1)
require.Equal(t, target, blk1.Root())
state, root3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, root2, params.BeaconConfig().ZeroHash, 1, 1)
state, blk3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, blk2.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root3))
target, err = f.TargetRootForEpoch(root2, 2)
require.NoError(t, f.InsertNode(ctx, state, blk3))
target, err = f.TargetRootForEpoch(blk2.Root(), 2)
require.NoError(t, err)
require.Equal(t, target, root1)
require.Equal(t, target, blk1.Root())
// Prune finalization
s := f.store
s.finalizedCheckpoint.Root = root1
s.finalizedCheckpoint.Root = blk1.Root()
require.NoError(t, s.prune(ctx))
target, err = f.TargetRootForEpoch(root1, 1)
target, err = f.TargetRootForEpoch(blk1.Root(), 1)
require.NoError(t, err)
require.Equal(t, [32]byte{}, target)
// Insert a block for next epoch (slot 0 present)
state, root4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, root1, params.BeaconConfig().ZeroHash, 1, 1)
state, blk4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root4))
target, err = f.TargetRootForEpoch(root4, 3)
require.NoError(t, f.InsertNode(ctx, state, blk4))
target, err = f.TargetRootForEpoch(blk4.Root(), 3)
require.NoError(t, err)
require.Equal(t, target, root4)
require.Equal(t, target, blk4.Root())
state, root5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, root4, params.BeaconConfig().ZeroHash, 1, 1)
state, blk5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, blk4.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root5))
target, err = f.TargetRootForEpoch(root5, 3)
require.NoError(t, f.InsertNode(ctx, state, blk5))
target, err = f.TargetRootForEpoch(blk5.Root(), 3)
require.NoError(t, err)
require.Equal(t, target, root4)
require.Equal(t, target, blk4.Root())
// Target root where the target epoch is same or ahead of the block slot
target, err = f.TargetRootForEpoch(root5, 4)
target, err = f.TargetRootForEpoch(blk5.Root(), 4)
require.NoError(t, err)
require.Equal(t, target, root5)
require.Equal(t, target, blk5.Root())
// Target root where the target epoch is two epochs ago
target, err = f.TargetRootForEpoch(root5, 2)
target, err = f.TargetRootForEpoch(blk5.Root(), 2)
require.NoError(t, err)
require.Equal(t, root1, target) // the parent of root4 in epoch 3 is root 1 in epoch 1
require.Equal(t, blk1.Root(), target) // the parent of root4 in epoch 3 is root 1 in epoch 1
// Target root where the target is two epochs ago, slot 0 was missed
state, root6, err := prepareForkchoiceState(ctx, 4*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'g'}, root5, params.BeaconConfig().ZeroHash, 1, 1)
state, blk6, err := prepareForkchoiceState(ctx, 4*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'g'}, blk5.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root6))
target, err = f.TargetRootForEpoch(root6, 4)
require.NoError(t, f.InsertNode(ctx, state, blk6))
target, err = f.TargetRootForEpoch(blk6.Root(), 4)
require.NoError(t, err)
require.Equal(t, target, root5)
target, err = f.TargetRootForEpoch(root6, 2)
require.Equal(t, target, blk5.Root())
target, err = f.TargetRootForEpoch(blk6.Root(), 2)
require.NoError(t, err)
require.Equal(t, target, root1)
require.Equal(t, target, blk1.Root())
// Prune finalization
s.finalizedCheckpoint.Root = root4
s.finalizedCheckpoint.Root = blk4.Root()
require.NoError(t, s.prune(ctx))
target, err = f.TargetRootForEpoch(root4, 3)
target, err = f.TargetRootForEpoch(blk4.Root(), 3)
require.NoError(t, err)
require.Equal(t, root4, target)
require.Equal(t, blk4.Root(), target)
}
func TestStore_CleanupInserting(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NotNil(t, f.InsertNode(ctx, st, blkRoot))
require.Equal(t, false, f.HasNode(blkRoot))
require.NotNil(t, f.InsertNode(ctx, st, blk))
require.Equal(t, false, f.HasNode(blk.Root()))
}

View File

@@ -6,6 +6,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
forkchoice2 "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -41,7 +42,7 @@ type HeadRetriever interface {
// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
InsertNode(context.Context, state.BeaconState, [32]byte) error
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
InsertChain(context.Context, []*forkchoicetypes.BlockAndCheckpoints) error
}

View File

@@ -7,7 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],

View File

@@ -2,7 +2,7 @@ package types
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -17,7 +17,7 @@ type Checkpoint struct {
// BlockAndCheckpoints to call the InsertOptimisticChain function
type BlockAndCheckpoints struct {
Block interfaces.ReadOnlyBeaconBlock
Block consensus_blocks.ROBlock
JustifiedCheckpoint *ethpb.Checkpoint
FinalizedCheckpoint *ethpb.Checkpoint
}
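
Because BlockAndCheckpoints.Block is now an ROBlock, a batch handed to InsertChain carries each block's root alongside the block. The following is a hedged sketch of such a caller; BlockProcessor and BlockAndCheckpoints come from the hunks above, while the checkpoint values are simple pass-through parameters chosen for illustration.

```go
// Sketch of a batch caller: pair each ROBlock with its checkpoints and hand
// the slice to InsertChain.
package forkchoicetypes_sketch

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func insertChain(ctx context.Context, fc forkchoice.BlockProcessor, blks []blocks.ROBlock, jc, fin *ethpb.Checkpoint) error {
	args := make([]*forkchoicetypes.BlockAndCheckpoints, 0, len(blks))
	for _, b := range blks {
		args = append(args, &forkchoicetypes.BlockAndCheckpoints{
			Block:               b,
			JustifiedCheckpoint: jc,
			FinalizedCheckpoint: fin,
		})
	}
	return fc.InsertChain(ctx, args)
}
```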

View File

@@ -185,42 +185,42 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att ethpb.
}
// processAggregatedAttestation logs when the beacon node observes an aggregated attestation from a tracked validator.
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
func (s *Service) processAggregatedAttestation(ctx context.Context, att ethpb.AggregateAttAndProof) {
s.Lock()
defer s.Unlock()
if s.trackedIndex(att.AggregatorIndex) {
if s.trackedIndex(att.GetAggregatorIndex()) {
log.WithFields(logrus.Fields{
"aggregatorIndex": att.AggregatorIndex,
"slot": att.Aggregate.Data.Slot,
"aggregatorIndex": att.GetAggregatorIndex(),
"slot": att.AggregateVal().GetData().Slot,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.BeaconBlockRoot)),
att.AggregateVal().GetData().BeaconBlockRoot)),
"sourceRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Source.Root)),
att.AggregateVal().GetData().Source.Root)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(
att.Aggregate.Data.Target.Root)),
att.AggregateVal().GetData().Target.Root)),
}).Info("Processed attestation aggregation")
aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex]
aggregatedPerf := s.aggregatedPerformance[att.GetAggregatorIndex()]
aggregatedPerf.totalAggregations++
s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf
aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc()
s.aggregatedPerformance[att.GetAggregatorIndex()] = aggregatedPerf
aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.GetAggregatorIndex())).Inc()
}
var root [32]byte
copy(root[:], att.Aggregate.Data.BeaconBlockRoot)
copy(root[:], att.AggregateVal().GetData().BeaconBlockRoot)
st := s.config.StateGen.StateByRootIfCachedNoCopy(root)
if st == nil {
log.WithField("beaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug(
"Skipping aggregated attestation due to state not found in cache")
return
}
attestingIndices, err := attestingIndices(ctx, st, att.Aggregate)
attestingIndices, err := attestingIndices(ctx, st, att.AggregateVal())
if err != nil {
log.WithError(err).Error("Could not get attesting indices")
return
}
for _, idx := range attestingIndices {
if s.canUpdateAttestedValidator(primitives.ValidatorIndex(idx), att.Aggregate.Data.Slot) {
logFields := logMessageTimelyFlagsForIndex(primitives.ValidatorIndex(idx), att.Aggregate.Data)
if s.canUpdateAttestedValidator(primitives.ValidatorIndex(idx), att.AggregateVal().GetData().Slot) {
logFields := logMessageTimelyFlagsForIndex(primitives.ValidatorIndex(idx), att.AggregateVal().GetData())
log.WithFields(logFields).Info("Processed aggregated attestation")
}
}
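
The handler above now accepts the ethpb.AggregateAttAndProof interface and reads every field through getters (GetAggregatorIndex, AggregateVal().GetData()), which lets a single code path serve more than one concrete aggregate type. The helper below is only illustrative and uses nothing beyond those getters; the return types are assumptions inferred from how the fields are used in the handler.

```go
// Illustrative helper built solely on the getter-based interface shown above.
package monitor_sketch

import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func aggregateSummary(att ethpb.AggregateAttAndProof) (primitives.ValidatorIndex, primitives.Slot, [32]byte) {
	data := att.AggregateVal().GetData()
	var root [32]byte
	copy(root[:], data.BeaconBlockRoot)
	return att.GetAggregatorIndex(), data.Slot, root
}
```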

View File

@@ -192,20 +192,13 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)
bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
if err := registerServices(cliCtx, beacon, synchronizer, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
}
@@ -292,11 +285,6 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
return nil, errors.Wrap(err, "could not start slashing DB")
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
@@ -315,9 +303,15 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
return bfs, nil
}
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bfs *backfill.Store) error {
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return errors.Wrap(err, "could not register P2P service")
}
log.Debugln("Registering Backfill Service")
if err := beacon.RegisterBackfillService(cliCtx, bfs); err != nil {
return errors.Wrap(err, "could not register Back Fill service")
}
log.Debugln("Registering POW Chain Service")
@@ -796,6 +790,7 @@ func (b *BeaconNode) registerPOWChainService() error {
execution.WithBeaconNodeStatsUpdater(bs),
execution.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
execution.WithJwtId(b.cliCtx.String(flags.JwtId.Name)),
execution.WithVerifierWaiter(b.verifyInitWaiter),
)
web3Service, err := execution.NewService(b.ctx, opts...)
if err != nil {
@@ -838,7 +833,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
regularsync.WithPayloadReconstructor(web3Service),
regularsync.WithReconstructor(web3Service),
regularsync.WithClockWaiter(b.clockWaiter),
regularsync.WithInitialSyncComplete(initialSyncComplete),
regularsync.WithStateNotifier(b),
@@ -953,55 +948,55 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
p2pService := b.fetchP2P()
rpcService := rpc.NewService(b.ctx, &rpc.Config{
ExecutionEngineCaller: web3Service,
ExecutionPayloadReconstructor: web3Service,
Host: host,
Port: port,
BeaconMonitoringHost: beaconMonitoringHost,
BeaconMonitoringPort: beaconMonitoringPort,
CertFlag: cert,
KeyFlag: key,
BeaconDB: b.db,
Broadcaster: p2pService,
PeersFetcher: p2pService,
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,
ForkchoiceFetcher: chainService,
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,
BLSChangesPool: b.blsToExecPool,
SyncCommitteeObjectPool: b.syncCommitteePool,
ExecutionChainService: web3Service,
ExecutionChainInfoFetcher: web3Service,
ChainStartFetcher: chainStartFetcher,
MockEth1Votes: mockEth1DataVotes,
SyncService: syncService,
DepositFetcher: depositFetcher,
PendingDepositFetcher: b.depositCache,
BlockNotifier: b,
StateNotifier: b,
OperationNotifier: b,
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
BlockBuilder: b.fetchBuilderService(),
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
ExecutionEngineCaller: web3Service,
ExecutionReconstructor: web3Service,
Host: host,
Port: port,
BeaconMonitoringHost: beaconMonitoringHost,
BeaconMonitoringPort: beaconMonitoringPort,
CertFlag: cert,
KeyFlag: key,
BeaconDB: b.db,
Broadcaster: p2pService,
PeersFetcher: p2pService,
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,
ForkchoiceFetcher: chainService,
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,
BLSChangesPool: b.blsToExecPool,
SyncCommitteeObjectPool: b.syncCommitteePool,
ExecutionChainService: web3Service,
ExecutionChainInfoFetcher: web3Service,
ChainStartFetcher: chainStartFetcher,
MockEth1Votes: mockEth1DataVotes,
SyncService: syncService,
DepositFetcher: depositFetcher,
PendingDepositFetcher: b.depositCache,
BlockNotifier: b,
StateNotifier: b,
OperationNotifier: b,
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
BlockBuilder: b.fetchBuilderService(),
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
})
return b.services.RegisterService(rpcService)
@@ -1135,6 +1130,16 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return b.services.RegisterService(svc)
}
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
if err != nil {
return errors.Wrap(err, "error initializing backfill service")
}
return b.services.RegisterService(bf)
}
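The practical effect of this refactor is an ordering constraint: the backfill service is now built inside RegisterBackfillService, which registerServices calls after registerP2P, since the peers.Assigner is constructed from the P2P service's peer set. A rough sketch of that dependency, using stand-in types rather than Prysm's actual services:

// Illustrative only: p2pService and backfillService are simplified stand-ins.
package main

import (
	"errors"
	"fmt"
)

type p2pService struct{ started bool }

func (p *p2pService) Peers() []string {
	if !p.started {
		return nil
	}
	return []string{"peerA", "peerB"}
}

type backfillService struct{ assignablePeers []string }

func newBackfill(p *p2pService) (*backfillService, error) {
	// Backfill assigns work to peers, so the P2P layer must exist first.
	if p == nil || !p.started {
		return nil, errors.New("p2p must be registered before backfill")
	}
	return &backfillService{assignablePeers: p.Peers()}, nil
}

func main() {
	p2p := &p2pService{}
	p2p.started = true // register P2P first
	bf, err := newBackfill(p2p)
	if err != nil {
		fmt.Println("registration failed:", err)
		return
	}
	fmt.Println("backfill registered with peers:", bf.assignablePeers)
}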
func hasNetworkFlag(cliCtx *cli.Context) bool {
for _, flag := range features.NetworkFlags {
for _, name := range flag.Names() {


@@ -49,12 +49,12 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
{
name: "nil attestation",
att: nil,
wantErrString: "attestation can't be nil",
wantErrString: "attestation is nil",
},
{
name: "nil attestation data",
att: &ethpb.Attestation{},
wantErrString: "attestation's data can't be nil",
wantErrString: "attestation is nil",
},
{
name: "not aggregated",
@@ -206,7 +206,7 @@ func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {
func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
t.Run("nil attestation", func(t *testing.T) {
cache := NewAttCaches()
assert.ErrorContains(t, "attestation can't be nil", cache.DeleteAggregatedAttestation(nil))
assert.ErrorContains(t, "attestation is nil", cache.DeleteAggregatedAttestation(nil))
att := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10101}, Data: &ethpb.AttestationData{Slot: 2}})
assert.NoError(t, cache.DeleteAggregatedAttestation(att))
})
@@ -288,7 +288,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
name: "nil attestation",
input: nil,
want: false,
err: errors.New("can't be nil"),
err: errors.New("is nil"),
},
{
name: "nil attestation data",
@@ -296,7 +296,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
AggregationBits: bitfield.Bitlist{0b1111},
},
want: false,
err: errors.New("can't be nil"),
err: errors.New("is nil"),
},
{
name: "empty cache aggregated",


@@ -8,7 +8,7 @@ import (
// SaveBlockAttestation saves a block attestation in cache.
func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
if att == nil {
if att == nil || att.IsNil() {
return nil
}
@@ -53,10 +53,9 @@ func (c *AttCaches) BlockAttestations() []ethpb.Att {
// DeleteBlockAttestation deletes a block attestation in cache.
func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
if att == nil {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")


@@ -8,7 +8,7 @@ import (
// SaveForkchoiceAttestation saves a forkchoice attestation in cache.
func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
if att == nil || att.IsNil() {
return nil
}
@@ -50,7 +50,7 @@ func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {
// DeleteForkchoiceAttestation deletes a forkchoice attestation in cache.
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil {
if att == nil || att.IsNil() {
return nil
}
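The `att == nil || att.IsNil()` guard repeated across these cache methods covers the classic Go pitfall that an interface holding a typed nil pointer is itself non-nil. A small self-contained illustration, with a simplified Att stand-in for ethpb.Att:

// Sketch of why both checks are needed; Att and Attestation here are toy types.
package main

import "fmt"

type Att interface{ IsNil() bool }

type Attestation struct{ Data *string }

func (a *Attestation) IsNil() bool { return a == nil || a.Data == nil }

func save(att Att) string {
	if att == nil || att.IsNil() {
		return "skipped"
	}
	return "saved"
}

func main() {
	var typedNil *Attestation
	fmt.Println(save(nil))      // skipped: untyped nil interface
	fmt.Println(save(typedNil)) // skipped: non-nil interface wrapping a nil pointer
	d := "data"
	fmt.Println(save(&Attestation{Data: &d})) // saved
}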


@@ -14,7 +14,7 @@ import (
// SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
if att == nil {
if att == nil || att.IsNil() {
return nil
}
if helpers.IsAggregated(att) {
@@ -130,9 +130,10 @@ func (c *AttCaches) UnaggregatedAttestationsBySlotIndexElectra(
// DeleteUnaggregatedAttestation deletes the unaggregated attestations in cache.
func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
if att == nil {
if att == nil || att.IsNil() {
return nil
}
if helpers.IsAggregated(att) {
return errors.New("attestation is aggregated")
}
@@ -161,7 +162,7 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
count := 0
for r, att := range c.unAggregatedAtt {
if att == nil || helpers.IsAggregated(att) {
if att == nil || att.IsNil() || helpers.IsAggregated(att) {
continue
}
if seen, err := c.hasSeenBit(att); err == nil && seen {


@@ -7,6 +7,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/operations/attestations:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],


@@ -3,13 +3,17 @@ package mock
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
var _ attestations.Pool = &PoolMock{}
// PoolMock --
type PoolMock struct {
AggregatedAtts []*ethpb.Attestation
AggregatedAtts []ethpb.Att
UnaggregatedAtts []ethpb.Att
}
// AggregateUnaggregatedAttestations --
@@ -23,18 +27,18 @@ func (*PoolMock) AggregateUnaggregatedAttestationsBySlotIndex(_ context.Context,
}
// SaveAggregatedAttestation --
func (*PoolMock) SaveAggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveAggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveAggregatedAttestations --
func (m *PoolMock) SaveAggregatedAttestations(atts []*ethpb.Attestation) error {
func (m *PoolMock) SaveAggregatedAttestations(atts []ethpb.Att) error {
m.AggregatedAtts = append(m.AggregatedAtts, atts...)
return nil
}
// AggregatedAttestations --
func (m *PoolMock) AggregatedAttestations() []*ethpb.Attestation {
func (m *PoolMock) AggregatedAttestations() []ethpb.Att {
return m.AggregatedAtts
}
@@ -43,13 +47,18 @@ func (*PoolMock) AggregatedAttestationsBySlotIndex(_ context.Context, _ primitiv
panic("implement me")
}
// AggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) AggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}
// DeleteAggregatedAttestation --
func (*PoolMock) DeleteAggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteAggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
// HasAggregatedAttestation --
func (*PoolMock) HasAggregatedAttestation(_ *ethpb.Attestation) (bool, error) {
func (*PoolMock) HasAggregatedAttestation(_ ethpb.Att) (bool, error) {
panic("implement me")
}
@@ -59,18 +68,19 @@ func (*PoolMock) AggregatedAttestationCount() int {
}
// SaveUnaggregatedAttestation --
func (*PoolMock) SaveUnaggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveUnaggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveUnaggregatedAttestations --
func (*PoolMock) SaveUnaggregatedAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
func (m *PoolMock) SaveUnaggregatedAttestations(atts []ethpb.Att) error {
m.UnaggregatedAtts = append(m.UnaggregatedAtts, atts...)
return nil
}
// UnaggregatedAttestations --
func (*PoolMock) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
panic("implement me")
func (m *PoolMock) UnaggregatedAttestations() ([]ethpb.Att, error) {
return m.UnaggregatedAtts, nil
}
// UnaggregatedAttestationsBySlotIndex --
@@ -78,8 +88,13 @@ func (*PoolMock) UnaggregatedAttestationsBySlotIndex(_ context.Context, _ primit
panic("implement me")
}
// UnaggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) UnaggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}
// DeleteUnaggregatedAttestation --
func (*PoolMock) DeleteUnaggregatedAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteUnaggregatedAttestation(_ ethpb.Att) error {
panic("implement me")
}
@@ -94,42 +109,42 @@ func (*PoolMock) UnaggregatedAttestationCount() int {
}
// SaveBlockAttestation --
func (*PoolMock) SaveBlockAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveBlockAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveBlockAttestations --
func (*PoolMock) SaveBlockAttestations(_ []*ethpb.Attestation) error {
func (*PoolMock) SaveBlockAttestations(_ []ethpb.Att) error {
panic("implement me")
}
// BlockAttestations --
func (*PoolMock) BlockAttestations() []*ethpb.Attestation {
func (*PoolMock) BlockAttestations() []ethpb.Att {
panic("implement me")
}
// DeleteBlockAttestation --
func (*PoolMock) DeleteBlockAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteBlockAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveForkchoiceAttestation --
func (*PoolMock) SaveForkchoiceAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) SaveForkchoiceAttestation(_ ethpb.Att) error {
panic("implement me")
}
// SaveForkchoiceAttestations --
func (*PoolMock) SaveForkchoiceAttestations(_ []*ethpb.Attestation) error {
func (*PoolMock) SaveForkchoiceAttestations(_ []ethpb.Att) error {
panic("implement me")
}
// ForkchoiceAttestations --
func (*PoolMock) ForkchoiceAttestations() []*ethpb.Attestation {
func (*PoolMock) ForkchoiceAttestations() []ethpb.Att {
panic("implement me")
}
// DeleteForkchoiceAttestation --
func (*PoolMock) DeleteForkchoiceAttestation(_ *ethpb.Attestation) error {
func (*PoolMock) DeleteForkchoiceAttestation(_ ethpb.Att) error {
panic("implement me")
}
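With the mock switched to the ethpb.Att interface and recording saves in its exported slices, a test can exercise it roughly as below; this is a sketch based only on the signatures shown in this hunk, using the import paths from the BUILD file above.

// Example test using the reworked mock.
package example

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func TestPoolMockRecordsAggregates(t *testing.T) {
	pool := &mock.PoolMock{}

	// Any type satisfying ethpb.Att can be stored, e.g. a phase0 *ethpb.Attestation.
	atts := []ethpb.Att{&ethpb.Attestation{}}
	if err := pool.SaveAggregatedAttestations(atts); err != nil {
		t.Fatal(err)
	}
	if got := len(pool.AggregatedAttestations()); got != 1 {
		t.Fatalf("expected 1 aggregated attestation, got %d", got)
	}
}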


@@ -75,6 +75,8 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",


@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
// multiaddr for the given peer.
func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
// Disallow bad peers from dialing in.
if s.peers.IsBad(pid) {
if s.peers.IsBad(pid) != nil {
return false
}
return filterConnections(s.addrFilter, m)


@@ -50,7 +50,7 @@ func TestPeer_AtMaxLimit(t *testing.T) {
}()
for i := 0; i < highWatermarkBuffer; i++ {
addPeer(t, s.peers, peers.PeerConnected, false)
addPeer(t, s.peers, peers.Connected, false)
}
// create alternate host
@@ -159,7 +159,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
inboundLimit += 1
// Add in up to inbound peer limit.
for i := 0; i < int(inboundLimit); i++ {
addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
if valid {


@@ -189,7 +189,7 @@ func (s *Service) RefreshENR() {
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
}
// ping all peers to inform them of new metadata
s.pingPeers()
s.pingPeersAndLogEnr()
}
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
@@ -452,7 +452,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
}
// Ignore bad nodes.
if s.peers.IsBad(peerData.ID) {
if s.peers.IsBad(peerData.ID) != nil {
return false
}


@@ -378,14 +378,14 @@ func TestInboundPeerLimit(t *testing.T) {
}
for i := 0; i < 30; i++ {
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
for i := 0; i < highWatermarkBuffer; i++ {
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
@@ -404,13 +404,13 @@ func TestOutboundPeerThreshold(t *testing.T) {
}
for i := 0; i < 2; i++ {
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
}
require.Equal(t, true, s.isBelowOutboundPeerThreshold(), "not at outbound peer threshold")
for i := 0; i < 3; i++ {
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
}
require.Equal(t, false, s.isBelowOutboundPeerThreshold(), "still at outbound peer threshold")
@@ -477,7 +477,7 @@ func TestCorrectUDPVersion(t *testing.T) {
}
// addPeer is a helper to add a peer with a given connection state.
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState, outbound bool) peer.ID {
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outbound bool) peer.ID {
// Set up some peers with different states
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)


@@ -2,7 +2,6 @@ package p2p
import (
"context"
"errors"
"fmt"
"io"
"sync"
@@ -10,6 +9,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
@@ -25,6 +25,46 @@ func peerMultiaddrString(conn network.Conn) string {
return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
}
func (s *Service) connectToPeer(conn network.Conn) {
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
// Go through the handshake process.
log.WithFields(logrus.Fields{
"direction": conn.Stat().Direction.String(),
"multiAddr": peerMultiaddrString(conn),
"activePeers": len(s.peers.Active()),
}).Debug("Initiate peer connection")
}
func (s *Service) disconnectFromPeerOnError(
conn network.Conn,
goodByeFunc func(ctx context.Context, id peer.ID) error,
badPeerErr error,
) {
// Get the remote peer ID.
remotePeerID := conn.RemotePeer()
// Set the peer to disconnecting state.
s.peers.SetConnectionState(remotePeerID, peers.Disconnecting)
// Only attempt a goodbye if we are still connected to the peer.
if s.host.Network().Connectedness(remotePeerID) == network.Connected {
if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
log.WithError(err).Error("Unable to disconnect from peer")
}
}
log.
WithError(badPeerErr).
WithFields(logrus.Fields{
"multiaddr": peerMultiaddrString(conn),
"direction": conn.Stat().Direction.String(),
"remainingActivePeers": len(s.peers.Active()),
}).
Debug("Initiate peer disconnection")
s.peers.SetConnectionState(remotePeerID, peers.Disconnected)
}
// AddConnectionHandler adds a callback function which handles the connection with a
// newly added peer. It performs a handshake with that peer by sending a hello request
// and validating the response from the peer.
@@ -57,18 +97,9 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
}
s.host.Network().Notify(&network.NotifyBundle{
ConnectedF: func(net network.Network, conn network.Conn) {
ConnectedF: func(_ network.Network, conn network.Conn) {
remotePeer := conn.RemotePeer()
disconnectFromPeer := func() {
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
// Only attempt a goodbye if we are still connected to the peer.
if s.host.Network().Connectedness(remotePeer) == network.Connected {
if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
log.WithError(err).Error("Unable to disconnect from peer")
}
}
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
}
// Connection handler must be non-blocking as part of libp2p design.
go func() {
if peerHandshaking(remotePeer) {
@@ -77,28 +108,21 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
return
}
defer peerFinished(remotePeer)
// Handle the various pre-existing conditions that will result in us not handshaking.
peerConnectionState, err := s.peers.ConnectionState(remotePeer)
if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
if err == nil && (peerConnectionState == peers.Connected || peerConnectionState == peers.Connecting) {
log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
return
}
s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)
// Defensive check in the event we still get a bad peer.
if s.peers.IsBad(remotePeer) {
log.WithField("reason", "bad peer").Trace("Ignoring connection request")
disconnectFromPeer()
if err := s.peers.IsBad(remotePeer); err != nil {
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
return
}
validPeerConnection := func() {
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
// Go through the handshake process.
log.WithFields(logrus.Fields{
"direction": conn.Stat().Direction,
"multiAddr": peerMultiaddrString(conn),
"activePeers": len(s.peers.Active()),
}).Debug("Peer connected")
}
// Do not perform handshake on inbound dials.
if conn.Stat().Direction == network.DirInbound {
@@ -117,63 +141,80 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
// If the peer hasn't sent a status request, we disconnect from them
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
statusMessageMissing.Inc()
disconnectFromPeer()
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
return
}
if peerExists {
updated, err := s.peers.ChainStateLastUpdated(remotePeer)
if err != nil {
disconnectFromPeer()
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
return
}
// exit if we don't receive any current status messages from
// peer.
if updated.IsZero() || !updated.After(currentTime) {
disconnectFromPeer()
// Exit if we don't receive any current status messages from peer.
if updated.IsZero() {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
return
}
if updated.Before(currentTime) {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
return
}
}
validPeerConnection()
s.connectToPeer(conn)
return
}
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
log.WithError(err).Trace("Handshake failed")
disconnectFromPeer()
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
return
}
validPeerConnection()
s.connectToPeer(conn)
}()
},
})
}
// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
// This also calls the handler responsible for maintaining other parts of the sync or p2p system.
func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
s.host.Network().Notify(&network.NotifyBundle{
DisconnectedF: func(net network.Network, conn network.Conn) {
log := log.WithField("multiAddr", peerMultiaddrString(conn))
peerID := conn.RemotePeer()
log.WithFields(logrus.Fields{
"multiAddr": peerMultiaddrString(conn),
"direction": conn.Stat().Direction.String(),
})
// Must be handled in a goroutine as this callback cannot be blocking.
go func() {
// Exit early if we are still connected to the peer.
if net.Connectedness(conn.RemotePeer()) == network.Connected {
if net.Connectedness(peerID) == network.Connected {
return
}
priorState, err := s.peers.ConnectionState(conn.RemotePeer())
priorState, err := s.peers.ConnectionState(peerID)
if err != nil {
// Can happen if the peer has already disconnected, so...
priorState = peers.PeerDisconnected
priorState = peers.Disconnected
}
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
s.peers.SetConnectionState(peerID, peers.Disconnecting)
if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
log.WithError(err).Error("Disconnect handler failed")
}
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
s.peers.SetConnectionState(peerID, peers.Disconnected)
// Only log disconnections if we were fully connected.
if priorState == peers.PeerConnected {
log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
if priorState == peers.Connected {
activePeersCount := len(s.peers.Active())
log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
}
}()
},
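Both handlers now drive the same small state machine: Connecting on handshake start, Connected on success, Disconnecting then Disconnected on failure or disconnect. A rough sketch of those transitions, with a simplified enum in place of the peers package:

// Illustrative state machine; not Prysm's peers package.
package main

import "fmt"

type connState int

const (
	disconnected connState = iota
	disconnecting
	connected
	connecting
)

func (s connState) String() string {
	return [...]string{"disconnected", "disconnecting", "connected", "connecting"}[s]
}

// handshake mimics the flow above: mark Connecting, run the status request,
// then either mark Connected or walk through Disconnecting -> Disconnected.
func handshake(statusReq func() error, set func(connState)) {
	set(connecting)
	if err := statusReq(); err != nil {
		set(disconnecting) // a goodbye message is attempted here in the real code
		set(disconnected)
		return
	}
	set(connected)
}

func main() {
	set := func(s connState) { fmt.Println("state:", s) }
	handshake(func() error { return nil }, set)                      // success path
	handshake(func() error { return fmt.Errorf("bad status") }, set) // failure path
}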


@@ -29,7 +29,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
return bytesutil.UnsafeCastToString(msg)
}
digest, err := ExtractGossipDigest(*pmsg.Topic)
if err != nil {
@@ -37,7 +37,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
return bytesutil.UnsafeCastToString(msg)
}
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
if err != nil {
@@ -45,7 +45,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
return bytesutil.UnsafeCastToString(msg)
}
if fEpoch >= params.BeaconConfig().AltairForkEpoch {
return postAltairMsgID(pmsg, fEpoch)
@@ -54,11 +54,11 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
if err != nil {
combinedData := append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pmsg.Data...)
h := hash.Hash(combinedData)
return string(h[:20])
return bytesutil.UnsafeCastToString(h[:20])
}
combinedData := append(params.BeaconConfig().MessageDomainValidSnappy[:], decodedData...)
h := hash.Hash(combinedData)
return string(h[:20])
return bytesutil.UnsafeCastToString(h[:20])
}
// Spec:
@@ -93,13 +93,13 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
return bytesutil.UnsafeCastToString(msg)
}
if uint64(totalLength) > gossipPubSubSize {
// this should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
return bytesutil.UnsafeCastToString(msg)
}
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconConfig().MessageDomainInvalidSnappy[:]...)
@@ -107,7 +107,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, pmsg.Data...)
h := hash.Hash(combinedData)
return string(h[:20])
return bytesutil.UnsafeCastToString(h[:20])
}
totalLength, err := math.AddInt(
len(params.BeaconConfig().MessageDomainValidSnappy),
@@ -120,7 +120,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
return bytesutil.UnsafeCastToString(msg)
}
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconConfig().MessageDomainValidSnappy[:]...)
@@ -128,5 +128,5 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, decodedData...)
h := hash.Hash(combinedData)
return string(h[:20])
return bytesutil.UnsafeCastToString(h[:20])
}
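The switch to bytesutil.UnsafeCastToString presumably avoids copying the 20-byte digest for every message ID. The sketch below shows one way such a zero-copy cast can be written; this is an assumption about the helper, not its actual implementation, and the usual caveat applies: the byte slice must not be mutated after the cast, since the string aliases its memory.

// Assumed implementation sketch of a zero-copy []byte -> string cast (Go 1.20+).
package main

import (
	"fmt"
	"unsafe"
)

func unsafeCastToString(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	// The returned string shares backing memory with b; b must stay immutable.
	return unsafe.String(&b[0], len(b))
}

func main() {
	h := []byte{0xde, 0xad, 0xbe, 0xef}
	id := unsafeCastToString(h)
	fmt.Printf("%x\n", id) // deadbeef
}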


@@ -23,8 +23,8 @@ var (
ErrNoPeerStatus = errors.New("no chain status for peer")
)
// PeerConnectionState is the state of the connection.
type PeerConnectionState ethpb.ConnectionState
// ConnectionState is the state of the connection.
type ConnectionState ethpb.ConnectionState
// StoreConfig holds peer store parameters.
type StoreConfig struct {
@@ -49,7 +49,7 @@ type PeerData struct {
// Network related data.
Address ma.Multiaddr
Direction network.Direction
ConnState PeerConnectionState
ConnState ConnectionState
Enr *enr.Record
NextValidTime time.Time
// Chain related data.


@@ -20,6 +20,7 @@ go_library(
"//crypto/rand:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)


@@ -4,6 +4,7 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
)
@@ -61,7 +62,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
return BadPeerScore
}
score := float64(0)
@@ -116,18 +117,24 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
// IsBadPeer states if the peer is to be considered bad.
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
}
// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.BadResponses >= s.config.Threshold
if peerData.BadResponses >= s.config.Threshold {
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
}
return nil
}
return false
return nil
}
// BadPeers returns the peers that are considered bad.
@@ -137,7 +144,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
badPeers = append(badPeers, pid)
}
}
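The bool-to-error migration keeps the call sites shaped the same (non-nil means bad) while letting them log why a peer was flagged. A minimal stdlib-only sketch of the pattern (the real code uses errors.Errorf from github.com/pkg/errors):

// Sketch of the threshold predicate returning a reason instead of a bool.
package main

import "fmt"

func exceedsBadResponses(count, threshold int) error {
	if count >= threshold {
		return fmt.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", count, threshold)
	}
	return nil
}

func main() {
	if err := exceedsBadResponses(7, 6); err != nil {
		// Callers treat non-nil as "bad peer" and can surface the reason in logs.
		fmt.Println("disconnecting:", err)
	}
	if err := exceedsBadResponses(2, 6); err == nil {
		fmt.Println("peer is fine")
	}
}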


@@ -33,19 +33,19 @@ func TestScorers_BadResponses_Score(t *testing.T) {
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))
scorer.Increment(pid)
assert.Equal(t, true, scorer.IsBadPeer(pid))
assert.NotNil(t, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
}
@@ -152,17 +152,17 @@ func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pid := peer.ID("peer1")
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pid)
if i == scorers.DefaultBadResponsesThreshold-1 {
assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
} else {
assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
}
}
}
@@ -185,11 +185,11 @@ func TestScorers_BadResponses_BadPeers(t *testing.T) {
scorer.Increment(pids[2])
scorer.Increment(pids[4])
}
assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
want := []peer.ID{pids[1], pids[2], pids[4]}
badPeers := scorer.BadPeers()
sort.Slice(badPeers, func(i, j int) bool {


@@ -177,8 +177,8 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that a lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
return false
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) error {
return nil
}
// BadPeers returns the peers that are considered bad.


@@ -119,7 +119,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
@@ -224,7 +224,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
}{
{
name: "no peers",
update: func(s *scorers.BlockProviderScorer) {},
update: func(*scorers.BlockProviderScorer) {},
have: []peer.ID{},
want: []peer.ID{},
},
@@ -451,7 +451,7 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
})
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
if tt.update != nil {
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
})
scorer := peerStatuses.Scorers().BlockProviderScorer()
assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
scorer.IncrementProcessedBlocks("peer1", 64)
assert.Equal(t, false, scorer.IsBadPeer("peer1"))
assert.NoError(t, scorer.IsBadPeer("peer1"))
assert.Equal(t, 0, len(scorer.BadPeers()))
}


@@ -2,6 +2,7 @@ package scorers
import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
pbrpc "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -51,19 +52,24 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
}
// IsBadPeer states if the peer is to be considered bad.
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
func (s *GossipScorer) IsBadPeer(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
}
// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) error {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
return nil
}
return peerData.GossipScore < gossipThreshold
if peerData.GossipScore < gossipThreshold {
return errors.Errorf("gossip score below threshold: got %f - threshold %f", peerData.GossipScore, gossipThreshold)
}
return nil
}
// BadPeers returns the peers that are considered bad.
@@ -73,7 +79,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
badPeers = append(badPeers, pid)
}
}


@@ -21,7 +21,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
}{
{
name: "nonexistent peer",
update: func(scorer *scorers.GossipScorer) {
update: func(*scorers.GossipScorer) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
@@ -34,7 +34,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, -101.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, true, scorer.IsBadPeer("peer1"), "Unexpected good peer")
assert.NotNil(t, scorer.IsBadPeer("peer1"), "Unexpected good peer")
},
},
{
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -53,7 +53,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})


@@ -46,7 +46,7 @@ func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
return BadPeerScore
}
score := float64(0)
@@ -67,30 +67,34 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
}
// IsBadPeer states if the peer is to be considered bad.
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
}
// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) error {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
return nil
}
// Mark peer as bad, if the latest error is one of the terminal ones.
terminalErrs := []error{
p2ptypes.ErrWrongForkDigestVersion,
p2ptypes.ErrInvalidFinalizedRoot,
p2ptypes.ErrInvalidRequest,
}
for _, err := range terminalErrs {
if errors.Is(peerData.ChainStateValidationError, err) {
return true
return err
}
}
return false
return nil
}
// BadPeers returns the peers that are considered bad.
@@ -100,7 +104,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
badPeers = append(badPeers, pid)
}
}
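Returning the matched terminal error, rather than true, means callers can still tell the conditions apart with errors.Is even after wrapping. A self-contained sketch with stand-in sentinels instead of the p2ptypes errors:

// Sketch of terminal-error matching; sentinels here are illustrative.
package main

import (
	"errors"
	"fmt"
)

var (
	errWrongForkDigest  = errors.New("wrong fork digest version")
	errInvalidFinalized = errors.New("invalid finalized root")
	errInvalidRequest   = errors.New("invalid request")
)

// badPeerReason returns the terminal error that flagged the peer, or nil.
func badPeerReason(validationErr error) error {
	for _, terminal := range []error{errWrongForkDigest, errInvalidFinalized, errInvalidRequest} {
		if errors.Is(validationErr, terminal) {
			return terminal
		}
	}
	return nil
}

func main() {
	stored := fmt.Errorf("status handshake: %w", errWrongForkDigest)
	if err := badPeerReason(stored); err != nil {
		fmt.Println("bad peer:", err)
	}
	if err := badPeerReason(errors.New("transient timeout")); err == nil {
		fmt.Println("not a terminal failure, peer kept")
	}
}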


@@ -122,7 +122,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
@@ -140,12 +140,12 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
ScorerParams: &scorers.Config{},
})
pid := peer.ID("peer1")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}
func TestScorers_PeerStatus_BadPeers(t *testing.T) {
@@ -155,22 +155,22 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
pid1 := peer.ID("peer1")
pid2 := peer.ID("peer2")
pid3 := peer.ID("peer3")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}


@@ -6,6 +6,7 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v5/config/features"
)
@@ -24,7 +25,7 @@ const BadPeerScore = gossipThreshold
// Scorer defines minimum set of methods every peer scorer must expose.
type Scorer interface {
Score(pid peer.ID) float64
IsBadPeer(pid peer.ID) bool
IsBadPeer(pid peer.ID) error
BadPeers() []peer.ID
}
@@ -124,26 +125,29 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
}
// IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
func (s *Service) IsBadPeer(pid peer.ID) bool {
func (s *Service) IsBadPeer(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
return s.IsBadPeerNoLock(pid)
}
// IsBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
return true
func (s *Service) IsBadPeerNoLock(pid peer.ID) error {
if err := s.scorers.badResponsesScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "bad responses scorer")
}
if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
return true
if err := s.scorers.peerStatusScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "peer status scorer")
}
if features.Get().EnablePeerScorer {
if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
return true
if err := s.scorers.gossipScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "gossip scorer")
}
}
return false
return nil
}
// BadPeers returns the peers that are considered bad by any of registered scorers.
@@ -153,7 +157,7 @@ func (s *Service) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.IsBadPeerNoLock(pid) {
if s.IsBadPeerNoLock(pid) != nil {
badPeers = append(badPeers, pid)
}
}
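At the service level the scorers are now composed by short-circuiting on the first failure and wrapping it with the scorer's name, so logs show which scorer tripped. A sketch of that composition, with illustrative closures in place of the scorer types:

// Sketch of the aggregation pattern: wrap the first failing check's error.
package main

import "fmt"

type check struct {
	name string
	fn   func() error
}

func isBadPeer(checks []check) error {
	for _, c := range checks {
		if err := c.fn(); err != nil {
			return fmt.Errorf("%s: %w", c.name, err)
		}
	}
	return nil
}

func main() {
	checks := []check{
		{"bad responses scorer", func() error { return nil }},
		{"peer status scorer", func() error { return fmt.Errorf("invalid finalized root") }},
		{"gossip scorer", func() error { return nil }},
	}
	if err := isBadPeer(checks); err != nil {
		fmt.Println("peer flagged:", err) // peer flagged: peer status scorer: invalid finalized root
	}
}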


@@ -100,7 +100,7 @@ func TestScorers_Service_Score(t *testing.T) {
return scores
}
pack := func(scorer *scorers.Service, s1, s2, s3 float64) map[string]float64 {
pack := func(_ *scorers.Service, s1, s2, s3 float64) map[string]float64 {
return map[string]float64{
"peer1": roundScore(s1),
"peer2": roundScore(s2),
@@ -237,7 +237,7 @@ func TestScorers_Service_loop(t *testing.T) {
for i := 0; i < s1.Params().Threshold+5; i++ {
s1.Increment(pid1)
}
assert.Equal(t, true, s1.IsBadPeer(pid1), "Peer should be marked as bad")
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
s2.IncrementProcessedBlocks("peer1", 221)
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
@@ -252,7 +252,7 @@ func TestScorers_Service_loop(t *testing.T) {
for {
select {
case <-ticker.C:
if s1.IsBadPeer(pid1) == false && s2.ProcessedBlocks("peer1") == 0 {
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
return
}
case <-ctx.Done():
@@ -263,7 +263,7 @@ func TestScorers_Service_loop(t *testing.T) {
}()
<-done
assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}
@@ -278,10 +278,10 @@ func TestScorers_Service_IsBadPeer(t *testing.T) {
},
})
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
}
func TestScorers_Service_BadPeers(t *testing.T) {
@@ -295,16 +295,16 @@ func TestScorers_Service_BadPeers(t *testing.T) {
},
})
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}


@@ -34,6 +34,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -49,14 +50,14 @@ import (
)
const (
// PeerDisconnected means there is no connection to the peer.
PeerDisconnected peerdata.PeerConnectionState = iota
// PeerDisconnecting means there is an on-going attempt to disconnect from the peer.
PeerDisconnecting
// PeerConnected means the peer has an active connection.
PeerConnected
// PeerConnecting means there is an on-going attempt to connect to the peer.
PeerConnecting
// Disconnected means there is no connection to the peer.
Disconnected peerdata.ConnectionState = iota
// Disconnecting means there is an on-going attempt to disconnect from the peer.
Disconnecting
// Connected means the peer has an active connection.
Connected
// Connecting means there is an on-going attempt to connect to the peer.
Connecting
)
const (
@@ -150,7 +151,7 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
Address: address,
Direction: direction,
// Peers start disconnected; state will be updated when the handshake process begins.
ConnState: PeerDisconnected,
ConnState: Disconnected,
}
if record != nil {
peerData.Enr = record
@@ -212,7 +213,7 @@ func (p *Status) IsActive(pid peer.ID) bool {
defer p.store.RUnlock()
peerData, ok := p.store.PeerData(pid)
return ok && (peerData.ConnState == PeerConnected || peerData.ConnState == PeerConnecting)
return ok && (peerData.ConnState == Connected || peerData.ConnState == Connecting)
}
// IsAboveInboundLimit checks if we are above our current inbound
@@ -222,7 +223,7 @@ func (p *Status) IsAboveInboundLimit() bool {
defer p.store.RUnlock()
totalInbound := 0
for _, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
if peerData.ConnState == Connected &&
peerData.Direction == network.DirInbound {
totalInbound += 1
}
@@ -286,7 +287,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
// look at active peers
connectedStatus := peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected
connectedStatus := peerData.ConnState == Connecting || peerData.ConnState == Connected
if connectedStatus && peerData.MetaData != nil && !peerData.MetaData.IsNil() && peerData.MetaData.AttnetsBitfield() != nil {
indices := indicesFromBitfield(peerData.MetaData.AttnetsBitfield())
for _, idx := range indices {
@@ -301,7 +302,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
}
// SetConnectionState sets the connection state of the given remote peer.
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionState) {
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.ConnectionState) {
p.store.Lock()
defer p.store.Unlock()
@@ -311,14 +312,14 @@ func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionSt
// ConnectionState gets the connection state of the given remote peer.
// This will error if the peer does not exist.
func (p *Status) ConnectionState(pid peer.ID) (peerdata.PeerConnectionState, error) {
func (p *Status) ConnectionState(pid peer.ID) (peerdata.ConnectionState, error) {
p.store.RLock()
defer p.store.RUnlock()
if peerData, ok := p.store.PeerData(pid); ok {
return peerData.ConnState, nil
}
return PeerDisconnected, peerdata.ErrPeerUnknown
return Disconnected, peerdata.ErrPeerUnknown
}
// ChainStateLastUpdated gets the last time the chain state of the given remote peer was updated.
@@ -335,19 +336,29 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) bool {
func (p *Status) IsBad(pid peer.ID) error {
p.store.RLock()
defer p.store.RUnlock()
return p.isBad(pid)
}
// isBad is the lock-free version of IsBad.
func (p *Status) isBad(pid peer.ID) bool {
func (p *Status) isBad(pid peer.ID) error {
// Do not disconnect from trusted peers.
if p.store.IsTrustedPeer(pid) {
return false
return nil
}
return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)
if err := p.isfromBadIP(pid); err != nil {
return errors.Wrap(err, "peer is from a bad IP")
}
if err := p.scorers.IsBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "is bad peer no lock")
}
return nil
}
// NextValidTime gets the earliest possible time it is to contact/dial
@@ -411,7 +422,7 @@ func (p *Status) Connecting() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnecting {
if peerData.ConnState == Connecting {
peers = append(peers, pid)
}
}
@@ -424,7 +435,7 @@ func (p *Status) Connected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected {
if peerData.ConnState == Connected {
peers = append(peers, pid)
}
}
@@ -450,7 +461,7 @@ func (p *Status) InboundConnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound {
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound {
peers = append(peers, pid)
}
}
@@ -463,7 +474,7 @@ func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
@@ -489,7 +500,7 @@ func (p *Status) OutboundConnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound {
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound {
peers = append(peers, pid)
}
}
@@ -502,7 +513,7 @@ func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
@@ -515,7 +526,7 @@ func (p *Status) Active() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected {
if peerData.ConnState == Connecting || peerData.ConnState == Connected {
peers = append(peers, pid)
}
}
@@ -528,7 +539,7 @@ func (p *Status) Disconnecting() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnecting {
if peerData.ConnState == Disconnecting {
peers = append(peers, pid)
}
}
@@ -541,7 +552,7 @@ func (p *Status) Disconnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected {
if peerData.ConnState == Disconnected {
peers = append(peers, pid)
}
}
@@ -554,7 +565,7 @@ func (p *Status) Inactive() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnecting || peerData.ConnState == PeerDisconnected {
if peerData.ConnState == Disconnecting || peerData.ConnState == Disconnected {
peers = append(peers, pid)
}
}
@@ -592,7 +603,7 @@ func (p *Status) Prune() {
return
}
notBadPeer := func(pid peer.ID) bool {
return !p.isBad(pid)
return p.isBad(pid) == nil
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
@@ -605,7 +616,7 @@ func (p *Status) Prune() {
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune trusted peer or prune the peer data and unset trusted peer.
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
if peerData.ConnState == Disconnected && notBadPeer(pid) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.Scorers().ScoreNoLock(pid),
@@ -657,7 +668,7 @@ func (p *Status) deprecatedPrune() {
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune trusted peer or prune the peer data and unset trusted peer.
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
if peerData.ConnState == Disconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
@@ -814,7 +825,7 @@ func (p *Status) PeersToPrune() []peer.ID {
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
if peerData.ConnState == Connected &&
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
@@ -880,7 +891,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
if peerData.ConnState == Connected &&
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
@@ -982,24 +993,28 @@ func (p *Status) isTrustedPeers(pid peer.ID) bool {
// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {
func (p *Status) isfromBadIP(pid peer.ID) error {
peerData, ok := p.store.PeerData(pid)
if !ok {
return false
return nil
}
if peerData.Address == nil {
return false
return nil
}
ip, err := manet.ToIP(peerData.Address)
if err != nil {
return true
return errors.Wrap(err, "to ip")
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
return true
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
}
}
return false
return nil
}
func (p *Status) addIpToTracker(pid peer.ID) {
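The isfromBadIP hunk above replaces a bare boolean with an error that explains why an address is rejected. Below is a standalone sketch of the collocation part of that check, assuming a hypothetical local collocationLimit constant (the real CollocationLimit is defined elsewhere in the peers package); it is illustrative, not the production code.

package peercheck

import "github.com/pkg/errors"

// collocationLimit is a hypothetical stand-in for the package-level CollocationLimit.
const collocationLimit = 5

// checkCollocation returns nil when the IP is acceptable; otherwise the error
// reports how far over the limit the IP is, mirroring the error-based check above.
func checkCollocation(ipCounts map[string]int, ip string) error {
	if count, ok := ipCounts[ip]; ok && count > collocationLimit {
		return errors.Errorf("collocation limit exceeded: got %d - limit %d", count, collocationLimit)
	}
	return nil
}

A caller can now log the returned error directly instead of translating a boolean back into a reason.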

View File

@@ -215,7 +215,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
// Add some peers with different states
numPeers := 2
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}
expectedPeer := p.All()[1]
bitV := bitfield.NewBitvector64()
@@ -230,7 +230,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
}))
numPeers = 3
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.PeerDisconnected)
addPeer(t, p, peers.Disconnected)
}
ps := p.SubscribedToSubnet(2)
assert.Equal(t, 1, len(ps), "Unexpected num of peers")
@@ -259,7 +259,7 @@ func TestPeerImplicitAdd(t *testing.T) {
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
require.NoError(t, err)
connectionState := peers.PeerConnecting
connectionState := peers.Connecting
p.SetConnectionState(id, connectionState)
resConnectionState, err := p.ConnectionState(id)
@@ -347,7 +347,7 @@ func TestPeerBadResponses(t *testing.T) {
require.NoError(t, err)
}
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
@@ -358,25 +358,25 @@ func TestPeerBadResponses(t *testing.T) {
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
}
func TestAddMetaData(t *testing.T) {
@@ -393,7 +393,7 @@ func TestAddMetaData(t *testing.T) {
// Add some peers with different states
numPeers := 5
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}
newPeer := p.All()[2]
@@ -422,19 +422,19 @@ func TestPeerConnectionStatuses(t *testing.T) {
// Add some peers with different states
numPeersDisconnected := 11
for i := 0; i < numPeersDisconnected; i++ {
addPeer(t, p, peers.PeerDisconnected)
addPeer(t, p, peers.Disconnected)
}
numPeersConnecting := 7
for i := 0; i < numPeersConnecting; i++ {
addPeer(t, p, peers.PeerConnecting)
addPeer(t, p, peers.Connecting)
}
numPeersConnected := 43
for i := 0; i < numPeersConnected; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}
numPeersDisconnecting := 4
for i := 0; i < numPeersDisconnecting; i++ {
addPeer(t, p, peers.PeerDisconnecting)
addPeer(t, p, peers.Disconnecting)
}
// Now confirm the states
@@ -463,7 +463,7 @@ func TestPeerValidTime(t *testing.T) {
numPeersConnected := 6
for i := 0; i < numPeersConnected; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}
allPeers := p.All()
@@ -510,10 +510,10 @@ func TestPrune(t *testing.T) {
for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.PeerDisconnected)
_ = addPeer(t, p, peers.Disconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.PeerConnected)
_ = addPeer(t, p, peers.Connected)
}
disPeers := p.Disconnected()
@@ -571,23 +571,23 @@ func TestPeerIPTracker(t *testing.T) {
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
}
// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.PeerDisconnected)
pid := addPeer(t, p, peers.Disconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()
for _, pr := range badPeers {
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
@@ -601,8 +601,11 @@ func TestTrimmedOrderedPeers(t *testing.T) {
},
})
expectedTarget := primitives.Epoch(2)
maxPeers := 3
const (
expectedTarget = primitives.Epoch(2)
maxPeers = 3
)
var mockroot2 [32]byte
var mockroot3 [32]byte
var mockroot4 [32]byte
@@ -611,36 +614,41 @@ func TestTrimmedOrderedPeers(t *testing.T) {
copy(mockroot3[:], "three")
copy(mockroot4[:], "four")
copy(mockroot5[:], "five")
// Peer 1
pid1 := addPeer(t, p, peers.PeerConnected)
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.Status{
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 3,
FinalizedRoot: mockroot3[:],
})
// Peer 2
pid2 := addPeer(t, p, peers.PeerConnected)
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.Status{
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 4,
FinalizedRoot: mockroot4[:],
})
// Peer 3
pid3 := addPeer(t, p, peers.PeerConnected)
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.Status{
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 5,
FinalizedRoot: mockroot5[:],
})
// Peer 4
pid4 := addPeer(t, p, peers.PeerConnected)
pid4 := addPeer(t, p, peers.Connected)
p.SetChainState(pid4, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
})
// Peer 5
pid5 := addPeer(t, p, peers.PeerConnected)
pid5 := addPeer(t, p, peers.Connected)
p.SetChainState(pid5, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
@@ -680,12 +688,12 @@ func TestAtInboundPeerLimit(t *testing.T) {
})
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
assert.Equal(t, false, p.IsAboveInboundLimit(), "Inbound limit exceeded")
for i := 0; i < 31; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
assert.Equal(t, true, p.IsAboveInboundLimit(), "Inbound limit not exceeded")
}
@@ -705,7 +713,7 @@ func TestPrunePeers(t *testing.T) {
})
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are no prunable peers.
peersToPrune := p.PeersToPrune()
@@ -713,7 +721,7 @@ func TestPrunePeers(t *testing.T) {
for i := 0; i < 18; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are the correct prunable peers.
@@ -723,7 +731,7 @@ func TestPrunePeers(t *testing.T) {
// Add in more peers.
for i := 0; i < 13; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Set up bad scores for inbound peers.
@@ -767,7 +775,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are no prunable peers.
peersToPrune := p.PeersToPrune()
@@ -775,7 +783,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
for i := 0; i < 18; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are the correct prunable peers.
@@ -785,7 +793,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Add in more peers.
for i := 0; i < 13; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
var trustedPeers []peer.ID
@@ -821,7 +829,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Add more peers to check if trusted peers can be pruned after they are deleted from trusted peer set.
for i := 0; i < 9; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Delete trusted peers.
@@ -865,14 +873,14 @@ func TestStatus_BestPeer(t *testing.T) {
headSlot primitives.Slot
finalizedEpoch primitives.Epoch
}
tests := []struct {
name string
peers []*peerConfig
limitPeers int
ourFinalizedEpoch primitives.Epoch
targetEpoch primitives.Epoch
// targetEpochSupport denotes how many peers support returned epoch.
targetEpochSupport int
name string
peers []*peerConfig
limitPeers int
ourFinalizedEpoch primitives.Epoch
targetEpoch primitives.Epoch
targetEpochSupport int // Denotes how many peers support returned epoch.
}{
{
name: "head slot matches finalized epoch",
@@ -885,6 +893,7 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 3 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -902,6 +911,7 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -916,6 +926,7 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -930,8 +941,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 15,
ourFinalizedEpoch: 5,
targetEpoch: 6,
targetEpochSupport: 1,
},
@@ -950,8 +961,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 15,
ourFinalizedEpoch: 5,
targetEpoch: 6,
targetEpochSupport: 5,
},
@@ -970,8 +981,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 4,
ourFinalizedEpoch: 5,
targetEpoch: 6,
targetEpochSupport: 4,
},
@@ -986,8 +997,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 15,
ourFinalizedEpoch: 5,
targetEpoch: 8,
targetEpochSupport: 3,
},
@@ -1002,7 +1013,7 @@ func TestStatus_BestPeer(t *testing.T) {
},
})
for _, peerConfig := range tt.peers {
p.SetChainState(addPeer(t, p, peers.PeerConnected), &pb.Status{
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
FinalizedEpoch: peerConfig.finalizedEpoch,
HeadSlot: peerConfig.headSlot,
})
@@ -1028,7 +1039,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
for i := 0; i <= maxPeers+100; i++ {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
FinalizedEpoch: 10,
})
@@ -1051,7 +1062,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
peerSlots := []primitives.Slot{32, 32, 32, 32, 235, 233, 258, 268, 270}
for i, headSlot := range peerSlots {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
HeadSlot: headSlot,
})
@@ -1074,17 +1085,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
},
})
// Peer 1
pid1 := addPeer(t, p, peers.PeerConnected)
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
// Peer 2
pid2 := addPeer(t, p, peers.PeerConnected)
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
})
// Peer 3
pid3 := addPeer(t, p, peers.PeerConnected)
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
@@ -1103,8 +1114,8 @@ func TestInbound(t *testing.T) {
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
createPeer(t, p, addr, network.DirOutbound, peers.Connected)
result := p.Inbound()
require.Equal(t, 1, len(result))
@@ -1123,8 +1134,8 @@ func TestInboundConnected(t *testing.T) {
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
createPeer(t, p, addr, network.DirInbound, peers.Connecting)
result := p.InboundConnected()
require.Equal(t, 1, len(result))
@@ -1157,7 +1168,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
expectedTCP[peer.String()] = true
}
@@ -1166,7 +1177,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
expectedQUIC[peer.String()] = true
}
@@ -1203,8 +1214,8 @@ func TestOutbound(t *testing.T) {
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
outbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.Connected)
outbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
result := p.Outbound()
require.Equal(t, 1, len(result))
@@ -1223,8 +1234,8 @@ func TestOutboundConnected(t *testing.T) {
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
createPeer(t, p, addr, network.DirOutbound, peers.Connecting)
result := p.OutboundConnected()
require.Equal(t, 1, len(result))
@@ -1257,7 +1268,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
expectedTCP[peer.String()] = true
}
@@ -1266,7 +1277,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
expectedQUIC[peer.String()] = true
}
@@ -1293,7 +1304,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
}
// addPeer is a helper to add a peer with a given connection state.
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState) peer.ID {
// Set up some peers with different states
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
@@ -1312,7 +1323,7 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
}
func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
dir network.Direction, state peerdata.PeerConnectionState) peer.ID {
dir network.Direction, state peerdata.ConnectionState) peer.ID {
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
_, err := rand.Read(idBytes)
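Because IsBad now returns an error, the assertions above move from boolean equality to NoError / NotNil. A minimal standard-library sketch of the same checks, with hypothetical helper names (the actual tests use the assert package exactly as shown in the hunks):

package peerstest

import (
	"testing"

	"github.com/libp2p/go-libp2p/core/peer"
)

// requireGoodPeer fails the test if the error-returning IsBad reports the peer as bad.
func requireGoodPeer(t *testing.T, isBad func(peer.ID) error, pid peer.ID) {
	t.Helper()
	if err := isBad(pid); err != nil {
		t.Fatalf("peer %s unexpectedly marked as bad: %v", pid, err)
	}
}

// requireBadPeer fails the test if IsBad reports no error for a peer expected to be bad.
func requireBadPeer(t *testing.T, isBad func(peer.ID) error, pid peer.ID) {
	t.Helper()
	if err := isBad(pid); err == nil {
		t.Fatal("peer expected to be marked as bad, but IsBad returned nil")
	}
}

Usage would look like requireBadPeer(t, p.IsBad, id), passing the method value directly.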

View File

@@ -165,14 +165,14 @@ func (s *Service) pubsubOptions() []pubsub.Option {
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
addrs, err := PeersFromStringAddrs(peers)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %w", err)
return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %w", err)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
return nil, fmt.Errorf("converting peers raw ENRs into multiaddresses resulted in an empty list")
}
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %w", err)
return nil, fmt.Errorf("cannot convert peers multiaddresses into AddrInfos: %w", err)
}
return directAddrInfos, nil
}
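The parsePeersEnr hunk applies the usual Go convention that error strings start lower-case, since callers generally wrap or prefix them. A generic sketch of that convention using only the standard library; all names here are illustrative:

package enrparse

import (
	"errors"
	"fmt"
)

// errNoPeers is a sentinel for the empty-list case so callers can test for it with errors.Is.
var errNoPeers = errors.New("converting peers raw ENRs into multiaddresses resulted in an empty list")

func parseENRs(raw []string) ([]string, error) {
	if len(raw) == 0 {
		return nil, errNoPeers
	}
	addrs, err := toMultiaddrs(raw)
	if err != nil {
		// Lower-case message; %w preserves the cause for errors.Is / errors.As.
		return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %w", err)
	}
	return addrs, nil
}

// toMultiaddrs is a stand-in for the real conversion step.
func toMultiaddrs(raw []string) ([]string, error) { return raw, nil }

Callers can then branch on errors.Is(err, errNoPeers) rather than matching message text.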

View File

@@ -202,12 +202,13 @@ func (s *Service) Start() {
s.startupErr = err
return
}
err = s.connectToBootnodes()
if err != nil {
log.WithError(err).Error("Could not add bootnode to the exclusion list")
if err := s.connectToBootnodes(); err != nil {
log.WithError(err).Error("Could not connect to boot nodes")
s.startupErr = err
return
}
s.dv5Listener = listener
go s.listenForNewNodes()
}
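The Start hunk folds the bootnode call and its error check into one statement, scoping err to the if block while keeping the early-return shape. A tiny generic illustration of the idiom (connect is a placeholder):

package startup

import "log"

func start(connect func() error) {
	// Scoping err to the if statement avoids reusing a wider err variable
	// and keeps the failure path as an early return.
	if err := connect(); err != nil {
		log.Printf("could not connect to boot nodes: %v", err)
		return
	}
	// ... continue startup ...
}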
@@ -384,12 +385,17 @@ func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) er
s.pingMethodLock.Unlock()
}
func (s *Service) pingPeers() {
func (s *Service) pingPeersAndLogEnr() {
s.pingMethodLock.RLock()
defer s.pingMethodLock.RUnlock()
localENR := s.dv5Listener.Self()
log.WithField("ENR", localENR).Info("New node record")
if s.pingMethod == nil {
return
}
for _, pid := range s.peers.Connected() {
go func(id peer.ID) {
if err := s.pingMethod(s.ctx, id); err != nil {
@@ -462,8 +468,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
if info.ID == s.host.ID() {
return nil
}
if s.Peers().IsBad(info.ID) {
return errors.New("refused to connect to bad peer")
if err := s.Peers().IsBad(info.ID); err != nil {
return errors.Wrap(err, "refused to connect to bad peer")
}
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()
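With IsBad returning an error, connectWithPeer can wrap it so a refused dial carries the underlying reason (bad IP, collocation limit, or bad-response score). A minimal sketch of that calling pattern; badPeerChecker and shouldDial are hypothetical names, not part of the diff:

package dialer

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

// badPeerChecker is a hypothetical narrow interface standing in for the peers status object.
type badPeerChecker interface {
	IsBad(pid peer.ID) error
}

// shouldDial refuses bad peers and surfaces why they were refused.
func shouldDial(status badPeerChecker, pid peer.ID) error {
	if err := status.IsBad(pid); err != nil {
		return errors.Wrap(err, "refused to connect to bad peer")
	}
	return nil
}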

Some files were not shown because too many files have changed in this diff.