Compare commits

...

164 Commits

Author SHA1 Message Date
Kasey Kirkham
2b8384ff0b adapt to all the divergent changes between peerDAS and peerdas-sync 2025-08-15 22:16:27 -05:00
Kasey
b0cc1ec0aa DataColumnSidecar backfill 2025-08-15 20:23:50 -05:00
Manu NALEPA
6e02170881 Add rate limiter check. 2025-08-15 10:28:55 +02:00
Manu NALEPA
bf6007c5f0 Fix Potuz's and Preston's comment. 2025-08-15 09:06:36 +02:00
Manu NALEPA
22a6820ede Add extra flags 2025-08-15 01:48:16 +02:00
Manu NALEPA
00ab5a1051 Add DataColumnStorage and SubscribeAllDataSubnets flag. 2025-08-15 00:25:34 +02:00
Manu NALEPA
41cf0ec59e Fix James' comment. 2025-08-14 17:04:03 +02:00
Manu NALEPA
39ca2eaae1 Merge branch 'develop' into peerdas-sync 2025-08-14 14:27:28 +02:00
Manu NALEPA
d5c56355f8 Fix Potuz's comment. 2025-08-14 14:03:08 +02:00
Manu NALEPA
5a3e450067 Fix Potuz's comment. 2025-08-14 14:01:20 +02:00
Justin Traglia
4ed90a02ef Rename various variables/functions to be more clear (#15529)
* Rename various variables/functions to be more clear

* Add changelog fragment

---------

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>
2025-08-14 11:06:22 +00:00
Manu NALEPA
e50472ab25 Fix typo. 2025-08-14 09:34:31 +02:00
Manu NALEPA
53d69d407b selectPeers: Avoid map with key but empty value. 2025-08-14 09:28:11 +02:00
trinadh61
7d528c75bb adding user agent validator beacon client (#15574)
* adding user agent validator beacon client

* Update runtime/version/version.go

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>

* test cases

* contribution readme

* setting user agent to build version data

---------

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>
2025-08-13 21:36:17 +00:00
Manu NALEPA
58687620f6 Fix James' comment. 2025-08-13 22:30:33 +02:00
Preston Van Loon
e7b2953d5a Address out of bounds concern in beacon-chain/core/peerdas/das_core.go (#15586) 2025-08-13 15:07:26 +00:00
Muzry
acf35e849e Update endpoint to return 404 after isOptimistic check (#15559)
* Update endpoint to return 404 after isOptimistic check

* Fix error handling by using predefined errors

* fix: helpers build.bazel

* remove the StateIdDecodeError

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-13 14:40:20 +00:00
Manu NALEPA
d36875332d Fix Preston's comment. 2025-08-13 16:00:13 +02:00
Manu NALEPA
0a9ff2dc8b Fix James' comment. 2025-08-13 15:04:51 +02:00
Manu NALEPA
cc8a5fc422 Revert "Fix James' comment."
This reverts commit a3f919205a.
2025-08-13 15:03:15 +02:00
Manu NALEPA
9dcb8be2df Fix Potuz's comment. 2025-08-13 15:01:31 +02:00
Manu NALEPA
51e465d690 Revert "Fix Potuz's comment."
This reverts commit c45230b455.
2025-08-13 14:15:27 +02:00
Manu NALEPA
ddbf9cb404 Implement TestFetchDataColumnSidecars. 2025-08-13 12:00:50 +02:00
Manu NALEPA
9ed1496c8f Fix James's comment. 2025-08-13 09:14:25 +02:00
Manu NALEPA
5b04cab118 Fix Preston's comment. 2025-08-13 01:11:22 +02:00
Manu NALEPA
0b681f6861 Update cmd/beacon-chain/flags/config.go
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-08-13 01:00:19 +02:00
Manu NALEPA
88b363e3cf Merge branch 'develop' into peerdas-sync 2025-08-13 00:45:31 +02:00
Preston Van Loon
c826d334a1 Add missing Fulu block type in stream handler (grpc StreamBlocksAltair) (#15583) 2025-08-12 22:26:26 +00:00
Manu NALEPA
f521948b4d Fix flakiness in TestSelectPeers. 2025-08-12 23:14:56 +02:00
Manu NALEPA
a3f919205a Fix James' comment. 2025-08-12 22:44:26 +02:00
Manu NALEPA
474f458834 Fix James' comment. 2025-08-12 22:35:48 +02:00
Manu NALEPA
bd17dfefb9 Fix James' commit. 2025-08-12 22:17:23 +02:00
Manu NALEPA
3f361a79a0 Fix James's comment. 2025-08-12 22:10:33 +02:00
Manu NALEPA
09b7fa6bc9 Fix James' comment. 2025-08-12 22:08:33 +02:00
Manu NALEPA
76d974694c Implement TestCategorizeIndices. 2025-08-12 15:47:29 +02:00
Manu NALEPA
dca7f282e6 Implement TestSelectPeers. 2025-08-12 15:11:33 +02:00
Manu NALEPA
327309b5f7 Implement TestUpdateResults. 2025-08-12 13:45:45 +02:00
Manu NALEPA
8c44999d21 Implement TestFetchDataColumnSidecarsFromPeers. 2025-08-12 12:23:20 +02:00
Manu NALEPA
bb2fd617a5 Implement TestSendDataColumnSidecarsRequest. 2025-08-12 11:59:30 +02:00
Manu NALEPA
c31c1e2674 Fix Satyajit's comment. 2025-08-12 10:09:39 +02:00
Preston Van Loon
eace128ee9 Fix panic in blob cache when scs array is empty or shorter than commitments (#15581)
* Fix panic in beacon-chain/das/blob_cache.go

* Regression test for empty/short scs array panic

* Changelog fragment
2025-08-12 03:26:22 +00:00
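For context, the panic fix above amounts to a bounds check before indexing the sidecar slice. A minimal sketch of the idea in Go, with illustrative types rather than Prysm's actual das package API:

package main

import "fmt"

// blobSidecar stands in for the real sidecar type; the point is the bounds check.
type blobSidecar struct{ index int }

// sidecarForCommitment returns the sidecar matching commitment i, or an error when
// the sidecar slice is empty or shorter than the commitment list, instead of panicking.
func sidecarForCommitment(scs []*blobSidecar, commitmentCount, i int) (*blobSidecar, error) {
    if i >= commitmentCount {
        return nil, fmt.Errorf("commitment index %d out of range (%d commitments)", i, commitmentCount)
    }
    if i >= len(scs) {
        return nil, fmt.Errorf("no sidecar at index %d (have %d)", i, len(scs))
    }
    return scs[i], nil
}

func main() {
    _, err := sidecarForCommitment(nil, 3, 0) // empty scs: error, not a panic
    fmt.Println(err)
}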
terence
9161b80a32 Implement fulu mev-boost change for blinded block submission (#15486)
* Builder: handle fulu submit blind block return

* Builder: update post-Fulu client to handle 202 Accepted response

* Builder: consolidate HTTP request methods and improve status code validation
2025-08-12 00:49:52 +00:00
terence
009a1507a1 Move aggregated attestation cache key generation outside locks (#15579) 2025-08-11 21:55:44 +00:00
kasey
fa71a6e117 Fusaka ENR changes (#15501)
* fusaka fork digest enr changes

* Add tests for fulu NFD key

* add testcase

* Update log field to spell out 'nfd' as 'NextForkDigest'

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2025-08-11 21:40:39 +00:00
kasey
3da40ecd9c Refactor fork schedules (#15490)
* overhaul fork schedule management for bpos

* Unify log

* Radek's comments

* Use arg config to determine previous epoch, with regression test

* Remove unnecessary NewClock. @potuz feedback

* Continuation of previous commit: Remove unnecessary NewClock. @potuz feedback

* Remove VerifyBlockHeaderSignatureUsingCurrentFork

* cosmetic changes

* Remove unnecessary copy. entryWithForkDigest passes by value, not by pointer so it shold be fine

* Reuse ErrInvalidTopic from p2p package

* Unskip TestServer_GetBeaconConfig

* Resolve TODO about forkwatcher in local mode

* remove Copy()

---------

Co-authored-by: Kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
2025-08-11 16:08:53 +00:00
terence
f7f992c256 gofmt: fix formatting issues in test files (#15577) 2025-08-11 15:56:53 +00:00
Manu NALEPA
978ffa4780 Add tests for sendDataColumnSidecarsRequest. 2025-08-11 00:35:40 +02:00
Manu NALEPA
407bf6785f Fix Potuz's comment. 2025-08-10 23:07:56 +02:00
Manu NALEPA
c45230b455 Fix Potuz's comment. 2025-08-10 22:47:00 +02:00
Manu NALEPA
0af6591001 Fix Potuz's comment. 2025-08-10 22:41:00 +02:00
Manu NALEPA
1af249da31 Fix Potuz's comment. 2025-08-10 22:02:22 +02:00
Manu NALEPA
901f6b6e6c Fix Potuz's comment. 2025-08-10 21:59:17 +02:00
Manu NALEPA
a3cdda56d9 Fix Potuz's comment. 2025-08-10 21:53:54 +02:00
Manu NALEPA
cf3200fa06 Fix Potuz's comment. 2025-08-10 21:47:55 +02:00
Manu NALEPA
04cafa1959 Partially fix Potuz's comment. 2025-08-10 21:39:27 +02:00
Manu NALEPA
498b945a61 Fix Satyajit's comment. 2025-08-10 21:35:39 +02:00
terence
09565e0c3a Refactor attestation cache key generation outside critical section (#15572)
* Refactor attestation cache key generation outside critical section

* Improve attestation cache key error handling and logging
2025-08-10 17:41:30 +00:00
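This refactor (and the related aggregated-attestation change earlier in the list) moves key generation out of the locked region. A minimal sketch of that pattern, with an illustrative cache rather than Prysm's actual implementation:

package main

import (
    "crypto/sha256"
    "fmt"
    "sync"
)

type attCache struct {
    mu sync.Mutex
    m  map[[32]byte][]byte
}

// save hashes the attestation bytes before taking the mutex, so the relatively
// expensive key generation never runs inside the critical section.
func (c *attCache) save(att []byte) {
    key := sha256.Sum256(att) // computed outside the lock
    c.mu.Lock()
    defer c.mu.Unlock()
    c.m[key] = att
}

func main() {
    c := &attCache{m: make(map[[32]byte][]byte)}
    c.save([]byte("aggregated attestation"))
    fmt.Println(len(c.m)) // 1
}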
Jun Song
05a3736310 refactor: removing redundant codes in htrutils.go (#15453)
* refactor: use auto-generated HashTreeRoot functions in htrutil.go

* refactor: use type alias for Transaction & use SliceRoot for TransactionsRoot

* changelog

* fix: TransactionsRoot receives raw 2d bytes as an argument

* fix: handle nil argument

* test: add nil test for fork and checkpoint

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-10 10:44:01 +00:00
kasey
84c8653a52 initialize genesis data asap at node start (#15470)
* initialize genesis data asap at node start

* add genesis validation tests with embedded state verification

* Add test for hardcoded mainnet genesis validator root and time from init() function

* Add test for UnmarshalState in encoding/ssz/detect/configfork.go

* Add tests for genesis.Initialize

* Move genesis/embedded to genesis/internal/embedded

* Gazelle / BUILD fix

* James feedback

* Fix lint

* Revert lock

---------

Co-authored-by: Kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
2025-08-10 02:09:40 +00:00
Tomás Andróil
921ff23c6b refactor: use central flag name for validator HTTP server port (#15236)
* Update validator.go

* Create tomasandroil_replace_grpc_gateway_flag_name.md
2025-08-09 10:35:04 +00:00
Manu NALEPA
90317ba5b5 Fix Potuz's comment. 2025-08-09 01:24:00 +02:00
Radosław Kapka
87235fb384 Don't submit duplicate aggregated SignedContributionAndProof messages (#15571) 2025-08-08 22:57:36 +00:00
Manu NALEPA
b35358f440 Fix Potuz's comment. 2025-08-09 00:47:55 +02:00
Manu NALEPA
b926066495 Fix Potuz's comment. 2025-08-08 20:59:28 +02:00
Manu NALEPA
263ddf9a7b PeerDAS: Implement sync 2025-08-08 20:05:12 +02:00
james-prysm
2ec5914b4a fixing builder version check (#15568)
* adding fix

* fixing test
2025-08-08 01:42:55 +00:00
Potuz
fe000e5629 Fix validateConsensus (#15548)
* Fix validateConsensus

Reported by NuConstruct

The stater package looks for a stateroot using the head state from the
blockchain package. However, this state is very unlikely to have the
poststateroot since that's only added after slot processing. I assume
that essentially any REST endpoint that uses this mechanism to get head
is broken if it needs to gather a state by stateroot.

This PR is a placeholder to verify this is the issue; here I just check
if the NSC already has the post-state, since that will already have the
processed state cached.

* Add changelog

* add fallback

* Fix tests
2025-08-07 13:08:40 +00:00
Preston Van Loon
447a3d8add Update go to v1.24.6 (#15566) 2025-08-06 20:59:26 +00:00
Jun Song
b00aaef202 Persist metadata sequence number using Beacon DB (#15554)
* Add entry for sequence number in chain-metadata bucket & Basic getter/setter

* Mark p2p-metadata flag as deprecated

* Fix metaDataFromConfig: use DB instead to get seqnum

* Save sequence number after updating the metadata

* Fix beacon-chain/p2p unit tests: add DB in config

* Add changelog

* Add ReadOnlyDatabaseWithSeqNum

* Code suggestion from Manu

* Remove seqnum getter at interface

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-06 20:18:33 +00:00
Potuz
0f6070a866 Fix race on ReceibeBlock (#15565)
* Fix race on ReceibeBlock

In the event two routines for `ReceiveBlock` are triggered with the same
block (it may happen if one routine is triggered over gossip and the
other in init-sync) it may happen that the second routine believes it's
syncing the block for the first time. This is because the cache on
`blocksBeingSynced` is not checked to be set and the block may still not
be put in forkchoice by the first routine.

In the normal case this would not cause any trouble as the second
forkchoice insertion is a noop by design. However, if the second routine
times out or has any error in processing (for example the engine will
return an error if we try to send FCU to an older head) then the second
routine will attempt to remove the inserted block from forkchoice. This
bricks the node: forkchoice refuses to remove a valid node, the root is
removed unconditionally from the db, and the node ends up with a root
that is not in the db but remains in forkchoice.

This PR just prevents the race.

As a followup perhaps we can gate the rollback function from db to nodes
that are effectively not in forkchoice, alternatively, force removal
from forkchoice when rolling back from db (although this version is
complicated due to possible accounting issues on forkchoice).

* Fix lint
2025-08-06 17:18:13 +00:00
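The race described above boils down to two routines claiming the same block root at once. A minimal sketch of the de-duplication idea, with illustrative names rather than Prysm's blockchain package API:

package main

import (
    "fmt"
    "sync"
)

type inFlight struct {
    mu    sync.Mutex
    roots map[[32]byte]struct{}
}

// tryAdd reports whether the caller is the first routine to claim this root.
func (f *inFlight) tryAdd(root [32]byte) bool {
    f.mu.Lock()
    defer f.mu.Unlock()
    if _, busy := f.roots[root]; busy {
        return false
    }
    f.roots[root] = struct{}{}
    return true
}

func (f *inFlight) done(root [32]byte) {
    f.mu.Lock()
    defer f.mu.Unlock()
    delete(f.roots, root)
}

func main() {
    f := &inFlight{roots: make(map[[32]byte]struct{})}
    var root [32]byte
    fmt.Println(f.tryAdd(root)) // true: gossip routine proceeds
    fmt.Println(f.tryAdd(root)) // false: init-sync routine skips re-processing
    f.done(root)
}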
Justin Traglia
2a09c9f681 Fix BlobsBundleV2 proofs limit (#15530)
* Assign max_cell_proofs_length the correct value

* Add changelog fragment

* Run update-go-pbs.sh

* Run update-go-ssz.sh

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-05 21:33:45 +00:00
Jun Song
9c6ccd67c1 Add Fulu case for saveStatesEfficientInternal (#15553)
* Add Fulu case for saveStatesEfficientInternal

* Add changelog

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-05 19:25:17 +00:00
Radosław Kapka
36e5d4926b Redesign the pending attestation queue (#15024)
* Redesign pending attestation queue

* fix bad signature test

* equality tests

* changelog <3

* rename functions

* change logs

* fix fuzzing

* fixes after rebasing

* build fix

* review

* James' review

* fix imports
2025-08-05 18:35:19 +00:00
terence
17b7d3ff12 Add fork choice check to pending attestations processing (#15547) 2025-08-05 16:08:47 +00:00
terence
fb2bceece8 beacon api: optimize val assignment lookup (#15558) 2025-08-05 15:23:37 +00:00
terence
d012ab653c Beacon api: fix proposer duty computation for fulu (#15534) 2025-08-05 15:17:02 +00:00
Preston Van Loon
46e7555c42 Update cc-debian11 to latest (#15562) 2025-08-04 16:17:13 +00:00
Preston Van Loon
181df3995e Update go to v1.24.5 (#15561) 2025-08-04 15:17:59 +00:00
Manu NALEPA
149e220b61 Validator custody: Update to the latest specification. (#15532)
* Validator custody: Update to the latest specfication.

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: terence <terence@prysmaticlabs.com>

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-02 06:21:08 +00:00
Bastin
ae4b982a6c Fix finality update bugs & Move broadcast logic to LC Store (#15540)
* fix IsBetterFinalityUpdate and add tests

fix finality update bugs

* Update lightclient.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/core/light-client/lightclient.go

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-08-01 12:35:21 +00:00
Radosław Kapka
f330021785 Do not compare liveness response with LH (#15556)
* Do not compare liveness response with LH

* changelog <3
2025-07-31 14:37:36 +00:00
james-prysm
bd6b4ecd5b wrapping goodbye messages in goroutine to speed up node shutdown (#15542)
* wrapping goodbye messages in goroutine to speed up node shutdown

* fixing requirement
2025-07-31 12:52:31 +00:00
Potuz
d7d8764a91 Trigger payload attribute event on early blocks (#15541)
Currently the payload attribute event is triggered in
`forkchoiceUpdateWithExecution`. However, when we import an early block
we do not call this function; we make two calls to FCU instead. The first one is
on a locked path at the end of `postBlockProcess`, and this call is made
without any payload attributes to avoid updating the shuffling caches.

The second call is made on `handleSecondFCUCall` which calls directly
`notifyForkchoiceUpdate` bypassing the call to
`forkchoiceUpdateWithExecution`, but this call is the one that actually
computes the payload attributes. So the event handler is never called
with the new attributes.

This PR moves the event trigger to the same place where we actually call
FCU with the computed payload attributes.

A note on forkchoice locking: since the calls are always made in a
goroutine, the routine will simply wait for forkchoice to be unlocked
before proceeding.

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-30 19:34:34 +00:00
Muzry
9b7f91d947 bugfix: submitPoolSyncCommitteeSignatures response inconsistent (#15516)
* fix: submitPoolSyncCommitteeSignatures reponse inconsistent

* update: bazel build file

* update: add changelog fragment file

* update api/server/structs/BUILD.bazel format

* update the unit test

* update: the error format

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-29 16:08:28 +00:00
terence
57e27199bd Fix builder bid version compatibility to support Electra bids with Fulu blocks (#15536) 2025-07-29 14:16:05 +00:00
Potuz
11ca766ed6 Add timing metric for PublishBlockV2 endpoint (#15539)
This commit adds a Prometheus histogram metric to measure the processing
duration of the PublishBlockV2 beacon API endpoint in milliseconds.

The metric covers the complete request processing time including:
- Request validation and parsing
- Block decoding (SSZ/JSON)
- Broadcast validation checks
- Block proposal through ProposeBeaconBlock
- All synchronous operations and awaited goroutines

Background operations that run in goroutines (block broadcasting, blob
sidecar processing) are included in the timing since the main function
waits for their completion before returning.

Files changed:
- beacon-chain/rpc/eth/beacon/metrics.go: New metric definition
- beacon-chain/rpc/eth/beacon/handlers.go: Timing instrumentation
- beacon-chain/rpc/eth/beacon/BUILD.bazel: Added metrics.go and Prometheus deps
- changelog/potuz_add_publishv2_metric.md: Changelog entry

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Claude <noreply@anthropic.com>
2025-07-28 18:20:57 +00:00
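As a rough illustration of the instrumentation described above, here is a minimal HTTP handler timed with a Prometheus histogram in milliseconds; the metric name and handler body are hypothetical, not the exact code added in this commit:

package main

import (
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

var publishBlockDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
    Name:    "publish_block_v2_duration_milliseconds",
    Help:    "Processing time of the PublishBlockV2 endpoint in milliseconds.",
    Buckets: prometheus.ExponentialBuckets(10, 2, 12),
})

func init() {
    prometheus.MustRegister(publishBlockDuration)
}

// publishBlockV2 records the full request duration, including every synchronous
// step the handler waits on before returning.
func publishBlockV2(w http.ResponseWriter, r *http.Request) {
    start := time.Now()
    defer func() {
        publishBlockDuration.Observe(float64(time.Since(start).Milliseconds()))
    }()
    // ... decode, validate, and propose the block ...
    w.WriteHeader(http.StatusOK)
}

func main() {
    http.HandleFunc("/eth/v2/beacon/blocks", publishBlockV2)
    _ = http.ListenAndServe(":8080", nil) // illustrative only
}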
terence
cd6cc76d58 Add BLOB_SCHEDULE to eth/v1/config/spec endpoint (#15485)
* Beacon api: fix get config blob schedule

* Numbers should be string instead of float

* more generalized implementation for nested objects

* removing unused function

* fixing linting

* removing redundant switch fields

* adding additional log for debugging

* Fix build.

* adding skip function based on kasey's recommendation

* fixing test

---------

Co-authored-by: james-prysm <james@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-07-25 19:06:10 +00:00
terence
fc4a1469f0 Include requested state root in StateNotFoundError message for debugging (#15533) 2025-07-25 17:50:26 +00:00
Justin Traglia
f3dc4c283e Improve das-core functions (#15524)
* Improve das-core functions

* Add changelog fragment

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-25 15:35:05 +00:00
raulk
6ddf271688 fix(beacon-api): return syncnets and cgc in Metadata. (#15506)
* fix(beacon-api): return syncnets and cgc in Metadata.

* changelog

* fixing implementation and adding unit tests

* gaz

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: james-prysm <james@prysmaticlabs.com>
2025-07-25 15:26:56 +00:00
Justin Traglia
af7afba26e Move reconstruction lock to prevent unnecessary work (#15528)
* Move reconstruction lock to prevent unnecessary work

* Add changelog fragment
2025-07-24 21:06:53 +00:00
james-prysm
b740a4ff83 implements the proposer lookahead api (#15525)
* implements the proposer lookahead api

* radek's feedback
2025-07-24 15:00:43 +00:00
Radosław Kapka
385c2224e8 Return zero value for Eth-Consensus-Block-Value on error (#15526) 2025-07-24 13:58:23 +00:00
Justin Traglia
04b39d1a4d Fix some nits associated with data column sidecar verification (#15521)
* Fix some nits associated with data column sidecar verification

* Add changelog fragment
2025-07-23 20:02:32 +00:00
james-prysm
4c40caf7fd persistent enode db to persist seq number (#15519)
* adding persistent db path for persistent seq data information

* fixing typo

* Update changelog/James-prysm_persistent-seq-number.md

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update beacon-chain/p2p/discovery_test.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* adding log updated based on manu's suggestion

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-23 13:55:05 +00:00
Justin Traglia
bc209cadab Use MinEpochsForDataColumnSidecarsRequest in WithinDAPeriod for Fulu (#15522)
* Use MinEpochsForDataColumnSidecarsRequest in WithinDAPeriod for Fulu

* Make fixes

* Add blank line
2025-07-23 13:35:27 +00:00
Justin Traglia
856742ff68 Update links to consensus-specs to point to master branch (#15523)
* Update links to consensus-specs to point to master branch

* Add changelog fragment
2025-07-23 08:32:09 +00:00
Manu NALEPA
abe16a9cb4 Fix downscore by peers when a node gracefully stops. (#15505)
* Log when downscoring a peer.

* `validateSequenceNumber`: Downscore peer in function, clarify and add logs

* `AddConnectionHandler`: Send majority code to the outer scope (no funtional change).

* `disconnectBadPeer`: Improve log.

* `sendRPCStatusRequest`: Improve log.

* `findPeersWithSubnets`: Add preventive peer filtering.
(As done in `s.findPeers`.)

* `Stop`: Use one `defer` for the whole function.
Reminder: `defer`s are executed backwards.

* `Stop`: Send a goodbye message to all connected peers when stopping the service.

Before this commit, stopping the service did not send any goodbye message to connected peers. The issue with this approach is that each peer still thinks we are alive and keeps trying to communicate with us. Unfortunately, because we are offline, we cannot respond. Because of that, the peer starts to downscore us and eventually bans us. As a consequence, when we restart, the peer refuses our connection request.

By sending a goodbye message when stopping the service, we ensure the peer stops expecting anything from us. When we restart, everything is all right.

* `ConnectedF` and `DisconnectedF`: Workaround very probable libp2p bug by preventing outbound connection to very recently disconnected peers.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* `AddDisconnectionHandler`: Handle multiple close calls to `DisconnectedF` for the same peer.
2025-07-22 20:15:18 +00:00
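A minimal sketch of the shutdown behaviour described above: notify every connected peer in parallel and bound the farewell with a timeout so a slow peer cannot delay shutdown. The goodbyeSender interface is hypothetical, standing in for Prysm's RPC goodbye machinery:

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

type goodbyeSender interface {
    SendGoodbye(ctx context.Context, peerID string) error
}

func sayGoodbyeToAll(s goodbyeSender, peers []string) {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    var wg sync.WaitGroup
    for _, p := range peers {
        wg.Add(1)
        go func(pid string) {
            defer wg.Done()
            if err := s.SendGoodbye(ctx, pid); err != nil {
                fmt.Printf("could not send goodbye to %s: %v\n", pid, err)
            }
        }(p)
    }
    wg.Wait() // all peers notified (or timed out); safe to finish shutting down
}

type noopSender struct{}

func (noopSender) SendGoodbye(context.Context, string) error { return nil }

func main() {
    sayGoodbyeToAll(noopSender{}, []string{"peerA", "peerB"})
}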
james-prysm
77958022e7 removing ssz-only flag ( reverting feature) and fix accept header middleware (#15433)
* removing ssz-only flag

* gaz

* reverting other uses of sszonly

* gaz

* adding kasey and radek's suggestions

* update changelog

* adding test

* radek advice with new headers and tests

* adding logs and fixing comments

* adding logs and fixing comments

* gaz

* Update validator/client/beacon-api/rest_handler_client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/apiutil/header.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/apiutil/header.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek's comments

* adding another failing case based on radek's suggestion

* another unit test

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-22 16:06:51 +00:00
kasey
c21fae239f don't use gzip handler for sse (#15517)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-07-22 13:54:25 +00:00
Bastin
deb3ba7f21 Remove unused parameter from LC functions (#15514)
* remove unused parameter

* format code
2025-07-21 13:50:21 +00:00
Manu NALEPA
f288a3c0e1 Workaround TestHostIsResolved by using "more reliable" DNS resolver. (#15515) 2025-07-21 13:37:05 +00:00
Bastin
a4ca6355d0 Abstract away LC update validation rules into the LC store (#15508)
* abstract update validation into IsBetter funcs

* Update beacon-chain/core/light-client/store.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update beacon-chain/core/light-client/store.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* clean up

* clean up

* clean up again

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-21 09:37:02 +00:00
Manu NALEPA
cd0821d026 Subnets subscription: Avoid dynamic subscribing blocking in case not enough peers per subnets are found. (#15471)
* Subnets subscription: Avoid dynamic subscribing blocking in case not enough peers per subnets are found.

* `subscribeWithParameters`: Use struct to avoid too many function parameters (no functional changes).

* Optimise subnets search.

Currently, when we are looking for peers in, say, data column sidecar subnets 3, 6 and 7, we first look for peers in subnet 3.
If, during the crawl, we meet some peers advertising subnet 6, we discard them (because we are exclusively looking for peers with subnet 3).
Once satisfied, we start the whole search again for subnet 6.

This commit optimizes that by looking for peers that satisfy any of our constraints in a single pass (a minimal sketch follows after this commit entry).

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' commnet.

* Fix James' comment.

* Fix James' comment.

* Fix James's comment.

* Simplify following James' comment.

* Fix James' comment.

* Update beacon-chain/sync/rpc_goodbye.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update config/params/config.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update beacon-chain/sync/subscriber.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Fix Preston's comment.

* Fix Preston's comment.

* `TestService_BroadcastDataColumn`: Re-add sleep 50 ms.

* Fix Preston's comment.

* Update beacon-chain/p2p/subnets.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-07-18 17:19:15 +00:00
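The "Optimise subnets search" item above describes collecting peers for all wanted subnets during one crawl. A minimal sketch of that idea with illustrative types, not Prysm's discovery code:

package main

import "fmt"

type candidate struct {
    id      string
    subnets map[uint64]bool
}

// selectPeersForSubnets walks the discovered nodes once and keeps any peer that
// advertises a subnet we still need, instead of crawling separately per subnet.
func selectPeersForSubnets(discovered []candidate, wanted []uint64) map[uint64][]string {
    need := make(map[uint64]bool, len(wanted))
    for _, s := range wanted {
        need[s] = true
    }
    picked := make(map[uint64][]string)
    for _, c := range discovered {
        for s := range need {
            if c.subnets[s] {
                picked[s] = append(picked[s], c.id) // one crawl serves every wanted subnet
            }
        }
    }
    return picked
}

func main() {
    peers := []candidate{
        {id: "p1", subnets: map[uint64]bool{3: true}},
        {id: "p2", subnets: map[uint64]bool{6: true, 7: true}},
    }
    fmt.Println(selectPeersForSubnets(peers, []uint64{3, 6, 7}))
}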
Bastin
8b53887891 Save LC Bootstraps only on finalized checkpoints (#15497)
* Unify LC API (updates)

* Remove unused fields in LC beacon API server

* bootstraps only on checkpoints

* Update beacon-chain/blockchain/receive_block.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* move tests

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-18 15:53:31 +00:00
Radosław Kapka
8623a144d9 Write Content-Encoding header in the response properly when gzip encoding is requested (#15499)
* Write gzip header properly

* changelog

* regression test
2025-07-17 19:14:48 +00:00
Bastin
f3314d2d24 Abstract validation logic for saving LC updates into the store function (#15504)
* Unify LC API (updates)

* Remove unused fields in LC beacon API server

* refactor lc logic into core

* fix tests
2025-07-17 14:58:18 +00:00
Bastin
bcd65e7a4d Remove extra lc server fields (#15493)
* Unify LC API (updates)

* Remove unused fields in LC beacon API server
2025-07-17 11:50:16 +00:00
Bastin
dce89a1627 Unify LC API 2/2 (updates) (#15488)
* Unify LC API (updates)

* Update beacon-chain/core/light-client/store.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-17 10:59:51 +00:00
Manu NALEPA
09485c2062 Add bundle v2 support for submit blind block (#15503)
* Add bundle v2 support for submit blind block

* add tests

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2025-07-17 07:51:28 +00:00
Manu NALEPA
9e014da0b9 Log: Add milliseconds to log timestamps (#15496) 2025-07-16 10:07:12 +00:00
Manu NALEPA
d8fedacc26 beaconBlockSubscriber: Implement data column sidecars reconstruction with data retrieved from the execution client when receiving a block via gossip. (#15483) 2025-07-16 06:00:23 +00:00
Potuz
3def16caaa Fix InitializeProposerLookahead (#15450)
* Fix InitializeProposerLookahead

Get the right Active validator indices for each epoch after the fork
transition.

Co-Authored-By: Claude <noreply@anthropic.com>

* Add test

Co-Authored-By: Claude <noreply@anthropic.com>

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-16 03:16:11 +00:00
Rose Jethani
4e5bfa9760 Fixes server ignores request to gzip data (#14982)
* files changed

* Update api/server/middleware/middleware.go

* fixed AcceptEncodingHeaderHandler

* Update changelog/rose2221-develop.md

* Added tests'

* Update api/server/middleware/middleware_test.go

* updated bazel

* formatting

* fixed e2e_test

* zip only json

* zip only json

---------

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-15 16:39:20 +00:00
terence
70ac53f991 fix(execution): skip genesis block retrieval when EIP-6110 is active (#15494) 2025-07-15 16:00:53 +00:00
terence
6f5ff03b42 optimize data column seen cache memory usage with slot-aware pruning (#15477)
* optimize data column seen cache memory usage with slot-aware pruning

* Manu's feedback

* Fix lint
2025-07-15 16:00:36 +00:00
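A minimal sketch of a slot-aware seen cache like the one described above: entries are bucketed by slot so everything older than a cutoff can be dropped in one step, keeping memory bounded. Types are illustrative, not Prysm's actual cache:

package main

import (
    "fmt"
    "sync"
)

type seenCache struct {
    mu    sync.Mutex
    slots map[uint64]map[string]struct{} // slot -> set of "root:columnIndex" keys
}

func newSeenCache() *seenCache {
    return &seenCache{slots: make(map[uint64]map[string]struct{})}
}

func (c *seenCache) markSeen(slot uint64, key string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.slots[slot] == nil {
        c.slots[slot] = make(map[string]struct{})
    }
    c.slots[slot][key] = struct{}{}
}

// pruneBefore removes every bucket older than the given slot in one pass.
func (c *seenCache) pruneBefore(slot uint64) {
    c.mu.Lock()
    defer c.mu.Unlock()
    for s := range c.slots {
        if s < slot {
            delete(c.slots, s)
        }
    }
}

func main() {
    c := newSeenCache()
    c.markSeen(100, "root1:0")
    c.markSeen(164, "root2:5")
    c.pruneBefore(128)
    fmt.Println(len(c.slots)) // 1: only slot 164 remains
}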
Preston Van Loon
499d27b6ae Use time.Time instead of uint64 for genesis time (#15419)
* Convert genesis times from seconds to time.Time

* Fixing failed forkchoice tests in a new commit so it doesn't get worse

Fixing failed spectest tests in a new commit so it doesn't get worse

Fixing forkchoice tests, then spectests

* Fixing forkchoice tests, then spectests. Now asking for help...

* Fix TestForkChoice_GetProposerHead

* Fix broken build

* Resolve TODO(preston) items

* Changelog fragment

* Resolve TODO(preston) items again

* Resolve lint issues

* Use consistant field names for sinceSlotStart (no spaces)

* Manu's feedback

* Renamed StartTime -> UnsafeStartTime, marked as deprecated because it doesn't handle overflow scenarios.
Renamed SlotTime -> StartTime
Renamed SlotAt -> At
Handled the error in cases where StartTime was used.

@james-prysm feedback

* Revert beacon-chain/blockchain/receive_block_test.go from 1b7844de

* Fixing issues after rebase

* Accepted suggestions from @potuz

* Remove CanonicalHeadSlot from merge conflicts

---------

Co-authored-by: potuz <potuz@prysmaticlabs.com>
2025-07-14 21:04:50 +00:00
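The change above replaces raw uint64 genesis seconds with time.Time. A minimal sketch of why that simplifies slot arithmetic; the helper names and the 12-second slot duration are illustrative assumptions, not Prysm's renamed API:

package main

import (
    "fmt"
    "time"
)

const secondsPerSlot = 12

// slotStart computes the wall-clock start of a slot as plain duration math.
func slotStart(genesis time.Time, slot uint64) time.Time {
    return genesis.Add(time.Duration(slot) * secondsPerSlot * time.Second)
}

func slotsSinceGenesis(genesis, now time.Time) uint64 {
    if now.Before(genesis) {
        return 0
    }
    return uint64(now.Sub(genesis) / (secondsPerSlot * time.Second))
}

func main() {
    genesis := time.Unix(1606824023, 0).UTC() // mainnet genesis timestamp, for illustration
    fmt.Println(slotStart(genesis, 10))
    fmt.Println(slotsSinceGenesis(genesis, genesis.Add(25*time.Second))) // 2
}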
Kaloyan Tanev
56e8881bc1 Rework DV aggregation selection proofs (#15156)
* Reword DV selection proofs

* Add changelog

* Expect 1 call to DomainData in DV update duties test

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-14 15:33:26 +00:00
james-prysm
78f8411ad2 move validator run slot ticker (#15479)
* moving the ticker from chain start to right before the main loop and also after the wait for activation edge case

* fixing unit test

* adding in a unit test

* adding in comment based on potuz's feedback
2025-07-11 19:39:52 +00:00
james-prysm
83943b5dd8 validator client: removing need to call canonical head api (#15480)
* removing need to call cononical head api

* typo

* removing unneeded tests

* fixing unit tests
2025-07-10 20:25:17 +00:00
james-prysm
bc7e4f7751 switching enable to disable for duties (#15445) 2025-07-10 18:57:36 +00:00
Manu NALEPA
02f8726e46 VerifiedROBlobFromDisk and VerifiedRODataColumnFromDisk: Add tests. (#15484) 2025-07-10 16:11:12 +00:00
terence
16b567f6af Add log capitalization analyzer and apply changes (#15452)
* Add log capitalization analyzer and apply fixes across codebase

Implements a new nogo analyzer to enforce proper log message capitalization and applies the fixes to all affected log statements throughout the beacon chain, validator, and supporting components.

Co-Authored-By: Claude <noreply@anthropic.com>

* Radek's feedback

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-07-10 13:43:38 +00:00
Bastin
5c1d827335 Unify LC API 1/2 (bootstrap) (#15476)
* add versiotToForkEpoch map

* Unify LC API (bootstrap)
2025-07-10 13:07:46 +00:00
Bastin
68d7df0e4f add versiotToForkEpoch map (#15482) 2025-07-10 12:57:28 +00:00
kasey
288b33750d Isolate committee cache (#15478)
* always init service through NewService

* move head state cache to service struct

* changelog

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-07-09 21:43:00 +00:00
terence
f4f48d6372 Optimize BuildBlobSidecars Merkle proof computation by pre-computing subtrees (#15473)
* Optimize BuildBlobSidecars Merkle proof computation by pre-computing subtrees

Co-Authored-By: Claude <noreply@anthropic.com>

* Add change log

* Fix change log

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-07-09 16:12:14 +00:00
james-prysm
f2d57f0b5f changes for safe validator shutdown and restarts on healthcheck (#15401)
* poc changes for safe validator shutdown

* simplifying health routine and adding safe shutdown after max restarts reached

* fixing health tests

* fixing tests

* changelog

* gofmt

* fixing runner

* improve how runner times out

* improvements to ux on logs

* linting

* adding in max healthcheck flag

* changelog

* Update james-prysm_safe-validator-shutdown.md

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/runner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/service.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/runner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/runner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing some feedback from radek

* addressing some more feedback

* fixing name based on feedback

* fixing mistake on max health checks

* conflict accidently checked in

* go 1.23 no longer needs you to stop for the ticker

* Update flags.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* wip no unit test for recursive healthy host find

* rework healthcheck

* gaz

* fixing bugs and improving logs with new monitor

* removing health tracker, fixing runner tests, and adding placeholder for monitor tests

* fixing event stream check

* linting

* adding in health monitor tests

* gaz

* improving test

* removing some log.fatals

* forgot to remove comment

* missed fatal removal

* doppleganger should exit the node safely now

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek review

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/health_monitor.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek feedback

* read up on more suggestions and making fixes to channel

* suggested updates after more reading

* reverting some of this because it froze the validator after healthcheck failed

* fully reverting

* some improvements I found during testing

* Update cmd/validator/flags/flags.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* preston's feedback

* clarifications on changelog

* converted to using an event feed instead of my own channel publishing implementation, adding relevant logs

* preston log suggestion

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-07-09 15:39:06 +00:00
Radosław Kapka
7025e50a6c Make the multi-value slice permanent (#15414)
* Remove Old State Paths

* Changelog

* Gazelle

* Lint

* Fix State Tests

* fix tests, update native state code

* move ErrOutOfBounds error from consensus types to mvslice

* fix TestStreamEvents_OperationsEvents

* add missing gc to fuzz tests

* more test fixes

* build fix

---------

Co-authored-by: nisdas <nishdas93@gmail.com>
2025-07-08 17:45:20 +00:00
Radosław Kapka
961ea05454 Allow SSZ requests for pending deposits, partial withdrawals and consolidations (#15474) 2025-07-08 17:45:05 +00:00
terence
da5525096c Move data col reconstruction log to after reconstruction / save complete (#15475)
* Move data col reconstruction / save log to after they are done

* Rename to reconstructionDuration

* Rename to reconstructionAndSaveDuration
2025-07-08 14:49:31 +00:00
terence
2d2507b907 Attest timely by default (#15410)
* Attest timely by default

* Fix deprecated flag naming
2025-07-04 18:28:48 +00:00
terence
a701f07f3a Increase mainnet default builder gas limit to 45M (#15455) 2025-07-04 15:58:32 +00:00
terence
f4bbe5ca40 Add batch verifier limit (#15467) 2025-07-04 15:57:33 +00:00
Manu NALEPA
4be8de2476 PeerDAS: Implement reconstruction of data column sidecars retrieved from the execution client. (#15469)
* `BestFinalized`: No functional change. Improve comments and reduce scope.

* PeerDAS execution: Implement engine method `GetBlobsV2` and `ReconstructDataColumnSidecars`.

* Fix James' comment.

* Fix James' comment.
2025-07-03 22:35:28 +00:00
james-prysm
fac509a3e6 duties v2 fix no assignment panic (#15466)
* adding fix

* preston's suggestion
2025-07-03 15:44:51 +00:00
Manu NALEPA
b1ac8209b2 PeerDAS: Implement reconstruct. (#15454)
* PeerDAS: Implement reconstruct.

* Fix Preston's comment.

* Fix Preston's comment.
2025-07-02 19:02:55 +00:00
Bastin
74c9586c66 refactor lc kv tests (#15465)
* refactor lc kv tests

* clean up

* Update lightclient_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-02 13:24:32 +00:00
Bastin
f0ad3dfaeb Refactor lc bootstrap tests (#15462)
* fix versioning

* changelog

* fix blockchain tests

* fix linter issue

* fix spec tests

* fix default lc update version

* fix lc header version

* gzl

* clean up the code

* Update testing/spectest/shared/common/light_client/update_ranking.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* add fulu set up in update ranking

* pass att block to createDefaultLCUpdate

* address comments

* linter

* Update lightclient.go

* refactor lc bootstrap tests

* changelog

* sort imports

* refactor lc bootstrap tests

* changelog

* Implement the new Fulu Metadata. (#15440)

* clean up

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-07-02 13:23:39 +00:00
Bastin
2540196747 Put the initialization of LC Store behind the enable-light-client flag (#15464)
* put lc store behind flag

* lint

* remove extra line
2025-07-02 11:09:37 +00:00
Manu NALEPA
f133751cce --chain-config-file: Do not use any more mainnet boot nodes. (#15460) 2025-07-02 09:36:31 +00:00
Bastin
bddcc158e4 Fix LC versioning bug (#15400)
* fix versioning

* changelog

* fix blockchain tests

* fix linter issue

* fix spec tests

* fix default lc update version

* fix lc header version

* gzl

* clean up the code

* Update testing/spectest/shared/common/light_client/update_ranking.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* add fulu set up in update ranking

* pass att block to createDefaultLCUpdate

* address comments

* linter

* Update lightclient.go

* sort imports

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-07-01 21:40:18 +00:00
Manu NALEPA
bc7664321b Implement the new Fulu Metadata. (#15440) 2025-07-01 07:07:32 +00:00
Manu NALEPA
97f416b3a7 dataColumnSidecarByRootRPCHandler: Do not rely any more on map iteration, which does not produce reproducible output order. (#15441) 2025-06-26 13:07:19 +00:00
Manu NALEPA
1c1e0f38bb PeerDAS: Implement beacon API blob sidecar endpoint for Fulu (#15436)
* `TestGetBlob`: Refactor (no functional change).

* `GenerateTestFuluBlockWithSidecars`: Generate commitments consistent with blobs.

This is actually not needed for this PR, but it does not hurt.

* Beacon API: Implement blob sidecars endpoint for Fulu.

* Fix Radek's comment.

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix Radek's comment.

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-06-26 10:05:22 +00:00
Manu NALEPA
121914d0d7 Implement SendDataColumnSidecarsByRangeRequest and SendDataColumnSidecarsByRootRequest. (#15430)
* Implement `SendDataColumnSidecarsByRangeRequest` and `SendDataColumnSidecarsByRootRequest`.

* Fix James's comment.

* Fix James's comment.

* Fix James' comment.

* Fix marshaller.
2025-06-24 21:40:16 +00:00
Manu NALEPA
e8625cd89d Peerdas various (#15423)
* Topic mapping: Groupe `const` and `var`.

Cosmetic change, no functional change.

* `TopicFromMessage`: Do not assume anymore that all Fulu specific topics are V3 only.

* Proto: Remove unused `DataColumnIdentifier` and add new `StatusV2`.

`DataColumnIdentifier` was removed in the spec here: https://github.com/ethereum/consensus-specs/pull/4284. Eventually, we stopped using it in Prysm, but never removed the corresponding proto message.

The new `StatusV2` is introduced in the spec here: https://github.com/ethereum/consensus-specs/pull/4374

* `readChunkedDataColumnSideCar` ==> `readChunkedDataColumnSidecar`.

* `rpc_send_request.go`: Reorganize file (no function change).

* `readChunkedDataColumnSidecar`: Add `validationFunctions` parameter and add tests.
2025-06-23 14:47:02 +00:00
terence
667aaf1564 Remove invalid from logs for blob sidecars missing parent or out of range (#15428) 2025-06-22 21:55:44 +00:00
Bastin
e020907d2a Revert "SSZ support for submitAttestationsV2 (#15422)" (#15426)
This reverts commit 9927cea35a.
2025-06-20 21:21:16 +00:00
Bastin
9927cea35a SSZ support for submitAttestationsV2 (#15422)
* ssz support for submitAttestationsV2

* changelog

* lowercase errors

* save 1 line

* Update beacon-chain/rpc/eth/beacon/handlers_pool.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* add comment

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-06-20 17:21:48 +00:00
Manu NALEPA
d4233471d2 PeerDAS: Implement data columns by range RPC handler. (#15421)
* `dataColumnsRPCMinValidSlot`: Add new test case.

* peerDAS: Implement `dataColumnSidecarByRangeRPCHandler`.

* `rateLimitingAmount`:  Define out of the function.

* Fix James' comment.

* Fix James' comment.
2025-06-20 15:22:50 +00:00
james-prysm
d63ae69920 adding ssz for get block endpoint (#15390)
* adding get ssz

* adding some tests

* gaz

* adding ssz to e2e

* wip ssz

* adding in additional check on header type

* remove unused

* renaming json rest handler, and adding in usage of use ssz debug flag

* fixing unit tests

* fixing tests

* gaz

* radek feedback

* Update config/features/config.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update config/features/flags.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update config/features/flags.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/beacon-api/get_beacon_block.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/beacon-api/get_beacon_block.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/beacon-api/get_beacon_block.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing feedback

* missing import

* another missing import

* fixing tests

* gaz

* removing unused

* gaz

* more radek feedback

* fixing context

* adding in check for non accepted conent type

* reverting to not create more edgecases

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-06-20 14:27:09 +00:00
terence
b9fd32dfff Remove deposit count from sync new block log (#15420)
* Remove deposit count from sync new block log

* Fix tests
2025-06-19 23:49:17 +00:00
Manu NALEPA
559d02bf4d peerDAS: Implement dataColumnSidecarByRootRPCHandler. (#15405)
* `CreateTestVerifiedRoDataColumnSidecars`: Use consistent block root.

* peerDAS: Implement `dataColumnSidecarByRootRPCHandler`.

* Fix James' comment.

* Fix James' comment.
2025-06-19 12:12:45 +00:00
Preston Van Loon
62fec4d1f3 Replace context.Background with testing.TB.Context where possible (#15416)
* Replace context.Background with testing.TB.Context where possible

* Fix failing tests
2025-06-16 22:09:18 +00:00
Preston Van Loon
6a13ba9125 remove eth1voting tool (#15415) 2025-06-16 19:53:39 +00:00
terence
2a3876427f Fix blob peer count metric (#15412) 2025-06-16 14:27:55 +00:00
Potuz
f8b88db1a4 EIP 7917 (#15129)
* Add the new Fulu state with the new field

* fix the hasher for the fulu state

* Fix ToProto() and ToProtoUnsafe()

* Add the fields as shared

* Add epoch transition code

* short circuit the proposer cache to use the state

* Marshal the state JSON

* update spectests to 1.6.0-alpha.1

* Remove deneb and electra entries from blob schedule

This was cherry picked from PR #15364
and edited to remove the minimal cases

* Fix minimal tests

* Increase deadling for processing blocks in spectests

* Preston's review

* review

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2025-06-12 20:40:34 +00:00
kasey
d12da8cbe0 remove usages of params from proto packages (#15403)
* remove usages of params from proto packages

* make it harder to mess up the order of request limit args

* remove errant edit (Terence review)

* fix missed updates after sig change

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-06-11 21:15:10 +00:00
terence
6087875da5 beacon api: Stream event for data column sidecar (#15387) 2025-06-10 22:33:52 +00:00
Radosław Kapka
214f4a76fb Reclaim memory manually in some tests that fuzz the beacon state (#15395) 2025-06-10 21:30:51 +00:00
Radosław Kapka
f5a9394c77 Reorganize processing of light client updates (#15383)
* Reorganize processing of light client updates

* changelog <3
2025-06-09 17:08:23 +00:00
Manu NALEPA
4095da8568 PeerDAS: Implement small, unrelated changes. (#15386)
* `verifyBlobCommitmentCount`: Print max allowed blob count in error message.

* `TestPersist`: Use `fieldparams.RootLength` instead of `32`.

* `TestDataColumnSidecarsByRootReq_Marshal`: Remove blank line.

* `ConvertPeerIDToNodeID`: Improve readability by using one line per field.

* `parseIndices`: Return `[]int` instead of `[]uint64`.

Rationale: these indices are used in
`func (m *SparseMerkleTrie) MerkleProof(index int) ([][]byte, error)`

that requires `int` and not `uint64`.
This `MerkleProof` function is used at a lot of places in the codebase.
==> Changing the signature of `parseIndices` is simpler than changing the signature of `MerkleProof`.
2025-06-06 12:20:52 +00:00
Manu NALEPA
f1288a18ec PeerDAS: Implement DAS (#15367)
* PeerDAS: Implement DAS

* Fix Terence's comment.

* Fix Terence comment.

* Fix Terence's comment.

* Fix James' comment.

* Fix James' comment.

* Rename some variable/files with blobs.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.
2025-06-06 10:06:52 +00:00
1135 changed files with 34794 additions and 18226 deletions


@@ -194,6 +194,7 @@ nogo(
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/nop:go_default_library",


@@ -1,7 +1,7 @@
workspace(name = "prysm")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "rules_pkg",
@@ -16,8 +16,6 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "toolchains_protoc",
sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
@@ -192,7 +190,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
# A multi-arch base image
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
digest = "sha256:55a5e011b2c4246b4c51e01fcc2b452d151e03df052e357465f0392fcd59fddf",
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
platforms = [
"linux/amd64",
@@ -210,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.24.0",
go_version = "1.24.6",
nogo = "@//:nogo",
)
@@ -255,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.6.0-alpha.0"
consensus_spec_version = "v1.6.0-alpha.1"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-W7oKvoM0nAkyitykRxAw6kmCvjYC01IqiNJy0AmCnMM=",
"minimal": "sha256-ig7/zxomjv6buBWMom4IxAJh3lFJ9+JnY44E7c8ZNP8=",
"mainnet": "sha256-mjx+MkXtPhCNv4c4knLYLIkvIdpF7WTjx/ElvGPQzSo=",
"general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
"minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
"mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
},
version = consensus_spec_version,
)
@@ -280,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-u0RkIZIeGttb3sInR31mO64aBSwxALqO5SYIPlqEvPo=",
integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)


@@ -2,18 +2,28 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["common.go"],
srcs = [
"common.go",
"header.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/api/apiutil",
visibility = ["//visibility:public"],
deps = ["//consensus-types/primitives:go_default_library"],
deps = [
"//consensus-types/primitives:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["common_test.go"],
srcs = [
"common_test.go",
"header_test.go",
],
embed = [":go_default_library"],
deps = [
"//consensus-types/primitives:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

api/apiutil/header.go (new file, 122 lines)

@@ -0,0 +1,122 @@
package apiutil
import (
"mime"
"sort"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
)
type mediaRange struct {
mt string // canonicalised mediatype, e.g. "application/json"
q float64 // quality factor (0-1)
raw string // original string, useful for logging/debugging
spec int // 2=exact, 1=type/*, 0=*/*
}
func parseMediaRange(field string) (mediaRange, bool) {
field = strings.TrimSpace(field)
mt, params, err := mime.ParseMediaType(field)
if err != nil {
log.WithError(err).Debug("Failed to parse header field")
return mediaRange{}, false
}
r := mediaRange{mt: mt, q: 1, spec: 2, raw: field}
if qs, ok := params["q"]; ok {
v, err := strconv.ParseFloat(qs, 64)
if err != nil || v < 0 || v > 1 {
log.WithField("q", qs).Debug("Invalid quality factor (01)")
return mediaRange{}, false // skip invalid entry
}
r.q = v
}
switch {
case mt == "*/*":
r.spec = 0
case strings.HasSuffix(mt, "/*"):
r.spec = 1
}
return r, true
}
func hasExplicitQ(r mediaRange) bool {
return strings.Contains(strings.ToLower(r.raw), ";q=")
}
// ParseAccept returns media ranges sorted by q (desc) then specificity.
func ParseAccept(header string) []mediaRange {
if header == "" {
return []mediaRange{{mt: "*/*", q: 1, spec: 0, raw: "*/*"}}
}
var out []mediaRange
for _, field := range strings.Split(header, ",") {
if r, ok := parseMediaRange(field); ok {
out = append(out, r)
}
}
sort.SliceStable(out, func(i, j int) bool {
ei, ej := hasExplicitQ(out[i]), hasExplicitQ(out[j])
if ei != ej {
return ei // explicit beats implicit
}
if out[i].q != out[j].q {
return out[i].q > out[j].q
}
return out[i].spec > out[j].spec
})
return out
}
// Matches reports whether content type is acceptable per the header.
func Matches(header, ct string) bool {
for _, r := range ParseAccept(header) {
switch {
case r.q == 0:
continue
case r.mt == "*/*":
return true
case strings.HasSuffix(r.mt, "/*"):
if strings.HasPrefix(ct, r.mt[:len(r.mt)-1]) {
return true
}
case r.mt == ct:
return true
}
}
return false
}
// Negotiate selects the best server type according to the header.
// Returns the chosen type and true, or "", false when nothing matches.
func Negotiate(header string, serverTypes []string) (string, bool) {
for _, r := range ParseAccept(header) {
if r.q == 0 {
continue
}
for _, s := range serverTypes {
if Matches(r.mt, s) {
return s, true
}
}
}
return "", false
}
// PrimaryAcceptMatches only checks if the first accept matches
func PrimaryAcceptMatches(header, produced string) bool {
for _, r := range ParseAccept(header) {
if r.q == 0 {
continue // explicitly unacceptable; skip
}
return Matches(r.mt, produced)
}
return false
}
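
Assuming the header.go shown above, a short usage sketch of the exported helpers; the Accept string and server types here are arbitrary examples:

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/api/apiutil"
)

func main() {
    accept := "application/octet-stream;q=0.9, application/json;q=0.8"

    // Negotiate picks the best server-supported type for the Accept header.
    ct, ok := apiutil.Negotiate(accept, []string{"application/json", "application/octet-stream"})
    fmt.Println(ct, ok) // application/octet-stream true

    // Matches answers whether a given content type is acceptable at all,
    // PrimaryAcceptMatches only considers the highest-ranked media range.
    fmt.Println(apiutil.Matches(accept, "application/json"))              // true
    fmt.Println(apiutil.PrimaryAcceptMatches(accept, "application/json")) // false: octet-stream is preferred
}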

api/apiutil/header_test.go (new file, 174 lines)

@@ -0,0 +1,174 @@
package apiutil
import (
"testing"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestParseAccept(t *testing.T) {
type want struct {
mt string
q float64
spec int
}
cases := []struct {
name string
header string
want []want
}{
{
name: "empty header becomes */*;q=1",
header: "",
want: []want{{mt: "*/*", q: 1, spec: 0}},
},
{
name: "quality ordering then specificity",
header: "application/json;q=0.2, */*;q=0.1, application/xml;q=0.5, text/*;q=0.5",
want: []want{
{mt: "application/xml", q: 0.5, spec: 2},
{mt: "text/*", q: 0.5, spec: 1},
{mt: "application/json", q: 0.2, spec: 2},
{mt: "*/*", q: 0.1, spec: 0},
},
},
{
name: "invalid pieces are skipped",
header: "text/plain; q=boom, application/json",
want: []want{{mt: "application/json", q: 1, spec: 2}},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := ParseAccept(tc.header)
gotProjected := make([]want, len(got))
for i, g := range got {
gotProjected[i] = want{mt: g.mt, q: g.q, spec: g.spec}
}
require.DeepEqual(t, gotProjected, tc.want)
})
}
}
func TestMatches(t *testing.T) {
cases := []struct {
name string
accept string
ct string
matches bool
}{
{"exact match", "application/json", "application/json", true},
{"type wildcard", "application/*;q=0.8", "application/xml", true},
{"global wildcard", "*/*;q=0.1", "image/png", true},
{"explicitly unacceptable (q=0)", "text/*;q=0", "text/plain", false},
{"no match", "image/png", "application/json", false},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := Matches(tc.accept, tc.ct)
require.Equal(t, tc.matches, got)
})
}
}
func TestNegotiate(t *testing.T) {
cases := []struct {
name string
accept string
serverTypes []string
wantType string
ok bool
}{
{
name: "highest quality wins",
accept: "application/json;q=0.8,application/xml;q=0.9",
serverTypes: []string{"application/json", "application/xml"},
wantType: "application/xml",
ok: true,
},
{
name: "wildcard matches first server type",
accept: "*/*;q=0.5",
serverTypes: []string{"application/octet-stream", "application/json"},
wantType: "application/octet-stream",
ok: true,
},
{
name: "no acceptable type",
accept: "image/png",
serverTypes: []string{"application/json"},
wantType: "",
ok: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got, ok := Negotiate(tc.accept, tc.serverTypes)
require.Equal(t, tc.ok, ok)
require.Equal(t, tc.wantType, got)
})
}
}
func TestPrimaryAcceptMatches(t *testing.T) {
tests := []struct {
name string
accept string
produced string
expect bool
}{
{
name: "prefers json",
accept: "application/json;q=0.9,application/xml",
produced: "application/json",
expect: true,
},
{
name: "wildcard application beats other wildcard",
accept: "application/*;q=0.2,*/*;q=0.1",
produced: "application/xml",
expect: true,
},
{
name: "json wins",
accept: "application/xml;q=0.8,application/json;q=0.9",
produced: "application/json",
expect: true,
},
{
name: "json loses",
accept: "application/xml;q=0.8,application/json;q=0.9,application/octet-stream;q=0.99",
produced: "application/json",
expect: false,
},
{
name: "json wins with non q option",
accept: "application/xml;q=0.8,image/png,application/json;q=0.9",
produced: "application/json",
expect: true,
},
{
name: "json not primary",
accept: "image/png,application/json",
produced: "application/json",
expect: false,
},
{
name: "absent header",
accept: "",
produced: "text/plain",
expect: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := PrimaryAcceptMatches(tc.accept, tc.produced)
require.Equal(t, got, tc.expect)
})
}
}


@@ -16,7 +16,6 @@ go_library(
"//api/server/structs:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",


@@ -9,7 +9,6 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"
"github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
return fr.ToConsensus()
}
// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
// GetConfigSpec retrieves the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
return poolResponse, nil
}
type forkScheduleResponse struct {
Data []structs.Fork
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
return ofs, nil
}

View File

@@ -1,20 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"health.go",
"interfaces.go",
"mock.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/api/client/beacon/health",
visibility = ["//visibility:public"],
deps = ["@org_uber_go_mock//gomock:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["health_test.go"],
embed = [":go_default_library"],
deps = ["@org_uber_go_mock//gomock:go_default_library"],
)

View File

@@ -1,58 +0,0 @@
package health
import (
"context"
"sync"
)
type NodeHealthTracker struct {
isHealthy *bool
healthChan chan bool
node Node
sync.RWMutex
}
func NewTracker(node Node) Tracker {
return &NodeHealthTracker{
node: node,
healthChan: make(chan bool, 1),
}
}
// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
return n.healthChan
}
func (n *NodeHealthTracker) IsHealthy(_ context.Context) bool {
n.RLock()
defer n.RUnlock()
if n.isHealthy == nil {
return false
}
return *n.isHealthy
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.Lock()
defer n.Unlock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
if isStatusChanged {
// Update the health status
n.isHealthy = &newStatus
// Send the new status to the health channel, potentially overwriting the existing value
select {
case <-n.healthChan:
n.healthChan <- newStatus
default:
n.healthChan <- newStatus
}
}
return newStatus
}

View File

@@ -1,111 +0,0 @@
package health
import (
"context"
"sync"
"testing"
"go.uber.org/mock/gomock"
)
func TestNodeHealth_IsHealthy(t *testing.T) {
tests := []struct {
name string
isHealthy bool
want bool
}{
{"initially healthy", true, true},
{"initially unhealthy", false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &NodeHealthTracker{
isHealthy: &tt.isHealthy,
healthChan: make(chan bool, 1),
}
if got := n.IsHealthy(context.Background()); got != tt.want {
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
tests := []struct {
name string
initial bool // Initial health status
newStatus bool // Status to update to
shouldSend bool // Should a message be sent through the channel
}{
{"healthy to unhealthy", true, false, true},
{"unhealthy to healthy", false, true, true},
{"remain healthy", true, true, false},
{"remain unhealthy", false, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := NewMockHealthClient(ctrl)
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
n := &NodeHealthTracker{
isHealthy: &tt.initial,
node: client,
healthChan: make(chan bool, 1),
}
s := n.CheckHealth(context.Background())
// Check if health status was updated
if s != tt.newStatus {
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
}
select {
case status := <-n.HealthUpdates():
if !tt.shouldSend {
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
} else if status != tt.newStatus {
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
}
default:
if tt.shouldSend {
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
}
}
})
}
}
func TestNodeHealth_Concurrency(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := NewMockHealthClient(ctrl)
n := NewTracker(client)
var wg sync.WaitGroup
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
n.CheckHealth(context.Background())
}()
}
// Concurrently read health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
_ = n.IsHealthy(context.Background()) // Just read the value
}()
}
wg.Wait() // Wait for all goroutines to finish
}

View File

@@ -1,13 +0,0 @@
package health
import "context"
type Tracker interface {
HealthUpdates() <-chan bool
CheckHealth(ctx context.Context) bool
Node
}
type Node interface {
IsHealthy(ctx context.Context) bool
}

View File

@@ -1,58 +0,0 @@
package health
import (
"context"
"reflect"
"sync"
"go.uber.org/mock/gomock"
)
var (
_ = Node(&MockHealthClient{})
)
// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
sync.Mutex
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
mock *MockHealthClient
}
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.Lock()
defer m.Unlock()
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
if !ok {
return false
}
return ret0
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
return m.recorder
}
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.Lock()
defer mr.mock.Unlock()
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}
// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
mock := &MockHealthClient{ctrl: ctrl}
mock.recorder = &MockHealthClientMockRecorder{mock}
return mock
}

View File

@@ -50,6 +50,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",

View File

@@ -72,7 +72,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
log.WithFields(log.Fields{
"bodyBase64": "(nil value)",
"url": r.URL.String(),
}).Info("builder http request")
}).Info("Builder http request")
return nil
}
t := io.TeeReader(r.Body, b)
@@ -89,7 +89,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
log.WithFields(log.Fields{
"bodyBase64": string(body),
"url": r.URL.String(),
}).Info("builder http request")
}).Info("Builder http request")
return nil
}
@@ -101,7 +101,8 @@ type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error
Status(ctx context.Context) error
}
@@ -152,7 +153,8 @@ func (c *Client) NodeURL() string {
type reqOption func(*http.Request)
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package (api/client/builder).
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
// It validates that the HTTP response status matches the expectedStatus parameter.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, expectedStatus int, opts ...reqOption) (res []byte, header http.Header, err error) {
ctx, span := trace.StartSpan(ctx, "builder.client.do")
defer func() {
tracing.AnnotateError(span, err)
@@ -187,8 +189,8 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
log.WithError(closeErr).Error("Failed to close response body")
}
}()
if r.StatusCode != http.StatusOK {
err = non200Err(r)
if r.StatusCode != expectedStatus {
err = unexpectedStatusErr(r, expectedStatus)
return
}
res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
@@ -236,7 +238,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
r.Header.Set("Accept", api.JsonMediaType)
}
}
data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
if err != nil {
return nil, errors.Wrap(err, "error getting header from builder server")
}
@@ -409,7 +411,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
}
if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), http.StatusOK, postOpts); err != nil {
return errors.Wrap(err, "do")
}
log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
@@ -446,6 +448,9 @@ func sszValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
func getVersionsBlockToPayload(blockVersion int) (int, error) {
if blockVersion >= version.Fulu {
return version.Fulu, nil
}
if blockVersion >= version.Deneb {
return version.Deneb, nil
}
@@ -460,7 +465,7 @@ func getVersionsBlockToPayload(blockVersion int) (int, error) {
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
body, postOpts, err := c.buildBlindedBlockRequest(sb)
if err != nil {
return nil, nil, err
@@ -468,7 +473,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
// Post the blinded block - the execution payload response should contain the unblinded payload, along with the
// blobs bundle if it is post-Deneb.
data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusOK, postOpts)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
}
@@ -498,6 +503,24 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return ed, blobs, nil
}
// SubmitBlindedBlockPostFulu calls the builder API endpoint post-Fulu where relays only return status codes.
// This method is used after the Fulu fork when MEV-boost relays no longer return execution payloads.
func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error {
body, postOpts, err := c.buildBlindedBlockRequest(sb)
if err != nil {
return err
}
// Post the blinded block - the response should only contain a status code (no payload)
_, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
if err != nil {
return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
}
// Success is indicated by no error (status 202)
return nil
}
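A rough caller-side sketch of how the two submission paths could be selected by fork version. Only BuilderClient, version.Fulu, and the two Submit methods come from this change; the wrapper function and its placement are assumptions, and imports are omitted.
// Illustrative sketch, not part of this change.
func submitToBuilder(ctx context.Context, c BuilderClient, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
	if sb.Version() >= version.Fulu {
		// Post-Fulu relays answer with a status code only (202 Accepted); no payload is returned.
		return nil, nil, c.SubmitBlindedBlockPostFulu(ctx, sb)
	}
	// Pre-Fulu relays return the unblinded payload and, post-Deneb, a blobs bundle.
	return c.SubmitBlindedBlock(ctx, sb)
}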
func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
var versionHeader string
if c.sszEnabled {
@@ -558,7 +581,7 @@ func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBloc
func (c *Client) parseBlindedBlockResponse(
respBytes []byte,
forkVersion int,
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
) (interfaces.ExecutionData, v1.BlobsBundler, error) {
if c.sszEnabled {
return c.parseBlindedBlockResponseSSZ(respBytes, forkVersion)
}
@@ -568,8 +591,18 @@ func (c *Client) parseBlindedBlockResponse(
func (c *Client) parseBlindedBlockResponseSSZ(
respBytes []byte,
forkVersion int,
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
if forkVersion >= version.Deneb {
) (interfaces.ExecutionData, v1.BlobsBundler, error) {
if forkVersion >= version.Fulu {
payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundleV2{}
if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundleV2 SSZ")
}
ed, err := blocks.NewWrappedExecutionData(payloadAndBlobs.Payload)
if err != nil {
return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
}
return ed, payloadAndBlobs.BlobsBundle, nil
} else if forkVersion >= version.Deneb {
payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{}
if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundle SSZ")
@@ -644,11 +677,11 @@ func (c *Client) Status(ctx context.Context) error {
getOpts := func(r *http.Request) {
r.Header.Set("Accept", api.JsonMediaType)
}
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
return err
}
func non200Err(response *http.Response) error {
func unexpectedStatusErr(response *http.Response, expected int) error {
bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
var errMessage ErrorMessage
var body string
@@ -657,7 +690,7 @@ func non200Err(response *http.Response) error {
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
msg := fmt.Sprintf("expected=%d, got=%d, url=%s, body=%s", expected, response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case http.StatusUnsupportedMediaType:
log.WithError(ErrUnsupportedMediaType).Debug(msg)

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -31,7 +32,7 @@ func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) {
}
func TestClient_Status(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
statusPath := "/eth/v1/builder/status"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -84,7 +85,7 @@ func TestClient_Status(t *testing.T) {
}
func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
expectedPath := "/eth/v1/builder/validators"
t.Run("JSON success", func(t *testing.T) {
@@ -168,7 +169,7 @@ func TestClient_RegisterValidator(t *testing.T) {
}
func TestClient_GetHeader(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot primitives.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
@@ -601,7 +602,7 @@ func TestClient_GetHeader(t *testing.T) {
}
func TestSubmitBlindedBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
t.Run("bellatrix", func(t *testing.T) {
hc := &http.Client{
@@ -1554,12 +1555,95 @@ func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconB
}
}
func TestSubmitBlindedBlockPostFulu(t *testing.T) {
ctx := t.Context()
t.Run("success", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
// Post-Fulu: only return status code, no payload
return &http.Response{
StatusCode: http.StatusAccepted,
Body: io.NopCloser(bytes.NewBufferString("")),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
require.NoError(t, err)
})
t.Run("success_ssz", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
// Post-Fulu: only return status code, no payload
return &http.Response{
StatusCode: http.StatusAccepted,
Body: io.NopCloser(bytes.NewBufferString("")),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
sszEnabled: true,
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
require.NoError(t, err)
})
t.Run("error_response", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
message := ErrorMessage{
Code: 400,
Message: "Bad Request",
}
resp, err := json.Marshal(message)
require.NoError(t, err)
return &http.Response{
StatusCode: http.StatusBadRequest,
Body: io.NopCloser(bytes.NewBuffer(resp)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
require.ErrorIs(t, err, ErrNotOK)
})
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)
require.NoError(t, err)
ctx := context.Background()
ctx := t.Context()
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, getStatus, r.URL.Path)
@@ -1574,3 +1658,166 @@ func TestRequestLogger(t *testing.T) {
err = c.Status(ctx)
require.NoError(t, err)
}
func TestGetVersionsBlockToPayload(t *testing.T) {
tests := []struct {
name string
blockVersion int
expectedVersion int
expectedError bool
}{
{
name: "Fulu version",
blockVersion: 6, // version.Fulu
expectedVersion: 6,
expectedError: false,
},
{
name: "Deneb version",
blockVersion: 4, // version.Deneb
expectedVersion: 4,
expectedError: false,
},
{
name: "Capella version",
blockVersion: 3, // version.Capella
expectedVersion: 3,
expectedError: false,
},
{
name: "Bellatrix version",
blockVersion: 2, // version.Bellatrix
expectedVersion: 2,
expectedError: false,
},
{
name: "Unsupported version",
blockVersion: 0,
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
version, err := getVersionsBlockToPayload(tt.blockVersion)
if tt.expectedError {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, tt.expectedVersion, version)
}
})
}
}
func TestParseBlindedBlockResponseSSZ_WithBlobsBundleV2(t *testing.T) {
c := &Client{sszEnabled: true}
// Create test payload
payload := &v1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BlockNumber: 123456,
GasLimit: 30000000,
GasUsed: 21000,
Timestamp: 1234567890,
ExtraData: []byte("test-extra-data"),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: [][]byte{},
Withdrawals: []*v1.Withdrawal{},
BlobGasUsed: 1024,
ExcessBlobGas: 2048,
}
// Create test BlobsBundleV2
bundleV2 := &v1.BlobsBundleV2{
KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
Proofs: [][]byte{make([]byte, 48), make([]byte, 48)},
Blobs: [][]byte{make([]byte, 131072), make([]byte, 131072)},
}
// Test Fulu version (should use ExecutionPayloadDenebAndBlobsBundleV2)
t.Run("Fulu version with BlobsBundleV2", func(t *testing.T) {
payloadAndBlobsV2 := &v1.ExecutionPayloadDenebAndBlobsBundleV2{
Payload: payload,
BlobsBundle: bundleV2,
}
respBytes, err := payloadAndBlobsV2.MarshalSSZ()
require.NoError(t, err)
ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 6) // version.Fulu
require.NoError(t, err)
require.NotNil(t, ed)
require.NotNil(t, bundle)
// Verify the bundle is BlobsBundleV2
bundleV2Result, ok := bundle.(*v1.BlobsBundleV2)
assert.Equal(t, true, ok, "Expected BlobsBundleV2 type")
require.Equal(t, len(bundleV2.KzgCommitments), len(bundleV2Result.KzgCommitments))
require.Equal(t, len(bundleV2.Proofs), len(bundleV2Result.Proofs))
require.Equal(t, len(bundleV2.Blobs), len(bundleV2Result.Blobs))
})
// Test Deneb version (should use regular BlobsBundle)
t.Run("Deneb version with regular BlobsBundle", func(t *testing.T) {
regularBundle := &v1.BlobsBundle{
KzgCommitments: bundleV2.KzgCommitments,
Proofs: bundleV2.Proofs,
Blobs: bundleV2.Blobs,
}
payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{
Payload: payload,
BlobsBundle: regularBundle,
}
respBytes, err := payloadAndBlobs.MarshalSSZ()
require.NoError(t, err)
ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 4) // version.Deneb
require.NoError(t, err)
require.NotNil(t, ed)
require.NotNil(t, bundle)
// Verify the bundle is regular BlobsBundle
regularBundleResult, ok := bundle.(*v1.BlobsBundle)
assert.Equal(t, true, ok, "Expected BlobsBundle type")
require.Equal(t, len(regularBundle.KzgCommitments), len(regularBundleResult.KzgCommitments))
})
// Test invalid SSZ data
t.Run("Invalid SSZ data", func(t *testing.T) {
invalidBytes := []byte("invalid-ssz-data")
ed, bundle, err := c.parseBlindedBlockResponseSSZ(invalidBytes, 6)
assert.NotNil(t, err)
assert.Equal(t, true, ed == nil)
assert.Equal(t, true, bundle == nil)
})
}
func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
// Note: The full integration test is complex due to the version detection logic.
// The key functionality is covered by the parseBlindedBlockResponseSSZ tests above
// and by the mock service tests, which verify that the interface changes work correctly.
t.Run("Interface signature verification", func(t *testing.T) {
// This test verifies that the SubmitBlindedBlock method signature
// has been updated to return the v1.BlobsBundler interface.
client := &Client{}
// Verify at compile time that the method satisfies the expected signature
// by assigning it to a variable of the interface-returning function type.
var _ func(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) = client.SubmitBlindedBlock
// This test passes if the signature is correct
assert.Equal(t, true, true)
})
}
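Because the return type is now the v1.BlobsBundler interface, callers that need the concrete bundle can recover it with a type switch. A minimal sketch; the surrounding function, its arguments, and the per-case handling are assumptions, and imports are omitted.
// Illustrative sketch, not part of this change.
func handleBundle(ctx context.Context, c *Client, sb interfaces.ReadOnlySignedBeaconBlock) error {
	_, bundle, err := c.SubmitBlindedBlock(ctx, sb)
	if err != nil {
		return err
	}
	switch bundle.(type) {
	case nil:
		// Pre-Deneb responses carry no blobs bundle.
	case *v1.BlobsBundle:
		// Bundle shape returned for pre-Fulu (Deneb/Electra) responses.
	case *v1.BlobsBundleV2:
		// Bundle shape returned for Fulu responses, as exercised in the SSZ test above.
	default:
		return errors.New("unexpected blobs bundle type")
	}
	return nil
}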

View File

@@ -41,10 +41,15 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
return nil, nil, nil
}
// SubmitBlindedBlockPostFulu --
func (MockClient) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
return nil
}
// Status --
func (MockClient) Status(_ context.Context) error {
return nil

View File

@@ -1699,7 +1699,7 @@ func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}
func TestErrorMessage_non200Err(t *testing.T) {
func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
mockRequest := &http.Request{
URL: &url.URL{Path: "example.com"},
}
@@ -1779,7 +1779,7 @@ func TestErrorMessage_non200Err(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := non200Err(tt.args)
err := unexpectedStatusErr(tt.args, http.StatusOK)
if err != nil && tt.wantMessage != "" {
require.ErrorContains(t, tt.wantMessage, err)
}

View File

@@ -1,7 +1,6 @@
package event
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
@@ -30,7 +29,7 @@ func TestNewEventStream(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
_, err := NewEventStream(t.Context(), &http.Client{}, tt.host, tt.topics)
if (err != nil) != tt.wantErr {
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -56,7 +55,7 @@ func TestEventStream(t *testing.T) {
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
stream, err := NewEventStream(t.Context(), http.DefaultClient, server.URL, topics)
require.NoError(t, err)
go stream.Subscribe(eventsChannel)

View File

@@ -1,7 +1,6 @@
package grpc
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -16,7 +15,7 @@ type customErrorData struct {
func TestAppendHeaders(t *testing.T) {
t.Run("one_header", func(t *testing.T) {
ctx := AppendHeaders(context.Background(), []string{"first=value1"})
ctx := AppendHeaders(t.Context(), []string{"first=value1"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
@@ -24,7 +23,7 @@ func TestAppendHeaders(t *testing.T) {
})
t.Run("multiple_headers", func(t *testing.T) {
ctx := AppendHeaders(context.Background(), []string{"first=value1", "second=value2"})
ctx := AppendHeaders(t.Context(), []string{"first=value1", "second=value2"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 2, md.Len(), "MetadataV0 contains wrong number of values")
@@ -33,7 +32,7 @@ func TestAppendHeaders(t *testing.T) {
})
t.Run("one_empty_header", func(t *testing.T) {
ctx := AppendHeaders(context.Background(), []string{"first=value1", ""})
ctx := AppendHeaders(t.Context(), []string{"first=value1", ""})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
@@ -42,7 +41,7 @@ func TestAppendHeaders(t *testing.T) {
t.Run("incorrect_header", func(t *testing.T) {
logHook := logTest.NewGlobal()
ctx := AppendHeaders(context.Background(), []string{"first=value1", "second"})
ctx := AppendHeaders(t.Context(), []string{"first=value1", "second"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
@@ -51,7 +50,7 @@ func TestAppendHeaders(t *testing.T) {
})
t.Run("header_value_with_equal_sign", func(t *testing.T) {
ctx := AppendHeaders(context.Background(), []string{"first=value=1"})
ctx := AppendHeaders(t.Context(), []string{"first=value=1"})
md, ok := metadata.FromOutgoingContext(ctx)
require.Equal(t, true, ok, "Failed to read context metadata")
require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")

View File

@@ -1,7 +1,6 @@
package httprest
import (
"context"
"flag"
"fmt"
"net"
@@ -34,7 +33,7 @@ func TestServer_StartStop(t *testing.T) {
WithRouter(handler),
}
g, err := New(context.Background(), opts...)
g, err := New(t.Context(), opts...)
require.NoError(t, err)
g.Start()
@@ -62,7 +61,7 @@ func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
WithRouter(handler),
}
g, err := New(context.Background(), opts...)
g, err := New(t.Context(), opts...)
require.NoError(t, err)
writer := httptest.NewRecorder()

View File

@@ -8,7 +8,12 @@ go_library(
],
importpath = "github.com/OffchainLabs/prysm/v6/api/server/middleware",
visibility = ["//visibility:public"],
deps = ["@com_github_rs_cors//:go_default_library"],
deps = [
"//api:go_default_library",
"//api/apiutil:go_default_library",
"@com_github_rs_cors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
@@ -22,5 +27,6 @@ go_test(
"//api:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,11 +1,15 @@
package middleware
import (
"compress/gzip"
"fmt"
"net/http"
"strings"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/apiutil"
"github.com/rs/cors"
log "github.com/sirupsen/logrus"
)
type Middleware func(http.Handler) http.Handler
@@ -71,47 +75,64 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
acceptHeader := r.Header.Get("Accept")
// header is optional and should skip if not provided
if acceptHeader == "" {
if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
})
}
}
// AcceptEncodingHeaderHandler compresses the response before sending it back to the client, if gzip is supported.
func AcceptEncodingHeaderHandler() Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
next.ServeHTTP(w, r)
return
}
accepted := false
acceptTypes := strings.Split(acceptHeader, ",")
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
for _, acceptType := range acceptTypes {
acceptType = strings.TrimSpace(acceptType)
if acceptType == "*/*" {
accepted = true
break
gz := gzip.NewWriter(w)
gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
defer func() {
if !gzipRW.zip {
return
}
for _, serverAcceptedType := range serverAcceptedTypes {
if strings.HasPrefix(acceptType, serverAcceptedType) {
accepted = true
break
}
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
accepted = true
break
}
if err := gz.Close(); err != nil {
log.WithError(err).Error("Failed to close gzip writer")
}
if accepted {
break
}
}
}()
if !accepted {
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
next.ServeHTTP(gzipRW, r)
})
}
}
type gzipResponseWriter struct {
gz *gzip.Writer
http.ResponseWriter
zip bool
}
func (g *gzipResponseWriter) WriteHeader(statusCode int) {
if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
// Removing the current Content-Length because zipping will change it.
g.Header().Del("Content-Length")
g.Header().Set("Content-Encoding", "gzip")
g.zip = true
}
g.ResponseWriter.WriteHeader(statusCode)
}
func (g *gzipResponseWriter) Write(b []byte) (int, error) {
if g.zip {
return g.gz.Write(b)
}
return g.ResponseWriter.Write(b)
}
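A short wiring sketch for the handlers above using the package's MiddlewareChain helper (shown just below). The mux, route, and versionHandler are illustrative placeholders, and imports are omitted.
// Illustrative sketch, not part of this change.
func newAPIServer() *http.Server {
	mux := http.NewServeMux()
	mux.HandleFunc("/eth/v1/node/version", versionHandler) // versionHandler is a placeholder
	handler := MiddlewareChain(mux, []Middleware{
		AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
		AcceptEncodingHeaderHandler(),
	})
	return &http.Server{Addr: ":3500", Handler: handler}
}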
func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
if len(mw) < 1 {
return h

View File

@@ -1,14 +1,35 @@
package middleware
import (
"bytes"
"compress/gzip"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/testing/require"
log "github.com/sirupsen/logrus"
)
// frozenHeaderRecorder allows asserting that response headers were not modified
// after the call to WriteHeader.
//
// Its purpose is to have a regression test for https://github.com/OffchainLabs/prysm/pull/15499.
type frozenHeaderRecorder struct {
*httptest.ResponseRecorder
frozenHeader http.Header
}
func (r *frozenHeaderRecorder) WriteHeader(code int) {
if r.frozenHeader != nil {
return
}
r.ResponseRecorder.WriteHeader(code)
r.frozenHeader = r.ResponseRecorder.Header().Clone()
}
func TestNormalizeQueryValuesHandler(t *testing.T) {
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
@@ -124,6 +145,90 @@ func TestContentTypeHandler(t *testing.T) {
}
}
func TestAcceptEncodingHeaderHandler(t *testing.T) {
dummyContent := "Test gzip middleware content"
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", r.Header.Get("Accept"))
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte(dummyContent))
require.NoError(t, err)
})
handler := AcceptEncodingHeaderHandler()(nextHandler)
tests := []struct {
name string
accept string
acceptEncoding string
expectCompressed bool
}{
{
name: "Accept gzip",
accept: api.JsonMediaType,
acceptEncoding: "gzip",
expectCompressed: true,
},
{
name: "Accept multiple encodings",
accept: api.JsonMediaType,
acceptEncoding: "deflate, gzip",
expectCompressed: true,
},
{
name: "Accept unsupported encoding",
accept: api.JsonMediaType,
acceptEncoding: "deflate",
expectCompressed: false,
},
{
name: "No accept encoding header",
accept: api.JsonMediaType,
acceptEncoding: "",
expectCompressed: false,
},
{
name: "SSZ",
accept: api.OctetStreamMediaType,
acceptEncoding: "gzip",
expectCompressed: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
req.Header.Set("Accept", tt.accept)
if tt.acceptEncoding != "" {
req.Header.Set("Accept-Encoding", tt.acceptEncoding)
}
rr := &frozenHeaderRecorder{ResponseRecorder: httptest.NewRecorder()}
handler.ServeHTTP(rr, req)
if tt.expectCompressed {
require.Equal(t, "gzip", rr.frozenHeader.Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")
compressedBody := rr.Body.Bytes()
require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")
gzReader, err := gzip.NewReader(bytes.NewReader(compressedBody))
require.NoError(t, err, "Failed to create gzipReader")
defer func() {
if err := gzReader.Close(); err != nil {
log.WithError(err).Error("Failed to close gzip reader")
}
}()
decompressedBody, err := io.ReadAll(gzReader)
require.NoError(t, err, "Failed to decompress response body")
require.Equal(t, dummyContent, string(decompressedBody), "Decompressed content should match the original")
} else {
require.Equal(t, dummyContent, rr.Body.String(), "Response body should be uncompressed and match the original")
}
})
}
}
func TestAcceptHeaderHandler(t *testing.T) {
acceptedTypes := []string{"application/json", "application/octet-stream"}

View File

@@ -36,6 +36,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/validator"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -699,6 +700,11 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
// Validate that the signature is a well-formed BLS signature.
_, err = bls.SignatureFromBytes(sig)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SyncCommitteeMessage{
Slot: primitives.Slot(slot),

View File

@@ -74,7 +74,7 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
}
return &BeaconState{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -177,7 +177,7 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
}
return &BeaconStateAltair{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -295,7 +295,7 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
}
return &BeaconStateBellatrix{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -430,7 +430,7 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
}
return &BeaconStateCapella{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -568,7 +568,7 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
}
return &BeaconStateDeneb{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -742,7 +742,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
}
return &BeaconStateElectra{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -923,9 +923,16 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
if err != nil {
return nil, err
}
srcLookahead, err := st.ProposerLookahead()
if err != nil {
return nil, err
}
lookahead := make([]string, len(srcLookahead))
for i, v := range srcLookahead {
lookahead[i] = fmt.Sprintf("%d", uint64(v))
}
return &BeaconStateFulu{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
@@ -962,5 +969,6 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
ProposerLookahead: lookahead,
}, nil
}

View File

@@ -283,3 +283,10 @@ type GetPendingPartialWithdrawalsResponse struct {
Finalized bool `json:"finalized"`
Data []*PendingPartialWithdrawal `json:"data"`
}
type GetProposerLookaheadResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []string `json:"data"` // validator indexes
}

View File

@@ -25,6 +25,13 @@ type BlockGossipEvent struct {
Block string `json:"block"`
}
type DataColumnGossipEvent struct {
Slot string `json:"slot"`
Index string `json:"index"`
BlockRoot string `json:"block_root"`
KzgCommitments []string `json:"kzg_commitments"`
}
type AggregatedAttEventSource struct {
Aggregate *Attestation `json:"aggregate"`
}

View File

@@ -27,6 +27,8 @@ type Identity struct {
type Metadata struct {
SeqNumber string `json:"seq_number"`
Attnets string `json:"attnets"`
Syncnets string `json:"syncnets,omitempty"`
Cgc string `json:"custody_group_count,omitempty"`
}
type GetPeerResponse struct {

View File

@@ -219,4 +219,5 @@ type BeaconStateFulu struct {
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
ProposerLookahead []string `json:"proposer_lookahead"`
}

View File

@@ -15,7 +15,7 @@ import (
func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
wg := &sync.WaitGroup{}
@@ -39,7 +39,7 @@ func TestDebounce_NoEvents(t *testing.T) {
func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
wg := &sync.WaitGroup{}
@@ -75,7 +75,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
@@ -93,7 +93,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {

View File

@@ -19,10 +19,10 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
for {
select {
case <-ticker.C:
log.WithField("function", funcName).Trace("running")
log.WithField("function", funcName).Trace("Running")
f()
case <-ctx.Done():
log.WithField("function", funcName).Debug("context is closed, exiting")
log.WithField("function", funcName).Debug("Context is closed, exiting")
ticker.Stop()
return
}

View File

@@ -10,7 +10,7 @@ import (
)
func TestEveryRuns(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
i := int32(0)
async.RunEvery(ctx, 100*time.Millisecond, func() {

View File

@@ -73,6 +73,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -181,6 +182,7 @@ go_test(
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -194,6 +196,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -41,7 +41,7 @@ type ForkchoiceFetcher interface {
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(uint64)
SetForkChoiceGenesisTime(time.Time)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
@@ -514,7 +514,7 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
// SetGenesisTime sets the genesis time of the beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
s.genesisTime = t.Truncate(time.Second) // Genesis time has a precision of 1 second.
}
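For concreteness, a tiny illustrative example of the truncation applied above (the timestamp value is assumed, imports omitted).
// Illustrative sketch, not part of this change: sub-second precision is dropped.
func exampleTruncate() {
	g := time.Unix(1606824023, 500_000_000).UTC() // 2020-12-01 12:00:23.5 +0000 UTC
	fmt.Println(g.Truncate(time.Second))          // prints 2020-12-01 12:00:23 +0000 UTC
}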
func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {

View File

@@ -2,6 +2,7 @@ package blockchain
import (
"context"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
consensus_blocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -27,7 +28,7 @@ func (s *Service) GetProposerHead() [32]byte {
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
func (s *Service) SetForkChoiceGenesisTime(timestamp time.Time) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)

View File

@@ -1,12 +1,8 @@
package blockchain
import (
"context"
"testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -15,82 +11,71 @@ import (
)
func TestHeadSlot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
s := testServiceWithDB(t)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}()
s.HeadSlot()
<-wait
}
func TestHeadRoot_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
head: &head{root: [32]byte{'A'}},
}
s := testServiceWithDB(t)
s.head = &head{root: [32]byte{'A'}}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}()
_, err = s.HeadRoot(context.Background())
_, err = s.HeadRoot(t.Context())
require.NoError(t, err)
<-wait
}
func TestHeadBlock_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
head: &head{block: wsb},
}
s := testServiceWithDB(t)
s.head = &head{block: wsb}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}()
_, err = s.HeadBlock(context.Background())
_, err = s.HeadBlock(t.Context())
require.NoError(t, err)
<-wait
}
func TestHeadState_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
}
s := testServiceWithDB(t)
beaconDB := s.cfg.BeaconDB
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
wait := make(chan struct{})
st, _ := util.DeterministicGenesisState(t, 1)
root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root))
require.NoError(t, beaconDB.SaveState(context.Background(), st, root))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(t.Context(), root))
require.NoError(t, beaconDB.SaveState(t.Context(), st, root))
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}()
_, err = s.HeadState(context.Background())
_, err = s.HeadState(t.Context())
require.NoError(t, err)
<-wait
}

View File

@@ -6,7 +6,6 @@ import (
"time"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
@@ -15,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -84,7 +84,7 @@ func prepareForkchoiceState(
func TestHeadRoot_Nil(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
headRoot, err := c.HeadRoot(context.Background())
headRoot, err := c.HeadRoot(t.Context())
require.NoError(t, err)
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}
@@ -137,8 +137,8 @@ func TestFinalizedBlockHash(t *testing.T) {
}
func TestUnrealizedJustifiedBlockHash(t *testing.T) {
ctx := context.Background()
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ctx := t.Context()
service := testServiceWithDB(t)
ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -153,7 +153,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
}
func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
c := testServiceNoDB(t)
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
@@ -200,10 +200,10 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{block: wsb, state: s}
received, err := c.HeadBlock(context.Background())
received, err := c.HeadBlock(t.Context())
require.NoError(t, err)
pb, err := received.Proto()
require.NoError(t, err)
@@ -213,15 +213,16 @@ func TestHeadBlock_CanRetrieve(t *testing.T) {
func TestHeadState_CanRetrieve(t *testing.T) {
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
headState, err := c.HeadState(t.Context())
require.NoError(t, err)
assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
}
func TestGenesisTime_CanRetrieve(t *testing.T) {
c := &Service{genesisTime: time.Unix(999, 0)}
c := testServiceNoDB(t)
c.genesisTime = time.Unix(999, 0)
wanted := time.Unix(999, 0)
assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
}
@@ -230,7 +231,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
f := &ethpb.Fork{Epoch: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
if !proto.Equal(c.CurrentFork(), f) {
t.Error("Received incorrect fork version")
@@ -242,7 +243,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
c := &Service{}
c := testServiceNoDB(t)
if !proto.Equal(c.CurrentFork(), f) {
t.Error("Received incorrect fork version")
}
@@ -250,7 +251,7 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
// Should not panic if head state is nil.
c := &Service{}
c := testServiceNoDB(t)
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
@@ -269,7 +270,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
d := &ethpb.Eth1Data{DepositCount: 999}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
require.NoError(t, err)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
if !proto.Equal(c.HeadETH1Data(), d) {
t.Error("Received incorrect eth1 data")
@@ -277,7 +278,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
}
func TestIsCanonical_Ok(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
@@ -298,22 +299,22 @@ func TestIsCanonical_Ok(t *testing.T) {
func TestService_HeadValidatorsIndices(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{}
indices, err := c.HeadValidatorsIndices(context.Background(), 0)
indices, err := c.HeadValidatorsIndices(t.Context(), 0)
require.NoError(t, err)
require.Equal(t, 0, len(indices))
c.head = &head{state: s}
indices, err = c.HeadValidatorsIndices(context.Background(), 0)
indices, err = c.HeadValidatorsIndices(t.Context(), 0)
require.NoError(t, err)
require.Equal(t, 10, len(indices))
}
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{}
root := c.HeadGenesisValidatorsRoot()
@@ -331,8 +332,8 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
// ---------- D
func TestService_ChainHeads(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
ctx := t.Context()
c := testServiceWithDB(t)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -366,7 +367,7 @@ func TestService_ChainHeads(t *testing.T) {
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
_, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
@@ -381,7 +382,7 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
}
func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
c := &Service{}
c := testServiceNoDB(t)
c.head = nil
idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
@@ -396,10 +397,10 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
c := testServiceNoDB(t)
c.head = &head{state: s}
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
require.NoError(t, err)
v, err := s.ValidatorAtIndex(0)
@@ -409,15 +410,15 @@ func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
}
func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
c := &Service{}
c := testServiceNoDB(t)
c.head = nil
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0)
require.NoError(t, err)
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
c.head = &head{state: nil}
p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0)
p, err = c.HeadValidatorIndexToPublicKey(t.Context(), 0)
require.NoError(t, err)
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}
@@ -428,10 +429,12 @@ func TestService_IsOptimistic(t *testing.T) {
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
ctx := t.Context()
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := testServiceWithDB(t)
c.SetGenesisTime(time.Now())
c.head = &head{root: [32]byte{'b'}}
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
@@ -450,22 +453,24 @@ func TestService_IsOptimistic(t *testing.T) {
require.Equal(t, true, opt)
// If head is nil, for some reason, an error should be returned rather than panic.
c = &Service{}
c = testServiceNoDB(t)
_, err = c.IsOptimistic(ctx)
require.ErrorIs(t, err, ErrNilHead)
}
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := context.Background()
c := &Service{genesisTime: time.Now()}
ctx := t.Context()
c := testServiceNoDB(t)
c.genesisTime = time.Now()
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, opt)
}
func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ctx := t.Context()
c := testServiceWithDB(t)
c.head = &head{root: [32]byte{'b'}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -481,28 +486,29 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
}
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ctx := t.Context()
c := testServiceWithDB(t)
c.head = &head{root: [32]byte{'b'}}
beaconDB := c.cfg.BeaconDB
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
util.SaveBlock(t, t.Context(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
util.SaveBlock(t, t.Context(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
@@ -524,48 +530,48 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
// Before the first finalized epoch, finalized root could be zeros.
validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
}
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ctx := t.Context()
c := testServiceWithDB(t)
beaconDB := c.cfg.BeaconDB
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
util.SaveBlock(t, t.Context(), beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 97
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock)
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
util.SaveBlock(t, t.Context(), beaconDB, validatedBlock)
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
require.NoError(t, beaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, true, validated)
@@ -573,15 +579,15 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
}
func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
ctx := t.Context()
c := testServiceWithDB(t)
beaconDB := c.cfg.BeaconDB
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, b)
util.SaveBlock(t, t.Context(), beaconDB, b)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
_, err = c.IsOptimisticForRoot(ctx, br)
assert.NoError(t, err)
@@ -593,9 +599,9 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
}
func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
ctx := t.Context()
c := testServiceWithDB(t)
beaconDB := c.cfg.BeaconDB
r1 := [32]byte{'a'}
require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Root: r1,
@@ -616,9 +622,10 @@ func TestService_IsFinalized(t *testing.T) {
func Test_hashForGenesisRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
ctx := t.Context()
c := setupBeaconChain(t, beaconDB)
st, _ := util.DeterministicGenesisStateElectra(t, 10)
genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
root, err := beaconDB.GenesisBlockRoot(ctx)
require.NoError(t, err)
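
The dominant pattern across these test diffs is swapping context.Background() for t.Context() and replacing hand-built &Service{} literals with the testServiceNoDB/testServiceWithDB helpers added later in this compare. A minimal sketch of the t.Context() behavior the tests now rely on (Go 1.24+; the package and test name here are hypothetical):

```go
package blockchain_test

import "testing"

// TestContextLifetime is a hypothetical test illustrating t.Context(): the
// returned context stays live for the duration of the test body and is
// canceled automatically when the test finishes, so tests no longer need
// context.Background() plus manual cancellation.
func TestContextLifetime(t *testing.T) {
	ctx := t.Context()
	select {
	case <-ctx.Done():
		t.Fatal("t.Context() should not be canceled while the test runs")
	default: // still live, as expected
	}
}
```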


@@ -7,10 +7,15 @@ type currentlySyncingBlock struct {
roots map[[32]byte]struct{}
}
func (b *currentlySyncingBlock) set(root [32]byte) {
func (b *currentlySyncingBlock) set(root [32]byte) error {
b.Lock()
defer b.Unlock()
_, ok := b.roots[root]
if ok {
return errBlockBeingSynced
}
b.roots[root] = struct{}{}
return nil
}
func (b *currentlySyncingBlock) unset(root [32]byte) {
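
With this change, set reports when a root is already being tracked instead of silently re-adding it. A standalone sketch of the resulting caller contract (not the Prysm source; unset's body is assumed to simply delete the root):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errBlockBeingSynced = errors.New("block is being synced")

type currentlySyncingBlock struct {
	sync.Mutex
	roots map[[32]byte]struct{}
}

// set registers a root as being synced, or reports that it already is.
func (b *currentlySyncingBlock) set(root [32]byte) error {
	b.Lock()
	defer b.Unlock()
	if _, ok := b.roots[root]; ok {
		return errBlockBeingSynced
	}
	b.roots[root] = struct{}{}
	return nil
}

// unset removes a root once syncing finishes (assumed implementation).
func (b *currentlySyncingBlock) unset(root [32]byte) {
	b.Lock()
	defer b.Unlock()
	delete(b.roots, root)
}

func main() {
	tracker := &currentlySyncingBlock{roots: make(map[[32]byte]struct{})}
	root := [32]byte{'a'}

	if err := tracker.set(root); err != nil {
		fmt.Println("unexpected:", err)
	}
	// A second attempt for the same root is rejected, so callers can skip duplicate work.
	if err := tracker.set(root); errors.Is(err, errBlockBeingSynced) {
		fmt.Println("root already being synced, skipping")
	}
	tracker.unset(root)
}
```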


@@ -2,7 +2,6 @@ package blockchain
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -17,9 +16,6 @@ var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{
// a higher number of fragmented indexes are reallocated to a new separate slice for
// that field.
func (s *Service) defragmentState(st state.BeaconState) {
if !features.Get().EnableExperimentalState {
return
}
startTime := time.Now()
st.Defragment()
elapsedTime := time.Since(startTime)
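
The feature-flag guard around defragmentState is removed, so the state is always defragmented and the operation is always timed. A minimal standalone sketch of the time-and-observe-into-a-Prometheus-summary pattern used here (the metric name is a placeholder, not the one Prysm registers):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var opTime = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "example_operation_seconds",
	Help: "Time spent in the example operation.",
})

// timedOperation runs op and records its duration in the summary.
func timedOperation(op func()) {
	start := time.Now()
	op()
	opTime.Observe(time.Since(start).Seconds())
}

func main() {
	timedOperation(func() { time.Sleep(10 * time.Millisecond) })
}
```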


@@ -44,6 +44,8 @@ var (
errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
// errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
// errBlockBeingSynced is returned when a block is being synced.
errBlockBeingSynced = errors.New("block is being synced")
)
// An invalid block is the block that fails state transition based on the core protocol rules.


@@ -141,7 +141,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
}
if err := s.saveHead(ctx, r, b, st); err != nil {
log.WithError(err).Error("could not save head after pruning invalid blocks")
log.WithError(err).Error("Could not save head after pruning invalid blocks")
}
log.WithFields(logrus.Fields{
@@ -174,6 +174,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
@@ -354,7 +355,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
t, err := slots.StartTime(s.genesisTime, slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return emptyAttri
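
getPayloadAttribute now derives the slot's start time with slots.StartTime(s.genesisTime, slot) instead of first converting genesis to unix seconds for slots.ToTime. A standalone sketch of what that start time amounts to (constants and the genesis value are assumptions for illustration):

```go
package main

import (
	"fmt"
	"time"
)

// secondsPerSlot is the mainnet preset value; an assumption for this sketch.
const secondsPerSlot = 12

// startTime mirrors what slots.StartTime(genesis, slot) computes:
// the wall-clock time at which the given slot begins.
func startTime(genesis time.Time, slot uint64) time.Time {
	return genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
}

func main() {
	genesis := time.Unix(1606824023, 0)  // example genesis timestamp
	fmt.Println(startTime(genesis, 100)) // genesis + 1200s
}
```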


@@ -19,6 +19,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
block: wba,
}
genesis.StoreStateDuringTest(t, st)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
a := &fcuConfig{
@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)


@@ -76,14 +76,14 @@ func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fc
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("could not compute payload attributes")
log.WithError(err).Error("Could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
}
}
@@ -99,11 +99,9 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
}
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("could not save head")
log.WithError(err).Error("Could not save head")
}
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
@@ -114,7 +112,7 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
}
currentSlot := s.CurrentSlot()
if proposingSlot == currentSlot {
@@ -132,17 +130,17 @@ func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitiv
if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
return true
}
secs, err := slots.SecondsSinceSlotStart(currentSlot,
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
log.WithError(err).Error("Could not compute seconds since slot start")
}
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
if sss >= doublylinkedtree.ProcessAttestationsThreshold {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
doublylinkedtree.ProcessAttestationsThreshold)
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
"sinceSlotStart": sss,
"threshold": doublylinkedtree.ProcessAttestationsThreshold,
}).Info("Attempted late block reorg aborted due to attestations after threshold")
lateBlockFailedAttemptFirstThreshold.Inc()
}
}
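
The reorg-abort log switches from an Infof-formatted message to structured fields, so the elapsed time and the threshold are queryable on their own. A standalone logrus sketch of that pattern (field values are placeholders):

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.WithField("prefix", "blockchain")

	// Structured fields instead of a formatted message, so downstream log
	// processing can filter on sinceSlotStart and threshold directly.
	log.WithFields(logrus.Fields{
		"root":           "0xabcd",
		"weight":         uint64(42),
		"sinceSlotStart": 11 * time.Second,
		"threshold":      10 * time.Second,
	}).Info("Attempted late block reorg aborted due to attestations after threshold")
}
```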


@@ -1,7 +1,6 @@
package blockchain
import (
"context"
"testing"
"time"
@@ -36,29 +35,29 @@ func TestService_isNewHead(t *testing.T) {
func TestService_getHeadStateAndBlock(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
_, _, err := service.getStateAndBlock(context.Background(), [32]byte{})
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{})
require.ErrorContains(t, "block does not exist", err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(t.Context(), blk))
st, _ := util.DeterministicGenesisState(t, 1)
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r))
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), st, r))
gotState, err := service.cfg.BeaconDB.State(context.Background(), r)
gotState, err := service.cfg.BeaconDB.State(t.Context(), r)
require.NoError(t, err)
require.DeepEqual(t, st.ToProto(), gotState.ToProto())
gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r)
gotBlk, err := service.cfg.BeaconDB.Block(t.Context(), r)
require.NoError(t, err)
require.DeepEqual(t, blk, gotBlk)
}
func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
@@ -161,6 +160,7 @@ func TestShouldOverrideFCU(t *testing.T) {
ctx, fcs := tr.ctx, tr.fcs
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
fcs.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}
@@ -181,11 +181,12 @@ func TestShouldOverrideFCU(t *testing.T) {
require.NoError(t, err)
require.Equal(t, headRoot, head)
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
wantLog := "aborted due to attestations after threshold"
fcs.SetGenesisTime(time.Now().Add(-29 * time.Second))
require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
require.LogsDoNotContain(t, hook, "10 seconds")
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
require.LogsDoNotContain(t, hook, wantLog)
fcs.SetGenesisTime(time.Now().Add(-24 * time.Second))
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
require.LogsContain(t, hook, "10 seconds")
require.LogsContain(t, hook, wantLog)
}


@@ -98,7 +98,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
oldHeadRoot := bytesutil.ToBytes32(r)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
if err != nil {
log.WithError(err).Error("could not check if node is optimistically synced")
log.WithError(err).Error("Could not check if node is optimistically synced")
}
if headBlock.Block().ParentRoot() != oldHeadRoot {
// A chain re-org occurred, so we fire an event notifying the rest of the services.
@@ -111,11 +111,11 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
}
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),


@@ -18,9 +18,6 @@ import (
"github.com/pkg/errors"
)
// Initialize the state cache for sync committees.
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
type HeadSyncCommitteeFetcher interface {
@@ -143,7 +140,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
defer mLock.Unlock()
// If there's already a head state exists with the request slot, we don't need to process slots.
cachedState, err := syncCommitteeHeadStateCache.Get(slot)
cachedState, err := s.syncCommitteeHeadState.Get(slot)
switch {
case err == nil:
syncHeadStateHit.Inc()
@@ -166,7 +163,7 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
return nil, err
}
syncHeadStateMiss.Inc()
err = syncCommitteeHeadStateCache.Put(slot, headState)
err = s.syncCommitteeHeadState.Put(slot, headState)
return headState, err
default:
// In the event, we encounter another error
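
getSyncCommitteeHeadState now reads the cache from a field on the Service (s.syncCommitteeHeadState) rather than a package-level variable, which keeps state isolated between service instances and between tests. A generic standalone sketch of that refactor with stand-in types (not the Prysm cache implementation):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotFound = errors.New("not found in cache")

// headStateCache is a simplified stand-in for the sync-committee head-state cache.
type headStateCache struct {
	mu    sync.RWMutex
	items map[uint64]string // slot -> state (stand-in for a BeaconState)
}

func newHeadStateCache() *headStateCache {
	return &headStateCache{items: make(map[uint64]string)}
}

func (c *headStateCache) Get(slot uint64) (string, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	st, ok := c.items[slot]
	if !ok {
		return "", errNotFound
	}
	return st, nil
}

func (c *headStateCache) Put(slot uint64, st string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[slot] = st
}

// service owns its cache as a field, where it previously lived in a package-level var.
type service struct {
	syncCommitteeHeadState *headStateCache
}

func main() {
	s := &service{syncCommitteeHeadState: newHeadStateCache()}
	if _, err := s.syncCommitteeHeadState.Get(101); errors.Is(err, errNotFound) {
		s.syncCommitteeHeadState.Put(101, "state@101")
	}
	st, _ := s.syncCommitteeHeadState.Get(101)
	fmt.Println(st)
}
```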


@@ -1,12 +1,10 @@
package blockchain
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
dbTest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -16,35 +14,35 @@ import (
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Current period
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
a, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
require.NoError(t, err)
// Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
b, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
require.NoError(t, err)
require.DeepEqual(t, a, b)
// Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
b, err = c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
require.NoError(t, err)
require.DeepNotEqual(t, a, b)
}
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.headCurrentSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
@@ -53,12 +51,12 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
func TestService_headNextSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
indices, err := c.headNextSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
@@ -67,12 +65,12 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0)
pubkeys, err := c.HeadSyncCommitteePubKeys(t.Context(), primitives.Slot(slot), 0)
require.NoError(t, err)
// Any subcommittee should match the subcommittee size.
@@ -82,13 +80,13 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c := testServiceWithDB(t)
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
require.NoError(t, err)
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
d, err := c.HeadSyncCommitteeDomain(t.Context(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
@@ -96,13 +94,13 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := testServiceWithDB(t)
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
require.NoError(t, err)
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
d, err := c.HeadSyncContributionProofDomain(t.Context(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
@@ -110,30 +108,27 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := testServiceWithDB(t)
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
require.NoError(t, err)
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
d, err := c.HeadSyncSelectionProofDomain(t.Context(), 0)
require.NoError(t, err)
require.DeepEqual(t, wanted, d)
}
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
c := syncCommitteeHeadStateCache
t.Cleanup(func() {
syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
})
s := testServiceNoDB(t)
beaconState, _ := util.DeterministicGenesisStateAltair(t, 100)
require.NoError(t, beaconState.SetSlot(100))
cachedState, err := c.Get(101)
cachedState, err := s.syncCommitteeHeadState.Get(101)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
require.Equal(t, nil, cachedState)
require.NoError(t, c.Put(101, beaconState))
cachedState, err = c.Get(101)
require.NoError(t, s.syncCommitteeHeadState.Put(101, beaconState))
cachedState, err = s.syncCommitteeHeadState.Get(101)
require.NoError(t, err)
require.DeepEqual(t, beaconState, cachedState)
}


@@ -9,7 +9,6 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -34,17 +33,17 @@ func TestSaveHead_Same(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.saveHead(context.Background(), r, b, st))
require.NoError(t, service.saveHead(t.Context(), r, b, st))
assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}
func TestSaveHead_Different(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -61,7 +60,7 @@ func TestSaveHead_Different(t *testing.T) {
newHeadSignedBlock.Block.Slot = 1
newHeadBlock := newHeadSignedBlock.Block
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
@@ -74,13 +73,13 @@ func TestSaveHead_Different(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
cachedRoot, err := service.HeadRoot(t.Context())
require.NoError(t, err)
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
headBlock, err := service.headBlock()
@@ -92,12 +91,12 @@ func TestSaveHead_Different(t *testing.T) {
}
func TestSaveHead_Different_Reorg(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
hook := logTest.NewGlobal()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -120,7 +119,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
newHeadBlock := newHeadSignedBlock.Block
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
@@ -129,13 +128,13 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
cachedRoot, err := service.HeadRoot(context.Background())
cachedRoot, err := service.HeadRoot(t.Context())
require.NoError(t, err)
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
@@ -154,20 +153,16 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: [32]byte{1},
}
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
srv.originBlockRoot = [32]byte{1}
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
require.NoError(t, srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -185,18 +180,14 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
t.Run("non_genesis_values", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
notifier := &mock.MockStateNotifier{RecordEvents: true}
genesisRoot := [32]byte{1}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: genesisRoot,
}
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
srv.originBlockRoot = genesisRoot
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
epoch1Start, err := slots.EpochStart(1)
require.NoError(t, err)
epoch2Start, err := slots.EpochStart(1)
@@ -205,7 +196,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -225,11 +216,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
}
func TestRetrieveHead_ReadOnly(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock())
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
service.head = &head{
@@ -243,7 +234,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
@@ -256,9 +247,9 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot))
require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState))
rOnlyState, err := service.HeadStateReadOnly(ctx)
require.NoError(t, err)
@@ -267,7 +258,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
}
func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -333,7 +324,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
}
func TestSaveOrphanedAttsElectra(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -404,10 +395,10 @@ func TestSaveOrphanedOps(t *testing.T) {
config.ShardCommitteePeriod = 0
params.OverrideBeaconConfig(config)
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
// Chain setup
// 0 -- 1 -- 2 -- 3
@@ -481,7 +472,7 @@ func TestSaveOrphanedOps(t *testing.T) {
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.cfg.BLSToExecPool = blstoexec.NewPool()
@@ -539,7 +530,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
}
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
@@ -604,7 +595,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
}
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)


@@ -1,7 +1,6 @@
package blockchain
import (
"context"
"testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
@@ -11,7 +10,7 @@ import (
)
func TestService_getBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
s := setupBeaconChain(t, beaconDB)
b1 := util.NewBeaconBlock()
@@ -42,7 +41,7 @@ func TestService_getBlock(t *testing.T) {
}
func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
s := setupBeaconChain(t, beaconDB)
b1 := util.NewBeaconBlock()


@@ -11,7 +11,7 @@ import (
)
var (
// https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
// https://github.com/ethereum/consensus-specs/blob/master/presets/mainnet/trusted_setups/trusted_setup_4096.json
//go:embed trusted_setup_4096.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context


@@ -6,20 +6,20 @@ import (
)
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
func Verify(blobSidecars ...blocks.ROBlob) error {
if len(blobSidecars) == 0 {
return nil
}
if len(sidecars) == 1 {
if len(blobSidecars) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(sidecars[0].Blob),
bytesToCommitment(sidecars[0].KzgCommitment),
bytesToKZGProof(sidecars[0].KzgProof))
bytesToBlob(blobSidecars[0].Blob),
bytesToCommitment(blobSidecars[0].KzgCommitment),
bytesToKZGProof(blobSidecars[0].KzgProof))
}
blobs := make([]GoKZG.Blob, len(sidecars))
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
proofs := make([]GoKZG.KZGProof, len(sidecars))
for i, sidecar := range sidecars {
blobs := make([]GoKZG.Blob, len(blobSidecars))
cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
proofs := make([]GoKZG.KZGProof, len(blobSidecars))
for i, sidecar := range blobSidecars {
blobs[i] = *bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)


@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}
func TestVerify(t *testing.T) {
sidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(sidecars...))
blobSidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(blobSidecars...))
}
func TestBytesToAny(t *testing.T) {


@@ -26,9 +26,6 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
}
if len(b.Body().Deposits()) > 0 {
log = log.WithField("deposits", len(b.Body().Deposits()))
}
if len(b.Body().AttesterSlashings()) > 0 {
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
}
@@ -89,8 +86,8 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
return nil
}
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64, daWaitedTime time.Duration) error {
startTime, err := slots.ToTime(genesisTime, block.Slot())
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return err
}
@@ -111,7 +108,6 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {
@@ -159,7 +155,9 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
fields["blsToExecutionChanges"] = len(changes)
if len(changes) > 0 {
fields["blsToExecutionChanges"] = len(changes)
}
}
log.WithFields(fields).Debug("Synced new payload")
return nil


@@ -53,7 +53,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -93,7 +93,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },


@@ -1,7 +1,6 @@
package blockchain
import (
"context"
"testing"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -9,23 +8,13 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/util"
)
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
s, err := util.NewBeaconState()
require.NoError(t, err)
h, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.SetValidators(nil))
err = reportEpochMetrics(context.Background(), s, h)
require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
}
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
s, err := util.NewBeaconState()
require.NoError(t, err)
h, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 0}))
err = reportEpochMetrics(context.Background(), s, h)
err = reportEpochMetrics(t.Context(), s, h)
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
}
@@ -36,6 +25,6 @@ func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
v.Slashed = true
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 1, Data: util.HydrateAttestationData(&eth.AttestationData{})}))
err = reportEpochMetrics(context.Background(), h, h)
err = reportEpochMetrics(t.Context(), h, h)
require.ErrorContains(t, "slot 0 out of bounds", err)
}


@@ -8,9 +8,10 @@ import (
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func testServiceOptsWithDB(t *testing.T) []Option {
func testServiceOptsWithDB(t testing.TB) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
@@ -31,3 +32,15 @@ func testServiceOptsNoDB() []Option {
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
}
func testServiceNoDB(t testing.TB) *Service {
s, err := NewService(t.Context(), testServiceOptsNoDB()...)
require.NoError(t, err)
return s
}
func testServiceWithDB(t testing.TB) *Service {
s, err := NewService(t.Context(), testServiceOptsWithDB(t)...)
require.NoError(t, err)
return s
}
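
Because these helpers now take testing.TB rather than *testing.T, the same constructors can back benchmarks as well as tests. A hedged sketch within the package's test files (BenchmarkGenesisTime is not part of this diff):

```go
// Hypothetical benchmark reusing the new helper via testing.TB.
func BenchmarkGenesisTime(b *testing.B) {
	svc := testServiceNoDB(b)
	for i := 0; i < b.N; i++ {
		_ = svc.GenesisTime()
	}
}
```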


@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -33,6 +32,14 @@ func WithMaxGoroutines(x int) Option {
}
}
// WithLCStore for light client store access.
func WithLCStore() Option {
return func(s *Service) error {
s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
return nil
}
}
// WithWeakSubjectivityCheckpoint for checkpoint sync.
func WithWeakSubjectivityCheckpoint(c *ethpb.Checkpoint) Option {
return func(s *Service) error {
@@ -227,14 +234,6 @@ func WithSyncChecker(checker Checker) Option {
}
}
// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
return func(s *Service) error {
s.cfg.CustodyInfo = custodyInfo
return nil
}
}
// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
@@ -246,7 +245,7 @@ func WithSlasherEnabled(enabled bool) Option {
// WithGenesisTime sets the genesis time for the blockchain service.
func WithGenesisTime(genesisTime time.Time) Option {
return func(s *Service) error {
s.genesisTime = genesisTime
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
return nil
}
}
@@ -258,3 +257,12 @@ func WithLightClientStore(lcs *lightclient.Store) Option {
return nil
}
}
// WithStartWaitingDataColumnSidecars sets a channel that the `areDataColumnsAvailable` function will fill
// in when starting to wait for additional data columns.
func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return func(s *Service) error {
s.startWaitingDataColumnSidecars = c
return nil
}
}
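
A hedged wiring sketch for two of the options touched in this file, the truncated WithGenesisTime and the new WithStartWaitingDataColumnSidecars. The import path and the minimal option set are assumptions; a real node supplies many more options (database, state-gen, fork choice, ...) before starting the service:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain" // assumed import path
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
)

func main() {
	// Channel filled by areDataColumnsAvailable when it starts waiting for extra data columns.
	waitCh := make(chan bool, 1)

	svc, err := blockchain.NewService(context.Background(),
		blockchain.WithClockSynchronizer(startup.NewClockSynchronizer()),
		blockchain.WithGenesisTime(time.Now()), // stored truncated to 1s precision per the change above
		blockchain.WithStartWaitingDataColumnSidecars(waitCh),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc
}
```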


@@ -5,6 +5,7 @@ import (
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -59,10 +60,8 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
return err
}
genesisTime := uint64(s.genesisTime.Unix())
// Verify attestation target is from current epoch or previous epoch.
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
if err := verifyAttTargetEpoch(ctx, s.genesisTime, time.Now().Add(disparity), tgt); err != nil {
return err
}
@@ -75,7 +74,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// validate_aggregate_proof.go and validate_beacon_attestation.go
// Verify attestations can only affect the fork choice of subsequent slots.
if err := slots.VerifyTime(genesisTime, a.GetData().Slot+1, disparity); err != nil {
if err := slots.VerifyTime(s.genesisTime, a.GetData().Slot+1, disparity); err != nil {
return err
}
@@ -88,7 +87,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
if err != nil {
return err
}
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot); err != nil {
return err
}


@@ -4,12 +4,12 @@ import (
"context"
"fmt"
"strconv"
"time"
"github.com/OffchainLabs/prysm/v6/async"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -139,8 +139,8 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
}
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
func verifyAttTargetEpoch(_ context.Context, genesis, now time.Time, c *ethpb.Checkpoint) error {
currentSlot := slots.At(genesis, now)
currentEpoch := slots.ToEpoch(currentSlot)
var prevEpoch primitives.Epoch
// Prevents previous epoch under flow
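
verifyAttTargetEpoch now takes time.Time values and derives the current slot via slots.At(genesis, now) rather than dividing raw unix seconds, matching the old arithmetic shown above. A standalone sketch of the equivalent computation (constants are assumptions for the sketch):

```go
package main

import (
	"fmt"
	"time"
)

// secondsPerSlot is the mainnet preset value; an assumption for this sketch.
const secondsPerSlot = 12

// slotAt mirrors the arithmetic behind slots.At(genesis, now):
// the number of whole slots elapsed since genesis.
func slotAt(genesis, now time.Time) uint64 {
	if now.Before(genesis) {
		return 0
	}
	return uint64(now.Sub(genesis) / (secondsPerSlot * time.Second))
}

func main() {
	genesis := time.Unix(0, 0)
	now := genesis.Add(384 * time.Second) // 32 slots * 12s = one epoch later
	fmt.Println(slotAt(genesis, now))     // 32
}
```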


@@ -161,7 +161,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
func TestService_GetRecentPreState(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -183,7 +183,7 @@ func TestService_GetRecentPreState(t *testing.T) {
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -353,29 +353,29 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
}
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}))
}
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Epoch: 1}))
nowTime := time.Unix(int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
require.NoError(t, verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Epoch: 1}))
}
func TestAttEpoch_NotMatch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
err := verifyAttTargetEpoch(ctx, 0, nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
nowTime := time.Unix(2*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot), 0)
err := verifyAttTargetEpoch(ctx, time.Unix(0, 0), nowTime, &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
}
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -385,7 +385,7 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
}
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
@@ -402,7 +402,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
}
func TestVerifyBeaconBlock_OK(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)

View File

@@ -72,8 +72,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
defer s.saveLightClientUpdate(cfg)
defer s.saveLightClientBootstrap(cfg)
}
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
@@ -135,7 +133,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -242,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
}
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
@@ -310,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()
if blockVersion >= version.Fulu {
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
}
return nil
}
if blockVersion >= version.Deneb {
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
}
return nil
}
return nil
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -331,7 +354,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
// The latest block header is from the previous epoch
r, err := st.LatestBlockHeader().HashTreeRoot()
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
}
// The proposer indices cache takes the target root for the previous
@@ -341,12 +364,12 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
}
err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
log.WithError(err).Error("Could not update proposer index state-root map")
}
return nil
}
@@ -564,7 +587,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) runLateBlockTasks() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("failed to wait for initial sync")
log.WithError(err).Error("Failed to wait for initial sync")
return
}
@@ -586,7 +609,7 @@ func (s *Service) runLateBlockTasks() {
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(expected) == 0 {
return nil, nil
@@ -594,7 +617,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
if len(expected) > maxBlobsPerBlock {
return nil, errMaxBlobsExceeded
}
indices := bs.Summary(root)
indices := store.Summary(root)
missing := make(map[uint64]bool, len(expected))
for i := range expected {
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
@@ -609,7 +632,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
// It returns a map where each key represents a missing DataColumnSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// DataColumns against the set of known missing sidecars.
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
@@ -621,7 +644,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
}
// Get a summary of the data columns stored in the database.
summary := bs.Summary(root)
summary := store.Summary(root)
// Check all expected data columns against the summary.
missing := make(map[uint64]bool)
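
The two helpers above share the same shape: compare what the block commits to against a per-root storage summary and return the indices that still have to arrive. A hedged, self-contained sketch of that pattern follows; the summary type is illustrative, not Prysm's filesystem summary.

package main

import "fmt"

// summary is an illustrative stand-in for a per-root storage summary.
type summary map[uint64]bool

func (s summary) HasIndex(i uint64) bool { return s[i] }

// missingIndices returns the expected indices that the summary does not yet contain.
// A nil or empty result means nothing is missing.
func missingIndices(stored summary, expected map[uint64]bool) map[uint64]bool {
	if len(expected) == 0 {
		return nil
	}
	missing := make(map[uint64]bool)
	for idx := range expected {
		if !stored.HasIndex(idx) {
			missing[idx] = true
		}
	}
	return missing
}

func main() {
	stored := summary{0: true, 2: true}
	expected := map[uint64]bool{0: true, 1: true, 2: true, 3: true}
	fmt.Println(missingIndices(stored, expected)) // map[1:true 3:true]
}
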
@@ -639,7 +662,11 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
func (s *Service) isDataAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
signedBlock interfaces.ReadOnlySignedBeaconBlock,
) error {
block := signedBlock.Block()
if block == nil {
return errors.New("invalid nil beacon block")
@@ -659,11 +686,16 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBloc
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
func (s *Service) areDataColumnsAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}
@@ -684,16 +716,20 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
}
// All columns to sample need to be available for the block to be considered available.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
nodeID := s.cfg.P2P.NodeID()
// Prevent the custody group count from changing during the rest of the function.
s.cfg.CustodyInfo.Mut.RLock()
defer s.cfg.CustodyInfo.Mut.RUnlock()
// Get the custody group sampling size for the node.
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}
// Compute the sampling size.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
samplingSize := max(samplesPerSlot, custodyGroupCount)
// Get the peer info for the node.
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}
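
A sketch of the sampling-size rule the new code applies: the node must sample at least SAMPLES_PER_SLOT custody groups, and at least as many groups as it actually custodies. The constant below is an illustrative value, not read from Prysm's config, and the helper is not the peerdas package.

package main

import "fmt"

const samplesPerSlot = 8 // illustrative SAMPLES_PER_SLOT

// samplingSize returns how many custody groups a node samples per slot:
// its custody group count, but never fewer than SAMPLES_PER_SLOT.
func samplingSize(custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, custodyGroupCount)
}

func main() {
	fmt.Println(samplingSize(4))   // 8: small custody, the sampling floor applies
	fmt.Println(samplingSize(128)) // 128: a supernode custodies (and samples) everything
}
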
@@ -706,7 +742,7 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
summary := s.dataColumnStorage.Summary(root)
storedDataColumnsCount := summary.Count()
minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
@@ -726,8 +762,15 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
return nil
}
if s.startWaitingDataColumnSidecars != nil {
s.startWaitingDataColumnSidecars <- true
}
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
if err != nil {
return fmt.Errorf("unable to determine slot start time: %w", err)
}
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
@@ -802,7 +845,7 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
missingIndices = uint64MapToSortedSlice(missingMap)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
}
}
}
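
Both DA paths now derive the next slot's start time from the genesis time and only schedule the "late DA" log if the check is still inside the current slot. A minimal sketch of that derivation under an illustrative constant (not Prysm's slots.StartTime):

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // illustrative SECONDS_PER_SLOT

// slotStartTime returns when `slot` begins, given the chain's genesis time.
func slotStartTime(genesis time.Time, slot uint64) time.Time {
	return genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
}

func main() {
	genesis := time.Now().Add(-100 * secondsPerSlot * time.Second) // slot 100 is in progress
	nextSlotStart := slotStartTime(genesis, 101)
	if nextSlotStart.After(time.Now()) {
		fmt.Println("DA check is still inside the current slot; schedule a late-DA log for", nextSlotStart)
	}
}
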
@@ -845,7 +888,10 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
nc := s.blobNotifiers.forRoot(root, block.Slot())
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
if err != nil {
return fmt.Errorf("unable to determine slot start time: %w", err)
}
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
@@ -896,7 +942,7 @@ func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if s.CurrentSlot() == s.HeadSlot() {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
@@ -917,10 +963,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
log.WithError(err).Error("Could not update epoch boundary caches")
}
// return early if we already started building a block for the current
// head root
@@ -934,7 +980,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if attribute.IsEmpty() {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).WithField("head_root", headRoot).Error("unable to retrieve head block to fire payload attributes event")
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
}
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.

View File

@@ -1,7 +1,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"strings"
@@ -33,7 +32,7 @@ import (
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
return slots.CurrentSlot(s.genesisTime)
}
// getFCUArgs returns the arguments to call forkchoice update
@@ -45,7 +44,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
@@ -68,11 +67,11 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
@@ -131,6 +130,9 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
}
func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
if err := s.processLightClientUpdate(cfg); err != nil {
log.WithError(err).Error("Failed to process light client update")
}
if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to process light client optimistic update")
}
@@ -139,101 +141,43 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
}
}
// saveLightClientUpdate saves the light client update for this block
// processLightClientUpdate saves the light client update for this block
// if it's better than the already saved one, when feature flag is enabled.
func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
attestedRoot := cfg.roblock.Block().ParentRoot()
attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Errorf("Saving light client update failed: Could not get attested block for root %#x", attestedRoot)
return
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
}
if attestedBlock == nil || attestedBlock.IsNil() {
log.Error("Saving light client update failed: Attested block is nil")
return
return errors.New("attested block is nil")
}
attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Errorf("Saving light client update failed: Could not get attested state for root %#x", attestedRoot)
return
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
if attestedState == nil || attestedState.IsNil() {
log.Error("Saving light client update failed: Attested state is nil")
return
return errors.New("attested state is nil")
}
finalizedRoot := attestedState.FinalizedCheckpoint().Root
finalizedBlock, err := s.getBlock(cfg.ctx, [32]byte(finalizedRoot))
if err != nil {
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
log.Debugf("Skipping saving light client update: Finalized block is nil for root %#x", finalizedRoot)
} else {
log.WithError(err).Errorf("Saving light client update failed: Could not get finalized block for root %#x", finalizedRoot)
log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
return nil
}
return
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}
update, err := lightclient.NewLightClientUpdateFromBeaconState(
cfg.ctx,
s.CurrentSlot(),
cfg.postState,
cfg.roblock,
attestedState,
attestedBlock,
finalizedBlock,
)
update, err := lightclient.NewLightClientUpdateFromBeaconState(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not create light client update")
return
return errors.Wrapf(err, "could not create light client update")
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
oldUpdate, err := s.cfg.BeaconDB.LightClientUpdate(cfg.ctx, period)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not get current light client update")
return
}
if oldUpdate == nil {
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
log.WithError(err).Error("Saving light client update failed: Could not save light client update")
} else {
log.WithField("period", period).Debug("Saving light client update: Saved new update")
}
return
}
isNewUpdateBetter, err := lightclient.IsBetterUpdate(update, oldUpdate)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not compare light client updates")
return
}
if isNewUpdateBetter {
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
log.WithError(err).Error("Saving light client update failed: Could not save light client update")
} else {
log.WithField("period", period).Debug("Saving light client update: Saved new update")
}
} else {
log.WithField("period", period).Debug("Saving light client update: New update is not better than the current one. Skipping save.")
}
}
// saveLightClientBootstrap saves a light client bootstrap for this block
// when feature flag is enabled.
func (s *Service) saveLightClientBootstrap(cfg *postBlockProcessConfig) {
blockRoot := cfg.roblock.Root()
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(cfg.ctx, s.CurrentSlot(), cfg.postState, cfg.roblock)
if err != nil {
log.WithError(err).Error("Saving light client bootstrap failed: Could not create light client bootstrap")
return
}
err = s.cfg.BeaconDB.SaveLightClientBootstrap(cfg.ctx, blockRoot[:], bootstrap)
if err != nil {
log.WithError(err).Error("Saving light client bootstrap failed: Could not save light client bootstrap in DB")
}
return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
}
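
The block of logic removed here (load the stored update for the sync-committee period and keep it unless the new one is better) now lives behind lcStore.SaveLightClientUpdate. A hedged sketch of that save-if-better pattern, with illustrative types rather than Prysm's:

package main

import "fmt"

// update is an illustrative stand-in for a light client update.
type update struct {
	participation int // e.g. number of sync committee bits set
}

// isBetter is a stand-in for the spec's is_better_update comparison.
func isBetter(a, b update) bool { return a.participation > b.participation }

// store keeps at most one update per sync-committee period.
type store struct{ byPeriod map[uint64]update }

// save keeps the new update only if there is none for the period or the new one is better.
func (s *store) save(period uint64, u update) {
	old, ok := s.byPeriod[period]
	if !ok || isBetter(u, old) {
		s.byPeriod[period] = u
		return
	}
	// Otherwise keep the existing, better update.
}

func main() {
	s := &store{byPeriod: map[uint64]update{}}
	s.save(7, update{participation: 300})
	s.save(7, update{participation: 200}) // ignored: not better
	fmt.Println(s.byPeriod[7].participation) // 300
}
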
func (s *Service) processLightClientFinalityUpdate(
@@ -253,8 +197,7 @@ func (s *Service) processLightClientFinalityUpdate(
finalizedCheckpoint := attestedState.FinalizedCheckpoint()
// Check if the finalized checkpoint has changed
if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
if finalizedCheckpoint == nil {
return nil
}
@@ -268,51 +211,18 @@ func (s *Service) processLightClientFinalityUpdate(
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}
newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
postState.Slot(),
postState,
signed,
attestedState,
attestedBlock,
finalizedBlock,
)
newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return errors.Wrap(err, "could not create light client finality update")
}
lastUpdate := s.lcStore.LastFinalityUpdate()
if lastUpdate != nil {
// The finalized_header.beacon.slot is greater than that of all previously forwarded finality_updates,
// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
// sync committee participation while the previously forwarded finality_update for that slot did not indicate supermajority
newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
newHasSupermajority := lightclient.UpdateHasSupermajority(newUpdate.SyncAggregate())
lastUpdateSlot := lastUpdate.FinalizedHeader().Beacon().Slot
lastHasSupermajority := lightclient.UpdateHasSupermajority(lastUpdate.SyncAggregate())
if newUpdateSlot < lastUpdateSlot {
log.Debug("Skip saving light client finality newUpdate: Older than local newUpdate")
return nil
}
if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
log.Debug("Skip saving light client finality update: No supermajority advantage")
return nil
}
if !lightclient.IsBetterFinalityUpdate(newUpdate, s.lcStore.LastFinalityUpdate()) {
log.Debug("Skip saving light client finality update: current update is better")
return nil
}
log.Debug("Saving new light client finality update")
s.lcStore.SetLastFinalityUpdate(newUpdate)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: newUpdate,
})
if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
return errors.Wrap(err, "could not broadcast light client finality update")
}
s.lcStore.SetLastFinalityUpdate(newUpdate, true)
return nil
}
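
The inline comparison removed above encodes the gossip rule for forwarding finality updates: a newer finalized slot always wins; an equal slot wins only if the new update shows supermajority participation and the previous one did not. A self-contained sketch of that rule with illustrative names:

package main

import "fmt"

type finalityUpdate struct {
	finalizedSlot uint64
	supermajority bool // > 2/3 sync committee participation
}

// shouldForward reports whether newU should replace lastU under the
// "newer slot, or same slot with a supermajority advantage" rule.
func shouldForward(newU, lastU finalityUpdate) bool {
	if newU.finalizedSlot > lastU.finalizedSlot {
		return true
	}
	if newU.finalizedSlot == lastU.finalizedSlot {
		return newU.supermajority && !lastU.supermajority
	}
	return false
}

func main() {
	last := finalityUpdate{finalizedSlot: 100, supermajority: false}
	fmt.Println(shouldForward(finalityUpdate{101, false}, last)) // true: newer slot
	fmt.Println(shouldForward(finalityUpdate{100, true}, last))  // true: supermajority advantage
	fmt.Println(shouldForward(finalityUpdate{100, false}, last)) // false: no improvement
}
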
@@ -329,14 +239,7 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
ctx,
postState.Slot(),
postState,
signed,
attestedState,
attestedBlock,
)
newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock)
if err != nil {
if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
@@ -346,26 +249,12 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
return errors.Wrap(err, "could not create light client optimistic update")
}
lastUpdate := s.lcStore.LastOptimisticUpdate()
if lastUpdate != nil {
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
if newUpdate.AttestedHeader().Beacon().Slot <= lastUpdate.AttestedHeader().Beacon().Slot {
log.Debug("Skip saving light client optimistic update: Older than local update")
return nil
}
if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
log.Debug("Skip saving light client optimistic update: current update is better")
return nil
}
log.Debug("Saving new light client optimistic update")
s.lcStore.SetLastOptimisticUpdate(newUpdate)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: newUpdate,
})
if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
return errors.Wrap(err, "could not broadcast light client optimistic update")
}
s.lcStore.SetLastOptimisticUpdate(newUpdate, true)
return nil
}
@@ -436,7 +325,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
}
// Verify block slot time is not from the future.
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
if err := slots.VerifyTime(s.genesisTime, b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
return nil, err
}
@@ -531,7 +420,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
// is meant to be asynchronous and run in the background rather than being
// tied to the execution of a block.
if err := s.cfg.StateGen.MigrateToCold(s.ctx, fRoot); err != nil {
log.WithError(err).Error("could not migrate to cold")
log.WithError(err).Error("Could not migrate to cold")
}
}()
return nil
@@ -620,7 +509,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
// Update deposit cache.
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
if err != nil {
log.WithError(err).Error("could not fetch finalized state")
log.WithError(err).Error("Could not fetch finalized state")
return
}
@@ -638,7 +527,7 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
log.WithError(err).Error("could not cast eth1 deposit index")
log.WithError(err).Error("Could not cast eth1 deposit index")
return
}
// The deposit index in the state is always the index of the next deposit
@@ -647,12 +536,12 @@ func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32
finalizedEth1DepIdx := eth1DepositIndex - 1
if err = s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(finalizedEth1DepIdx), common.Hash(finalizedState.Eth1Data().BlockHash),
0 /* Setting a zero value as we have no access to block height */); err != nil {
log.WithError(err).Error("could not insert finalized deposits")
log.WithError(err).Error("Could not insert finalized deposits")
return
}
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(finalizedEth1DepIdx)); err != nil {
log.WithError(err).Error("could not prune deposit proofs")
log.WithError(err).Error("Could not prune deposit proofs")
}
// Prune deposits which have already been finalized, the below method prunes all pending deposits (non-inclusive) up
// to the provided eth1 deposit index.

File diff suppressed because it is too large

View File

@@ -43,7 +43,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err != nil {
return nil, err
}
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
if err := slots.ValidateClock(ss, s.genesisTime); err != nil {
return nil, err
}
// We acquire the lock here instead of in getAttPreState because that function is called from UpdateHead, which holds a write lock
@@ -69,7 +69,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
log.WithError(err).Error("Failed to receive genesis data")
return
}
if s.genesisTime.IsZero() {
@@ -103,7 +103,7 @@ func (s *Service) spawnProcessAttestationsRoutine() {
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("could not process new slot")
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
@@ -144,7 +144,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("could not get head block")
log.WithError(err).Error("Could not get head block")
return
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -161,7 +161,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("could not update forkchoice")
log.WithError(err).Error("Could not update forkchoice")
}
}
@@ -177,9 +177,9 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.GetData().Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, disparity); err != nil {
if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
continue
}

View File

@@ -1,7 +1,6 @@
package blockchain
import (
"context"
"testing"
"time"
@@ -32,7 +31,7 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
service.genesisTime = time.Now()
e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
_, err := service.AttestationTargetState(t.Context(), &ethpb.Checkpoint{Epoch: e})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}
@@ -56,11 +55,11 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
a.Data.Target.Root = []byte{'c'}
r33Root := r33.Root()
a.Data.BeaconBlockRoot = r33Root[:]
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(t.Context(), a))
r32Root := r32.Root()
a.Data.Target.Root = r32Root[:]
err = service.VerifyLmdFfgConsistency(context.Background(), a)
err = service.VerifyLmdFfgConsistency(t.Context(), a)
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}
@@ -71,7 +70,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, genesisState.SetGenesisTime(time.Now().Add(-1*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)

View File

@@ -40,8 +40,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -70,7 +70,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block is blacklisted
@@ -84,7 +84,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
receivedTime := time.Now()
s.blockBeingSynced.set(blockRoot)
err := s.blockBeingSynced.set(blockRoot)
if errors.Is(err, errBlockBeingSynced) {
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring block currently being synced")
return nil
}
defer s.blockBeingSynced.unset(blockRoot)
blockCopy, err := block.Copy()
@@ -183,7 +187,7 @@ func (s *Service) updateCheckpoints(
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
log.WithError(err).Error("Could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
@@ -240,7 +244,7 @@ func (s *Service) handleDA(
ctx context.Context,
block interfaces.SignedBeaconBlock,
blockRoot [fieldparams.RootLength]byte,
avs das.AvailabilityStore,
avs das.AvailabilityChecker,
) (elapsed time.Duration, err error) {
defer func(start time.Time) {
elapsed = time.Since(start)
@@ -283,7 +287,7 @@ func (s *Service) reportPostBlockProcessing(
// Log block sync status.
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
@@ -300,21 +304,36 @@ func (s *Service) reportPostBlockProcessing(
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
// Send finalization event
go func() {
s.sendNewFinalizedEvent(ctx, finalizedState)
}()
// Insert finalized deposits into finalized deposit trie
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
cancel()
}()
if features.Get().EnableLightClient {
// Save a light client bootstrap for the finalized checkpoint
go func() {
err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
if err != nil {
log.WithError(err).Error("Could not save light client bootstrap by block root")
} else {
log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
}
}()
}
}
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()

View File

@@ -1,7 +1,6 @@
package blockchain
import (
"context"
"sync"
"testing"
"time"
@@ -9,7 +8,10 @@ import (
blockchainTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -19,6 +21,7 @@ import (
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -28,7 +31,7 @@ import (
)
func TestService_ReceiveBlock(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
genesis, keys := util.DeterministicGenesisState(t, 64)
copiedGen := genesis.Copy()
@@ -189,7 +192,7 @@ func TestHandleDA(t *testing.T) {
require.NoError(t, err)
s, _ := minimalTestService(t)
elapsed, err := s.handleDA(context.Background(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
require.NoError(t, err)
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
}
@@ -228,7 +231,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
}
func TestService_ReceiveBlockBatch(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
genesis, keys := util.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock {
@@ -293,23 +296,26 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
func TestService_HasBlock(t *testing.T) {
s, _ := minimalTestService(t)
r := [32]byte{'a'}
if s.HasBlock(context.Background(), r) {
if s.HasBlock(t.Context(), r) {
t.Error("Should not have block")
}
wsb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(context.Background(), r, wsb))
if !s.HasBlock(context.Background(), r) {
require.NoError(t, s.saveInitSyncBlock(t.Context(), r, wsb))
if !s.HasBlock(t.Context(), r) {
t.Error("Should have block")
}
b := util.NewBeaconBlock()
b.Block.Slot = 1
util.SaveBlock(t, context.Background(), s.cfg.BeaconDB, b)
util.SaveBlock(t, t.Context(), s.cfg.BeaconDB, b)
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, true, s.HasBlock(context.Background(), r))
s.blockBeingSynced.set(r)
require.Equal(t, false, s.HasBlock(context.Background(), r))
require.Equal(t, true, s.HasBlock(t.Context(), r))
err = s.blockBeingSynced.set(r)
require.NoError(t, err)
err = s.blockBeingSynced.set(r)
require.ErrorIs(t, err, errBlockBeingSynced)
require.Equal(t, false, s.HasBlock(t.Context(), r))
}
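
The test above exercises the new behaviour of blockBeingSynced.set: the first caller claims the root, and a second caller gets errBlockBeingSynced and can bail out early. A minimal sketch of such a guard, assuming a simple mutex-protected set rather than Prysm's implementation:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errBeingSynced = errors.New("block is already being synced")

// syncingSet tracks block roots that are currently being processed.
type syncingSet struct {
	mu    sync.Mutex
	roots map[[32]byte]struct{}
}

// set claims a root; it fails if another caller already holds it.
func (s *syncingSet) set(root [32]byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.roots[root]; ok {
		return errBeingSynced
	}
	s.roots[root] = struct{}{}
	return nil
}

// unset releases a root once processing is done.
func (s *syncingSet) unset(root [32]byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.roots, root)
}

func main() {
	s := &syncingSet{roots: make(map[[32]byte]struct{})}
	r := [32]byte{'a'}
	fmt.Println(s.set(r))        // <nil>
	fmt.Println(s.set(r) != nil) // true: second claim is rejected
	s.unset(r)
	fmt.Println(s.set(r)) // <nil>: free to claim again
}
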
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
@@ -318,7 +324,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
}
@@ -329,19 +335,19 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
}
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
s.genesisTime = time.Now()
s.SetGenesisTime(time.Now())
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
require.NoError(t, s.checkSaveHotStateDB(t.Context()))
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}
@@ -349,8 +355,9 @@ func TestHandleCaches_EnablingLargeSize(t *testing.T) {
hook := logTest.NewGlobal()
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
helpers.ClearCache()
require.NoError(t, s.handleCaches())
assert.LogsContain(t, hook, "Expanding committee cache size")
}
@@ -456,7 +463,7 @@ func Test_executePostFinalizationTasks(t *testing.T) {
headState, err := util.NewBeaconStateElectra()
require.NoError(t, err)
finalizedStRoot, err := headState.HashTreeRoot(context.Background())
finalizedStRoot, err := headState.HashTreeRoot(t.Context())
require.NoError(t, err)
genesis := util.NewBeaconBlock()
@@ -563,3 +570,48 @@ func Test_executePostFinalizationTasks(t *testing.T) {
})
}
func TestProcessLightClientBootstrap(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
defer reset()
s, tr := minimalTestService(t, WithLCStore())
ctx := tr.ctx
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock))
finalizedBlockRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, l.FinalizedState, finalizedBlockRoot))
cp := l.AttestedState.FinalizedCheckpoint()
require.DeepSSZEqual(t, finalizedBlockRoot, [32]byte(cp.Root))
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))
sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
require.NoError(t, err)
require.NotNil(t, sss)
s.executePostFinalizationTasks(s.ctx, l.FinalizedState)
// wait for the goroutine to finish processing
time.Sleep(1 * time.Second)
// Check that the light client bootstrap is saved
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
require.NoError(t, err)
require.NotNil(t, b)
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
require.NoError(t, err)
require.DeepEqual(t, btst, b)
require.Equal(t, b.Version(), testVersion)
})
}
}

View File

@@ -15,10 +15,9 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
}
// ReceiveDataColumn receives a single data column.
// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
return errors.Wrap(err, "save data column sidecars")
return errors.Wrap(err, "save data column sidecar")
}
return nil

View File

@@ -12,11 +12,9 @@ import (
"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
@@ -31,6 +29,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -47,27 +46,29 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
slasherEnabled bool
lcStore *lightClient.Store
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
slasherEnabled bool
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
syncCommitteeHeadState *cache.SyncCommitteeHeadStateCache
}
// config options for the service.
@@ -95,7 +96,6 @@ type config struct {
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
CustodyInfo *peerdas.CustodyInfo
}
// Checker is an interface used to determine if a node is in initial sync
@@ -179,14 +179,15 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
seenIndex: make(map[[32]byte][]bool),
}
srv := &Service{
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
syncCommitteeHeadState: cache.NewSyncCommitteeHeadState(),
}
for _, opt := range opts {
if err := opt(srv); err != nil {
@@ -205,17 +206,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a blockchain service's main event loop.
func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
defer s.removeStartupState()
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
if err := s.startFromExecutionChain(); err != nil {
log.Fatal(err)
}
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
@@ -264,8 +257,11 @@ func (s *Service) Status() error {
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if state.IsNil(saved) {
return errors.New("Last finalized state at startup is nil")
}
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.genesisTime = saved.GenesisTime()
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
originRoot, err := s.originRootFromSavedState(s.ctx)
@@ -293,6 +289,20 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}
if !params.FuluEnabled() {
return nil
}
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
if err != nil {
return errors.Wrap(err, "could not get and save custody group count")
}
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
return errors.Wrap(err, "update custody info")
}
return nil
}
@@ -355,62 +365,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
return nil
}
func (s *Service) startFromExecutionChain() error {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.cfg.ChainStartFetcher == nil {
return errors.New("not configured execution chain")
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state forRoot failed")
return
}
}
}()
return nil
}
// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.WithError(err).Fatal("Could not initialize beacon chain")
}
// We start a counter to genesis, if needed.
gRoot, err := initializedState.HashTreeRoot(s.ctx)
if err != nil {
log.WithError(err).Fatal("Could not hash tree root genesis state")
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
}
}
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
@@ -421,7 +375,7 @@ func (s *Service) initializeBeaconChain(
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
defer span.End()
s.genesisTime = genesisTime
s.genesisTime = genesisTime.Truncate(time.Second) // Genesis time has a precision of 1 second.
unixTime := uint64(genesisTime.Unix())
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
@@ -482,7 +436,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
if err := s.setHead(&head{
genesisBlkRoot,
@@ -513,6 +467,57 @@ func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
beaconConfig := params.BeaconConfig()
custodyRequirement := beaconConfig.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
if err != nil {
log.WithError(err).Error("Could not update subscription status to all data subnets")
}
// Warn the user if the node was previously subscribed to all data subnets but no longer is.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
}
// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = beaconConfig.NumberOfColumns
}
// Safely compute the fulu fork slot.
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
if slot < fuluForkSlot {
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
if err != nil {
return 0, 0, errors.Wrap(err, "earliest slot")
}
}
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
return earliestAvailableSlot, custodyGroupCount, nil
}
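
A sketch of the custody-group-count decision made above: a node custodies the protocol minimum unless it opted into subscribing to all data subnets, in which case it custodies every group. The constants are illustrative mainnet-style values, not read from Prysm's config.

package main

import "fmt"

const (
	custodyRequirement    = 4   // illustrative CUSTODY_REQUIREMENT
	numberOfCustodyGroups = 128 // illustrative total number of custody groups/columns
)

// custodyGroupCount returns how many custody groups the node should custody.
func custodyGroupCount(subscribeAllDataSubnets bool) uint64 {
	if subscribeAllDataSubnets {
		return numberOfCustodyGroups
	}
	return custodyRequirement
}

func main() {
	fmt.Println(custodyGroupCount(false)) // 4
	fmt.Println(custodyGroupCount(true))  // 128
}
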
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
@@ -529,3 +534,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}
func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()
fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}
return forkFuluSlot, nil
}
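
fuluForkSlot above is a guarded epoch-to-slot conversion. For reference, the underlying arithmetic with a far-future sentinel handled the same way; the constant and sentinel value are illustrative assumptions.

package main

import (
	"fmt"
	"math"
)

const slotsPerEpoch = 32 // illustrative SLOTS_PER_EPOCH

// epochStartSlot returns the first slot of an epoch, treating math.MaxUint64
// as a "far future" sentinel that never maps to a real slot.
func epochStartSlot(epoch uint64) (uint64, error) {
	if epoch == math.MaxUint64 {
		return math.MaxUint64, nil // fork not scheduled
	}
	if epoch > math.MaxUint64/slotsPerEpoch {
		return 0, fmt.Errorf("epoch %d overflows slot arithmetic", epoch)
	}
	return epoch * slotsPerEpoch, nil
}

func main() {
	s, _ := epochStartSlot(5)
	fmt.Println(s) // 160
}
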

View File

@@ -1,11 +1,9 @@
package blockchain
import (
"context"
"io"
"testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -18,15 +16,12 @@ func init() {
}
func TestChainService_SaveHead_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
s := testServiceWithDB(t)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, err)
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}()
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st))
}

View File

@@ -31,6 +31,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -42,7 +43,7 @@ import (
)
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
ctx := t.Context()
var web3Service *execution.Service
var err error
srv, endpoint, err := mockExecution.SetupRPCServer()
@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
web3Service, err = execution.NewService(
ctx,
execution.WithDatabase(beaconDB),
execution.WithHttpEndpoint(endpoint),
execution.WithDepositContractAddress(common.Address{}),
execution.WithDepositCache(depositCache),
)
require.NoError(t, err, "Unable to set up web3 service")
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
// Save a state in stategen for the purposes of testing a service stop / shutdown.
@@ -115,7 +119,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
func TestChainStartStop_Initialized(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, beaconDB)
@@ -127,7 +131,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -152,7 +156,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, beaconDB)
@@ -164,7 +168,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -184,7 +188,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
func TestChainService_InitializeBeaconChain(t *testing.T) {
helpers.ClearCache()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
ctx := t.Context()
bc := setupBeaconChain(t, beaconDB)
var err error
@@ -226,7 +230,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
}
func TestChainService_CorrectGenesisRoots(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
chainService := setupBeaconChain(t, beaconDB)
@@ -238,7 +242,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetGenesisTime(gt))
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -295,7 +299,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
r, err := c.HeadRoot(t.Context())
require.NoError(t, err)
if !bytes.Equal(headRoot[:], r) {
t.Error("head slot incorrect")
@@ -345,12 +349,8 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
}
func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fc := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc},
}
ctx := t.Context()
s := testServiceWithDB(t)
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
@@ -370,11 +370,8 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
}
func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
ctx := t.Context()
s := testServiceWithDB(t)
b := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -391,48 +388,21 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
}
func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
ctx: ctx,
cancel: cancel,
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
}
s := testServiceWithDB(t)
s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
bb := util.NewBeaconBlock()
r, err := bb.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(bb)
require.NoError(t, err)
require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
require.NoError(t, s.saveInitSyncBlock(s.ctx, r, wsb))
require.NoError(t, s.Stop())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(context.Background(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}
func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
ctx := b.Context()
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
@@ -447,11 +417,8 @@ func BenchmarkHasBlockDB(b *testing.B) {
}
func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
ctx := b.Context()
s := testServiceWithDB(b)
blk := util.NewBeaconBlock()
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

View File

@@ -38,7 +38,7 @@ func (s *Service) startupHeadRoot() [32]byte {
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
return fRoot
}
log.Infof("Using Head root of %#x", root)
@@ -46,7 +46,7 @@ func (s *Service) startupHeadRoot() [32]byte {
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
return fRoot
}
return [32]byte(root)
@@ -64,16 +64,16 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
}
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
if err != nil {
log.WithError(err).Error("could not get head block, starting with finalized block as head")
log.WithError(err).Error("Could not get head block, starting with finalized block as head")
return nil
}
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("Head block is older than finalized block, starting with finalized block as head")
return nil
}
chain, err := s.buildForkchoiceChain(s.ctx, blk)
if err != nil {
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
return nil
}
s.cfg.ForkChoiceStore.Lock()
@@ -170,6 +170,6 @@ func (s *Service) setupForkchoiceCheckpoints() error {
Root: fRoot}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
return nil
}
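This hunk reflects the wider API change in this branch: the fork choice store's SetGenesisTime now takes a time.Time instead of Unix seconds as a uint64. A rough before/after sketch for call sites, assuming genesisTime is a time.Time; the surrounding names are illustrative:
// Before: fork choice stored the genesis time as Unix seconds.
forkChoiceStore.SetGenesisTime(uint64(genesisTime.Unix()))

// After: fork choice stores the time.Time directly, so callers drop the conversion.
forkChoiceStore.SetGenesisTime(genesisTime)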

View File

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
})
defer resetCfg()
require.Equal(t, service.startupHeadRoot(), gr)
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
})
st, _ := util.DeterministicGenesisState(t, 64)

View File

@@ -4,6 +4,7 @@ import (
"context"
"sync"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/async/event"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
@@ -24,9 +25,12 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/libp2p/go-libp2p/core/peer"
"google.golang.org/protobuf/proto"
)
@@ -51,6 +55,7 @@ type mockBroadcaster struct {
type mockAccessor struct {
mockBroadcaster
mockCustodyManager
p2pTesting.MockPeerManager
}
@@ -84,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}
@@ -94,6 +99,43 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
// mockCustodyManager is a mock implementation of p2p.CustodyManager
type mockCustodyManager struct {
mut sync.RWMutex
earliestAvailableSlot primitives.Slot
custodyGroupCount uint64
}
func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.earliestAvailableSlot, nil
}
func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.custodyGroupCount, nil
}
func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
dch.mut.Lock()
defer dch.mut.Unlock()
dch.earliestAvailableSlot = earliestAvailableSlot
dch.custodyGroupCount = custodyGroupCount
return earliestAvailableSlot, custodyGroupCount, nil
}
func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
return 0
}
var _ p2p.CustodyManager = (*mockCustodyManager)(nil)
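For reference, a quick sketch of how this mock could be exercised in a test; the specific slot and count values are illustrative:
// Hypothetical test usage of the mock custody manager.
cm := &mockCustodyManager{}
_, _, err := cm.UpdateCustodyInfo(primitives.Slot(32), 128)
require.NoError(t, err)
count, err := cm.CustodyGroupCount()
require.NoError(t, err)
require.Equal(t, uint64(128), count)
slot, err := cm.EarliestAvailableSlot()
require.NoError(t, err)
require.Equal(t, primitives.Slot(32), slot)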
type testServiceRequirements struct {
ctx context.Context
db db.Database
@@ -108,9 +150,11 @@ type testServiceRequirements struct {
}
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
ctx := context.Background()
ctx := t.Context()
genesis := time.Now().Add(-1 * 4 * time.Duration(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().SecondsPerSlot)) * time.Second) // Genesis was 4 epochs ago.
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
fcs.SetGenesisTime(genesis)
sg := stategen.New(beaconDB, fcs)
notif := &mockBeaconNode{}
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
@@ -149,6 +193,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
WithP2PBroadcaster(&mockAccessor{}),
WithLightClientStore(&lightclient.Store{}),
WithGenesisTime(genesis),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)

View File

@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
if s.State == nil {
return ErrNilState
}
@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}
@@ -555,11 +555,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
if err != nil {
logrus.WithError(err).Error("could not update head")
logrus.WithError(err).Error("Could not update head")
}
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
if err != nil {
logrus.WithError(err).Error("could not insert node to forkchoice")
logrus.WithError(err).Error("Could not insert node to forkchoice")
}
}
@@ -641,7 +641,7 @@ func (s *ChainService) GetProposerHead() [32]byte {
}
// SetForkChoiceGenesisTime mocks the same method in the chain service
func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
func (s *ChainService) SetForkChoiceGenesisTime(timestamp time.Time) {
if s.ForkChoiceStore != nil {
s.ForkChoiceStore.SetGenesisTime(timestamp)
}

View File

@@ -1,11 +1,8 @@
package blockchain
import (
"context"
"testing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -18,11 +15,8 @@ import (
)
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
b := util.NewBeaconBlock()
b.Block.Slot = 1792480
util.SaveBlock(t, context.Background(), beaconDB, b)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -69,17 +63,17 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := testServiceWithDB(t)
beaconDB := s.cfg.BeaconDB
util.SaveBlock(t, t.Context(), beaconDB, b)
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
fcs := doublylinkedtree.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
wsVerifier: wv,
}
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
s.cfg.WeakSubjectivityCheckpt = tt.checkpt
s.wsVerifier = wv
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
err = s.wsVerifier.VerifyWeakSubjectivity(t.Context(), cp.Epoch)
if tt.wantErr == nil {
require.NoError(t, err)
} else {

View File

@@ -24,7 +24,8 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error)
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
SubmitBlindedBlockPostFulu(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -68,7 +69,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
log.WithError(err).Error("Failed to check builder status")
} else {
log.WithField("endpoint", s.c.NodeURL()).Info("Builder has been configured")
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
}
}
@@ -87,7 +88,7 @@ func (s *Service) Stop() error {
}
// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()
@@ -101,6 +102,22 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
return s.c.SubmitBlindedBlock(ctx, b)
}
// SubmitBlindedBlockPostFulu submits a blinded block to the builder relay network post-Fulu.
// After Fulu, relays only return status codes (no payload).
func (s *Service) SubmitBlindedBlockPostFulu(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlockPostFulu")
defer span.End()
start := time.Now()
defer func() {
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return ErrNoBuilder
}
return s.c.SubmitBlindedBlockPostFulu(ctx, b)
}
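Because post-Fulu relays return only a status code, callers are expected to branch on the block's fork version. A hedged sketch of that call-site logic, assuming the version comparison and variable names shown here (builderService, blk) rather than any specific Prysm call site:
// Illustrative caller sketch (not part of this changeset).
if blk.Version() >= version.Fulu {
	// Post-Fulu: the relay returns no payload, only a status.
	if err := builderService.SubmitBlindedBlockPostFulu(ctx, blk); err != nil {
		return errors.Wrap(err, "submit blinded block post fulu")
	}
} else {
	// Pre-Fulu: the relay returns the unblinded payload and a blobs bundle.
	payload, bundle, err := builderService.SubmitBlindedBlock(ctx, blk)
	if err != nil {
		return errors.Wrap(err, "submit blinded block")
	}
	_ = payload
	_ = bundle
}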
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error) {
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")

View File

@@ -1,7 +1,6 @@
package builder
import (
"context"
"testing"
"time"
@@ -15,19 +14,19 @@ import (
)
func Test_NewServiceWithBuilder(t *testing.T) {
s, err := NewService(context.Background(), WithBuilderClient(&buildertesting.MockClient{}))
s, err := NewService(t.Context(), WithBuilderClient(&buildertesting.MockClient{}))
require.NoError(t, err)
assert.Equal(t, true, s.Configured())
}
func Test_NewServiceWithoutBuilder(t *testing.T) {
s, err := NewService(context.Background())
s, err := NewService(t.Context())
require.NoError(t, err)
assert.Equal(t, false, s.Configured())
}
func Test_RegisterValidator(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
db := dbtesting.SetupDB(t)
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
@@ -40,7 +39,7 @@ func Test_RegisterValidator(t *testing.T) {
}
func Test_RegisterValidator_WithCache(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
@@ -55,16 +54,16 @@ func Test_RegisterValidator_WithCache(t *testing.T) {
}
func Test_BuilderMethodsWithouClient(t *testing.T) {
s, err := NewService(context.Background())
s, err := NewService(t.Context())
require.NoError(t, err)
assert.Equal(t, false, s.Configured())
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
_, err = s.GetHeader(t.Context(), 0, [32]byte{}, [48]byte{})
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
_, _, err = s.SubmitBlindedBlock(context.Background(), nil)
_, _, err = s.SubmitBlindedBlock(t.Context(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
err = s.RegisterValidator(context.Background(), nil)
err = s.RegisterValidator(t.Context(), nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
}

View File

@@ -24,20 +24,22 @@ type Config struct {
// MockBuilderService to mock builder.
type MockBuilderService struct {
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
BidElectra *ethpb.SignedBuilderBidElectra
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
BlobBundleV2 *v1.BlobsBundleV2
ErrSubmitBlindedBlock error
ErrSubmitBlindedBlockPostFulu error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
BidElectra *ethpb.SignedBuilderBidElectra
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
Cfg *Config
}
// Configured for mocking.
@@ -46,7 +48,7 @@ func (s *MockBuilderService) Configured() bool {
}
// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) {
switch b.Version() {
case version.Bellatrix:
w, err := blocks.WrappedExecutionPayload(s.Payload)
@@ -66,6 +68,16 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
case version.Fulu:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload for fulu")
}
// For Fulu, return BlobsBundleV2 if available, otherwise regular BlobsBundle
if s.BlobBundleV2 != nil {
return w, s.BlobBundleV2, s.ErrSubmitBlindedBlock
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
default:
return nil, nil, errors.New("unknown block version for mocking")
}
@@ -104,3 +116,8 @@ func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id p
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
return s.ErrRegisterValidator
}
// SubmitBlindedBlockPostFulu for mocking.
func (s *MockBuilderService) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
return s.ErrSubmitBlindedBlockPostFulu
}
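SubmitBlindedBlock now returns the v1.BlobsBundler interface rather than a concrete *v1.BlobsBundle, which is what lets the mock hand back either bundle type in the Fulu case above. A minimal sketch of how a consumer might recover the concrete type, assuming *v1.BlobsBundle and *v1.BlobsBundleV2 are the only implementations in play and that bundler is the returned value:
// Illustrative type switch over the returned bundler (assumption: only these two concrete types).
switch bundle := bundler.(type) {
case *v1.BlobsBundle:
	// Pre-Fulu style bundle.
	_ = bundle
case *v1.BlobsBundleV2:
	// Fulu-style bundle (contents assumed, not shown in this diff).
	_ = bundle
default:
	return errors.New("unexpected blobs bundle type")
}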

View File

@@ -3,7 +3,6 @@
package cache
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -30,8 +29,8 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
_, err := cache.Committee(context.Background(), 0, c.Seed, 0)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
require.NoError(t, err)
}
@@ -45,9 +44,9 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
indices, err := cache.ActiveIndices(context.Background(), c.Seed)
indices, err := cache.ActiveIndices(t.Context(), c.Seed)
require.NoError(t, err)
assert.DeepEqual(t, c.SortedIndices, indices)
}

View File

@@ -44,15 +44,15 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
slot := params.BeaconConfig().SlotsPerEpoch
committeeIndex := primitives.CommitteeIndex(1)
indices, err := cache.Committee(context.Background(), slot, item.Seed, committeeIndex)
indices, err := cache.Committee(t.Context(), slot, item.Seed, committeeIndex)
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee not to exist in empty cache")
}
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
wantedIndex := primitives.CommitteeIndex(0)
indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex)
indices, err = cache.Committee(t.Context(), slot, item.Seed, wantedIndex)
require.NoError(t, err)
start, end := startEndIndices(item, uint64(wantedIndex))
@@ -63,15 +63,15 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
cache := NewCommitteesCache()
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
indices, err := cache.ActiveIndices(context.Background(), item.Seed)
indices, err := cache.ActiveIndices(t.Context(), item.Seed)
require.NoError(t, err)
if indices != nil {
t.Error("Expected committee not to exist in empty cache")
}
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
indices, err = cache.ActiveIndices(context.Background(), item.Seed)
indices, err = cache.ActiveIndices(t.Context(), item.Seed)
require.NoError(t, err)
assert.DeepEqual(t, item.SortedIndices, indices)
}
@@ -80,13 +80,13 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
cache := NewCommitteesCache()
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
count, err := cache.ActiveIndicesCount(t.Context(), item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
count, err = cache.ActiveIndicesCount(t.Context(), item.Seed)
require.NoError(t, err)
assert.Equal(t, len(item.SortedIndices), count)
}
@@ -100,7 +100,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
for i := start; i < end; i++ {
s := []byte(strconv.Itoa(i))
item := &Committees{Seed: bytesutil.ToBytes32(s)}
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item))
}
k := cache.CommitteeCache.Keys()
@@ -130,7 +130,7 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
assert.NoError(t, err)
_ = cache.CommitteeCache.Add(key, comms)
_, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow!
_, err = cache.Committee(t.Context(), 0, seed, math.MaxUint64) // Overflow!
require.NotNil(t, err, "Did not fail as expected")
}
@@ -138,15 +138,15 @@ func TestCommitteeCache_DoesNothingWhenCancelledContext(t *testing.T) {
cache := NewCommitteesCache()
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}}
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
count, err := cache.ActiveIndicesCount(t.Context(), item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
cancelled, cancel := context.WithCancel(context.Background())
cancelled, cancel := context.WithCancel(t.Context())
cancel()
require.ErrorIs(t, cache.AddCommitteeShuffledList(cancelled, item), context.Canceled)
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
count, err = cache.ActiveIndicesCount(t.Context(), item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count)
}

View File

@@ -2,7 +2,6 @@ package depositsnapshot
import (
"bytes"
"context"
"fmt"
"math/big"
"testing"
@@ -55,7 +54,7 @@ func TestAllDeposits_ReturnsAllDeposits(t *testing.T) {
}
dc.deposits = deposits
d := dc.AllDeposits(context.Background(), nil)
d := dc.AllDeposits(t.Context(), nil)
assert.Equal(t, len(deposits), len(d))
}
@@ -95,7 +94,7 @@ func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
}
dc.deposits = deposits
d := dc.AllDeposits(context.Background(), big.NewInt(11))
d := dc.AllDeposits(t.Context(), big.NewInt(11))
assert.Equal(t, 5, len(d))
}
@@ -127,7 +126,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
DepositRoot: wantedRoot,
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(13))
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(13))
assert.Equal(t, 4, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
@@ -143,7 +142,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
DepositRoot: wantedRoot,
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
assert.Equal(t, 1, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
@@ -169,7 +168,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
Deposit: &ethpb.Deposit{},
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
assert.Equal(t, 2, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
@@ -185,7 +184,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
DepositRoot: wantedRoot,
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(7))
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(7))
assert.Equal(t, 0, int(n))
require.DeepEqual(t, params.BeaconConfig().ZeroHash, root)
})
@@ -201,7 +200,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
DepositRoot: wantedRoot,
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10))
assert.Equal(t, 1, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
@@ -237,7 +236,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) {
Deposit: &ethpb.Deposit{},
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(9))
n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(9))
assert.Equal(t, 3, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
@@ -288,10 +287,10 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
},
},
}
dc.InsertDepositContainers(context.Background(), ctrs)
dc.InsertDepositContainers(t.Context(), ctrs)
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
dep, blkNum := dc.DepositByPubkey(context.Background(), pk1)
dep, blkNum := dc.DepositByPubkey(t.Context(), pk1)
if dep == nil || !bytes.Equal(dep.Data.PublicKey, pk1) {
t.Error("Returned wrong deposit")
@@ -303,7 +302,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
func TestInsertDepositContainers_NotNil(t *testing.T) {
dc, err := New()
require.NoError(t, err)
dc.InsertDepositContainers(context.Background(), nil)
dc.InsertDepositContainers(t.Context(), nil)
assert.DeepEqual(t, []*ethpb.DepositContainer{}, dc.deposits)
}
@@ -359,10 +358,10 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
err = dc.finalizedDeposits.depositTree.pushLeaf(root)
require.NoError(t, err)
}
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
require.NoError(t, err)
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
@@ -425,15 +424,15 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
err = dc.finalizedDeposits.Deposits().Insert(root[:], 0)
require.NoError(t, err)
}
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
require.NoError(t, err)
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
require.NotNil(t, cachedDeposits, "Deposits not cached")
require.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex())
@@ -459,10 +458,10 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
require.NoError(t, err)
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex())
@@ -509,10 +508,10 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
}
dc.deposits = finalizedDeposits
err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
require.NoError(t, err)
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex())
@@ -592,14 +591,14 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
}
dc.deposits = finalizedDeposits
err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
require.NoError(t, err)
// Reinsert finalized deposits with a lower index.
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
require.NoError(t, err)
cachedDeposits, err := dc.FinalizedDeposits(context.Background())
cachedDeposits, err := dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex())
@@ -670,10 +669,10 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
Index: 3,
DepositRoot: rootCreator('D'),
})
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
require.NoError(t, err)
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
deps := dc.NonFinalizedDeposits(t.Context(), 1, nil)
assert.Equal(t, 2, len(deps))
}
@@ -681,7 +680,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits_Nil(t *testing.T) {
dc, err := New()
require.NoError(t, err)
deps := dc.NonFinalizedDeposits(context.Background(), 0, nil)
deps := dc.NonFinalizedDeposits(t.Context(), 0, nil)
assert.Equal(t, 0, len(deps))
}
@@ -740,10 +739,10 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
Index: 3,
DepositRoot: rootCreator('D'),
})
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
require.NoError(t, err)
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
deps := dc.NonFinalizedDeposits(t.Context(), 1, big.NewInt(10))
assert.Equal(t, 1, len(deps))
}
@@ -785,21 +784,21 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
assert.NoError(t, err)
// Perform this in a nonsensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 3, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0)
require.NoError(t, err)
// Mimic finalized deposit trie fetch.
fd, err := dc.FinalizedDeposits(context.Background())
fd, err := dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil)
deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil)
insertIndex := fd.MerkleTrieIndex() + 1
for _, dep := range deps {
@@ -810,24 +809,24 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
}
insertIndex++
}
err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 6, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 6, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 9, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 9, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 12, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 12, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 14, [32]byte{}, 0)
err = dc.InsertFinalizedDeposits(t.Context(), 14, [32]byte{}, 0)
require.NoError(t, err)
fd, err = dc.FinalizedDeposits(context.Background())
fd, err = dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil)
deps = dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil)
insertIndex = fd.MerkleTrieIndex() + 1
for _, dep := range dc.deposits {
@@ -888,9 +887,9 @@ func TestMin(t *testing.T) {
}
dc.deposits = finalizedDeposits
fd, err := dc.FinalizedDeposits(context.Background())
fd, err := dc.FinalizedDeposits(t.Context())
require.NoError(t, err)
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), big.NewInt(16))
deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), big.NewInt(16))
insertIndex := fd.MerkleTrieIndex() + 1
for _, dep := range deps {
depHash, err := dep.Data.HashTreeRoot()
@@ -908,28 +907,28 @@ func TestDepositMap_WorksCorrectly(t *testing.T) {
require.NoError(t, err)
pk0 := bytesutil.PadTo([]byte("pk0"), 48)
dep, _ := dc.DepositByPubkey(context.Background(), pk0)
dep, _ := dc.DepositByPubkey(t.Context(), pk0)
var nilDep *ethpb.Deposit
assert.DeepEqual(t, nilDep, dep)
dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 1000}}
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{}))
assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 0, [32]byte{}))
dep, _ = dc.DepositByPubkey(context.Background(), pk0)
dep, _ = dc.DepositByPubkey(t.Context(), pk0)
assert.NotEqual(t, nilDep, dep)
assert.Equal(t, uint64(1000), dep.Data.Amount)
dep = &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: pk0, Amount: 10000}}
assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{}))
assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 1, [32]byte{}))
// Make sure we have the same deposit returned over here.
dep, _ = dc.DepositByPubkey(context.Background(), pk0)
dep, _ = dc.DepositByPubkey(t.Context(), pk0)
assert.NotEqual(t, nilDep, dep)
assert.Equal(t, uint64(1000), dep.Data.Amount)
// Make sure another key doesn't work.
pk1 := bytesutil.PadTo([]byte("pk1"), 48)
dep, _ = dc.DepositByPubkey(context.Background(), pk1)
dep, _ = dc.DepositByPubkey(t.Context(), pk1)
assert.DeepEqual(t, nilDep, dep)
}

View File

@@ -1,7 +1,6 @@
package depositsnapshot
import (
"context"
"math/big"
"testing"
@@ -13,14 +12,14 @@ var _ PendingDepositsFetcher = (*Cache)(nil)
func TestInsertPendingDeposit_OK(t *testing.T) {
dc := Cache{}
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})
dc.InsertPendingDeposit(t.Context(), &ethpb.Deposit{}, 111, 100, [32]byte{})
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
}
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
dc := Cache{}
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
dc.InsertPendingDeposit(t.Context(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
assert.Equal(t, 0, len(dc.pendingDeposits))
}
@@ -34,13 +33,13 @@ func TestPendingDeposits_OK(t *testing.T) {
{Eth1BlockHeight: 6, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}}},
}
deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
deposits := dc.PendingDeposits(t.Context(), big.NewInt(4))
expected := []*ethpb.Deposit{
{Proof: [][]byte{[]byte("A")}},
{Proof: [][]byte{[]byte("B")}},
}
assert.DeepSSZEqual(t, expected, deposits)
all := dc.PendingDeposits(context.Background(), nil)
all := dc.PendingDeposits(t.Context(), nil)
assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
}

View File

@@ -1,7 +1,6 @@
package depositsnapshot
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -22,7 +21,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
{Eth1BlockHeight: 12, Index: 12},
}
dc.PrunePendingDeposits(context.Background(), 0)
dc.PrunePendingDeposits(t.Context(), 0)
expected := []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
{Eth1BlockHeight: 4, Index: 4},
@@ -46,7 +45,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
{Eth1BlockHeight: 12, Index: 12},
}
dc.PrunePendingDeposits(context.Background(), 6)
dc.PrunePendingDeposits(t.Context(), 6)
expected := []*ethpb.DepositContainer{
{Eth1BlockHeight: 6, Index: 6},
{Eth1BlockHeight: 8, Index: 8},
@@ -65,7 +64,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) {
{Eth1BlockHeight: 12, Index: 12},
}
dc.PrunePendingDeposits(context.Background(), 10)
dc.PrunePendingDeposits(t.Context(), 10)
expected = []*ethpb.DepositContainer{
{Eth1BlockHeight: 10, Index: 10},
{Eth1BlockHeight: 12, Index: 12},
@@ -86,7 +85,7 @@ func TestPruneAllPendingDeposits(t *testing.T) {
{Eth1BlockHeight: 12, Index: 12},
}
dc.PruneAllPendingDeposits(context.Background())
dc.PruneAllPendingDeposits(t.Context())
expected := []*ethpb.DepositContainer{}
assert.DeepEqual(t, expected, dc.pendingDeposits)
@@ -128,10 +127,10 @@ func TestPruneProofs_Ok(t *testing.T) {
}
for _, ins := range deposits {
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}
require.NoError(t, dc.PruneProofs(context.Background(), 1))
require.NoError(t, dc.PruneProofs(t.Context(), 1))
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -173,10 +172,10 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
}
for _, ins := range deposits {
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}
require.NoError(t, dc.PruneProofs(context.Background(), 2))
require.NoError(t, dc.PruneProofs(t.Context(), 2))
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
}
@@ -217,10 +216,10 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
}
for _, ins := range deposits {
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}
require.NoError(t, dc.PruneProofs(context.Background(), 99))
require.NoError(t, dc.PruneProofs(t.Context(), 99))
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -264,10 +263,10 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
}
for _, ins := range deposits {
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}
require.NoError(t, dc.PruneProofs(context.Background(), 4))
require.NoError(t, dc.PruneProofs(t.Context(), 4))
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
@@ -311,10 +310,10 @@ func TestPruneAllProofs(t *testing.T) {
}
for _, ins := range deposits {
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}
dc.PruneAllProofs(context.Background())
dc.PruneAllProofs(t.Context())
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)

View File

@@ -1,7 +1,6 @@
package cache
import (
"context"
"testing"
"time"
@@ -24,7 +23,7 @@ func TestRegistrationCache(t *testing.T) {
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
cache.UpdateIndexToRegisteredMap(t.Context(), m)
reg, err := cache.RegistrationByIndex(validatorIndex)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
@@ -38,7 +37,7 @@ func TestRegistrationCache(t *testing.T) {
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
cache.UpdateIndexToRegisteredMap(t.Context(), m)
reg, err := cache.RegistrationByIndex(validatorIndex2)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))

View File

@@ -1,7 +1,6 @@
package cache_test
import (
"context"
"sync"
"testing"
@@ -14,7 +13,7 @@ import (
)
func TestSkipSlotCache_RoundTrip(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}
@@ -38,7 +37,7 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
}
func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}

View File

@@ -178,7 +178,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
s.lock.Lock()
defer s.lock.Unlock()
if clearCount != s.cleared.Load() {
log.Warn("cache rotated during async committee update operation - abandoning cache update")
log.Warn("Cache rotated during async committee update operation - abandoning cache update")
return nil
}

View File

@@ -81,6 +81,7 @@ go_test(
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/fuzz:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time:go_default_library",

View File

@@ -1,7 +1,6 @@
package altair_test
import (
"context"
"fmt"
"testing"
@@ -19,9 +18,10 @@ import (
"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/testing/fuzz"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
fuzz "github.com/google/gofuzz"
gofuzz "github.com/google/gofuzz"
"github.com/prysmaticlabs/go-bitfield"
)
@@ -50,7 +50,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.ErrorContains(t, want, err)
}
@@ -81,7 +81,7 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.ErrorContains(t, want, err)
}
@@ -110,13 +110,13 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
want := "source check point not equal to current justified checkpoint"
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.ErrorContains(t, want, err)
}
@@ -151,14 +151,14 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
want := "source check point not equal to previous justified checkpoint"
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.ErrorContains(t, want, err)
b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState)
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
wsb, err = blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.ErrorContains(t, want, err)
}
@@ -190,7 +190,7 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.ErrorContains(t, expected, err)
}
@@ -214,7 +214,7 @@ func TestProcessAttestations_OK(t *testing.T) {
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, 0)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
@@ -235,7 +235,7 @@ func TestProcessAttestations_OK(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.NoError(t, err)
})
t.Run("post-Electra", func(t *testing.T) {
@@ -260,7 +260,7 @@ func TestProcessAttestations_OK(t *testing.T) {
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, 0)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
@@ -281,7 +281,7 @@ func TestProcessAttestations_OK(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
_, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block())
require.NoError(t, err)
})
}
@@ -313,13 +313,13 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
beaconState, err = altair.ProcessAttestationNoVerifySignature(context.Background(), beaconState, att, b)
beaconState, err = altair.ProcessAttestationNoVerifySignature(t.Context(), beaconState, att, b)
require.NoError(t, err)
p, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
@@ -458,7 +458,7 @@ func TestValidatorFlag_Add_ExceedsLength(t *testing.T) {
}
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer := gofuzz.NewWithSeed(0)
st := &ethpb.BeaconStateAltair{}
b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
for i := 0; i < 10000; i++ {
@@ -474,10 +474,11 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
}
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb.Block())
r, err := altair.ProcessAttestationsNoVerifySignature(t.Context(), s, wsb.Block())
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, s, b)
}
fuzz.FreeMemory(i)
}
}
@@ -555,10 +556,10 @@ func TestSetParticipationAndRewardProposer(t *testing.T) {
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
st, err := altair.SetParticipationAndRewardProposer(context.Background(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
require.NoError(t, err)
i, err := helpers.BeaconProposerIndex(context.Background(), st)
i, err := helpers.BeaconProposerIndex(t.Context(), st)
require.NoError(t, err)
b, err = beaconState.BalanceAtIndex(i)
require.NoError(t, err)
@@ -661,8 +662,8 @@ func TestRewardProposer(t *testing.T) {
{rewardNumerator: 1000000000000, want: 34234377253},
}
for _, test := range tests {
require.NoError(t, altair.RewardProposer(context.Background(), beaconState, test.rewardNumerator))
i, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, altair.RewardProposer(t.Context(), beaconState, test.rewardNumerator))
i, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
require.NoError(t, err)
b, err := beaconState.BalanceAtIndex(i)
require.NoError(t, err)

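The repeated change in the test files above swaps context.Background() for the test-scoped t.Context() from Go's testing package. A minimal sketch of the pattern, with a hypothetical processSomething standing in for the altair helpers called in this diff:

package altair_test

import (
    "context"
    "testing"
)

// processSomething is a hypothetical stand-in for helpers such as
// altair.ProcessAttestationsNoVerifySignature that accept a context.
func processSomething(ctx context.Context) error {
    return ctx.Err()
}

func TestUsesScopedContext(t *testing.T) {
    // t.Context() is canceled when the test finishes, just before Cleanup
    // functions run, so work started here cannot outlive the test the way
    // a bare context.Background() would allow.
    if err := processSomething(t.Context()); err != nil {
        t.Fatal(err)
    }
}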

@@ -1,7 +1,6 @@
package altair_test
import (
"context"
"math"
"testing"
@@ -26,7 +25,7 @@ import (
func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
@@ -34,7 +33,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
for i := range syncBits {
syncBits[i] = 0xff
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
@@ -55,7 +54,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
}
var reward uint64
beaconState, reward, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
beaconState, reward, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
require.NoError(t, err)
assert.Equal(t, uint64(72192), reward)
@@ -77,7 +76,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
require.Equal(t, true, balances[indices[0]] > balances[nonSyncIndex])
// Proposer should be more profitable than rest of the sync committee
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
require.NoError(t, err)
require.Equal(t, true, balances[proposerIndex] > balances[indices[0]])
@@ -102,7 +101,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
@@ -110,7 +109,7 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
for i := range syncBits {
syncBits[i] = 0xAA
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
@@ -130,14 +129,14 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
_, _, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
require.ErrorContains(t, "invalid sync committee signature", err)
}
func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
@@ -145,7 +144,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
for i := range syncBits {
syncBits[i] = 0xAA
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
@@ -167,7 +166,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
_, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate)
_, _, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
require.NoError(t, err)
}
@@ -175,7 +174,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
require.NoError(t, err)
committeeKeys := committee.Pubkeys
committeeKeys[1] = committeeKeys[0]
@@ -192,7 +191,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
SyncCommitteeBits: syncBits,
}
require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(t.Context(), beaconState, syncAggregate)
require.NoError(t, err)
require.Equal(t, 511, len(votedKeys))
require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
@@ -203,7 +202,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
@@ -215,7 +214,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
SyncCommitteeBits: syncBits,
}
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate)
st, votedKeys, _, err := altair.ProcessSyncAggregateEported(t.Context(), beaconState, syncAggregate)
require.NoError(t, err)
votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
for _, key := range votedKeys {
@@ -228,7 +227,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
committeeKeys := currentSyncCommittee.Pubkeys
balances := st.Balances()
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
require.NoError(t, err)
for i := 0; i < len(syncBits); i++ {
@@ -254,7 +253,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
func Test_VerifySyncCommitteeSig(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
committee, err := altair.NextSyncCommittee(t.Context(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
@@ -262,7 +261,7 @@ func Test_VerifySyncCommitteeSig(t *testing.T) {
for i := range syncBits {
syncBits[i] = 0xff
}
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState)
require.NoError(t, err)
ps := slots.PrevSlot(beaconState.Slot())
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)


@@ -1,21 +1,21 @@
package altair_test
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/fuzz"
"github.com/OffchainLabs/prysm/v6/testing/require"
fuzz "github.com/google/gofuzz"
gofuzz "github.com/google/gofuzz"
)
func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
deposits := make([]*ethpb.Deposit, 100)
ctx := context.Background()
ctx := t.Context()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
for i := range deposits {
@@ -27,14 +27,15 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposits)
}
fuzz.FreeMemory(i)
}
}
func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
deposit := &ethpb.Deposit{}
ctx := context.Background()
ctx := t.Context()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
@@ -45,14 +46,15 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
fuzz.FreeMemory(i)
}
}
func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
ctx := context.Background()
ctx := t.Context()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
@@ -63,11 +65,12 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
fuzz.FreeMemory(i)
}
}
func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
@@ -80,11 +83,12 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
fuzz.FreeMemory(i)
}
}
func TestFuzzProcessDeposit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconStateAltair{}
deposit := &ethpb.Deposit{}
@@ -97,5 +101,6 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
}
fuzz.FreeMemory(i)
}
}

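In the fuzz tests above, github.com/google/gofuzz is re-aliased to gofuzz so that Prysm's own testing/fuzz package can keep the short fuzz name, and fuzz.FreeMemory(i) is now called on every iteration. A sketch of that loop shape; what FreeMemory does internally is assumed only from its usage here:

package altair_test

import (
    "testing"

    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/fuzz"
    gofuzz "github.com/google/gofuzz"
)

// TestFuzzSketch_10000 is a sketch mirroring the structure of the fuzz
// tests in this diff, not an additional test from the change itself.
func TestFuzzSketch_10000(t *testing.T) {
    fuzzer := gofuzz.NewWithSeed(0) // deterministic pseudo-random inputs
    state := &ethpb.BeaconStateAltair{}
    for i := 0; i < 10000; i++ {
        fuzzer.Fuzz(state) // populate the state with fuzzed field values
        // ... exercise the function under test with the fuzzed state ...
        fuzz.FreeMemory(i) // per-iteration housekeeping, as in the tests above
    }
}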

@@ -1,7 +1,6 @@
package altair_test
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
@@ -42,7 +41,7 @@ func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) {
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]})
newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]})
require.NoError(t, err, "Expected block deposits to process correctly")
require.Equal(t, 2, len(newState.Validators()), "Incorrect validator count")
}
@@ -70,7 +69,7 @@ func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0]})
newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{dep[0]})
require.NoError(t, err, "Expected block deposits to process correctly")
if newState.Balances()[1] != dep[0].Data.Amount {
t.Errorf(
@@ -127,7 +126,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit})
newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{deposit})
require.NoError(t, err, "Process deposit failed")
require.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050")
}
@@ -256,7 +255,7 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
},
})
require.NoError(t, err)
newState, err := altair.ProcessPreGenesisDeposits(context.Background(), beaconState, dep)
newState, err := altair.ProcessPreGenesisDeposits(t.Context(), beaconState, dep)
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
_, ok := newState.ValidatorIndexByPubkey(bytesutil.ToBytes48(dep[0].Data.PublicKey))
@@ -370,6 +369,6 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
})
require.NoError(t, err)
want := "deposit root did not verify"
_, err = altair.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits)
_, err = altair.ProcessDeposits(t.Context(), beaconState, b.Block.Body.Deposits)
assert.ErrorContains(t, want, err)
}


@@ -1,7 +1,6 @@
package altair
import (
"context"
"math"
"testing"
@@ -32,7 +31,7 @@ func TestInitializeEpochValidators_Ok(t *testing.T) {
InactivityScores: []uint64{0, 1, 2, 3},
})
require.NoError(t, err)
v, b, err := InitializePrecomputeValidators(context.Background(), s)
v, b, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
assert.DeepEqual(t, &precompute.Validator{
IsSlashed: true,
@@ -74,7 +73,7 @@ func TestInitializeEpochValidators_Overflow(t *testing.T) {
InactivityScores: []uint64{0, 1},
})
require.NoError(t, err)
_, _, err = InitializePrecomputeValidators(context.Background(), s)
_, _, err = InitializePrecomputeValidators(t.Context(), s)
require.ErrorContains(t, "could not read every validator: addition overflows", err)
}
@@ -84,16 +83,16 @@ func TestInitializeEpochValidators_BadState(t *testing.T) {
InactivityScores: []uint64{},
})
require.NoError(t, err)
_, _, err = InitializePrecomputeValidators(context.Background(), s)
_, _, err = InitializePrecomputeValidators(t.Context(), s)
require.ErrorContains(t, "num of validators is different than num of inactivity scores", err)
}
func TestProcessEpochParticipation(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
require.DeepEqual(t, &precompute.Validator{
IsActiveCurrentEpoch: true,
@@ -169,9 +168,9 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
InactivityScores: []uint64{0, 0, 0},
})
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), st)
validators, balance, err := InitializePrecomputeValidators(t.Context(), st)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), st, balance, validators)
validators, balance, err = ProcessEpochParticipation(t.Context(), st, balance, validators)
require.NoError(t, err)
require.DeepEqual(t, &precompute.Validator{
IsActiveCurrentEpoch: false,
@@ -209,9 +208,9 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
func TestAttestationsDelta(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
@@ -247,9 +246,9 @@ func TestAttestationsDelta(t *testing.T) {
func TestAttestationsDeltaBellatrix(t *testing.T) {
s, err := testStateBellatrix()
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
deltas, err := AttestationsDelta(s, balance, validators)
require.NoError(t, err)
@@ -285,9 +284,9 @@ func TestAttestationsDeltaBellatrix(t *testing.T) {
func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
require.NoError(t, err)
@@ -324,9 +323,9 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
func TestProcessRewardsAndPenaltiesPrecompute_InactivityLeak(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
sCopy := s.Copy()
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
@@ -352,11 +351,11 @@ func TestProcessInactivityScores_CanProcessInactivityLeak(t *testing.T) {
defaultScore := uint64(5)
require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().MinEpochsToInactivityPenalty+2)))
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
s, _, err = ProcessInactivityScores(t.Context(), s, validators)
require.NoError(t, err)
inactivityScores, err := s.InactivityScores()
require.NoError(t, err)
@@ -373,11 +372,11 @@ func TestProcessInactivityScores_GenesisEpoch(t *testing.T) {
defaultScore := uint64(10)
require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
require.NoError(t, s.SetSlot(params.BeaconConfig().GenesisSlot))
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
s, _, err = ProcessInactivityScores(t.Context(), s, validators)
require.NoError(t, err)
inactivityScores, err := s.InactivityScores()
require.NoError(t, err)
@@ -392,11 +391,11 @@ func TestProcessInactivityScores_CanProcessNonInactivityLeak(t *testing.T) {
require.NoError(t, err)
defaultScore := uint64(5)
require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
s, _, err = ProcessInactivityScores(t.Context(), s, validators)
require.NoError(t, err)
inactivityScores, err := s.InactivityScores()
require.NoError(t, err)
@@ -410,9 +409,9 @@ func TestProcessInactivityScores_CanProcessNonInactivityLeak(t *testing.T) {
func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
require.NoError(t, s.SetSlot(0))
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
@@ -429,9 +428,9 @@ func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) {
func TestProcessRewardsAndPenaltiesPrecompute_BadState(t *testing.T) {
s, err := testState()
require.NoError(t, err)
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
_, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
_, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*precompute.Validator{})
require.ErrorContains(t, "validator registries not the same length as state's validator registries", err)
@@ -442,7 +441,7 @@ func TestProcessInactivityScores_NonEligibleValidator(t *testing.T) {
require.NoError(t, err)
defaultScore := uint64(5)
require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
validators, balance, err := InitializePrecomputeValidators(context.Background(), s)
validators, balance, err := InitializePrecomputeValidators(t.Context(), s)
require.NoError(t, err)
// v0 is eligible (not active previous epoch, slashed and not withdrawable)
@@ -463,9 +462,9 @@ func TestProcessInactivityScores_NonEligibleValidator(t *testing.T) {
// v3 is eligible (active previous epoch)
validators[3].IsActivePrevEpoch = true
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
require.NoError(t, err)
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
s, _, err = ProcessInactivityScores(t.Context(), s, validators)
require.NoError(t, err)
inactivityScores, err := s.InactivityScores()
require.NoError(t, err)


@@ -1,7 +1,6 @@
package altair_test
import (
"context"
"fmt"
"math"
"testing"
@@ -29,7 +28,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
BodyRoot: bytesutil.PadTo([]byte{'c'}, 32),
}
require.NoError(t, s.SetLatestBlockHeader(h))
postState, err := altair.ProcessSyncCommitteeUpdates(context.Background(), s)
postState, err := altair.ProcessSyncCommitteeUpdates(t.Context(), s)
require.NoError(t, err)
current, err := postState.CurrentSyncCommittee()
require.NoError(t, err)
@@ -38,7 +37,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
require.DeepEqual(t, current, next)
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
postState, err = altair.ProcessSyncCommitteeUpdates(t.Context(), s)
require.NoError(t, err)
c, err := postState.CurrentSyncCommittee()
require.NoError(t, err)
@@ -48,7 +47,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
require.DeepEqual(t, next, n)
require.NoError(t, s.SetSlot(primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch-1))
postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
postState, err = altair.ProcessSyncCommitteeUpdates(t.Context(), s)
require.NoError(t, err)
c, err = postState.CurrentSyncCommittee()
require.NoError(t, err)
@@ -61,7 +60,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
// Test boundary condition.
slot := params.BeaconConfig().SlotsPerEpoch * primitives.Slot(time.CurrentEpoch(s)+params.BeaconConfig().EpochsPerSyncCommitteePeriod)
require.NoError(t, s.SetSlot(slot))
boundaryCommittee, err := altair.NextSyncCommittee(context.Background(), s)
boundaryCommittee, err := altair.NextSyncCommittee(t.Context(), s)
require.NoError(t, err)
require.DeepNotEqual(t, boundaryCommittee, n)
}


@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
valIdx: 2,
st: genState(1),
want: 0,
errString: "validator index 2 does not exist",
errString: "index 2 out of bounds",
},
{
name: "active balance is 32eth",
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
valIdx: 2,
activeBalance: 1,
want: 0,
errString: "validator index 2 does not exist",
errString: "index 2 out of bounds",
},
{
name: "active balance is 1",


@@ -217,15 +217,15 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
// ValidateSyncMessageTime validates sync message to ensure that the provided slot is valid.
// Spec: [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot
func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(slot, uint64(genesisTime.Unix())); err != nil {
if err := slots.ValidateClock(slot, genesisTime); err != nil {
return err
}
messageTime, err := slots.ToTime(uint64(genesisTime.Unix()), slot)
messageTime, err := slots.StartTime(genesisTime, slot)
if err != nil {
return err
}
currentSlot := slots.Since(genesisTime)
slotStartTime, err := slots.ToTime(uint64(genesisTime.Unix()), currentSlot)
currentSlot := slots.CurrentSlot(genesisTime)
slotStartTime, err := slots.StartTime(genesisTime, currentSlot)
if err != nil {
return err
}

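ValidateSyncMessageTime now passes the genesis time.Time straight to the slots helpers (ValidateClock, StartTime, CurrentSlot) instead of first converting it to uint64 unix seconds. Below is a hypothetical helper sketching those call shapes; the import paths are assumed from the rest of this diff:

package altair

import (
    "time"

    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/time/slots"
)

// syncMessageWindow is a hypothetical helper mirroring the calls in
// ValidateSyncMessageTime: it returns the start time of the message slot
// and the start time of the node's current slot, both derived from the
// genesis time.Time directly.
func syncMessageWindow(slot primitives.Slot, genesisTime time.Time) (time.Time, time.Time, error) {
    if err := slots.ValidateClock(slot, genesisTime); err != nil {
        return time.Time{}, time.Time{}, err
    }
    messageTime, err := slots.StartTime(genesisTime, slot)
    if err != nil {
        return time.Time{}, time.Time{}, err
    }
    currentStart, err := slots.StartTime(genesisTime, slots.CurrentSlot(genesisTime))
    if err != nil {
        return time.Time{}, time.Time{}, err
    }
    return messageTime, currentStart, nil
}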

@@ -1,7 +1,6 @@
package altair_test
import (
"context"
"testing"
"time"
@@ -97,7 +96,7 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
t.Run(version.String(v), func(t *testing.T) {
helpers.ClearCache()
st := getState(t, tt.args.validatorCount, v)
got, err := altair.NextSyncCommitteeIndices(context.Background(), st)
got, err := altair.NextSyncCommitteeIndices(t.Context(), st)
if tt.wantErr {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -129,18 +128,18 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
}
st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
got1, err := altair.NextSyncCommitteeIndices(context.Background(), st)
got1, err := altair.NextSyncCommitteeIndices(t.Context(), st)
require.NoError(t, err)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch))
got2, err := altair.NextSyncCommitteeIndices(context.Background(), st)
got2, err := altair.NextSyncCommitteeIndices(t.Context(), st)
require.NoError(t, err)
require.DeepNotEqual(t, got1, got2)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
got2, err = altair.NextSyncCommitteeIndices(context.Background(), st)
got2, err = altair.NextSyncCommitteeIndices(t.Context(), st)
require.NoError(t, err)
require.DeepNotEqual(t, got1, got2)
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
got2, err = altair.NextSyncCommitteeIndices(context.Background(), st)
got2, err = altair.NextSyncCommitteeIndices(t.Context(), st)
require.NoError(t, err)
require.DeepNotEqual(t, got1, got2)
}
@@ -206,7 +205,7 @@ func TestSyncCommittee_CanGet(t *testing.T) {
if !tt.wantErr {
require.NoError(t, tt.args.state.SetSlot(primitives.Slot(tt.args.epoch)*params.BeaconConfig().SlotsPerEpoch))
}
got, err := altair.NextSyncCommittee(context.Background(), tt.args.state)
got, err := altair.NextSyncCommittee(t.Context(), tt.args.state)
if tt.wantErr {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -270,7 +269,7 @@ func TestValidateNilSyncContribution(t *testing.T) {
func TestSyncSubCommitteePubkeys_CanGet(t *testing.T) {
helpers.ClearCache()
st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
com, err := altair.NextSyncCommittee(context.Background(), st)
com, err := altair.NextSyncCommittee(t.Context(), st)
require.NoError(t, err)
sub, err := altair.SyncSubCommitteePubkeys(com, 0)
require.NoError(t, err)


@@ -1,7 +1,6 @@
package altair_test
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
@@ -13,7 +12,7 @@ import (
func TestProcessEpoch_CanProcess(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
err := altair.ProcessEpoch(context.Background(), st)
err := altair.ProcessEpoch(t.Context(), st)
require.NoError(t, err)
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
@@ -45,7 +44,7 @@ func TestProcessEpoch_CanProcess(t *testing.T) {
func TestProcessEpoch_CanProcessBellatrix(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
err := altair.ProcessEpoch(context.Background(), st)
err := altair.ProcessEpoch(t.Context(), st)
require.NoError(t, err)
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")


@@ -68,7 +68,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: state.GenesisTime(),
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
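The UpgradeToAltair hunk above reflects that state.GenesisTime() now returns a time.Time, while the BeaconStateAltair proto field still stores uint64 unix seconds, hence the explicit conversion. A hypothetical one-function illustration of that conversion:

package altair

import "time"

// genesisUnix is a hypothetical helper that converts the time.Time returned
// by the state's GenesisTime() accessor back into the uint64 unix-seconds
// form the proto field expects.
func genesisUnix(genesis time.Time) uint64 {
    return uint64(genesis.Unix())
}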

Some files were not shown because too many files have changed in this diff.