Compare commits

...

139 Commits

Author SHA1 Message Date
kasey
3b184f43c8 update codegen dep and cleanup organization (#14127)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
(cherry picked from commit 8cd249c1c8)
2024-06-20 19:11:52 -05:00
Radosław Kapka
2e322b464c Keep only the latest value in the health channel (#14087)
* Increase health tracker channel buffer size

* keep only the latest value

* Make health test blocking as a regression test for PR #14807

* Fix new race conditions in the MockHealthClient

---------

Co-authored-by: Preston Van Loon <preston@pvl.dev>
2024-06-10 07:54:10 -05:00
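The drain-then-send pattern this commit adopts can be sketched in isolation. A minimal, generic illustration (not the actual tracker code, which also serializes writers with a mutex):

```go
package main

import "fmt"

// publish keeps only the newest value in a 1-buffered channel: it
// drains any stale, unread value before sending, so the send never
// blocks. Callers are assumed to serialize publish (e.g. via a mutex).
func publish(ch chan bool, v bool) {
	select {
	case <-ch: // discard the value nobody has read yet
	default:
	}
	ch <- v
}

func main() {
	ch := make(chan bool, 1)
	publish(ch, true)
	publish(ch, false) // evicts the unread true
	fmt.Println(<-ch)  // false: only the latest value survives
}
```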
kasey
26c0178ce4 always close cache warm chan to prevent blocking (#14080)
* always close cache warm chan to prevent blocking

* test that waitForCache does not block

* combine defers to reduce cognitive overhead

* lint

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-06-05 09:26:49 -05:00
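The fix leans on a standard Go idiom: a receive from a closed channel returns immediately, so closing the "warm" channel on every exit path (hence the combined defers) guarantees no waiter blocks forever. A minimal sketch of the idiom, not Prysm's cache code:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	warm := make(chan struct{})
	var wg sync.WaitGroup

	// Waiters block until the cache is warm or warming is abandoned.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			<-warm // returns as soon as warm is closed
			fmt.Println("waiter", i, "released")
		}(i)
	}

	close(warm) // close exactly once on every path, e.g. via defer
	wg.Wait()
}
```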
Nishant Das
b6f93e5305 Change It To Debug (#14072) 2024-06-03 10:04:03 -05:00
Nishant Das
7ad870fdba Update Libp2p Dependencies (#14060)
* Update to v0.35.0 and v0.11.0

* Update Protobuf

* Update bazel deps

(cherry picked from commit 568273453b)
2024-05-31 13:33:30 -05:00
Nishant Das
8b9651606d Restrict Dials From Discovery (#14052)
* Fix Excessive Subnet Dials

* Handle backoff in Iterator

* Slow Down Lookups

* Add Flag To Configure Dials

* Preston's Review

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Reduce polling period

* Manu's Review

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
(cherry picked from commit 7a4ecb6060)
2024-05-31 13:33:23 -05:00
terence
a2a77b5bfc Fix dependent root retrieval genesis case (#14053)
* Fix dependent root retrieval genesis case

* Remove print

(cherry picked from commit 43c7659d18)
2024-05-31 13:32:38 -05:00
Radosław Kapka
b807afbd7d Only log error when aggregator check fails (#14046)
* Only log error when aggregator check fails

* review

(cherry picked from commit 2f2152e039)
2024-05-31 13:32:21 -05:00
terence
6df83ebef7 Fix CommitteeAssignments to not return every validator (#14039)
* Rewrite CommitteeAssignments to not return every validator

* Potuz's feedback

(cherry picked from commit c35889d4c6)
2024-05-31 13:31:43 -05:00
Sammy Rosso
7b4d23801a Fix race conditions + cleanup (#14041)
(cherry picked from commit 10dedd5ced)
2024-05-31 13:31:34 -05:00
kasey
63d31cfa1e paranoid underflow protection without error handling (#14044)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
(cherry picked from commit 62b5c43d87)
2024-05-31 13:31:05 -05:00
james-prysm
f947fefa19 WebFix develop (#14040)
* fixing issues introduced by PR 13593

* missed setting db

* linting

(cherry picked from commit 2e84208169)
2024-05-31 13:30:41 -05:00
Sammy Rosso
7a55dc6cc3 Fix TestNodeHealth_Concurrently race condition (#14033)
(cherry picked from commit 4d190c41cc)
2024-05-31 13:29:50 -05:00
Radosław Kapka
024163b923 Substantial VC cleanup (#13593)
* Cleanup part 1

* Cleanup part 2

* Cleanup part 3

* remove lock field init

* doc for SignerConfig

* remove vars

* use full Keymanager word in function

* revert interface rename

* linter

* fix build issues

* review

(cherry picked from commit 30cc23c5de)
2024-05-31 13:26:16 -05:00
Radosław Kapka
8eb964c3e5 Remove Beacon API Postman collection (#14014)
(cherry picked from commit 8a12b78684)
2024-05-31 13:25:05 -05:00
Preston Van Loon
69c60a6611 Enable experimental_remote_downloader in CI. (#13996)
(cherry picked from commit 49a6d02e12)
2024-05-31 13:24:51 -05:00
Radosław Kapka
7de5381af8 Update state readme (#13890)
* README.md for the state package

* Update beacon-chain/state/state-native/README.md

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Revert "Update beacon-chain/state/state-native/README.md"

This reverts commit 6a4be3bae5.

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
(cherry picked from commit a35535043e)
2024-05-31 13:23:46 -05:00
Brandon Liu
25dfed5cf4 use time.NewTimer() to avoid possible memory leaks (#13800)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
(cherry picked from commit 41edee9fe9)
2024-05-31 13:23:27 -05:00
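The rationale is the usual `time.After` caveat: the timer it allocates is not collected until it fires, which can pile up in loops or in selects that exit early. An explicitly stopped `time.NewTimer` avoids that; a minimal sketch:

```go
package main

import (
	"fmt"
	"time"
)

func wait(done <-chan struct{}) {
	t := time.NewTimer(100 * time.Millisecond)
	defer t.Stop() // releases the timer even if done fires first

	select {
	case <-done:
		fmt.Println("finished early")
	case <-t.C:
		fmt.Println("timed out")
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	wait(done) // prints "finished early"; the timer is stopped, not leaked
}
```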
Nishant Das
b9f0fc52ea Handle Each Blob In Its Own Goroutine (#13959)
(cherry picked from commit e9606b3635)
2024-05-31 13:23:16 -05:00
dependabot[bot]
3a186a1347 Bump golang.org/x/net from 0.21.0 to 0.23.0 (#13895)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.21.0 to 0.23.0.
- [Commits](https://github.com/golang/net/compare/v0.21.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
(cherry picked from commit ed7c4bb6a7)
2024-05-31 13:23:00 -05:00
Nishant Das
c2117e5747 Update Libp2p Dependencies (#13960)
* Update Libp2p

* Update Go Sum

(cherry picked from commit aa847991e0)
2024-05-31 11:47:57 -05:00
terence
0a9e99ce74 Remove unused validator map copy method (#13954)
(cherry picked from commit 49f3531aed)
2024-05-31 11:47:33 -05:00
Sammy Rosso
c3486c58cb Run correct test (#13935)
(cherry picked from commit ae16d5f52c)
2024-05-31 11:46:34 -05:00
Preston Van Loon
5ddd5cb3fb beacon-chain/cache: Convert tests to cache_test blackbox testing (#13920)
* beacon-chain/cache: convert to blackbox tests (package cache_test)

* Move balanceCacheKey to its own file to satisfy go fuzz build

(cherry picked from commit 3233e64ace)
2024-05-31 11:45:57 -05:00
kasey
f079db62bd use [32]byte keys in the filesystem cache (#13885)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
(cherry picked from commit 8d9024f01f)
2024-05-31 11:45:35 -05:00
Sammy Rosso
a5425d9c97 Remove EnableEIP4881 flag (#13826)
* Remove EnableEIP4881 flag

* Gaz

* Fix missing error handler

* Remove old tree and fix tests

* Gaz

* Fix build import

* Replace depositcache

* Add pendingDeposit tests

* Nishant's fix

* Fix unsafe uint64 to int

* Fix other unsafe uint64 to int

* Remove: RemovePendingDeposit

* Deprecate and remove DisableEIP4881 flag

* Check: index not greater than deposit count

* Move index check

(cherry picked from commit c8d6f47749)
2024-05-31 11:45:21 -05:00
Manu NALEPA
ea5c14affb Revert "zig: Update zig to recent main branch commit (#13142)" (#13908)
This reverts commit b24b60dbd8.

(cherry picked from commit a6f134e48e)
2024-05-31 11:45:21 -05:00
Preston Van Loon
f4f00e8f35 spectests: fail hard on missing test folders (#13913)
(cherry picked from commit fdbb5136d9)
2024-05-31 11:45:21 -05:00
Preston Van Loon
1fb4c38503 Refactor beacon-chain/core/helpers tests to be black box (#13906)
(cherry picked from commit 2c66918594)
2024-05-31 11:45:21 -05:00
Manu NALEPA
d041d8da32 Do not remove blobs DB in slasher. (#13881)
(cherry picked from commit a0dac292ff)
2024-05-31 11:44:40 -05:00
terence
11684392d1 Simplify prune invalid by reusing existing fork choice store call (#13878)
(cherry picked from commit 75857e7177)
2024-05-31 11:44:28 -05:00
Alec Thomas
276b97530f Change example.org DNS record (#13904)
DNS changed for this record, causing tests to fail

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-04-22 16:36:30 +00:00
Radosław Kapka
d5daf49a9a Use correct port for health check in Beacon API e2e evaluator (#13892) 2024-04-22 16:18:07 +00:00
james-prysm
feb16ae4aa consistent auth token for validator apis (#13747)
* wip

* fixing tests

* adding more tests especially to handle legacy

* fixing linting

* fixing deepsource issues and flags

* fixing some deepsource issues,pathing issues, and logs

* some review items

* adding additional review feedback

* updating to follow updates from https://github.com/ethereum/keymanager-APIs/pull/74

* adjusting functions to match changes in keymanagers PR

* Update validator/rpc/auth_token.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/auth_token.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/auth_token.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* review feedback

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-04-18 16:26:49 +00:00
kasey
219301339c Don't return error that can be internally handled (#13887)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-17 18:28:01 +00:00
Radosław Kapka
aec349f75a Upgrade the Beacon API e2e evaluator (#13868)
* GET

* POST

* Revert "Auxiliary commit to revert individual files from 615feb104004d6a945ededf5862ae38325fc7ec2"

This reverts commit 55cf071c684019f3d6124179154c10b2277fda49.

* comment fix

* deepsource
2024-04-15 05:56:47 +00:00
terence
5f909caedf Remove unused IsViableForCheckpoint (#13879) 2024-04-14 16:38:25 +00:00
Radosław Kapka
ba6dff3adb Return syncing status when node is optimistic (#13875) 2024-04-12 10:24:30 +00:00
Radosław Kapka
8cd05f098b Use read only validators in Beacon API (#13873) 2024-04-12 07:19:40 +00:00
Radosław Kapka
425f5387fa Handle overflow in retention period calculation (#13874) 2024-04-12 07:17:12 +00:00
Nishant Das
f2ce115ade Revert Peer Log Changes (#13872) 2024-04-12 06:49:01 +00:00
kasey
090a3e1ded Fix bug from PR 13827 (#13871)
* fix AS cache bug, tighten ro constructors

* additional coverage on AS cache filter

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-11 23:07:44 +00:00
kasey
c0acb7d352 Backfill throttling (#13855)
* add a sleep between retries as a simple throttle

* unit test

* deepsource

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-11 15:22:29 +00:00
Radosław Kapka
0d6070e6fc Use retention period when fetching blobs (#13869) 2024-04-11 14:06:00 +00:00
Manu NALEPA
bd00f851f0 e2e: Expected log Running node with peerId= -> Running node with. (#13861)
Rationale:
`FindFollowingTextInFile` seems to have trouble with `logrus` fields.
2024-04-09 07:51:56 +00:00
Nishant Das
1a0c07deec Extend Broadcast Window For Attestations (#13858)
* fix it

* make check better
2024-04-08 04:49:20 +00:00
kasey
04f231a400 Initsync skip local blobs (#13827)
* wip - init-sync skip available blob req

* satisfy deep source

* gaz

* don't need to sort blobs; simplify blobRequest stack

* wip debug log to watch blob skip behavior

* unit tests for new blob req generator

* refactor to reduce blob req func count

* log when WaitForSummarizer fails

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-05 19:09:43 +00:00
Manu NALEPA
be1bfcce63 P2P: Add QUIC support (#13786)
* (Unrelated) DoppelGanger: Improve message.

* `beacon-blocks-by-range`: Add `--network` option.

* `ensurePeerConnections`: Remove capital letter in error message.

* `MultiAddressBuilder{WithID}`: Refactor.

* `buildOptions`: Improve log.

* `NewService`: Bubbles up errors.

* `tcp` ==> `libp2ptcp`

* `multiAddressBuilderWithID`: Add the ability to build QUIC multiaddr

* `p2p Start`: Fix error message.

* `p2p`: Add QUIC support.

* Status: Implement `{Inbound,Outbound}Connected{TCP,QUIC}`.

* Logging: Display the number of TCP/QUIC connected peers.

* P2P: Implement `{Inbound,Outbound}ConnectedWithProtocol`.

* Hide QUIC protocol behind the `--enable-quic` feature flag.

* `e2e`: Add `--enable-quic` flag.

* Add `--enable-quic` in `devModeFlag`.

* `convertToMultiAddrs` ==> `retrieveMultiAddrsFromNode`.

* `convertToAddrInfo`: Ensure `len(infos) == 1`.
2024-04-04 12:21:35 +00:00
Manu NALEPA
8cf5d79852 Remove the Goerli/Prater support. (#13846) 2024-04-03 19:19:17 +00:00
redistay
f7912e7c20 chore: fix some comments (#13843)
Signed-off-by: redistay <wujunjing@outlook.com>
2024-04-02 22:19:15 +00:00
terence
caa8be5dd1 Beacon-api: broadcast blobs in the event of seen block (#13830)
* Beacon-api: broadcast blobs in the event of seen block

* Fix parameters

* Fix test

* Check forkchoice

* Ran go format

* Revert "Ran go format"

This reverts commit 091e77e81d6e2b9861fecc27c0bad1898033f9a3.

* James feedback

* Radek's feedback

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix bad tests

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-04-02 18:12:58 +00:00
cui
0c15a30a34 using slices.Index (#13836) 2024-04-02 16:30:05 +00:00
cui
7bce1c0714 using slices.IndexFunc (#13839) 2024-04-02 16:06:27 +00:00
Radosław Kapka
d1084cbe48 Send correct state root with finalization event (#13842) 2024-04-02 15:32:32 +00:00
cui
2cc3f69a3f using slices.Contains (#13835) 2024-04-01 21:26:52 +00:00
cui
a861489a83 using slices.ContainsFunc (#13838) 2024-04-01 21:15:38 +00:00
cui
0e1c585f7d using slices.Contains (#13837) 2024-04-01 21:10:46 +00:00
cui
9df20e616c using slices.IndexFunc (#13834) 2024-04-01 20:04:40 +00:00
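The cui commits above swap hand-rolled search loops for the Go 1.21 standard-library `slices` helpers. For reference, the four functions being adopted:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	vals := []int{3, 1, 4, 1, 5}

	fmt.Println(slices.Contains(vals, 4)) // true
	fmt.Println(slices.Index(vals, 1))    // 1 (first match)
	fmt.Println(slices.IndexFunc(vals, func(v int) bool { return v > 3 }))    // 2
	fmt.Println(slices.ContainsFunc(vals, func(v int) bool { return v < 0 })) // false
}
```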
kasey
53fdd2d062 allow other pkgs to check for blobs in pruning cache (#13788)
* allow other pkgs to check for blobs in pruning cache

* address deepsource complaints

* custom error to simplify test setup

* add AllAvailable method

* make storage summary slot field private

* unit test and off-by-one fix

* remove comment with copy of tested function

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-01 14:19:51 +00:00
Sammy Rosso
2b4bb5d890 Fixed spelling mistakes in comments (#13833) 2024-04-01 11:12:20 +00:00
Nishant Das
38f208d70d Reject Empty Bundles (#13798)
* reject it

* test

* add test case
2024-04-01 04:37:36 +00:00
Nishant Das
65b90abdda Maximize Peer Capacity When Syncing (#13820)
* maximize it

* fix it

* lint

* add test

* Update beacon-chain/sync/initial-sync/blocks_fetcher.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* logs

* kasey's review

* kasey's review

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-03-30 14:54:11 +00:00
kasey
f3b49d4eaf Repair idx 13486 (#13831)
* Revert "Modify the algorithm of `updateFinalizedBlockRoots` (#13486)"

This reverts commit 32fb183392.

* migration to fix index corruption from pr 13486

* bail as soon as we see 10 epochs without the bug

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-29 17:29:39 +00:00
Lorenzo
5b1da7353c feat(direct peers): configure static peers to be direct peers in pubsub options (#13773) 2024-03-29 04:00:40 +00:00
terence
9f17e65860 Fill in missing debug logs for blob p2p IGNORE/REJECT (#13825) 2024-03-28 16:11:55 +00:00
Manu NALEPA
9b2d53b0d1 Bump libp2p to v0.33.1 (#13784)
* Run bare `//:gazelle -- update-repos`.

--> Removes some blank lines.

* Update libp2p to `v0.33.1`.
2024-03-28 08:38:46 +00:00
terence
d6f9196707 Change goodbye message from rate limited peer to debug verbosity (#13819) 2024-03-28 04:14:37 +00:00
Potuz
1b0e09369e Add metrics to track pending attestations (#13815) 2024-03-27 18:20:53 +00:00
Potuz
12482eeb40 Remove check for duplicates in pending attestation queue (#13814)
* Remove check for duplicates in pending attestation queue

The current queue will only save 1 unaggregated attestation for a pending block because we wrap the object into a SignedAggregatedAttestationAndProof with a zeroed aggregator.

* fix tests
2024-03-27 16:39:45 +00:00
Joel Rousseau
acc307b959 Command-line interface for visualizing min/max span bucket (#13748)
* add max/min span visualisation tool cli

* go mod tidy

* lint imports

* remove typo

* fix epoch table value

* fix deepsource

* add dep to bazel

* fix dep import order

* change command name from span to slasher-span-display

* change command args style using - instead of _

* sed s/CONFIGURATION/SLASHER PARAMS//

* change double neg to double pos condition

* remove unused anonymous func

* better function naming

* add range condition

* [deepsource] Fix Empty slice literal used to declare a variable
    GO-W1027

* correct typo

* do not show incorrect epochs due to round robin

* fix import

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-03-27 16:15:39 +00:00
Afanti
c1d75c295a chore: enhance comment and more readable (#13792) 2024-03-27 14:43:55 +00:00
Potuz
fad118cb04 Simplify ValidateAttestationTime (#13813)
ValidateClock in ValidateAttestationTime is useless.

The first check is that attSlot is not greater than currentSlot + 128 slots.

Later there is a check that the attSlot start time is not greater than the
current slot's start time + clockDisparity.

If attSlot were greater than currentSlot + 128 slots, the second check would
fail anyway.

The latter check already guarantees that attSlot cannot be larger than
currentSlot, so it can never happen that attEpoch > currentEpoch. For Deneb we
just need to check that attEpoch >= currentEpoch - 1.

Also removes some duplicated variables, such as the attestation epoch being
computed twice.
2024-03-27 14:17:16 +00:00
kasey
cdd1d819df Refactor batch verifier for sharing across packages (#13812)
* refactor batch verifier to share with pending queue

* unit test for batch verifier

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-27 12:36:17 +00:00
terence
97edffaff5 Add bid value metrics (#13804) 2024-03-26 14:58:41 +00:00
Radosław Kapka
6de7df6b9d Get genesis only once (#13796) 2024-03-26 03:26:34 +00:00
Sammy Rosso
14d7416c16 Spec test coverage report hack (#13718)
* Spec test report hack

* no export

* fix shell complaint

* shell fix?

* shell again?

* chmod +x ./hack/spectest-report.sh

* Review + improvements

* Remove unwanted change

* Add exclusion list

* Fix path + add eip6110 to exclusion

* Fix bazel path nonsense

* Add extra detail about specific test

* Cleanup exclusion list

* Add fail conditions

* Add mkdir

* Shorten filename + mkdir only if new

* Fix names

* Add to exclusion list

* Add report to .gitignore

* Back to stupid names

* Add Bazel flags option

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-25 16:10:32 +00:00
Radosław Kapka
6782df917a Utilize next slot cache in block rewards rpc (#13684)
* Utilize next slot cache in block rewards rpc

* msg fix

* tests
2024-03-25 08:56:20 +00:00
Bharath Vedartham
3d2230223f create the log file along with its parent directory if not present (#12675)
* Remove Feature Flag From Prater (#12082)

* Use Epoch boundary cache to retrieve balances (#12083)

* Use Epoch boundary cache to retrieve balances

* save boundary states before inserting to forkchoice

* move up last block save

* remove boundary checks on balances

* fix ordering

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* create the log file along with its parent directory if not present

* only give ReadWritePermissions to the log file

* use io/file package to create the parent directories

* fix ci related issues

* add regression tests

* run gazelle

* fix tests

* remove print statements

* gazelle

* Remove failing test for MkdirAll, this failure is not expected

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-22 15:32:08 +00:00
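What this commit describes maps onto standard-library calls; a generic sketch (hypothetical path, plain `os` rather than Prysm's `io/file` package): create the missing parent directories first, then open the log file with owner-only read/write permissions.

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

func main() {
	logPath := "/tmp/prysm-example/logs/validator.log" // hypothetical

	// Create the parent directory chain if it does not exist.
	if err := os.MkdirAll(filepath.Dir(logPath), 0o750); err != nil {
		log.Fatal(err)
	}
	// 0o600: read/write for the owner only.
	f, err := os.OpenFile(logPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	log.SetOutput(f)
	log.Println("log file ready")
}
```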
Preston Van Loon
b008a6422d Add tarball support for docker images (#13790) 2024-03-22 15:31:29 +00:00
Fredrik Svantes
d19365507f Set default LocalBlockValueBoost to 10 (#13772)
* Set default LocalBlockValueBoost to 10

* Update base.go

* Update mainnet_config.go
2024-03-22 13:18:20 +00:00
kasey
c05e39a668 fix handling of goodbye messages for limited peers (#13785)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-22 13:06:16 +00:00
Radosław Kapka
63c2b3563a Optimize GetDuties VC action (#13789)
* wait groups

* errgroup

* tests

* bzl

* review
2024-03-22 09:50:19 +00:00
Justin Traglia
a6e86c6731 Rename payloadattribute Timestamps to Timestamp (#13523)
Co-authored-by: terence <terence@prysmaticlabs.com>
2024-03-21 21:11:01 +00:00
Radosław Kapka
32fb183392 Modify the algorithm of updateFinalizedBlockRoots (#13486)
* rename error var

* new algo

* replay_test

* add comment

* review

* fill out parent root

* handle edge cases

* review
2024-03-21 21:09:56 +00:00
carrychair
cade09ba0b chore: fix some typos (#13726)
Signed-off-by: carrychair <linghuchong404@gmail.com>
2024-03-21 21:00:21 +00:00
Potuz
f85ddfe265 Log the slot and blockroot when we deadline waiting for blobs (#13774) 2024-03-21 20:29:23 +00:00
terence
3b97094ea4 Log da block root in hex (#13787) 2024-03-21 20:26:17 +00:00
Nishant Das
acdbf7c491 expand it (#13770) 2024-03-21 19:57:22 +00:00
Potuz
1cc1effd75 Revert "pass justified=finalized in Prater (#13695)" (#13709)
This reverts commit 102518e106.
2024-03-21 17:42:40 +00:00
james-prysm
f7f1d249f2 Fix get validator endpoint for empty query parameters (#13780)
* fix handlers for get validators

* removing log
2024-03-21 14:00:07 +00:00
kasey
02abb3e3c0 add log message if in da check at slot end (#13776)
* add log message if in da check at slot end

* don't bother logging late da check start

* break up defer with a var, too dense all together

* pass slot instead of block ref

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-20 19:31:09 +00:00
james-prysm
2255c8b287 setting missing beacon API (#13778) 2024-03-20 17:59:15 +00:00
terence
27ecf448a7 Add da waited time to sync block log (#13775) 2024-03-20 14:53:02 +00:00
james-prysm
e243f04e44 validator client on rest mode has an inappropriate context deadline for events (#13771)
* addressing errors on events endpoint

* reverting timeout on get health

* fixing linting

* fixing more linting

* Update validator/client/beacon-api/beacon_api_validator_client.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update beacon-chain/rpc/eth/events/events.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* reverting change and removing line on context done which creates a superfluous response.WriteHeader error

* gofmt

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-20 13:19:05 +00:00
Manu NALEPA
fca1adbad7 Re-design TestStartDiscV5_DiscoverPeersWithSubnets test (#13766)
* `Test_AttSubnets`: Factorize.

* `filterPeerForAttSubnet`: `O(n)` ==> `O(1)`

* `FindPeersWithSubnet`: Optimize.

* `TestStartDiscV5_DiscoverPeersWithSubnets`: Complete re-design.

* `broadcastAttestation`: User `log.WithFields`.

* `filterPeer`: Refactor comments.

* Make deepsource happy.

* `TestStartDiscV5_FindPeersWithSubnet`: Add context cancellation.

Add some notes on `FindPeersWithSubnet` about
this limitation as well.
2024-03-20 03:36:00 +00:00
Radosław Kapka
b692722ddf Optimize SubmitAggregateSelectionProof VC action (#13711)
* Optimize `SubscribeCommitteeSubnets` VC action

* test fixes

* remove newline

* Optimize `SubmitAggregateSelectionProof`

* mock

* bzl gzl

* test fixes
2024-03-19 14:09:07 +00:00
Nishant Das
c4f6020677 add mplex timeout (#13745) 2024-03-19 13:37:23 +00:00
Chanh Le
d779e65d4e chore(kzg): Additional tests for KZG commitments (#13758)
* add a test explaining kzgRootIndex

* minor

* minor
2024-03-19 09:08:02 +00:00
terence
357211b7d9 Update spec test to official 1.4.0 (#13761) 2024-03-18 23:39:03 +00:00
Potuz
2dd48343a2 Set default fee recipient if tracked val fails (#13768) 2024-03-18 19:35:34 +00:00
james-prysm
7f931bf65b Keymanager APIs - get,post,delete graffiti (#13474)
* wip

* adding set and delete graffiti

* fixing mock

* fixing mock linting and putting in scaffolds for unit tests

* adding some tests

* gaz

* adding tests

* updating missing unit test

* fixing unit test

* Update validator/rpc/handlers_keymanager.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update validator/client/propose.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/handlers_keymanager.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/propose.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek's feedback

* fixing tests

* using wrapper for graffiti

* fixing linting

* wip

* fixing setting proposer settings

* more partial fixes to tests

* gaz

* fixing tests and setting logic

* changing keymanager

* fixing tests and making graffiti optional in the proposer file

* remove unneeded lines

* reverting unintended changes

* Update validator/client/propose.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* addressing feedback

* removing unneeded line

* fixing bad merge resolution

* gofmt

* gaz

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-03-18 15:03:08 +00:00
Nishant Das
fda4589251 Rewrite Pruning Implementation To Handle EIP 7045 (#13762)
* make it very big

* use new pruning implementation

* handle pre deneb

* revert cache change

* less verbose

* gaz

* Update beacon-chain/operations/attestations/prune_expired.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* gofmt

* be safer

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2024-03-18 12:57:21 +00:00
kasey
34593d34d4 allow blob by root within da period (#13757)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-18 03:15:17 +00:00
Potuz
4d18e590ed Rename misspelled variable (#13759) 2024-03-17 20:47:19 +00:00
Potuz
ec8b67cb12 Use headstate for recent checkpoints (#13746)
* Use headstate for recent checkpoints

* add the computed state to the checkpoint cache

* acquire a multilock
2024-03-17 18:50:49 +00:00
terence
a817aa0a8d New gossip cache size (#13756)
* New gossip cache size

Increase seen aggregate cache size to 4096

* Update cache size to 8192

* 16384
2024-03-17 03:02:00 +00:00
kasey
d76f55e97a adds a metric to track blob sig cache lookups (#13755)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-16 20:41:21 +00:00
james-prysm
2de21eb22f adding headers to post endpoint (#13753) 2024-03-15 18:19:42 +00:00
Nishant Das
58b8c31c93 mark in progress (#13750) 2024-03-15 16:46:26 +00:00
kasey
f343333880 handle special case of batch size=1 (#13646)
* handle special case of batch size=1

* unit test case for backfill batch len=1

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-15 15:19:59 +00:00
kasey
8e0b1b7e1f Backfill min slot flag (#13729)
* flag to set an older backfill slot target

* wire up flag to main cli and usage

* fix deepsource complaints

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-15 15:17:24 +00:00
Manu NALEPA
65f71b3a48 P2P: Simplify code (#13719)
* `subscribeStaticWithSubnets`: Fix docstring.

* `buildOptions`: Avoid `options` mutations.

* `dv5Cfg`: Avoid mutation.

* `RefreshENR`: Use default for all but Phase0.

* `udp4`, `udp6`: Create enum.

* `p2p.Config`: `BootstrapNodeAddr`==> `BootstrapNodeAddrs`.

* `p2p.Config`: `Discv5BootStrapAddr` ==> `Discv5BootStrapAddrs`.

* `TestScorers_BadResponses_Score`: Improve.

* `BeaconNode`: Avoid mutation.

* `TestStore_TrustedPeers`: Remove blankline.

* Remove blank identifiers.

* `privKey`: Keep the majority of code with low indentation.

* `P2PPreregistration`: Return error instead of fatal log.

* `parseBootStrapAddrs` => `ParseBootStrapAddrs` (export)

* `p2p.Config`: Remove `BootstrapNodeAddrs`.

* `NewService`: Avoid mutation when possible.

* `Service`: Remove blank identifier.

* `buildOptions`: Avoid `log.Fatalf` (make deepsource happy).

* `registerGRPCGateway`: Use `net.JoinHostPort` (make deepsource happy).

* `registerBuilderService`: Make deepsource happy.

* `scorers`: Add `NoLock` suffix (make deepsource happy).

* `scorerr`: Add some `NoLock`suffixes (making deepsource happy).

* `discovery_test.go`. Remove init.

Rationale:
`rand.Seed` is deprecated: As of Go 1.20 there is no reason to call Seed with a random value. Programs that call Seed with a known value to get a specific sequence of results should use New(NewSource(seed)) to obtain a local random generator.

This makes deepsource happy as well. (A sketch of the replacement pattern follows this entry.)

* `createListener`: Reduce cyclomatic complexity (make deepsource happy).

* `startDB`: Reduce cyclomatic complexity (make deepsource happy).

* `main`: Log a FATAL on error.

This way, the error message is very readable.
Before this commit, the error message is the less readable
message in the logs.

* `New`: Reduce cyclomatic complexity (make deepsource happy).

* `main`: Avoid `App` mutation, and make deepsource happy.

* Update beacon-chain/node/node.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* `bootnodes` ==> `BootNodes` (Fix PR comment).

* Remove duplicate `configureFastSSZHashingAlgorithm` since already done in `configureBeacon`. (Fix PR comment)

* Add `TestCreateLocalNode`. (PR comment fix.)

* `startModules` ==> `startBaseServices (Fix PR comment).

* `buildOptions` return errors consistently.

* `New`: Change ordering.

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-03-15 11:08:19 +00:00
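The `rand.Seed` replacement mentioned in the `discovery_test.go` bullet looks like this in Go 1.20+ (a generic sketch, not Prysm code):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Instead of the deprecated rand.Seed(42), construct a local
	// generator to get a reproducible sequence.
	r := rand.New(rand.NewSource(42))
	fmt.Println(r.Intn(100))
}
```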
kasey
9fcb9b86af fix 1-worker underflow; lower default batch size (#13734)
* fix 1-worker underflow; lower default batch size

* adding test for single item in batcher

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-03-14 00:59:27 +00:00
terence
aa63c4e7f2 Use correct gossip validation time (#13740) 2024-03-13 16:10:18 +00:00
james-prysm
d6ae838bbf replace receive slot with event stream (#13563)
* WIP

* event stream wip

* returning nil

* temp removing some tests

* wip health checks

* fixing conflicts

* updating fields based on linting

* fixing more errors

* fixing mocks

* fixing more mocks

* fixing more linting

* removing white space for lint

* fixing log format

* gaz

* reverting changes on grpc

* fixing unit tests

* adding in tests for health tracker and event stream

* adding more tests for streaming slot

* gaz

* Update api/client/event/event_stream.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* review comments

* Update validator/client/runner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/beacon-api/beacon_api_validator_client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing radek comments

* Update validator/client/validator.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing review feedback

* moving things to below next slot ticker

* fixing tests

* update naming

* adding TODO comment

* Update api/client/beacon/health.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing comments

* fixing broken linting

* fixing more import issues

* fixing more import issues

* linting

* updating based on radek's comments

* addressing more comments

* fixing nogo error

* fixing duplicate import

* gaz

* adding radek's review suggestion

* Update proto/prysm/v1alpha1/node.proto

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* preston review comments

* Update api/client/event/event_stream.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update validator/client/validator.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* addressing some more preston review items

* fixing tests for linting

* fixing missed linting

* updating based on feedback to simplify

* adding interface check at the top

* reverting some comments

* cleaning up instantiations

* reworking the health tracker

* fixing linting

* fixing more linting to adhere to the interface

* adding interface check at the the top of the file

* fixing unit tests

* attempting to fix dependency cycle

* addressing radek's comment

* Update validator/client/beacon-api/beacon_api_validator_client.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* adding more tests and feedback items

* fixing TODO comment

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-13 13:01:05 +00:00
terence
d49afb370c Add blob index to file path (#13736)
* Add index to file path

* Add block root

* Add index to file path
2024-03-13 06:36:43 +00:00
terence
4d3a6d84d2 Add gossip blob sidecar verification ms metric (#13737) 2024-03-13 04:41:47 +00:00
Preston Van Loon
9c5d16e161 Clean up some unused beacon state proto types and related code (#13735) 2024-03-13 03:20:36 +00:00
terence
4731304187 Save invalid blob to temp under new flag (#13725) 2024-03-12 17:51:08 +00:00
Potuz
02cbcf8545 only update head at 10 seconds when validating (#13570)
* only update head at 10 seconds when validating

* fix tests
2024-03-12 15:11:40 +00:00
Potuz
4e10734ae4 Compute unrealized checkpoints with pcli (#13692)
* Compute unrealized checkpoints with pcli

* gazelle

* fix lint

* Gazelle

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-03-12 15:03:21 +00:00
Preston Van Loon
e19c99c3e2 Update bazel, rules_go, gazelle, and go versions (#13724)
* Update go, rules_go, gazelle, bazel

* Update zig toolchain
2024-03-12 14:33:37 +00:00
terence
697bcd418c Save invalid block to temp save-invalid-block-temp (#13722) 2024-03-11 20:34:44 +00:00
terence
ec7949fa4b Use justified checkpoint from head state to build attestation (#13703) 2024-03-11 15:05:40 +00:00
Nishant Das
cb8eb4e955 fix context deadline rejections (#13716) 2024-03-11 04:51:02 +00:00
Chanh Le
800f3b572f chore(execution): Clean up unreachable code; use new(big.Int) instead of big.NewInt(0) (#13715)
* refactor with builtin min/max

* use new(big.Int) for more efficiency
2024-03-11 00:31:55 +00:00
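On the second bullet: a zero-valued `big.Int` needs no constructor, so `new(big.Int)` yields the same zero that `big.NewInt(0)` returns while skipping a call. A quick check:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	a := new(big.Int)  // zero value, no constructor needed
	b := big.NewInt(0) // equivalent zero via a constructor call
	fmt.Println(a.Cmp(b)) // 0: both are zero
}
```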
terence
9d3af41acb Remove unused deneb code (#13712)
* Remove unused deneb code

* Gazelle
2024-03-09 00:12:26 +00:00
kasey
07a0a95ee7 Blob verification spectest (#13707)
* use real blob verifier in forkchoice spectest

* wip

* Use real blob sidecar for test

* Set file db correctly

* correctly handle blob cases where valid=false

* work-around spectest's weird Fork in genesis state

* gaz

* revert T-money's log level change

* rm whitespace

* unskip minimal test

* Preston's feedback

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2024-03-08 18:20:38 +00:00
Radosław Kapka
9e7352704c Optimize SubscribeCommitteeSubnets VC action (#13702)
* Optimize `SubscribeCommitteeSubnets` VC action

* test fixes

* remove newline

* review
2024-03-08 18:08:35 +00:00
Nishant Das
2616de1eb1 Check Unrealized Justification Balances In Spectests (#13710)
* add them

* Ensure activation epoch does not overflow

* add them all in

* better check for overflow

* fix tests

* fix tests

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2024-03-08 14:49:34 +00:00
Manu NALEPA
b2e3c29ab3 Improve logging. (#13708)
* Improve logging.

* Make deepsource happy.

* Fix comment.
2024-03-08 12:23:34 +00:00
Preston Van Loon
83538251aa Remove DOMAIN_BLOB_SIDECAR. See https://github.com/ethereum/consensus-specs/pull/3542 (#13706) 2024-03-08 03:55:46 +00:00
Stefan
2442280e37 Fix/race receive block (#13700)
* blob save: add better data checking for empty blob issues (#13647)

(cherry picked from commit daad29d0de)

* avoid part path collisions with mem addr entropy (#13648)

* avoid part path collisions with mem addr entropy

* Regression test

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
(cherry picked from commit 4c66e4d060)

* fix error race

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
2024-03-07 19:46:03 +00:00
Stefan
4608569495 fix race condition when pinging peers (#13701) 2024-03-07 19:21:55 +00:00
Preston Van Loon
20d013a30b Unskip merkle proof tests (#13704) 2024-03-07 19:18:25 +00:00
Potuz
b0a2115a26 Fix UJ (#13688)
* Fix UJ

* gate slashed

* don't filter slashed for active balance

* don't overflow

* fix tests

* fix tests
2024-03-06 20:46:16 +00:00
Potuz
102518e106 pass justified=finalized in Prater (#13695)
* pass justified=finalized in Prater

* fix gazelle mess
2024-03-06 18:45:41 +00:00
james-prysm
e49ed4d554 keymanager api: lowercase statuses (#13696)
* cleanup

* adding test

* address small comment

* gaz
2024-03-06 16:30:17 +00:00
Manu NALEPA
21775eed52 Fix VC DB conversion when no proposer settings is defined and add Experimental flag in the --enable-minimal-slashing-protection help. (#13691)
* VC: Allow DB conversion without proposer settings.

* `enable-minimal-slashing-protection` flag: Add `Experimental warning`.
2024-03-06 14:48:18 +00:00
523 changed files with 31654 additions and 35794 deletions


@@ -6,6 +6,12 @@ import %workspace%/build/bazelrc/debug.bazelrc
import %workspace%/build/bazelrc/hermetic-cc.bazelrc
import %workspace%/build/bazelrc/performance.bazelrc
# hermetic_cc_toolchain v3.0.1 required changes.
common --enable_platform_specific_config
build:linux --sandbox_add_mount_pair=/tmp
build:macos --sandbox_add_mount_pair=/var/tmp
build:windows --sandbox_add_mount_pair=C:\Temp
# E2E run with debug gotag
test:e2e --define gotags=debug


@@ -1 +1 @@
7.0.0
7.1.0


@@ -12,8 +12,7 @@
build:remote-cache --remote_download_minimal
build:remote-cache --remote_build_event_upload=minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache

.gitignore

@@ -41,3 +41,6 @@ jwt.hex
# manual testing
tmp
# spectest coverage reports
report.txt

MODULE.bazel.lock (generated): file diff suppressed because it is too large.


@@ -2,7 +2,7 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Consensus_Spec_Version 1.4.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.4.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)


@@ -16,34 +16,20 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
http_archive(
name = "hermetic_cc_toolchain",
sha256 = "973ab22945b921ef45b8e1d6ce01ca7ce1b8a462167449a36e297438c4ec2755",
strip_prefix = "hermetic_cc_toolchain-5098046bccc15d2962f3cc8e7e53d6a2a26072dc",
sha256 = "3bc6ec127622fdceb4129cb06b6f7ab098c4d539124dde96a6318e7c32a53f7a",
urls = [
"https://github.com/uber/hermetic_cc_toolchain/archive/5098046bccc15d2962f3cc8e7e53d6a2a26072dc.tar.gz", # 2023-06-28
"https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
"https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
],
)
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
host_platform_sha256 = {
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
},
url_formats = [
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
],
version = "0.12.0-dev.1349+fa022d1ec",
)
zig_toolchains()
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -81,10 +67,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
],
)
@@ -128,9 +114,9 @@ aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
strip_prefix = "rules_oci-1.7.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -151,17 +137,13 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "d6ab6b57e48c09523e93050f13698f708428cfd5e619252e369d377af6597707",
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.43.0/rules_go-v0.43.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
],
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
# Override default import in rules_go with special patch until
# https://github.com/gogo/protobuf/pull/582 is merged.
git_repository(
@@ -200,10 +182,14 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.21.6",
go_version = "1.21.8",
nogo = "@//:nogo",
)
load("//:distroless_deps.bzl", "distroless_deps")
distroless_deps()
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -241,9 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-beta.7"
consensus_spec_test_version = "v1.4.0-beta.7-hotfix"
consensus_spec_version = "v1.4.0"
bls_test_version = "v0.1.1"
@@ -260,7 +244,7 @@ filegroup(
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -276,7 +260,7 @@ filegroup(
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -292,7 +276,7 @@ filegroup(
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -306,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "049c29267310e6b88280f4f834a75866c2f5b9036fa97acb9d9c6db8f64d9118",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -342,22 +326,6 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """


@@ -1,11 +1,24 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"constants.go",
"headers.go",
"jwt.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api",
visibility = ["//visibility:public"],
deps = [
"//crypto/rand:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["jwt_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)


@@ -6,11 +6,14 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"health.go",
"log.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/iface:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -36,10 +39,12 @@ go_test(
srcs = [
"checkpoint_test.go",
"client_test.go",
"health_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -53,5 +58,6 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)


@@ -17,7 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
"golang.org/x/mod/semver"
)
@@ -74,7 +74,12 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
}
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
log.WithFields(logrus.Fields{
"name": vu.Config.ConfigName,
"fork": version.String(vu.Fork),
}).Info("Detected supported config in remote finalized state")
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")


@@ -22,7 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus"
)
const (
@@ -309,7 +309,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
log.WithFields(logrus.Fields{
"validatorIndex": w.ValidatorIndex,
"withdrawalAddress": w.ToExecutionAddress,
}).Error(failure.Message)
@@ -341,9 +341,9 @@ type forkScheduleResponse struct {
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.Atoi(d.Epoch)
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
@@ -355,7 +355,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)


@@ -0,0 +1,60 @@
package beacon
import (
"context"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
)
type NodeHealthTracker struct {
isHealthy *bool
healthChan chan bool
node iface.HealthNode
sync.RWMutex
}
func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
return &NodeHealthTracker{
node: node,
healthChan: make(chan bool, 1),
}
}
// HealthUpdates provides a read-only channel for health updates.
func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
return n.healthChan
}
func (n *NodeHealthTracker) IsHealthy() bool {
n.RLock()
defer n.RUnlock()
if n.isHealthy == nil {
return false
}
return *n.isHealthy
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.Lock()
defer n.Unlock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
if isStatusChanged {
// Update the health status
n.isHealthy = &newStatus
// Send the new status to the health channel, potentially overwriting the existing value
select {
case <-n.healthChan:
n.healthChan <- newStatus
default:
n.healthChan <- newStatus
}
}
return newStatus
}

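A usage sketch for the tracker above, assuming the Prysm module is importable; the stub node is hypothetical, but the signatures match this diff. Note that the first CheckHealth only records the initial status, so only the later transition lands on the channel:

```go
package main

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
)

// stubNode is a hypothetical HealthNode used only for this sketch.
type stubNode struct{ healthy bool }

func (s *stubNode) IsHealthy(_ context.Context) bool { return s.healthy }

func main() {
	node := &stubNode{healthy: true}
	tracker := beacon.NewNodeHealthTracker(node)

	tracker.CheckHealth(context.Background()) // records initial status; nothing sent
	fmt.Println(tracker.IsHealthy())          // true

	node.healthy = false
	tracker.CheckHealth(context.Background()) // healthy -> unhealthy: update sent

	// The 1-buffered channel holds only the most recent transition.
	fmt.Println(<-tracker.HealthUpdates()) // false
}
```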

@@ -0,0 +1,112 @@
package beacon
import (
"context"
"sync"
"testing"
healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
"go.uber.org/mock/gomock"
)
func TestNodeHealth_IsHealthy(t *testing.T) {
tests := []struct {
name string
isHealthy bool
want bool
}{
{"initially healthy", true, true},
{"initially unhealthy", false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &NodeHealthTracker{
isHealthy: &tt.isHealthy,
healthChan: make(chan bool, 1),
}
if got := n.IsHealthy(); got != tt.want {
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
tests := []struct {
name string
initial bool // Initial health status
newStatus bool // Status to update to
shouldSend bool // Should a message be sent through the channel
}{
{"healthy to unhealthy", true, false, true},
{"unhealthy to healthy", false, true, true},
{"remain healthy", true, true, false},
{"remain unhealthy", false, false, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
n := &NodeHealthTracker{
isHealthy: &tt.initial,
node: client,
healthChan: make(chan bool, 1),
}
s := n.CheckHealth(context.Background())
// Check if health status was updated
if s != tt.newStatus {
t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus)
}
select {
case status := <-n.HealthUpdates():
if !tt.shouldSend {
t.Errorf("UpdateNodeHealth() unexpectedly sent status %v to HealthCh", status)
} else if status != tt.newStatus {
t.Errorf("UpdateNodeHealth() sent wrong status %v, want %v", status, tt.newStatus)
}
default:
if tt.shouldSend {
t.Error("UpdateNodeHealth() did not send any status to HealthCh when expected")
}
}
})
}
}
func TestNodeHealth_Concurrency(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := healthTesting.NewMockHealthClient(ctrl)
n := NewNodeHealthTracker(client)
var wg sync.WaitGroup
// Number of goroutines to spawn for both reading and writing
numGoroutines := 6
wg.Add(numGoroutines * 2) // for readers and writers
// Concurrently update health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
n.CheckHealth(context.Background())
}()
}
// Concurrently read health status
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
_ = n.IsHealthy() // Just read the value
}()
}
wg.Wait() // Wait for all goroutines to finish
}


@@ -0,0 +1,8 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["health.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
visibility = ["//visibility:public"],
)


@@ -0,0 +1,13 @@
package iface
import "context"
type HealthTracker interface {
HealthUpdates() <-chan bool
IsHealthy() bool
CheckHealth(ctx context.Context) bool
}
type HealthNode interface {
IsHealthy(ctx context.Context) bool
}

api/client/beacon/log.go

@@ -0,0 +1,5 @@
package beacon
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "beacon")


@@ -0,0 +1,12 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
visibility = ["//visibility:public"],
deps = [
"//api/client/beacon/iface:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
)


@@ -0,0 +1,59 @@
package testing
import (
"context"
"reflect"
"sync"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
"go.uber.org/mock/gomock"
)
var (
_ = iface.HealthNode(&MockHealthClient{})
)
// MockHealthClient is a mock of HealthClient interface.
type MockHealthClient struct {
ctrl *gomock.Controller
recorder *MockHealthClientMockRecorder
sync.Mutex
}
// MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
type MockHealthClientMockRecorder struct {
mock *MockHealthClient
}
// IsHealthy mocks base method.
func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
m.Lock()
defer m.Unlock()
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsHealthy", arg0)
ret0, ok := ret[0].(bool)
if !ok {
return false
}
return ret0
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
return m.recorder
}
// IsHealthy indicates an expected call of IsHealthy.
func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
mr.mock.Lock()
defer mr.mock.Unlock()
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
}
// NewMockHealthClient creates a new mock instance.
func NewMockHealthClient(ctrl *gomock.Controller) *MockHealthClient {
mock := &MockHealthClient{ctrl: ctrl}
mock.recorder = &MockHealthClientMockRecorder{mock}
return mock
}

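A usage sketch for the mock, mirroring the concurrency test earlier in this diff (assumptions: the test file imports this package as healthTesting and uses go.uber.org/mock/gomock):

func TestMockIsHealthy(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := healthTesting.NewMockHealthClient(ctrl)
	// Expect exactly one probe and have it report healthy.
	client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
	if !client.IsHealthy(context.Background()) {
		t.Error("expected mocked IsHealthy to return true")
	}
}

The embedded sync.Mutex in MockHealthClient is what lets IsHealthy and EXPECT be called from concurrent goroutines without tripping the race detector.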

@@ -304,6 +304,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -341,6 +343,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
}
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
@@ -379,6 +383,8 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
r.Header.Set("Content-Type", "application/json")
r.Header.Set("Accept", "application/json")
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {

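Each option passed to c.do is just a func(*http.Request), so the headers added in these hunks could equally be factored into a reusable closure. A sketch, assuming c.do accepts variadic request options (if it takes a single option, fold both closures into one); jsonOpt is hypothetical:

// jsonOpt is a hypothetical reusable option carrying the JSON
// content-negotiation headers introduced by this change.
jsonOpt := func(r *http.Request) {
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
}
versionOpt := func(r *http.Request) {
	r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt, jsonOpt)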

@@ -321,6 +321,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -347,6 +349,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -376,6 +380,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)


@@ -21,6 +21,9 @@ var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// ErrConnectionIssue represents a connection problem.
var ErrConnectionIssue = errors.New("could not connect")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)

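Because ErrConnectionIssue is a package-level sentinel, callers that wrap the sentinel itself (for example errors.Wrap(client.ErrConnectionIssue, "...") with github.com/pkg/errors, which implements Unwrap) can branch on it later. A sketch with a hypothetical doRequest:

if err := doRequest(); errors.Is(err, client.ErrConnectionIssue) {
	// e.g. rotate to a fallback beacon node before retrying
}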

@@ -0,0 +1,24 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["event_stream.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/event",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["event_stream_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)


@@ -0,0 +1,148 @@
package event
import (
"bufio"
"context"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
log "github.com/sirupsen/logrus"
)
const (
EventHead = "head"
EventBlock = "block"
EventAttestation = "attestation"
EventVoluntaryExit = "voluntary_exit"
EventBlsToExecutionChange = "bls_to_execution_change"
EventProposerSlashing = "proposer_slashing"
EventAttesterSlashing = "attester_slashing"
EventFinalizedCheckpoint = "finalized_checkpoint"
EventChainReorg = "chain_reorg"
EventContributionAndProof = "contribution_and_proof"
EventLightClientFinalityUpdate = "light_client_finality_update"
EventLightClientOptimisticUpdate = "light_client_optimistic_update"
EventPayloadAttributes = "payload_attributes"
EventBlobSidecar = "blob_sidecar"
EventError = "error"
EventConnectionError = "connection_error"
)
var (
_ = EventStreamClient(&EventStream{})
)
var DefaultEventTopics = []string{EventHead}
type EventStreamClient interface {
Subscribe(eventsChannel chan<- *Event)
}
type Event struct {
EventType string
Data []byte
}
// EventStream is responsible for subscribing to the Beacon API events endpoint
// and dispatching received events to subscribers.
type EventStream struct {
ctx context.Context
httpClient *http.Client
host string
topics []string
}
func NewEventStream(ctx context.Context, httpClient *http.Client, host string, topics []string) (*EventStream, error) {
// Check if the host is a valid URL
_, err := url.ParseRequestURI(host)
if err != nil {
return nil, err
}
if len(topics) == 0 {
return nil, errors.New("no topics provided")
}
return &EventStream{
ctx: ctx,
httpClient: httpClient,
host: host,
topics: topics,
}, nil
}
func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
allTopics := strings.Join(h.topics, ",")
log.WithField("topics", allTopics).Info("Listening to Beacon API events")
fullUrl := h.host + "/eth/v1/events?topics=" + allTopics
req, err := http.NewRequestWithContext(h.ctx, http.MethodGet, fullUrl, nil)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
}
return // req is nil on this path; proceeding would panic on req.Header below
}
req.Header.Set("Accept", api.EventStreamMediaType)
req.Header.Set("Connection", api.KeepAlive)
resp, err := h.httpClient.Do(req)
if err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
return // resp is nil on this path; the deferred Close below would panic
}
defer func() {
if closeErr := resp.Body.Close(); closeErr != nil {
log.WithError(closeErr).Error("Failed to close events response body")
}
}()
// Create a new scanner to read lines from the response body
scanner := bufio.NewScanner(resp.Body)
var eventType, data string // Variables to store event type and data
// Iterate over lines of the event stream
for scanner.Scan() {
select {
case <-h.ctx.Done():
log.Info("Context canceled, stopping event stream")
close(eventsChannel)
return
default:
line := scanner.Text() // TODO(13730): scanner does not handle /r and does not fully adhere to https://html.spec.whatwg.org/multipage/server-sent-events.html#the-eventsource-interface
// Handle the event based on your specific format
if line == "" {
// Empty line indicates the end of an event
if eventType != "" && data != "" {
// Process the event when both eventType and data are set
eventsChannel <- &Event{EventType: eventType, Data: []byte(data)}
}
// Reset eventType and data for the next event
eventType, data = "", ""
continue
}
et, ok := strings.CutPrefix(line, "event: ")
if ok {
// Extract event type from the "event" field
eventType = et
}
d, ok := strings.CutPrefix(line, "data: ")
if ok {
// Extract data from the "data" field
data = d
}
}
}
if err := scanner.Err(); err != nil {
eventsChannel <- &Event{
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, errors.Wrap(client.ErrConnectionIssue, "scanner failed").Error()).Error()),
}
}
}

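On the TODO above: the SSE specification allows CR, LF, or CRLF as a line terminator, while bufio's default split only recognizes LF (optionally preceded by CR). A hedged sketch of a split function the stream could install via scanner.Split(scanSSELines); this is not part of the diff:

// scanSSELines is a hypothetical bufio.SplitFunc treating CR, LF, or CRLF
// as a line terminator, per the SSE spec referenced in the TODO.
func scanSSELines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	for i, b := range data {
		switch b {
		case '\n':
			return i + 1, data[:i], nil
		case '\r':
			if i == len(data)-1 && !atEOF {
				return 0, nil, nil // may be half of a CRLF; wait for more data
			}
			if i+1 < len(data) && data[i+1] == '\n' {
				return i + 2, data[:i], nil
			}
			return i + 1, data[:i], nil
		}
	}
	if atEOF && len(data) > 0 {
		return len(data), data, nil
	}
	return 0, nil, nil
}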

@@ -0,0 +1,80 @@
package event
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/testing/require"
log "github.com/sirupsen/logrus"
)
func TestNewEventStream(t *testing.T) {
validURL := "http://localhost:8080"
invalidURL := "://invalid"
topics := []string{"topic1", "topic2"}
tests := []struct {
name string
host string
topics []string
wantErr bool
}{
{"Valid input", validURL, topics, false},
{"Invalid URL", invalidURL, topics, true},
{"No topics", validURL, []string{}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics)
if (err != nil) != tt.wantErr {
t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 2; i++ {
_, err := fmt.Fprintf(w, "event: head\ndata: data%d\n\n", i)
require.NoError(t, err)
flusher.Flush() // Trigger flush to simulate streaming data
time.Sleep(100 * time.Millisecond) // Simulate delay between events
}
})
server := httptest.NewServer(mux)
defer server.Close()
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics)
require.NoError(t, err)
go stream.Subscribe(eventsChannel)
// Collect events
var events []*Event
for len(events) != 2 {
event := <-eventsChannel // a single-case select with no default is just a blocking receive
log.Info(event)
events = append(events, event)
}
// Assertions to verify the events content
expectedData := []string{"data1", "data2"}
for i, event := range events {
if string(event.Data) != expectedData[i] {
t.Errorf("Expected event data %q, got %q", expectedData[i], string(event.Data))
}
}
}


@@ -4,4 +4,6 @@ const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
SystemLogsPrefix = "health/logs"
AuthTokenFileName = "auth-token"
)

api/jwt.go

@@ -0,0 +1,32 @@
package api
import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)
// GenerateRandomHexString generates a random hex string that follows the JWT token standard,
// used for beacon node -> execution client authentication
// and for web client -> validator client authentication.
func GenerateRandomHexString() (string, error) {
secret := make([]byte, 32)
randGen := rand.NewGenerator()
n, err := randGen.Read(secret)
if err != nil {
return "", err
} else if n != 32 {
return "", errors.New("rand: unexpected length")
}
return hexutil.Encode(secret), nil
}
// ValidateAuthToken validates an auth token used by the web UI.
func ValidateAuthToken(token string) error {
b, err := hexutil.Decode(token)
// token should be hex-encoded and at least 256 bits
if err != nil || len(b) < 32 {
return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
}
return nil
}

api/jwt_test.go

@@ -0,0 +1,13 @@
package api
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestGenerateRandomHexString(t *testing.T) {
token, err := GenerateRandomHexString()
require.NoError(t, err)
require.NoError(t, ValidateAuthToken(token))
}


@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}
func TestToogleMultipleTimes(t *testing.T) {
func TestToggleMultipleTimes(t *testing.T) {
t.Parallel()
v := New()
@@ -101,16 +101,16 @@ func TestToogleMultipleTimes(t *testing.T) {
expected := i%2 != 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}
if pre == v.IsSet() {
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}
func TestToogleAfterOverflow(t *testing.T) {
func TestToggleAfterOverflow(t *testing.T) {
t.Parallel()
var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}
// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}


@@ -20,6 +20,7 @@ package event
import (
"errors"
"reflect"
"slices"
"sync"
)
@@ -219,12 +220,9 @@ type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
for i, cas := range cs {
if cas.Chan.Interface() == channel {
return i
}
}
return -1
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
}
// delete removes the given case from cs.

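slices.IndexFunc returns -1 when no element satisfies the predicate, so the refactor keeps find's previous not-found contract intact. For illustration on a plain slice:

idx := slices.IndexFunc([]int{2, 4, 6}, func(v int) bool { return v > 5 })  // 2
none := slices.IndexFunc([]int{2, 4}, func(v int) bool { return v > 5 })    // -1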

@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)


@@ -2,7 +2,7 @@
# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more heremtic build.
# cached actions with a more hermetic build.
env -i \
PATH=/usr/bin:/bin \


@@ -137,7 +137,7 @@ go_test(
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",


@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -399,14 +398,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -518,13 +509,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
@@ -563,3 +547,9 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
func (s *Service) inRegularSync() bool {
return s.cfg.SyncChecker.Synced()
}
// validating returns true if the beacon node is tracking some validators that have
// registered for proposing.
func (s *Service) validating() bool {
return s.cfg.TrackedValidatorsCache.Validating()
}


@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}


@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}


@@ -82,19 +82,20 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"deposits": len(block.Body().Deposits()),
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {


@@ -18,17 +18,63 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err != nil {
return nil
}
if slots.ToEpoch(targetSlot)+1 < headEpoch {
return nil
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil
}
// Check whether the checkpoint state has already been cached.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
defer lock.Unlock()
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
if err != nil {
return nil
}
if cachedState != nil && !cachedState.IsNil() {
return cachedState
}
st, err := s.HeadState(ctx)
if err != nil {
return nil
}
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, c.Root, slot)
if err != nil {
return nil
}
if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
return nil
}
return st
}
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
if st := s.getRecentPreState(ctx, c); st != nil {
return st, nil
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.


@@ -146,6 +146,28 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestService_GetRecentPreState(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()


@@ -6,6 +6,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -558,6 +559,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
@@ -571,11 +586,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
}
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot

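The timer logic added above follows a common schedule-then-cancel shape, shown here in isolation as a sketch (names are illustrative):

// Fire a warning at the slot boundary unless the wait finishes first;
// Stop prevents the callback from running once the surrounding call returns.
timer := time.AfterFunc(time.Until(nextSlot), func() {
	log.Warn("Still waiting for DA check at slot end")
})
defer timer.Stop()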

@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Foprkchoice.
// Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {


@@ -95,7 +95,9 @@ func (s *Service) spawnProcessAttestationsRoutine() {
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {


@@ -97,6 +97,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
@@ -105,6 +106,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
@@ -168,7 +170,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(blockCopy, postState)
go s.sendNewFinalizedEvent(ctx, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -441,7 +443,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called in the background.
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
@@ -449,8 +451,17 @@ func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBl
}
s.headLock.RUnlock()
blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
if err != nil {
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
return
}
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
return
}
stateRoot := blk.Block().StateRoot()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
@@ -486,7 +497,10 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {


@@ -8,12 +8,14 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -378,3 +380,38 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
require.Equal(t, false, pool.ValidatorExists(idx))
})
}
func Test_sendNewFinalizedEvent(t *testing.T) {
s, _ := minimalTestService(t)
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
finalizedSt, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.StateRoot = finalizedStRoot[:]
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbbRoot, err := sbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: sbbRoot[:],
}))
s.sendNewFinalizedEvent(s.ctx, st)
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, sbbRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}


@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -80,7 +80,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositcache.New()
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
fc := doublylinkedtree.New()


@@ -8,7 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/async/event"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -79,7 +79,7 @@ type testServiceRequirements struct {
attPool attestations.Pool
attSrv *attestations.Service
blsPool *blstoexec.Pool
dc *depositcache.DepositCache
dc *depositsnapshot.Cache
}
func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
@@ -94,7 +94,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
require.NoError(t, err)
blsPool := blstoexec.NewPool()
dc, err := depositcache.New()
dc, err := depositsnapshot.New()
require.NoError(t, err)
req := &testServiceRequirements{
ctx: ctx,


@@ -6,6 +6,7 @@ go_library(
"active_balance.go",
"active_balance_disabled.go", # keep
"attestation_data.go",
"balance_cache_key.go",
"checkpoint_state.go",
"committee.go",
"committee_disabled.go", # keep
@@ -70,6 +71,7 @@ go_test(
"committee_fuzz_test.go",
"committee_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
"registration_test.go",
"skip_slot_cache_test.go",
@@ -93,6 +95,7 @@ go_test(
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],


@@ -3,17 +3,13 @@
package cache
import (
"encoding/binary"
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
const (
@@ -86,36 +82,3 @@ func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
balanceCacheHit.Inc()
return value.(uint64), nil
}
// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
if err != nil {
// impossible condition due to early division
return "", errors.Errorf("start slot calculation overflows: %v", err)
}
prevSlot := primitives.Slot(0)
if epochStartSlot > 1 {
prevSlot = epochStartSlot - 1
}
r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
// impossible condition because index is always constrained within state
return "", err
}
// Mix in current epoch
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
key := append(r, b...)
// Mix in validator count
b = make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
key = append(key, b...)
return string(key), nil
}


@@ -1,12 +1,13 @@
//go:build !fuzz
package cache
package cache_test
import (
"encoding/binary"
"math"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -27,33 +28,33 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
st, err := state_native.InitializeFromProtoPhase0(raw)
require.NoError(t, err)
cache := NewEffectiveBalanceCache()
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
cc := cache.NewEffectiveBalanceCache()
_, err = cc.Get(st)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
b := uint64(100)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err := cache.Get(st)
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
cachedB, err := cc.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
require.NoError(t, st.SetSlot(1000))
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
_, err = cc.Get(st)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
b = uint64(200)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err = cache.Get(st)
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
cachedB, err = cc.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
require.NoError(t, st.SetSlot(1000+params.BeaconConfig().SlotsPerHistoricalRoot))
_, err = cache.Get(st)
require.ErrorContains(t, ErrNotFound.Error(), err)
_, err = cc.Get(st)
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
b = uint64(300)
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
cachedB, err = cache.Get(st)
require.NoError(t, cc.AddTotalEffectiveBalance(st, b))
cachedB, err = cc.Get(st)
require.NoError(t, err)
require.Equal(t, b, cachedB)
}
@@ -72,6 +73,6 @@ func TestBalanceCache_BalanceKey(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(primitives.Slot(math.MaxUint64)))
_, err = balanceCacheKey(st)
_, err = cache.BalanceCacheKey(st)
require.NoError(t, err)
}

beacon-chain/cache/balance_cache_key.go

@@ -0,0 +1,43 @@
package cache
import (
"encoding/binary"
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// Given input state `st`, balance key is constructed as:
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
if err != nil {
// impossible condition due to early division
return "", fmt.Errorf("start slot calculation overflows: %w", err)
}
prevSlot := primitives.Slot(0)
if epochStartSlot > 1 {
prevSlot = epochStartSlot - 1
}
r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
// impossible condition because index is always constrained within state
return "", err
}
// Mix in current epoch
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
key := append(r, b...)
// Mix in validator count
b = make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
key = append(key, b...)
return string(key), nil
}

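The resulting key is a fixed 48-byte string: the 32-byte block root followed by two little-endian uint64s. A worked sketch of the layout with hypothetical values:

// key layout: root[32] || current_epoch (LE uint64) || validator_count (LE uint64)
root := bytes.Repeat([]byte{0xaa}, 32)
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, 10) // current epoch
key := append(root, b...)
b = make([]byte, 8)
binary.LittleEndian.PutUint64(b, 500000) // validator count
key = append(key, b...) // len(key) == 48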

@@ -1,8 +1,9 @@
package cache
package cache_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -15,7 +16,7 @@ import (
)
func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
cache := NewCheckpointStateCache()
cache := cache.NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -58,16 +59,16 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
}
func TestCheckpointStateCache_MaxSize(t *testing.T) {
c := NewCheckpointStateCache()
c := cache.NewCheckpointStateCache()
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0,
})
require.NoError(t, err)
for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
for i := uint64(0); i < uint64(cache.MaxCheckpointStateSize()+100); i++ {
require.NoError(t, st.SetSlot(primitives.Slot(i)))
require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: primitives.Epoch(i), Root: make([]byte, 32)}, st))
}
assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
}


@@ -1,50 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"deposits_cache.go",
"log.go",
"pending_deposits.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositcache",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"deposits_cache_test.go",
"pending_deposits_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/cache:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)


@@ -1,327 +0,0 @@
// Package depositcache is the source of validator deposits maintained
// in-memory by the beacon node. Deposits processed from the
// eth1 powchain are then stored in this cache to be accessed by
// any other service during a beacon node's runtime.
package depositcache
import (
"context"
"encoding/hex"
"math/big"
"sort"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/container/trie"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacondb_all_deposits",
Help: "The number of total deposits in the beaconDB in-memory database",
})
)
// FinalizedDeposits stores the trie of deposits that have been included
// in the beacon state up to the latest finalized checkpoint.
type FinalizedDeposits struct {
deposits *trie.SparseMerkleTrie
merkleTrieIndex int64
}
// DepositCache stores all in-memory deposit objects. This
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
// Beacon chain deposits in memory.
pendingDeposits []*ethpb.DepositContainer
deposits []*ethpb.DepositContainer
finalizedDeposits FinalizedDeposits
depositsByKey map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer
depositsLock sync.RWMutex
}
// New instantiates a new deposit cache
func New() (*DepositCache, error) {
finalizedDepositsTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
return nil, err
}
// finalizedDeposits.merkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
// Inserting the first item into the trie will set the value of the index to 0.
return &DepositCache{
pendingDeposits: []*ethpb.DepositContainer{},
deposits: []*ethpb.DepositContainer{},
depositsByKey: map[[fieldparams.BLSPubkeyLength]byte][]*ethpb.DepositContainer{},
finalizedDeposits: FinalizedDeposits{deposits: finalizedDepositsTrie, merkleTrieIndex: -1},
}, nil
}
// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
"index": index,
"depositRoot": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return errors.New("nil deposit inserted into the cache")
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if int(index) != len(dc.deposits) {
return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(dc.deposits), index)
}
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
depCtr := &ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}
newDeposits := append(
[]*ethpb.DepositContainer{depCtr},
dc.deposits[heightIdx:]...)
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
// Append the deposit to our map. In the event no deposits
// exist for the pubkey, it is simply added to the map.
pubkey := bytesutil.ToBytes48(d.Data.PublicKey)
dc.depositsByKey[pubkey] = append(dc.depositsByKey[pubkey], depCtr)
historicalDepositsCount.Inc()
return nil
}
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
dc.deposits = ctrs
for _, c := range ctrs {
// Use a new value, as the reference
// of c changes in the next iteration.
newPtr := c
pKey := bytesutil.ToBytes48(newPtr.Deposit.Data.PublicKey)
dc.depositsByKey[pKey] = append(dc.depositsByKey[pKey], newPtr)
}
historicalDepositsCount.Add(float64(len(ctrs)))
}
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context,
eth1DepositIndex int64, _ common.Hash, _ uint64) error {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
depositTrie := dc.finalizedDeposits.Deposits()
insertIndex := int(dc.finalizedDeposits.merkleTrieIndex + 1)
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return nil
}
// In the event we have fewer deposits than we need to finalize,
// we finalize up to the highest index we do have.
if len(dc.deposits) <= int(eth1DepositIndex) {
eth1DepositIndex = int64(len(dc.deposits)) - 1
}
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return nil
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.merkleTrieIndex {
continue
}
if d.Index > eth1DepositIndex {
break
}
depHash, err := d.Deposit.Data.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash deposit data")
}
if err = depositTrie.Insert(depHash[:], insertIndex); err != nil {
return errors.Wrap(err, "could not insert deposit hash")
}
insertIndex++
}
tree, ok := depositTrie.(*trie.SparseMerkleTrie)
if !ok {
return errors.New("not a sparse merkle tree")
}
dc.finalizedDeposits = FinalizedDeposits{
deposits: tree,
merkleTrieIndex: eth1DepositIndex,
}
return nil
}
// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
// Make a shallow copy of the deposits and return that. This way, the
// caller can safely iterate over the returned list of deposits without
// the possibility of new deposits showing up. If we were to return the
// list without a copy, when a new deposit is added to the cache, it
// would also be present in the returned value. This could result in a
// race condition if the list is being iterated over.
//
// It's not necessary to make a deep copy of this list because the
// deposits in the cache should never be modified. It is still possible
// for the caller to modify one of the underlying deposits and modify
// the cache, but that's not a race condition. Also, a deep copy would
// take too long and use too much memory.
deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
copy(deposits, dc.deposits)
return deposits
}
// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return dc.allDeposits(untilBlk)
}
func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
var deposits []*ethpb.Deposit
for _, ctnr := range dc.deposits {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
deposits = append(deposits, ctnr.Deposit)
}
}
return deposits
}
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
// Send the deposit root of the empty trie if the eth1 follow distance is greater than the time of the earliest
// deposit.
if heightIdx == 0 {
return 0, [32]byte{}
}
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
}
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
var deposit *ethpb.Deposit
var blockNum *big.Int
deps, ok := dc.depositsByKey[bytesutil.ToBytes48(pubKey)]
if !ok || len(deps) == 0 {
return deposit, blockNum
}
// We always return the first deposit if a particular
// validator key has multiple deposits assigned to
// it.
deposit = deps[0].Deposit
blockNum = big.NewInt(int64(deps[0].Eth1BlockHeight))
return deposit, blockNum
}
// FinalizedDeposits returns the finalized deposits trie.
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) (cache.FinalizedDeposits, error) {
_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
return &FinalizedDeposits{
deposits: dc.finalizedDeposits.deposits.Copy(),
merkleTrieIndex: dc.finalizedDeposits.merkleTrieIndex,
}, nil
}
// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
_, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
if dc.finalizedDeposits.Deposits() == nil {
return dc.allDeposits(untilBlk)
}
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}
return deposits
}
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
_, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if untilDepositIndex >= int64(len(dc.deposits)) {
untilDepositIndex = int64(len(dc.deposits) - 1)
}
for i := untilDepositIndex; i >= 0; i-- {
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
if dc.deposits[i].Deposit.Proof == nil {
break
}
dc.deposits[i].Deposit.Proof = nil
}
return nil
}
// Deposits returns the cached internal deposit tree.
func (fd *FinalizedDeposits) Deposits() cache.MerkleTree {
if fd.deposits != nil {
return fd.deposits
}
return nil
}
// MerkleTrieIndex represents the last finalized index in
// the finalized deposit container.
func (fd *FinalizedDeposits) MerkleTrieIndex() int64 {
return fd.merkleTrieIndex
}

File diff suppressed because it is too large


@@ -1,5 +0,0 @@
package depositcache
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "depositcache")


@@ -1,151 +0,0 @@
package depositcache
import (
"context"
"math/big"
"sort"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (
pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "beacondb_pending_deposits",
Help: "The number of pending deposits in the beaconDB in-memory database",
})
)
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}
// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
"block": blockNum,
"deposit": d,
}).Debug("Ignoring nil deposit insertion")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
dc.pendingDeposits = append(dc.pendingDeposits,
&ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
pendingDepositsCount.Inc()
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
}
// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.
func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
depositCntrs := dc.PendingContainers(ctx, untilBlk)
deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
for _, dep := range depositCntrs {
deposits = append(deposits, dep.Deposit)
}
return deposits
}
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
for _, ctnr := range dc.pendingDeposits {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
}
}
// Sort the deposits by Merkle index.
sort.SliceStable(depositCntrs, func(i, j int) bool {
return depositCntrs[i].Index < depositCntrs[j].Index
})
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
return depositCntrs
}
// RemovePendingDeposit from the database. The deposit is indexed by the
// Index. This method does nothing if deposit ptr is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
defer span.End()
if d == nil {
log.Debug("Ignoring nil deposit removal")
return
}
depRoot, err := hash.Proto(d)
if err != nil {
log.WithError(err).Error("Could not remove deposit")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
idx := -1
for i, ctnr := range dc.pendingDeposits {
h, err := hash.Proto(ctnr.Deposit)
if err != nil {
log.WithError(err).Error("Could not hash deposit")
continue
}
if h == depRoot {
idx = i
break
}
}
if idx >= 0 {
dc.pendingDeposits = append(dc.pendingDeposits[:idx], dc.pendingDeposits[idx+1:]...)
pendingDepositsCount.Dec()
}
}
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
defer span.End()
if merkleTreeIndex == 0 {
log.Debug("Ignoring 0 deposit removal")
return
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
for _, dp := range dc.pendingDeposits {
if dp.Index >= merkleTreeIndex {
cleanDeposits = append(cleanDeposits, dp)
}
}
dc.pendingDeposits = cleanDeposits
pendingDepositsCount.Set(float64(len(dc.pendingDeposits)))
}
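
The pruning above rebuilds the pending list, keeping only containers whose Merkle index is at or beyond the cutoff. A minimal standalone sketch of the same filter-and-reslice pattern, with a hypothetical container type standing in for ethpb.DepositContainer:

package main

import "fmt"

type container struct{ Index int64 }

// pruneBefore keeps containers whose Index is >= cutoff, mirroring how
// PrunePendingDeposits rebuilds dc.pendingDeposits.
func pruneBefore(pending []container, cutoff int64) []container {
	kept := make([]container, 0, len(pending))
	for _, c := range pending {
		if c.Index >= cutoff {
			kept = append(kept, c)
		}
	}
	return kept
}

func main() {
	pending := []container{{Index: 2}, {Index: 4}, {Index: 6}}
	fmt.Println(pruneBefore(pending, 4)) // prints [{4} {6}]
}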


@@ -34,6 +34,7 @@ go_test(
name = "go_default_test",
srcs = [
"deposit_cache_test.go",
"deposit_fetcher_test.go",
"deposit_tree_snapshot_test.go",
"merkle_tree_test.go",
"spec_test.go",


@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a non-sensical ordering
// Perform this in a nonsensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
@@ -1189,11 +1189,3 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
require.NoError(b, err)
}
}
func emptyEth1data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
}
}


@@ -262,6 +262,12 @@ func toFinalizedDepositsContainer(deposits *DepositTree, index int64) finalizedD
}
}
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
}
// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.
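
Re-declaring the PendingDepositsFetcher interface alongside the snapshot cache keeps callers decoupled from the concrete Cache type. A hedged sketch of such a consumer, assuming it lives in the same package and reuses its existing context and math/big imports (countPendingUpTo is a hypothetical helper, not part of this change):

// countPendingUpTo needs only the narrow PendingDepositsFetcher behavior,
// so the real cache or a test fake can be passed in interchangeably.
func countPendingUpTo(ctx context.Context, f PendingDepositsFetcher, untilBlk *big.Int) int {
	return len(f.PendingContainers(ctx, untilBlk))
}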


@@ -1,82 +1,32 @@
package depositcache
package depositsnapshot
import (
"context"
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"google.golang.org/protobuf/proto"
)
var _ PendingDepositsFetcher = (*DepositCache)(nil)
var _ PendingDepositsFetcher = (*Cache)(nil)
func TestInsertPendingDeposit_OK(t *testing.T) {
dc := DepositCache{}
dc := Cache{}
dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, 111, 100, [32]byte{})
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted")
}
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc := Cache{}
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
assert.Equal(t, 0, len(dc.pendingDeposits))
}
func TestRemovePendingDeposit_OK(t *testing.T) {
db := DepositCache{}
proof1 := makeDepositProof()
proof1[0] = bytesutil.PadTo([]byte{'A'}, 32)
proof2 := makeDepositProof()
proof2[0] = bytesutil.PadTo([]byte{'A'}, 32)
data := &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 0,
Signature: make([]byte, 96),
}
depToRemove := &ethpb.Deposit{Proof: proof1, Data: data}
otherDep := &ethpb.Deposit{Proof: proof2, Data: data}
db.pendingDeposits = []*ethpb.DepositContainer{
{Deposit: depToRemove, Index: 1},
{Deposit: otherDep, Index: 5},
}
db.RemovePendingDeposit(context.Background(), depToRemove)
if len(db.pendingDeposits) != 1 || !proto.Equal(db.pendingDeposits[0].Deposit, otherDep) {
t.Error("Failed to remove deposit")
}
}
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
dc := DepositCache{}
dc.pendingDeposits = []*ethpb.DepositContainer{{Deposit: &ethpb.Deposit{}}}
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
assert.Equal(t, 1, len(dc.pendingDeposits), "deposit unexpectedly removed")
}
func TestPendingDeposit_RoundTrip(t *testing.T) {
dc := DepositCache{}
proof := makeDepositProof()
proof[0] = bytesutil.PadTo([]byte{'A'}, 32)
data := &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 0,
Signature: make([]byte, 96),
}
dep := &ethpb.Deposit{Proof: proof, Data: data}
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
dc.RemovePendingDeposit(context.Background(), dep)
assert.Equal(t, 0, len(dc.pendingDeposits), "Failed to insert & delete a pending deposit")
}
func TestPendingDeposits_OK(t *testing.T) {
dc := DepositCache{}
dc := Cache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
@@ -96,7 +46,7 @@ func TestPendingDeposits_OK(t *testing.T) {
}
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
dc := DepositCache{}
dc := Cache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},
@@ -120,7 +70,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
}
func TestPrunePendingDeposits_OK(t *testing.T) {
dc := DepositCache{}
dc := Cache{}
dc.pendingDeposits = []*ethpb.DepositContainer{
{Eth1BlockHeight: 2, Index: 2},


@@ -15,8 +15,6 @@ import (
)
var (
// ErrEmptyExecutionBlock occurs when the execution block is nil.
ErrEmptyExecutionBlock = errors.New("empty execution block")
// ErrInvalidSnapshotRoot occurs when the snapshot root does not match the calculated root.
ErrInvalidSnapshotRoot = errors.New("snapshot root is invalid")
// ErrInvalidDepositCount occurs when the value for mix in length is 0.
@@ -101,11 +99,23 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
if d.depositCount <= 0 {
return [32]byte{}, nil, ErrInvalidDepositCount
}
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
if finalizedDeposits != 0 {
finalizedDeposits = finalizedDeposits - 1
if index >= d.depositCount {
return [32]byte{}, nil, ErrInvalidIndex
}
if index <= finalizedDeposits {
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
finalizedIdx := -1
if finalizedDeposits != 0 {
fd, err := math.Int(finalizedDeposits)
if err != nil {
return [32]byte{}, nil, err
}
finalizedIdx = fd - 1
}
i, err := math.Int(index)
if err != nil {
return [32]byte{}, nil, err
}
if finalizedDeposits > 0 && i <= finalizedIdx {
return [32]byte{}, nil, ErrInvalidIndex
}
leaf, proof := generateProof(d.tree, index, DepositContractDepth)
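
The rewritten guard avoids the old finalizedDeposits - 1 underflow when nothing has been finalized yet: the count is converted through prysm's checked math.Int, and a -1 sentinel keeps index 0 provable. A standalone sketch of the same pattern, with a hypothetical toInt standing in for math.Int:

package main

import (
	"errors"
	"fmt"
	"math"
)

var errOverflow = errors.New("uint64 value overflows int")

// toInt is a stand-in for a checked uint64 -> int conversion.
func toInt(v uint64) (int, error) {
	if v > math.MaxInt64 {
		return 0, errOverflow
	}
	return int(v), nil
}

// provable reports whether a leaf index sits above the finalized region.
func provable(index, finalizedCount uint64) (bool, error) {
	finalizedIdx := -1 // sentinel: nothing finalized yet
	if finalizedCount != 0 {
		fc, err := toInt(finalizedCount)
		if err != nil {
			return false, err
		}
		finalizedIdx = fc - 1
	}
	i, err := toInt(index)
	if err != nil {
		return false, err
	}
	return !(finalizedCount > 0 && i <= finalizedIdx), nil
}

func main() {
	ok, _ := provable(0, 0) // with no finalized deposits, index 0 is provable
	fmt.Println(ok)         // prints true
}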


@@ -0,0 +1,18 @@
package cache
import (
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)
func BalanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
return balanceCacheKey(st)
}
func MaxCheckpointStateSize() int {
return maxCheckpointStateSize
}
func (c *CheckpointStateCache) Cache() *lru.Cache {
return c.cache
}
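
This file follows Go's export_test.go convention: it compiles only with the package's own tests, so it can hand unexported identifiers (balanceCacheKey, maxCheckpointStateSize, the lru cache) to black-box cache_test code without widening the real API. A minimal sketch of the convention with hypothetical names (two files shown in one block):

// file: counter/counter.go
package counter

var hits int

func bump() { hits++ }

// file: counter/export_test.go
// Compiled only during `go test`, so tests in counter_test can reach bump.
package counter

func Bump() { bump() }

func Hits() int { return hits }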


@@ -1,15 +1,9 @@
package cache
import (
"errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// ErrNotProposerIndices will be returned when a cache object is not a pointer to
// a ProposerIndices struct.
var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")
// ProposerIndices defines the cached struct for proposer indices.
type ProposerIndices struct {
BlockRoot [32]byte


@@ -109,10 +109,6 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// MarkInProgress a request so that any other similar requests will block on
// Get until MarkNotInProgress is called.
func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
if c.disabled {
return nil
}
c.lock.Lock()
defer c.lock.Unlock()
@@ -126,10 +122,6 @@ func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
// MarkNotInProgress will release the lock on a given request. This should be
// called after put.
func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
if c.disabled {
return
}
c.lock.Lock()
defer c.lock.Unlock()


@@ -2,6 +2,7 @@ package cache_test
import (
"context"
"sync"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -35,3 +36,28 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
require.NoError(t, err)
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
}
func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
ctx := context.Background()
c := cache.NewSkipSlotCache()
r := [32]byte{'a'}
c.Disable()
require.NoError(t, c.MarkInProgress(r))
c.Enable()
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
// Get call will only terminate when
// it is no longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
wg.Done()
}()
c.MarkNotInProgress(r)
wg.Wait()
}


@@ -1,8 +1,9 @@
package cache
package cache_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -125,7 +126,7 @@ func TestSyncCommitteeHeadState(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := NewSyncCommitteeHeadState()
c := cache.NewSyncCommitteeHeadState()
if tt.put != nil {
err := c.Put(tt.put.slot, tt.put.state)
if (err != nil) != tt.wantPutErr {


@@ -41,3 +41,9 @@ func (t *TrackedValidatorsCache) Prune() {
defer t.Unlock()
t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
}
func (t *TrackedValidatorsCache) Validating() bool {
t.Lock()
defer t.Unlock()
return len(t.trackedValidators) > 0
}
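
Validating gives callers a cheap way to ask whether any validators are tracked at all, taking the cache's own lock. A minimal sketch of the same embedded-mutex pattern, with a hypothetical miniCache standing in for TrackedValidatorsCache:

package main

import (
	"fmt"
	"sync"
)

// miniCache mirrors the locking style above: the mutex is embedded,
// so Lock/Unlock guard every access to the map.
type miniCache struct {
	sync.Mutex
	tracked map[uint64]struct{}
}

func (c *miniCache) Validating() bool {
	c.Lock()
	defer c.Unlock()
	return len(c.tracked) > 0
}

func main() {
	c := &miniCache{tracked: map[uint64]struct{}{7: {}}}
	fmt.Println(c.Validating()) // prints true
}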


@@ -14,6 +14,7 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
"//tools:__subpackages__",
],
deps = [
"//beacon-chain/core/helpers:go_default_library",


@@ -50,6 +50,8 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",


@@ -12,7 +12,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)
var (
@@ -133,9 +132,6 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
//
// In the example above, the attestation must be within the range of 95 to 102.
func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
return err
}
attTime, err := slots.ToTime(uint64(genesisTime.Unix()), attSlot)
if err != nil {
return err
@@ -182,24 +178,15 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
}
// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
currentEpoch := slots.ToEpoch(currentSlot)
prevEpoch, err := currentEpoch.SafeSub(1)
if err != nil {
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
prevEpoch = 0
}
attSlotEpoch := slots.ToEpoch(attSlot)
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
if attEpoch+1 < currentEpoch {
attError = fmt.Errorf(
"attestation epoch %d not within current epoch %d or previous epoch %d",
attSlot/params.BeaconConfig().SlotsPerEpoch,
"attestation epoch %d not within current epoch %d or previous epoch",
attEpoch,
currentEpoch,
prevEpoch,
)
return errors.Join(ErrTooLate, attError)
}
return nil
}
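
Under EIP-7045 the only staleness condition left is that the attestation epoch precedes the previous epoch, which the new code expresses as attEpoch+1 < currentEpoch. A tiny sketch of that acceptance rule (future-dated attestations are assumed to be rejected earlier by the clock checks above):

package main

import "fmt"

type epoch uint64

// timelyForDeneb reports whether an attestation from attEpoch is acceptable
// at currentEpoch, i.e. it is from the current or previous epoch; anything
// later is assumed screened out by the separate clock validation.
func timelyForDeneb(attEpoch, currentEpoch epoch) bool {
	return attEpoch+1 >= currentEpoch
}

func main() {
	fmt.Println(timelyForDeneb(14, 15)) // true: previous epoch
	fmt.Println(timelyForDeneb(15, 15)) // true: current epoch
	fmt.Println(timelyForDeneb(13, 15)) // false: too late
}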


@@ -197,7 +197,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch 14",
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch",
},
{
name: "attestation.slot is well beyond current slot",
@@ -205,7 +205,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
attSlot: 1 << 32,
genesisTime: prysmTime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantedErr: "which exceeds max allowed value relative to the local clock",
wantedErr: "attestation slot 4294967296 not within attestation propagation range of 0 to 15 (current slot)",
},
}
for _, tt := range tests {


@@ -140,108 +140,140 @@ func BeaconCommittee(
}
count := committeesPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
return computeCommittee(validatorIndices, seed, indexOffset, count)
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
}
// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
type CommitteeAssignmentContainer struct {
// CommitteeAssignment represents committee list, committee index, and to be attested slot for a given epoch.
type CommitteeAssignment struct {
Committee []primitives.ValidatorIndex
AttesterSlot primitives.Slot
CommitteeIndex primitives.CommitteeIndex
}
// CommitteeAssignments is a map of validator indices pointing to the appropriate committee
// assignment for the given epoch.
//
// 1. Determine the proposer validator index for each slot.
// 2. Compute all committees.
// 3. Determine the attesting slot for each committee.
// 4. Construct a map of validator indices pointing to the respective committees.
func CommitteeAssignments(
ctx context.Context,
state state.BeaconState,
epoch primitives.Epoch,
) (map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, map[primitives.ValidatorIndex][]primitives.Slot, error) {
// verifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
// It checks if the epoch is not greater than the next epoch, and if the start slot of the epoch is greater
// than or equal to the minimum valid start slot calculated based on the state's current slot and historical roots.
func verifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) error {
nextEpoch := time.NextEpoch(state)
if epoch > nextEpoch {
return nil, nil, fmt.Errorf(
"epoch %d can't be greater than next epoch %d",
epoch,
nextEpoch,
)
return fmt.Errorf("epoch %d can't be greater than next epoch %d", epoch, nextEpoch)
}
// We determine the slots in which proposers are supposed to act.
// Some validators may need to propose multiple times per epoch, so
// we use a map of proposer idx -> []slot to keep track of this possibility.
startSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, nil, err
return err
}
minValidStartSlot := primitives.Slot(0)
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
if stateSlot := state.Slot(); stateSlot >= params.BeaconConfig().SlotsPerHistoricalRoot {
minValidStartSlot = stateSlot - params.BeaconConfig().SlotsPerHistoricalRoot
}
if startSlot < minValidStartSlot {
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
return fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
}
return nil
}
// ProposerAssignments calculates proposer assignments for each validator during the specified epoch.
// It verifies the validity of the epoch, then iterates through each slot in the epoch to determine the
// proposer for that slot and assigns them accordingly.
func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch) (map[primitives.ValidatorIndex][]primitives.Slot, error) {
// Verify if the epoch is valid for assignment based on the provided state.
if err := verifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
startSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, err
}
proposerIndexToSlots := make(map[primitives.ValidatorIndex][]primitives.Slot, params.BeaconConfig().SlotsPerEpoch)
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
originalStateSlot := state.Slot()
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
}
// Set the state's current slot.
if err := state.SetSlot(slot); err != nil {
return nil, nil, err
return nil, err
}
// Determine the proposer index for the current slot.
i, err := BeaconProposerIndex(ctx, state)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
}
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
// Append the slot to the proposer's assignments.
if _, ok := proposerAssignments[i]; !ok {
proposerAssignments[i] = make([]primitives.Slot, 0)
}
proposerAssignments[i] = append(proposerAssignments[i], slot)
}
// If the previous proposer indices computation is outside of the current proposal epoch range,
// we need to reset state slot back to start slot so that we can compute the correct committees.
currentProposalEpoch := epoch < nextEpoch
if !currentProposalEpoch {
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
return nil, nil, err
}
// Reset state back to its original slot.
if err := state.SetSlot(originalStateSlot); err != nil {
return nil, err
}
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
return proposerAssignments, nil
}
// CommitteeAssignments calculates committee assignments for each validator during the specified epoch.
// It retrieves active validator indices, determines the number of committees per slot, and computes
// assignments for each validator based on their presence in the provided validators slice.
func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch, validators []primitives.ValidatorIndex) (map[primitives.ValidatorIndex]*CommitteeAssignment, error) {
// Verify if the epoch is valid for assignment based on the provided state.
if err := verifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
// Retrieve active validator count for the specified epoch.
activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
if err != nil {
return nil, nil, err
return nil, err
}
// Each slot in an epoch has a different set of committees. This value is derived from the
// active validator set, which does not change.
numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
validatorIndexToCommittee := make(map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, len(activeValidatorIndices))
// Compute all committees for all slots.
for i := primitives.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
// Compute committees.
// Determine the number of committees per slot based on the number of active validator indices.
numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)
startSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, err
}
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
vals := make(map[primitives.ValidatorIndex]struct{})
for _, v := range validators {
vals[v] = struct{}{}
}
// Compute committee assignments for each slot in the epoch.
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Compute committees for the current slot.
for j := uint64(0); j < numCommitteesPerSlot; j++ {
slot := startSlot + i
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j) /*committee index*/)
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
if err != nil {
return nil, nil, err
return nil, err
}
cac := &CommitteeAssignmentContainer{
Committee: committee,
CommitteeIndex: primitives.CommitteeIndex(j),
AttesterSlot: slot,
}
for _, vIndex := range committee {
validatorIndexToCommittee[vIndex] = cac
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
continue
}
if _, ok := assignments[vIndex]; !ok {
assignments[vIndex] = &CommitteeAssignment{}
}
assignments[vIndex].Committee = committee
assignments[vIndex].AttesterSlot = slot
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
}
}
}
return validatorIndexToCommittee, proposerIndexToSlots, nil
return assignments, nil
}
// VerifyBitfieldLength verifies that a bitfield length matches the given committee size.
@@ -359,7 +391,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
if err != nil {
return err
}
@@ -409,7 +441,7 @@ func ClearCache() {
balanceCache.Clear()
}
// computeCommittee returns the requested shuffled committee out of the total committees using
// ComputeCommittee returns the requested shuffled committee out of the total committees using
// validator indices and seed.
//
// Spec pseudocode definition:
@@ -424,7 +456,7 @@ func ClearCache() {
// start = (len(indices) * index) // count
// end = (len(indices) * uint64(index + 1)) // count
// return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
func computeCommittee(
func ComputeCommittee(
indices []primitives.ValidatorIndex,
seed [32]byte,
index, count uint64,
@@ -451,9 +483,9 @@ func computeCommittee(
return shuffledList[start:end], nil
}
// This computes proposer indices of the current epoch and returns a list of proposer indices,
// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
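
With the split, callers that previously unpacked two maps from one CommitteeAssignments call now make one call per concern, and committee assignments are computed only for the validators they pass in. A hedged call-site sketch (assignmentsFor is a hypothetical wrapper; the imports match the ones used elsewhere in this diff):

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

func assignmentsFor(ctx context.Context, st state.BeaconState, ep primitives.Epoch, vals []primitives.ValidatorIndex) error {
	proposing, err := helpers.ProposerAssignments(ctx, st, ep)
	if err != nil {
		return err
	}
	attesting, err := helpers.CommitteeAssignments(ctx, st, ep, vals)
	if err != nil {
		return err
	}
	for _, idx := range vals {
		if a, ok := attesting[idx]; ok {
			_ = a.AttesterSlot // the slot and committee this validator attests in
		}
		_ = proposing[idx] // the slots this validator proposes in, if any
	}
	return nil
}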


@@ -1,4 +1,4 @@
package helpers
package helpers_test
import (
"context"
@@ -7,6 +7,7 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -21,7 +22,7 @@ import (
)
func TestComputeCommittee_WithoutCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Create 10 committees
committeeCount := uint64(10)
@@ -48,16 +49,16 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
require.NoError(t, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(t, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
committees, err := computeCommittee(indices, seed, 0, 1 /* Total committee*/)
committees, err := helpers.ComputeCommittee(indices, seed, 0, 1 /* Total committee*/)
assert.NoError(t, err, "Could not compute committee")
// Test shuffled indices are correct for index 5 committee
index := uint64(5)
committee5, err := computeCommittee(indices, seed, index, committeeCount)
committee5, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start := slice.SplitOffset(validatorCount, committeeCount, index)
end := slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -65,7 +66,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
// Test shuffled indices are correct for index 9 committee
index = uint64(9)
committee9, err := computeCommittee(indices, seed, index, committeeCount)
committee9, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start = slice.SplitOffset(validatorCount, committeeCount, index)
end = slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -73,42 +74,45 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}
func TestComputeCommittee_RegressionTest(t *testing.T) {
ClearCache()
helpers.ClearCache()
indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
index := uint64(215)
count := uint64(32)
_, err := computeCommittee(indices, seed, index, count)
_, err := helpers.ComputeCommittee(indices, seed, index, count)
require.ErrorContains(t, "index out of range", err)
}
func TestVerifyBitfieldLength_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
bf := bitfield.Bitlist{0xFF, 0x01}
committeeSize := uint64(8)
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
bf = bitfield.Bitlist{0xFF, 0x07}
committeeSize = 10
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
}
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
helpers.ClearCache()
epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, epoch+1)
_, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1, nil)
assert.ErrorContains(t, "can't be greater than next epoch", err)
_, err = helpers.ProposerAssignments(context.Background(), state, epoch+1)
assert.ErrorContains(t, "can't be greater than next epoch", err)
}
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -127,10 +131,10 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
for _, ss := range proposerIndexToSlots {
for _, s := range ss {
assignments, err := helpers.ProposerAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine Assignments")
for _, slots := range assignments {
for _, s := range slots {
assert.NotEqual(t, uint64(0), s, "No proposer should be assigned to slot 0")
}
}
@@ -139,6 +143,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
for i := 0; i < len(validators); i++ {
// First 2 epochs only half validators are activated.
var activationEpoch primitives.Epoch
@@ -149,6 +154,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
ActivationEpoch: activationEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
validatorIndices[i] = primitives.ValidatorIndex(i)
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -198,16 +204,18 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ClearCache()
helpers.ClearCache()
validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err, "Failed to determine CommitteeAssignments")
cac := validatorIndexToCommittee[tt.index]
assignments, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot), validatorIndices)
require.NoError(t, err, "Failed to determine Assignments")
cac := assignments[tt.index]
assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
assert.Equal(t, tt.slot, cac.AttesterSlot, "Unexpected slot for validator index %d", tt.index)
if len(proposerIndexToSlots[tt.index]) > 0 && proposerIndexToSlots[tt.index][0] != tt.proposerSlot {
proposerAssignments, err := helpers.ProposerAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err)
if len(proposerAssignments[tt.index]) > 0 && proposerAssignments[tt.index][0] != tt.proposerSlot {
t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d",
tt.proposerSlot, proposerIndexToSlots[tt.index][0], tt.index)
tt.proposerSlot, proposerAssignments[tt.index][0], tt.index)
}
assert.DeepEqual(t, tt.committee, cac.Committee, "Unexpected committee for validator index %d", tt.index)
})
@@ -215,7 +223,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -237,17 +245,17 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndxs, err := CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
assignments, err := helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state))
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
assignments, err = helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -263,12 +271,12 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, 0)
_, err = helpers.CommitteeAssignments(context.Background(), state, 0, nil)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -285,12 +293,12 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
})
require.NoError(t, err)
epoch := primitives.Epoch(1)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
assignments, err := helpers.ProposerAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine Assignments")
slotsWithProposers := make(map[primitives.Slot]bool)
for _, proposerSlots := range proposerIndexToSlots {
for _, slot := range proposerSlots {
for _, slots := range assignments {
for _, slot := range slots {
slotsWithProposers[slot] = true
}
}
@@ -391,10 +399,10 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}
for i, tt := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
if tt.verificationFailure {
assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
} else {
@@ -404,7 +412,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
helpers.ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -421,20 +429,20 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
epoch := primitives.Epoch(0)
idx := primitives.CommitteeIndex(1)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
indices, err = committeeCache.Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
indices, err = helpers.CommitteeCache().Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
helpers.ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -452,19 +460,19 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e))
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(seed[:])))
nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
nextSeed, err := helpers.Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.Equal(t, false, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e+1))
require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
@@ -481,20 +489,20 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
index := uint64(3)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -515,20 +523,20 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
index := uint64(3)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -549,9 +557,9 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -559,7 +567,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -584,9 +592,9 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -594,7 +602,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -619,9 +627,9 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -629,7 +637,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -655,13 +663,13 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, err = BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
_, err = helpers.BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
require.NoError(t, err)
// Verify previous epoch is cached
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
activeIndices, err := committeeCache.ActiveIndices(context.Background(), seed)
activeIndices, err := helpers.CommitteeCache().ActiveIndices(context.Background(), seed)
require.NoError(t, err)
assert.NotNil(t, activeIndices, "Did not cache active indices")
}
@@ -680,19 +688,19 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
})
require.NoError(t, err)
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
proposerIndices, err := helpers.PrecomputeProposerIndices(state, indices, time.CurrentEpoch(state))
require.NoError(t, err)
var wantedProposerIndices []primitives.ValidatorIndex
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hash.Hash(seedWithSlot)
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
require.NoError(t, err)
wantedProposerIndices = append(wantedProposerIndices, index)
}


@@ -0,0 +1,17 @@
//go:build fuzz
package helpers
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
func CommitteeCache() *cache.FakeCommitteeCache {
return committeeCache
}
func SyncCommitteeCache() *cache.FakeSyncCommitteeCache {
return syncCommitteeCache
}
func ProposerIndicesCache() *cache.FakeProposerIndicesCache {
return proposerIndicesCache
}


@@ -0,0 +1,17 @@
//go:build !fuzz
package helpers
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
func CommitteeCache() *cache.CommitteeCache {
return committeeCache
}
func SyncCommitteeCache() *cache.SyncCommitteeCache {
return syncCommitteeCache
}
func ProposerIndicesCache() *cache.ProposerIndicesCache {
return proposerIndicesCache
}
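
These two files pair build tags so the test-only accessors keep identical names while returning the Fake* caches under -tags fuzz and the real ones otherwise. A minimal sketch of the pattern with hypothetical names (two files shown in one block):

// file: store_access_fuzz.go
//go:build fuzz

package counter

// Store returns the fake backend when built with -tags fuzz.
func Store() *FakeStore { return store }

// file: store_access.go
//go:build !fuzz

package counter

// Store returns the real backend in normal builds.
func Store() *RealStore { return store }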


@@ -1,9 +1,10 @@
package helpers
package helpers_test
import (
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -40,10 +41,10 @@ func TestRandaoMix_OK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
mix, err := helpers.RandaoMix(state, test.epoch)
require.NoError(t, err)
assert.DeepEqual(t, test.randaoMix, mix, "Incorrect randao mix")
}
@@ -76,10 +77,10 @@ func TestRandaoMix_CopyOK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
mix, err := helpers.RandaoMix(state, test.epoch)
require.NoError(t, err)
uniqueNumber := uint64(params.BeaconConfig().EpochsPerHistoricalVector.Add(1000))
binary.LittleEndian.PutUint64(mix, uniqueNumber)
@@ -92,7 +93,7 @@ func TestRandaoMix_CopyOK(t *testing.T) {
}
func TestGenerateSeed_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(randaoMixes); i++ {
@@ -107,7 +108,7 @@ func TestGenerateSeed_OK(t *testing.T) {
})
require.NoError(t, err)
got, err := Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
got, err := helpers.Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
wanted := [32]byte{102, 82, 23, 40, 226, 79, 171, 11, 203, 23, 175, 7, 88, 202, 80,


@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// Math safe up to ~10B ETH, after which this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {
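
The "math safe up to ~10B ETH" note in the spec comment checks out: 10 billion ETH is 1e19 Gwei, which sits below the uint64 ceiling of roughly 1.84e19. A quick arithmetic check:

package main

import (
	"fmt"
	"math"
)

func main() {
	const gweiPerEth uint64 = 1_000_000_000
	tenBillionEthGwei := 10_000_000_000 * gweiPerEth
	fmt.Println(tenBillionEthGwei)                          // 10000000000000000000
	fmt.Println(uint64(math.MaxUint64) - tenBillionEthGwei) // headroom: ~8.4e18
}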


@@ -1,9 +1,10 @@
package helpers
package helpers_test
import (
"math"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -14,7 +15,7 @@ import (
)
func TestTotalBalance_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
@@ -22,19 +23,19 @@ func TestTotalBalance_OK(t *testing.T) {
}})
require.NoError(t, err)
balance := TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
state.Validators()[2].EffectiveBalance + state.Validators()[3].EffectiveBalance
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
require.NoError(t, err)
balance := TotalBalance(state, []primitives.ValidatorIndex{})
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{})
wanted := params.BeaconConfig().EffectiveBalanceIncrement
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}
@@ -51,7 +52,7 @@ func TestGetBalance_OK(t *testing.T) {
{i: 2, b: []uint64{0, 0, 0}},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: test.b})
require.NoError(t, err)
@@ -68,7 +69,7 @@ func TestTotalActiveBalance(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -76,7 +77,7 @@ func TestTotalActiveBalance(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := TotalActiveBalance(state)
bal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, uint64(test.vCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
}
@@ -91,7 +92,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -99,7 +100,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := TotalActiveBalance(state)
bal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
}
@@ -115,7 +116,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
{vCount: 10000, wantCount: 10000},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -123,7 +124,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := TotalActiveBalance(state)
bal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, uint64(test.wantCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
}
@@ -141,7 +142,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -149,7 +150,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.NoError(t, IncreaseBalance(state, test.i, test.nb))
require.NoError(t, helpers.IncreaseBalance(state, test.i, test.nb))
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
}
}
@@ -167,7 +168,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -175,13 +176,13 @@ func TestDecreaseBalance_OK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.NoError(t, DecreaseBalance(state, test.i, test.nb))
require.NoError(t, helpers.DecreaseBalance(state, test.i, test.nb))
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
}
}
func TestFinalityDelay(t *testing.T) {
ClearCache()
helpers.ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
@@ -195,25 +196,25 @@ func TestFinalityDelay(t *testing.T) {
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
d := FinalityDelay(prevEpoch, finalizedEpoch)
d := helpers.FinalityDelay(prevEpoch, finalizedEpoch)
w := time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
d = FinalityDelay(prevEpoch, finalizedEpoch)
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
d = FinalityDelay(prevEpoch, finalizedEpoch)
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
}
func TestIsInInactivityLeak(t *testing.T) {
ClearCache()
helpers.ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
@@ -227,13 +228,13 @@ func TestIsInInactivityLeak(t *testing.T) {
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
assert.Equal(t, false, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
}
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
@@ -285,7 +286,7 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -293,6 +294,6 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.ErrorContains(t, "addition overflows", IncreaseBalance(state, test.i, test.nb))
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
}
}


@@ -26,7 +26,7 @@ var (
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
@@ -63,7 +63,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
func IsNextPeriodSyncCommittee(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
@@ -90,7 +90,7 @@ func IsNextPeriodSyncCommittee(
func CurrentPeriodSyncSubcommitteeIndices(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
@@ -124,7 +124,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
func NextPeriodSyncSubcommitteeIndices(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
@@ -182,10 +182,10 @@ func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []primitives.Commi
return indices
}
// Retrieve the current sync period boundary root by calculating sync period start epoch
// SyncPeriodBoundaryRoot computes the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
func SyncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
// Can't call `BlockRoot` until the first slot.
if st.Slot() == params.BeaconConfig().GenesisSlot {
return params.BeaconConfig().ZeroHash, nil
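
As the updated doc comment states, the newly exported SyncPeriodBoundaryRoot reads the block root at the slot immediately before the current sync period's start, with a genesis special case. A self-contained sketch of that slot arithmetic, with illustrative constants standing in for the real config values:

package main

import "fmt"

const (
	slotsPerEpoch                = 32  // illustrative, not read from config
	epochsPerSyncCommitteePeriod = 256 // illustrative, not read from config
)

// syncPeriodBoundarySlot returns the slot whose block root anchors the
// sync period containing `slot`: the period's start slot minus one.
func syncPeriodBoundarySlot(slot uint64) uint64 {
	periodLen := uint64(slotsPerEpoch * epochsPerSyncCommitteePeriod)
	startSlot := (slot / periodLen) * periodLen
	if startSlot == 0 {
		return 0 // genesis: no earlier slot exists, callers use the zero hash
	}
	return startSlot - 1
}

func main() {
	fmt.Println(syncPeriodBoundarySlot(0))     // 0 (genesis case)
	fmt.Println(syncPeriodBoundarySlot(8192))  // 8191: SlotsPerEpoch*EpochsPerSyncCommitteePeriod - 1
	fmt.Println(syncPeriodBoundarySlot(16500)) // 16383: one slot before its period's start
}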

View File

@@ -1,4 +1,4 @@
package helpers
package helpers_test
import (
"math/rand"
@@ -7,6 +7,7 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -17,7 +18,7 @@ import (
)
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -40,15 +41,15 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -70,13 +71,13 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -98,13 +99,13 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
require.ErrorContains(t, "validator index 12390192 does not exist", err)
require.Equal(t, false, ok)
}
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -127,15 +128,15 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
ok, err := IsNextPeriodSyncCommittee(state, 0)
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -157,13 +158,13 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 0)
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -185,13 +186,13 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
require.ErrorContains(t, "validator index 120391029 does not exist", err)
require.Equal(t, false, ok)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -214,15 +215,15 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -243,27 +244,27 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
root, err := syncPeriodBoundaryRoot(state)
root, err := helpers.SyncPeriodBoundaryRoot(state)
require.NoError(t, err)
// Test that cache was empty.
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
_, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
// Test that helper can retrieve the index given empty cache.
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
// Test that cache was able to fill on miss.
time.Sleep(100 * time.Millisecond)
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
index, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -285,13 +286,13 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
require.ErrorContains(t, "validator index 129301923 does not exist", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -314,15 +315,15 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -344,13 +345,13 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -372,43 +373,43 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
require.ErrorContains(t, "validator index 21093019 does not exist", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 1,
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
err = helpers.UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at the end of the epoch to update cache", err)
state, err = state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch - 1,
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
err = helpers.UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
}
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
err = helpers.UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
}
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -435,7 +436,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
comIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)
wantedSlot := params.BeaconConfig().EpochsPerSyncCommitteePeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
@@ -446,7 +447,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
syncCommittee.Pubkeys[i], syncCommittee.Pubkeys[j] = syncCommittee.Pubkeys[j], syncCommittee.Pubkeys[i]
})
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
newIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
newIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)
require.DeepNotEqual(t, comIdxs, newIdxs)
}

View File

@@ -1,4 +1,4 @@
package helpers
package helpers_test
import (
"context"
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
@@ -32,7 +33,7 @@ func TestIsActiveValidator_OK(t *testing.T) {
}
for _, test := range tests {
validator := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
assert.Equal(t, test.b, IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
assert.Equal(t, test.b, helpers.IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
}
}
@@ -53,7 +54,7 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
for _, test := range tests {
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
assert.Equal(t, test.b, IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
assert.Equal(t, test.b, helpers.IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
}
}
@@ -81,7 +82,7 @@ func TestIsActiveNonSlashedValidatorUsingTrie_OK(t *testing.T) {
require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
assert.Equal(t, test.b, IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
assert.Equal(t, test.b, helpers.IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
}
}
@@ -161,7 +162,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Run("without trie", func(t *testing.T) {
slashableValidator := IsSlashableValidator(test.validator.ActivationEpoch,
slashableValidator := helpers.IsSlashableValidator(test.validator.ActivationEpoch,
test.validator.WithdrawableEpoch, test.validator.Slashed, test.epoch)
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
})
@@ -170,7 +171,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
slashableValidator := IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
slashableValidator := helpers.IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
})
})
@@ -223,17 +224,17 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
}
for _, tt := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(tt.slot))
result, err := BeaconProposerIndex(context.Background(), state)
result, err := helpers.BeaconProposerIndex(context.Background(), state)
require.NoError(t, err, "Failed to get shard and committees at slot")
assert.Equal(t, tt.index, result, "Result index was an unexpected value")
}
}
func TestBeaconProposerIndex_BadState(t *testing.T) {
ClearCache()
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
@@ -261,12 +262,12 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
// Set a very high slot, so that retrieved block root will be
// nonexistent for the proposer cache.
require.NoError(t, state.SetSlot(100))
_, err = BeaconProposerIndex(context.Background(), state)
_, err = helpers.BeaconProposerIndex(context.Background(), state)
require.NoError(t, err)
}
func TestComputeProposerIndex_Compatibility(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
@@ -281,22 +282,22 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
})
require.NoError(t, err)
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
var proposerIndices []primitives.ValidatorIndex
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hash.Hash(seedWithSlot)
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
require.NoError(t, err)
proposerIndices = append(proposerIndices, index)
}
var wantedProposerIndices []primitives.ValidatorIndex
seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err = helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
@@ -309,15 +310,15 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
}
func TestDelayedActivationExitEpoch_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
epoch := primitives.Epoch(9999)
wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
assert.Equal(t, wanted, ActivationExitEpoch(epoch))
assert.Equal(t, wanted, helpers.ActivationExitEpoch(epoch))
}
func TestActiveValidatorCount_Genesis(t *testing.T) {
ClearCache()
helpers.ClearCache()
c := 1000
validators := make([]*ethpb.Validator, c)
@@ -334,10 +335,10 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
require.NoError(t, err)
// Preset cache to a bad count.
seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
}
@@ -353,7 +354,7 @@ func TestChurnLimit_OK(t *testing.T) {
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, test.validatorCount)
for i := 0; i < len(validators); i++ {
@@ -368,9 +369,9 @@ func TestChurnLimit_OK(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
resultChurn := ValidatorActivationChurnLimit(validatorCount)
resultChurn := helpers.ValidatorActivationChurnLimit(validatorCount)
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
}
}
@@ -386,7 +387,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
// Create validators
validators := make([]*ethpb.Validator, test.validatorCount)
@@ -405,11 +406,11 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
require.NoError(t, err)
// Get active validator count
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
// Test churn limit calculation
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
resultChurn := helpers.ValidatorActivationChurnLimitDeneb(validatorCount)
assert.Equal(t, test.wantedChurn, resultChurn)
}
}
@@ -574,11 +575,11 @@ func TestActiveValidatorIndices(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
require.NoError(t, err)
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
got, err := helpers.ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
@@ -684,12 +685,12 @@ func TestComputeProposerIndex(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
bState := &ethpb.BeaconState{Validators: tt.args.validators}
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
require.NoError(t, err)
got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
@@ -718,9 +719,9 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
})
}
}
@@ -747,11 +748,11 @@ func TestIsIsEligibleForActivation(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
assert.Equal(t, tt.want, helpers.IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
})
}
}
@@ -765,7 +766,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
hashFunc := hash.CustomSHA256Hasher()
for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
candidateIndex, err := helpers.ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
if err != nil {
return 0, err
}
@@ -787,7 +788,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
}
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
@@ -806,13 +807,13 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
require.NoError(t, beaconState.SetValidators(validators))
require.NoError(t, beaconState.SetBalances(balances))
index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
index, err := helpers.LastActivatedValidatorIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, index, primitives.ValidatorIndex(3))
}
func TestProposerIndexFromCheckpoint(t *testing.T) {
ClearCache()
helpers.ClearCache()
e := primitives.Epoch(2)
r := [32]byte{'a'}
@@ -820,10 +821,10 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
ids := [32]primitives.ValidatorIndex{}
slot := primitives.Slot(69) // slot 5 in the Epoch
ids[5] = primitives.ValidatorIndex(19)
proposerIndicesCache.Set(e, r, ids)
helpers.ProposerIndicesCache().Set(e, r, ids)
c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
proposerIndicesCache.SetCheckpoint(*c, r)
id, err := ProposerIndexAtSlotFromCheckpoint(c, slot)
helpers.ProposerIndicesCache().SetCheckpoint(*c, r)
id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
require.NoError(t, err)
require.Equal(t, ids[5], id)
}
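
The checkpoint lookup above leans on plain slot-within-epoch arithmetic: with 32 slots per epoch, slot 69 sits at position 69 % 32 = 5, so ids[5] is the proposer index the cache is expected to return. A tiny sketch of the mapping, with the mainnet slots-per-epoch value hard-coded for illustration:

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, hard-coded here for illustration

func positionInEpoch(slot uint64) uint64 {
	return slot % slotsPerEpoch // index of the slot within its epoch
}

func main() {
	fmt.Println(positionInEpoch(69)) // 5, matching ids[5] in the test above
}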

View File

@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}
// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.

View File

@@ -40,7 +40,6 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",

View File

@@ -94,6 +94,15 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
entry := s.cache.ensure(key)
defer s.cache.delete(key)
root := b.Root()
sumz, err := s.store.WaitForSummarizer(ctx)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
WithError(err).
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
} else {
entry.setDiskSummary(sumz.Summary(root))
}
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
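
The new IsDataAvailable code treats the summarizer as an optional optimization: if WaitForSummarizer fails (for example, the cache never warmed in time), it logs at Debug and proceeds without a disk summary rather than failing the availability check. A minimal sketch of that degrade-gracefully pattern, with a string standing in for the real summarizer type:

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForSummarizer blocks until `ready` is closed or the context expires,
// mimicking the shape of BlobStorage.WaitForSummarizer.
func waitForSummarizer(ctx context.Context, ready <-chan struct{}) (string, error) {
	select {
	case <-ready:
		return "disk summary", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ready := make(chan struct{}) // never closed: simulates an unwarmed cache
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	sum, err := waitForSummarizer(ctx, ready)
	if err != nil {
		// Mirror of the Debug log above: note the miss and continue without
		// the optimization instead of failing the whole availability check.
		fmt.Println("no summarizer, doing the full check:", err)
		return
	}
	fmt.Println("using", sum)
}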

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -114,16 +113,6 @@ func Test_commitmentsToCheck(t *testing.T) {
}
}
func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
return nil
}
type mockDA struct {
t *testing.T
scs []blocks.ROBlob
err error
}
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := context.Background()
store := filesystem.NewEphemeralBlobStorage(t)

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -59,7 +60,12 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
diskSummary filesystem.BlobStorageSummary
}
func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
e.diskSummary = sum
}
// stash adds an item to the in-memory cache of BlobSidecars.
@@ -81,9 +87,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// the cache do not match those found in the block. If err is nil, then all expected
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
scs := make([]blocks.ROBlob, kc.count())
if e.diskSummary.AllAvailable(kc.count()) {
return nil, nil
}
scs := make([]blocks.ROBlob, 0, kc.count())
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
// We already have this blob, we don't need to write it or validate it.
if e.diskSummary.HasIndex(i) {
continue
}
if kc[i] == nil {
if e.scs[i] != nil {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
@@ -97,7 +111,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
}
scs[i] = *e.scs[i]
scs = append(scs, *e.scs[i])
}
return scs, nil
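
The filter rewrite above changes two things: it short-circuits to nil when the disk summary already covers every commitment, and it builds the result with append instead of positional writes (scs[i] = ...) so that skipping on-disk indices cannot leave zero-valued holes in the returned slice. A simplified sketch of that shape, with ints standing in for ROBlob values:

package main

import "fmt"

// allAvailable reports whether every index in [0, count) is on disk,
// mirroring the role of diskSummary.AllAvailable above.
func allAvailable(onDisk map[int]bool, count int) bool {
	for i := 0; i < count; i++ {
		if !onDisk[i] {
			return false
		}
	}
	return true
}

// filterNeeded returns only the cached items that still need checking.
func filterNeeded(cached []int, onDisk map[int]bool) []int {
	if allAvailable(onDisk, len(cached)) {
		return nil // everything already persisted: nothing to verify or write
	}
	need := make([]int, 0, len(cached))
	for i, v := range cached {
		if onDisk[i] {
			continue // already on disk: skip without leaving a hole
		}
		need = append(need, v)
	}
	return need
}

func main() {
	cached := []int{10, 11, 12, 13}
	fmt.Println(filterNeeded(cached, map[int]bool{0: true, 2: true}))                   // [11 13]
	fmt.Println(filterNeeded(cached, map[int]bool{0: true, 1: true, 2: true, 3: true})) // []
}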

View File

@@ -3,9 +3,14 @@ package das
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestCacheEnsureDelete(t *testing.T) {
@@ -23,3 +28,145 @@ func TestCacheEnsureDelete(t *testing.T) {
var nilEntry *cacheEntry
require.Equal(t, nilEntry, c.entries[k])
}
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
require.NoError(t, err)
entry := &cacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}
expected := make([]blocks.ROBlob, 0, nBlobs)
for i := 0; i < commits.count(); i++ {
if entry.diskSummary.HasIndex(uint64(i)) {
continue
}
// If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
expected = append(expected, blobs[i])
require.NoError(t, entry.stash(&blobs[i]))
}
require.Equal(t, numExpected, len(expected))
return entry, commits, expected
}
}
func TestFilterDiskSummary(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup filterTestCaseSetupFunc
}{
{
name: "full blobs, all on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
},
{
name: "full blobs, first on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
},
{
name: "full blobs, middle on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
},
{
name: "full blobs, last on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
},
{
name: "full blobs, none on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
},
{
name: "one commitment, on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
},
{
name: "one commitment, not on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
},
{
name: "two commitments, first on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
},
{
name: "two commitments, last on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
},
{
name: "two commitments, none on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
},
{
name: "two commitments, all on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}
func TestFilter(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
err error
}{
{
name: "commitments mismatch - extra sidecar",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
commits[5] = nil
return entry, commits, expected
},
err: errCommitmentMismatch,
},
{
name: "sidecar missing",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5] = nil
return entry, commits, expected
},
err: errMissingSidecar,
},
{
name: "commitments mismatch - different bytes",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5].KzgCommitment = []byte("nope")
return entry, commits, expected
},
err: errCommitmentMismatch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}

View File

@@ -19,9 +19,6 @@ var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {

View File

@@ -4,9 +4,10 @@ go_library(
name = "go_default_library",
srcs = [
"blob.go",
"ephemeral.go",
"cache.go",
"log.go",
"metrics.go",
"mock.go",
"pruner.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
@@ -17,10 +18,12 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -33,6 +36,7 @@ go_test(
name = "go_default_test",
srcs = [
"blob_test.go",
"cache_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],
@@ -44,7 +48,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
],

View File

@@ -1,18 +1,22 @@
package filesystem
import (
"context"
"fmt"
"math"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/io/file"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
@@ -25,6 +29,7 @@ var (
errEmptyBlobWritten = errors.New("zero bytes written to disk when saving blob sidecar")
errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
errNoBasePath = errors.New("BlobStorage base path not specified in init")
errInvalidRootString = errors.New("Could not parse hex string as a [32]byte")
)
const (
@@ -103,12 +108,29 @@ func (bs *BlobStorage) WarmCache() {
return
}
go func() {
if err := bs.pruner.prune(0); err != nil {
start := time.Now()
if err := bs.pruner.warmCache(); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
}()
}
// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
// fall back when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")
// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
if bs == nil || bs.pruner == nil {
return nil, ErrBlobStorageSummarizerUnavailable
}
return bs.pruner.waitForCache(ctx)
}
// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
startTime := time.Now()
@@ -281,6 +303,15 @@ func (bs *BlobStorage) Clear() error {
return nil
}
// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
if requested > math.MaxUint64-bs.retentionEpochs {
// If there is an overflow, then the retention period was set to an extremely large number.
return true
}
return requested+bs.retentionEpochs >= current
}
type blobNamer struct {
root [32]byte
index uint64
@@ -305,3 +336,11 @@ func (p blobNamer) path() string {
func rootString(root [32]byte) string {
return fmt.Sprintf("%#x", root)
}
func stringToRoot(str string) ([32]byte, error) {
slice, err := hexutil.Decode(str)
if err != nil {
return [32]byte{}, errors.Wrapf(errInvalidRootString, "input=%s", str)
}
return bytesutil.ToBytes32(slice), nil
}
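
WithinRetentionPeriod, added above, guards its addition explicitly because epochs are unsigned 64-bit values: if retentionEpochs is set absurdly high, requested+retentionEpochs would wrap around and an in-window request could be rejected. The guard instead treats such a configuration as "retain everything". A standalone sketch of the arithmetic, mirroring the boundary and overflow cases exercised by TestConfig_WithinRetentionPeriod further down:

package main

import (
	"fmt"
	"math"
)

func withinRetention(requested, current, retention uint64) bool {
	if requested > math.MaxUint64-retention {
		// requested+retention would overflow; the retention period is
		// effectively unbounded, so everything is in range.
		return true
	}
	return requested+retention >= current
}

func main() {
	fmt.Println(withinRetention(0, 17, 16))            // false: one epoch too old
	fmt.Println(withinRetention(0, 16, 16))            // true: exactly on the boundary
	fmt.Println(withinRetention(1, 1, math.MaxUint64)) // true: overflow guard kicks in
}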

View File

@@ -2,13 +2,12 @@ package filesystem
import (
"bytes"
"math"
"os"
"path"
"sync"
"testing"
"time"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -26,8 +25,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
require.NoError(t, err)
t.Run("no error for duplicate", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
existingSidecar := testSidecars[0]
blobPath := namerForSidecar(existingSidecar).path()
@@ -129,33 +127,9 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}
// pollUntil polls a condition function until it returns true or a timeout is reached.
func pollUntil(t *testing.T, fs afero.Fs, expected int) error {
var remainingFolders []os.FileInfo
var err error
// Define the condition function for polling
conditionFunc := func() bool {
remainingFolders, err = afero.ReadDir(fs, ".")
require.NoError(t, err)
return len(remainingFolders) == expected
}
startTime := time.Now()
for {
if conditionFunc() {
break // Condition met, exit the loop
}
if time.Since(startTime) > 30*time.Second {
return errors.New("timeout")
}
time.Sleep(1 * time.Second) // Adjust the sleep interval as needed
}
require.Equal(t, expected, len(remainingFolders))
return nil
}
func TestBlobIndicesBounds(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
root := [32]byte{}
okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
@@ -186,8 +160,7 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
func TestBlobStoragePrune(t *testing.T) {
currentSlot := primitives.Slot(200000)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
t.Run("PruneOne", func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
@@ -243,8 +216,7 @@ func TestBlobStoragePrune(t *testing.T) {
func BenchmarkPruning(b *testing.B) {
var t *testing.T
_, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
_, bs := NewEphemeralBlobStorageWithFs(t)
blockQty := 10000
currentSlot := primitives.Slot(150000)
@@ -273,3 +245,50 @@ func TestNewBlobStorage(t *testing.T) {
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
require.NoError(t, err)
}
func TestConfig_WithinRetentionPeriod(t *testing.T) {
retention := primitives.Epoch(16)
storage := &BlobStorage{retentionEpochs: retention}
cases := []struct {
name string
requested primitives.Epoch
current primitives.Epoch
within bool
}{
{
name: "before",
requested: 0,
current: retention + 1,
within: false,
},
{
name: "same",
requested: 0,
current: 0,
within: true,
},
{
name: "boundary",
requested: 0,
current: retention,
within: true,
},
{
name: "one less",
requested: retention - 1,
current: retention,
within: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
})
}
t.Run("overflow", func(t *testing.T) {
storage := &BlobStorage{retentionEpochs: math.MaxUint64}
require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
})
}

View File

@@ -0,0 +1,118 @@
package filesystem
import (
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
slot primitives.Slot
mask blobIndexMask
}
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
if idx >= fieldparams.MaxBlobsPerBlock {
return false
}
return s.mask[idx]
}
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
if count > fieldparams.MaxBlobsPerBlock {
return false
}
for i := 0; i < count; i++ {
if !s.mask[i] {
return false
}
}
return true
}
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
Summary(root [32]byte) BlobStorageSummary
}
type blobStorageCache struct {
mu sync.RWMutex
nBlobs float64
cache map[[32]byte]BlobStorageSummary
}
var _ BlobStorageSummarizer = &blobStorageCache{}
func newBlobStorageCache() *blobStorageCache {
return &blobStorageCache{
cache: make(map[[32]byte]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
s.mu.RLock()
defer s.mu.RUnlock()
return s.cache[root]
}
func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.mu.Lock()
defer s.mu.Unlock()
v := s.cache[key]
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *blobStorageCache) slot(key [32]byte) (primitives.Slot, bool) {
s.mu.RLock()
defer s.mu.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *blobStorageCache) evict(key [32]byte) {
var deleted float64
s.mu.Lock()
v, ok := s.cache[key]
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
}
delete(s.cache, key)
s.mu.Unlock()
if deleted > 0 {
s.updateMetrics(-deleted)
}
}
func (s *blobStorageCache) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}

View File

@@ -0,0 +1,150 @@
package filesystem
import (
"testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestSlotByRoot_Summary(t *testing.T) {
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
for i := range allSet {
allSet[i] = true
}
cases := []struct {
name string
root [32]byte
expected *blobIndexMask
}{
{
name: "not found",
},
{
name: "none set",
expected: &noneSet,
},
{
name: "index 1 set",
expected: &oneSet,
},
{
name: "all set",
expected: &allSet,
},
{
name: "first set",
expected: &firstSet,
},
{
name: "last set",
expected: &lastSet,
},
}
sc := newBlobStorageCache()
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
}
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
key := bytesutil.ToBytes32([]byte(c.name))
sum := sc.Summary(key)
for i := range c.expected {
ui := uint64(i)
if c.expected == nil {
require.Equal(t, false, sum.HasIndex(ui))
} else {
require.Equal(t, c.expected[i], sum.HasIndex(ui))
}
}
})
}
}
func TestAllAvailable(t *testing.T) {
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
r[i] = i
}
return r
}
require.DeepEqual(t, []int{}, idxUpTo(0))
require.DeepEqual(t, []int{0}, idxUpTo(1))
require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
cases := []struct {
name string
idxSet []int
count int
aa bool
}{
{
// If there are no blobs committed, then all the committed blobs are available.
name: "none in idx, 0 arg",
count: 0,
aa: true,
},
{
name: "none in idx, 1 arg",
count: 1,
aa: false,
},
{
name: "first in idx, 1 arg",
idxSet: []int{0},
count: 1,
aa: true,
},
{
name: "second in idx, 1 arg",
idxSet: []int{1},
count: 1,
aa: false,
},
{
name: "first missing, 2 arg",
idxSet: []int{1},
count: 2,
aa: false,
},
{
name: "all missing, 1 arg",
count: 6,
aa: false,
},
{
name: "out of bound is safe",
count: fieldparams.MaxBlobsPerBlock + 1,
aa: false,
},
{
name: "max present",
count: fieldparams.MaxBlobsPerBlock,
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
aa: true,
},
{
name: "one present",
count: 1,
idxSet: idxUpTo(1),
aa: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var mask blobIndexMask
for _, idx := range c.idxSet {
mask[idx] = true
}
sum := BlobStorageSummary{mask: mask}
require.Equal(t, c.aa, sum.AllAvailable(c.count))
})
}
}

View File

@@ -12,7 +12,7 @@ import (
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
if err != nil {
t.Fatal("test setup issue", err)
}
@@ -21,13 +21,13 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
if err != nil {
t.Fatal("test setup issue", err)
}
return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
return fs, &BlobStorage{fs: fs, pruner: pruner}
}
type BlobMocker struct {
@@ -61,3 +61,15 @@ func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage)
bs := &BlobStorage{fs: fs}
return &BlobMocker{fs: fs, bs: bs}, bs
}
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(k, 0, uint64(v[i])); err != nil {
t.Fatal(err)
}
}
}
return c
}
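
NewMockBlobStorageSummarizer lets tests declare, per root, exactly which indices should appear on disk, as the das filter tests above do. A hedged usage sketch following that call pattern (the root value and expectations here are arbitrary):

package filesystem_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
)

func TestMockSummarizerSketch(t *testing.T) {
	root := [32]byte{'a'}
	sum := filesystem.NewMockBlobStorageSummarizer(t, map[[32]byte][]int{root: {0, 2}})
	s := sum.Summary(root)
	if !s.HasIndex(0) || s.HasIndex(1) || !s.HasIndex(2) {
		t.Fatal("summary does not match the declared on-disk indices")
	}
	if !s.AllAvailable(1) { // index 0 is present, so the one-commitment prefix is covered
		t.Fatal("expected AllAvailable(1) to hold")
	}
}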

View File

@@ -1,6 +1,7 @@
package filesystem
import (
"context"
"encoding/binary"
"io"
"path"
@@ -12,7 +13,6 @@ import (
"time"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -32,22 +32,39 @@ type blobPruner struct {
sync.Mutex
prunedBefore atomic.Uint64
windowSize primitives.Slot
slotMap *slotForRoot
cache *blobStorageCache
cacheReady chan struct{}
warmed bool
fs afero.Fs
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
type prunerOpt func(*blobPruner) error
func withWarmedCache() prunerOpt {
return func(p *blobPruner) error {
return p.warmCache()
}
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
r, err := slots.EpochStart(retain + retentionBuffer)
if err != nil {
return nil, errors.Wrap(err, "could not set retentionSlots")
}
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
cw := make(chan struct{})
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
for _, o := range opts {
if err := o(p); err != nil {
return nil, err
}
}
return p, nil
}
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
if err := p.cache.ensure(root, latest, idx); err != nil {
return err
}
pruned := uint64(windowMin(latest, p.windowSize))
@@ -55,6 +72,8 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
go func() {
p.Lock()
defer p.Unlock()
if err := p.prune(primitives.Slot(pruned)); err != nil {
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
}
@@ -62,7 +81,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
func windowMin(latest, offset primitives.Slot) primitives.Slot {
// Safely compute the first slot in the epoch for the latest slot
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
if latest < offset {
@@ -71,12 +90,34 @@ func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
return latest - offset
}
func (p *blobPruner) warmCache() error {
p.Lock()
defer func() {
if !p.warmed {
p.warmed = true
close(p.cacheReady)
}
p.Unlock()
}()
if err := p.prune(0); err != nil {
return err
}
return nil
}
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
select {
case <-p.cacheReady:
return p.cache, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
p.Lock()
defer p.Unlock()
start := time.Now()
totalPruned, totalErr := 0, 0
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
@@ -121,8 +162,11 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
}
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
root := rootFromDir(dir)
slot, slotCached := p.slotMap.slot(root)
root, err := rootFromDir(dir)
if err != nil {
return 0, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
}
slot, slotCached := p.cache.slot(root)
// Return early if the slot is cached and doesn't need pruning.
if slotCached && shouldRetain(slot, pruneBefore) {
return 0, nil
@@ -151,7 +195,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
if err != nil {
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
}
if err := p.slotMap.ensure(root, slot, idx); err != nil {
if err := p.cache.ensure(root, slot, idx); err != nil {
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
}
}
@@ -179,7 +223,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
}
p.slotMap.evict(rootFromDir(dir))
p.cache.evict(root)
return len(scFiles), nil
}
@@ -196,8 +240,13 @@ func idxFromPath(fname string) (uint64, error) {
return strconv.ParseUint(parts[0], 10, 64)
}
func rootFromDir(dir string) string {
return filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
func rootFromDir(dir string) ([32]byte, error) {
subdir := filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
root, err := stringToRoot(subdir)
if err != nil {
return root, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
}
return root, nil
}
// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
@@ -269,71 +318,3 @@ func filterSsz(s string) bool {
func filterPart(s string) bool {
return filepath.Ext(s) == dotPartExt
}
func newSlotForRoot() *slotForRoot {
return &slotForRoot{
cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
type slotCacheEntry struct {
slot primitives.Slot
mask [fieldparams.MaxBlobsPerBlock]bool
}
type slotForRoot struct {
sync.RWMutex
nBlobs float64
cache map[string]*slotCacheEntry
}
func (s *slotForRoot) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
if !ok {
v = &slotCacheEntry{}
}
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
s.RLock()
defer s.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *slotForRoot) evict(key string) {
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
var deleted float64
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
s.updateMetrics(-deleted)
}
delete(s.cache, key)
}
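
The cacheReady channel above implements a close-once readiness signal: warmCache closes it exactly once, inside the deferred block, so waiters are released even when the warm-up itself returns an error (as TestCacheWarmFail in the next file verifies), and waitForCache races that signal against the caller's context. A minimal sketch of the idiom, assuming nothing beyond the standard library:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

type warmer struct {
	mu     sync.Mutex
	warmed bool
	ready  chan struct{}
}

// warm signals readiness exactly once, whether or not the work succeeded.
func (w *warmer) warm(fail bool) error {
	w.mu.Lock()
	defer func() {
		if !w.warmed {
			w.warmed = true
			close(w.ready) // release every current and future waiter
		}
		w.mu.Unlock()
	}()
	if fail {
		return errors.New("warm-up failed")
	}
	return nil
}

// wait blocks until readiness or context expiry, like waitForCache.
func (w *warmer) wait(ctx context.Context) error {
	select {
	case <-w.ready:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	w := &warmer{ready: make(chan struct{})}
	_ = w.warm(true) // even a failed warm-up must unblock waiters
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	fmt.Println(w.wait(ctx)) // <nil>: the channel is already closed
}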

View File

@@ -2,16 +2,19 @@ package filesystem
import (
"bytes"
"context"
"fmt"
"math"
"os"
"path"
"sort"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/spf13/afero"
@@ -25,15 +28,43 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, fieldparams.MaxBlobsPerBlock)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
rootStr := rootString(sc.BlockRoot())
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, pr.windowSize)
require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
pruned, err := pr.tryPruneDir(rootStr, pr.windowSize)
require.NoError(t, err)
require.Equal(t, 0, pruned)
}
func TestCacheWarmFail(t *testing.T) {
fs := afero.NewMemMapFs()
n := blobNamer{root: bytesutil.ToBytes32([]byte("derp")), index: 0}
bp := n.path()
mkdir := path.Dir(bp)
require.NoError(t, fs.MkdirAll(mkdir, directoryPermissions))
// Create an empty blob index in the fs by touching the file at a seemingly valid path.
fi, err := fs.Create(bp)
require.NoError(t, err)
require.NoError(t, fi.Close())
// Cache warm should fail due to the unexpected EOF.
pr, err := newBlobPruner(fs, 0)
require.NoError(t, err)
require.ErrorIs(t, pr.warmCache(), errPruningFailures)
// The cache warm has finished, so calling waitForCache with a super short deadline
// should not block or hit the context deadline.
ctx := context.Background()
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(1*time.Millisecond))
defer cancel()
c, err := pr.waitForCache(ctx)
// We will get an error and a nil value for the cache if we hit the deadline.
require.NoError(t, err)
require.NotNil(t, c)
}
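TestCacheWarmFail only passes if the warm-done signal fires even when warming errors out, so that waitForCache never blocks a caller indefinitely. A minimal sketch of that close-on-all-paths pattern, assuming a warmed chan struct{} field on blobPruner; field and helper names are assumptions:

func (p *blobPruner) warmCache() error {
	// Closing in a defer guarantees waiters are released on error paths too.
	// A sync.Once would be needed if warmCache can run more than once.
	defer close(p.warmed)
	return p.prune(0) // assumed: warming walks the filesystem once
}

func (p *blobPruner) waitForCache(ctx context.Context) (*blobCache, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-p.warmed:
		return p.cache, nil
	}
}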
func TestTryPruneDir_CachedExpired(t *testing.T) {
t.Run("empty directory", func(t *testing.T) {
fs := afero.NewMemMapFs()
@@ -43,16 +74,15 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, slot+1)
rootStr := rootString(sc.BlockRoot())
require.NoError(t, fs.Mkdir(rootStr, directoryPermissions)) // make empty directory
require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
pruned, err := pr.tryPruneDir(rootStr, slot+1)
require.NoError(t, err)
require.Equal(t, 0, pruned)
})
t.Run("blobs to delete", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -62,20 +92,21 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
require.NoError(t, bs.Save(scs[1]))
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, cok := bs.pruner.slotMap.slot(root)
root := scs[0].BlockRoot()
rootStr := rootString(root)
cs, cok := bs.pruner.cache.slot(scs[0].BlockRoot())
require.Equal(t, true, cok)
require.Equal(t, slot, cs)
// ensure that we see the saved files in the filesystem
files, err := listDir(fs, root)
files, err := listDir(fs, rootStr)
require.NoError(t, err)
require.Equal(t, 2, len(files))
pruned, err := bs.pruner.tryPruneDir(root, slot+1)
pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
require.NoError(t, err)
require.Equal(t, 2, pruned)
files, err = listDir(fs, root)
files, err = listDir(fs, rootStr)
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, 0, len(files))
})
@@ -83,8 +114,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
func TestTryPruneDir_SlotFromFile(t *testing.T) {
t.Run("expired blobs deleted", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -94,32 +124,32 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
require.NoError(t, bs.Save(scs[1]))
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, ok := bs.pruner.slotMap.slot(root)
root := scs[0].BlockRoot()
rootStr := rootString(root)
cs, ok := bs.pruner.cache.slot(root)
require.Equal(t, true, ok)
require.Equal(t, slot, cs)
// evict it from the cache so that we trigger the file read path
bs.pruner.slotMap.evict(root)
_, ok = bs.pruner.slotMap.slot(root)
bs.pruner.cache.evict(root)
_, ok = bs.pruner.cache.slot(root)
require.Equal(t, false, ok)
// ensure that we see the saved files in the filesystem
files, err := listDir(fs, root)
files, err := listDir(fs, rootStr)
require.NoError(t, err)
require.Equal(t, 2, len(files))
pruned, err := bs.pruner.tryPruneDir(root, slot+1)
pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
require.NoError(t, err)
require.Equal(t, 2, pruned)
files, err = listDir(fs, root)
files, err = listDir(fs, rootStr)
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, 0, len(files))
})
t.Run("not expired, intact", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
// Set slot equal to the window size, so it should be retained.
var slot primitives.Slot = bs.pruner.windowSize
slot := bs.pruner.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -128,24 +158,25 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
require.NoError(t, bs.Save(scs[1]))
// Evict slot mapping from the cache so that we trigger the file read path.
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
bs.pruner.slotMap.evict(root)
_, ok := bs.pruner.slotMap.slot(root)
root := scs[0].BlockRoot()
rootStr := rootString(root)
bs.pruner.cache.evict(root)
_, ok := bs.pruner.cache.slot(root)
require.Equal(t, false, ok)
// Ensure that we see the saved files in the filesystem.
files, err := listDir(fs, root)
files, err := listDir(fs, rootStr)
require.NoError(t, err)
require.Equal(t, 2, len(files))
// This should use the slotFromFile code (simulating restart).
// Setting pruneBefore == slot puts the slot right at the retention boundary, so the directory is kept.
pruned, err := bs.pruner.tryPruneDir(root, slot)
pruned, err := bs.pruner.tryPruneDir(rootStr, slot)
require.NoError(t, err)
require.Equal(t, 0, pruned)
// Ensure files are still present.
files, err = listDir(fs, root)
files, err = listDir(fs, rootStr)
require.NoError(t, err)
require.Equal(t, 2, len(files))
})
@@ -184,8 +215,7 @@ func TestSlotFromFile(t *testing.T) {
}
for _, c := range cases {
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
@@ -243,10 +273,8 @@ func TestListDir(t *testing.T) {
}
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
fsLayout.children = append(fsLayout.children, notABlob)
fsLayout.children = append(fsLayout.children, childlessBlob)
fsLayout.children = append(fsLayout.children, blobWithSsz)
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
fsLayout.children = append(fsLayout.children,
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
topChildren := make([]string, len(fsLayout.children))
for i := range fsLayout.children {
@@ -282,10 +310,7 @@ func TestListDir(t *testing.T) {
dirPath: ".",
expected: []string{notABlob.name},
filter: func(s string) bool {
if s == notABlob.name {
return true
}
return false
return s == notABlob.name
},
},
{
@@ -325,3 +350,45 @@ func TestListDir(t *testing.T) {
})
}
}
func TestRootFromDir(t *testing.T) {
cases := []struct {
name string
dir string
err error
root [32]byte
}{
{
name: "happy path",
dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
root: [32]byte{255, 255, 135, 94, 29, 152, 92, 92, 203, 33, 72, 148, 152, 63, 36, 40,
237, 178, 113, 240, 248, 123, 104, 186, 112, 16, 228, 169, 157, 243, 181, 203},
},
{
name: "too short",
dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5c",
err: errInvalidRootString,
},
{
name: "too log",
dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cbb",
err: errInvalidRootString,
},
{
name: "missing prefix",
dir: "ffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
err: errInvalidRootString,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
root, err := stringToRoot(c.dir)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.root, root)
})
}
}

View File

@@ -118,7 +118,7 @@ type HeadAccessDatabase interface {
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
type SlasherDatabase interface {
io.Closer
SaveLastEpochsWrittenForValidators(
SaveLastEpochWrittenForValidators(
ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
) error
SaveAttestationRecordsForValidators(

View File

@@ -20,6 +20,7 @@ go_library(
"migration.go",
"migration_archived_index.go",
"migration_block_slot_index.go",
"migration_finalized_parent.go",
"migration_state_validators.go",
"schema.go",
"state.go",

View File

@@ -201,21 +201,20 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
return err
}
encs[i-1] = penc
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
lastIdx := len(blocks) - 1
fbrs[lastIdx].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[lastIdx])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[lastIdx] = enc
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
child := bkt.Get(finalizedChildRoot[:])
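The rewrite hoists the i == len(blocks)-1 branch out of the loop: the final element's ChildRoot fixup and encoding run exactly once, after the loop, instead of being tested on every iteration. Reduced to its shape (items, process, and finalize are placeholders, and a non-empty slice is assumed, as in the original):

// Before: a branch that can only ever be true once is evaluated every pass.
for i := range items {
	process(items[i])
	if i == len(items)-1 {
		finalize(items[i])
	}
}

// After: the single-shot work moves past the loop.
for i := range items {
	process(items[i])
}
finalize(items[len(items)-1])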

View File

@@ -237,6 +237,50 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
// we're making 4 blocks so we can test an element without a valid child at the end
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{}))
require.NoError(t, err)
// existing is the child that we'll set up in the index by hand to seed the index.
existing := blks[3]
// toUpdate is a single item update, emulating a backfill batch size of 1. It is the parent of `existing`.
toUpdate := blks[2]
// set up existing finalized block
ebpr := existing.Block().ParentRoot()
ebr := existing.Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: make([]byte, 32), // we're bypassing validation to seed the db, so we don't need a valid child.
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
// writing this to the index outside of the validating function to seed the test.
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{toUpdate}, ebr))
// make sure that we still correctly validate descendants in the single item case.
noChild := blks[0] // will fail to update because we don't have blks[1] in the db.
// test wrong child param
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, ebr), errNotConnectedToFinalized)
// test parent of child that isn't finalized
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{noChild}, blks[1].Root()), errFinalizedChildNotFound)
// now make it work by writing the missing block
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[1]}, blks[2].Root()))
// since blks[1] is now in the index, we should be able to update blks[0]
require.NoError(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{blks[0]}, blks[1].Root()))
}
func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
@@ -252,23 +296,23 @@ func TestStore_BackfillFinalizedIndex(t *testing.T) {
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
require.NoError(t, err)
// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendant root that really doesn't exist
require.NoError(t, err)
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))
// use the real root so that it succeeds

View File

@@ -137,7 +137,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
}
}
datafile := StoreDatafilePath(dirPath)
log.Infof("Opening Bolt DB at %s", datafile)
log.WithField("path", datafile).Info("Opening Bolt DB")
boltDB, err := bolt.Open(
datafile,
params.BeaconIoConfig().ReadWritePermissions,
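The change swaps string interpolation for a structured field: the message stays constant while the path travels as a key/value pair that log indexers can query. Illustrative comparison only:

// Unstructured: the path is baked into the message text.
log.Infof("Opening Bolt DB at %s", datafile)

// Structured: "path" is a discrete, queryable field.
log.WithField("path", datafile).Info("Opening Bolt DB")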

Some files were not shown because too many files have changed in this diff.