Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00
Compare commits
154 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 2c56c650e6 | |
| | 6c0bba7197 | |
| | 35a2a32106 | |
| | 2a72703d3e | |
| | 3b5a6b5e2f | |
| | 36958b552d | |
| | ae1a6be8a3 | |
| | 4f146f9a30 | |
| | f07036ab3c | |
| | da9d4cf5b9 | |
| | 56208aa84d | |
| | b866a2c744 | |
| | a77234e637 | |
| | a62cca15dd | |
| | e0e7354708 | |
| | 0f86a16915 | |
| | 972c22b02f | |
| | 93c27340e4 | |
| | c3edb32558 | |
| | 3baaa732df | |
| | 8ceb7e76ea | |
| | 4d5dddd302 | |
| | 55efccb07f | |
| | 961d8e1481 | |
| | d396a9931e | |
| | e3f8f121f4 | |
| | 80f29e9eda | |
| | 8995d8133a | |
| | 31044206b8 | |
| | ac04246a2a | |
| | 0923145bd7 | |
| | 3a1702e56f | |
| | 501ec74a48 | |
| | c248fe0bb3 | |
| | 215fbcb2e4 | |
| | e39f44b529 | |
| | a216cb4105 | |
| | 9eff6ae476 | |
| | 3eec5a5cb6 | |
| | 66878deb2c | |
| | 0b6e1711e4 | |
| | 15025837bb | |
| | 0229a2055e | |
| | eb9af15c7a | |
| | 0584746815 | |
| | 01705d1f3d | |
| | 14f93b4e9d | |
| | ad11036c36 | |
| | 632a06076b | |
| | 242c2b0268 | |
| | 19662da905 | |
| | 7faee5af35 | |
| | 805ee1bf31 | |
| | bea46fdfa1 | |
| | f6b1fb1c88 | |
| | 6fb349ea76 | |
| | e5a425f5c7 | |
| | f157d37e4c | |
| | 5f08559bef | |
| | a082d2aecd | |
| | bcfaff8504 | |
| | d8e09c346f | |
| | 876519731b | |
| | de05b83aca | |
| | 56c73e7193 | |
| | 859ac008a8 | |
| | f882bd27c8 | |
| | 361e5759c1 | |
| | 34ef0da896 | |
| | 726e8b962f | |
| | 453ea01deb | |
| | 6537f8011e | |
| | 5f17317c1c | |
| | 3432ffa4a3 | |
| | 9dac67635b | |
| | 9be69fbd07 | |
| | e21261e893 | |
| | da53a8fc48 | |
| | a14634e656 | |
| | 43761a8066 | |
| | 01dbc337c0 | |
| | 92f9b55fcb | |
| | f65f12f58b | |
| | f2b61a3dcf | |
| | 77a6d29a2e | |
| | 31d16da3a0 | |
| | 19221b77bd | |
| | 83df293647 | |
| | c20c09ce36 | |
| | 2191faaa3f | |
| | 2de1e6f3e4 | |
| | db44df3964 | |
| | f92eb44c89 | |
| | a26980b64d | |
| | f58cf7e626 | |
| | 68da7dabe2 | |
| | d1e43a2c02 | |
| | 3652bec2f8 | |
| | 81b7a1725f | |
| | 0c917079c4 | |
| | a732fe7021 | |
| | d75a7aae6a | |
| | e788a46e82 | |
| | 199543125a | |
| | ca63efa770 | |
| | 345e6edd9c | |
| | 6403064126 | |
| | 0517d76631 | |
| | 000d480f77 | |
| | b40a8ed37e | |
| | d21c2bd63e | |
| | 7a256e93f7 | |
| | 07fe76c2da | |
| | 54affa897f | |
| | ac4c5fae3c | |
| | 2845d87077 | |
| | dc2c90b8ed | |
| | b469157e1f | |
| | 2697794e58 | |
| | 48cf24edb4 | |
| | 78f90db90b | |
| | d0a3b9bc1d | |
| | bfdb6dab86 | |
| | 7dd2fd52af | |
| | b6bad9331b | |
| | 6e2122085d | |
| | 7a847292aa | |
| | 81f4db0afa | |
| | a7dc2e6c8b | |
| | 0a010b5088 | |
| | 1e335e2cf2 | |
| | 42f4c0f14e | |
| | d3c12abe25 | |
| | b0ba05b4f4 | |
| | e206506489 | |
| | 013cb28663 | |
| | 496914cb39 | |
| | c032e78888 | |
| | 5e4deff6fd | |
| | 6daa91c465 | |
| | 32ce6423eb | |
| | b0ea450df5 | |
| | 8bd10df423 | |
| | dcbb543be2 | |
| | be0580e1a9 | |
| | 1355178115 | |
| | b78c3485b9 | |
| | f503efc6ed | |
| | 1bfbd3980e | |
| | 3e722ea1bc | |
| | d844026433 | |
| | 9ffc19d5ef | |
| | 3e23f6e879 | |
| | c688c84393 | |
.bazelrc (1 changed line)
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true
CHANGELOG.md (137 changed lines)
@@ -4,6 +4,141 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v5.3.0](https://github.com/prysmaticlabs/prysm/compare/v5.2.0...v5.3.0) - 2025-02-12

This release includes support for Pectra activation in the [Holesky](https://github.com/eth-clients/holesky) and [Sepolia](https://github.com/eth-clients/sepolia) testnets! The release also contains many fixes for Electra found through rigorous testing on devnets over the last few months.

For mainnet, we have a few nice features for you to try:

- [PR #14023](https://github.com/prysmaticlabs/prysm/pull/14023) introduces a new file layout structure for storing blobs. Rather than storing all blob root directories in one parent directory, blob root directories are organized into subdirectories by epoch. This should vastly decrease the blob cache warm-up time when Prysm is starting. Try this feature with `--blob-storage-layout=by-epoch`; a minimal sketch of the idea follows.
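
A minimal sketch of the idea behind the `by-epoch` layout, using hypothetical names (`byEpochPath`, `slotsPerEpoch`); this is illustrative only, not Prysm's actual implementation. The multi-level epoch/slot/root scheme keeps each directory small, so a startup scan touches one epoch bucket at a time instead of every block root:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// slotsPerEpoch mirrors mainnet's SLOTS_PER_EPOCH.
const slotsPerEpoch = 32

// byEpochPath buckets a block root's blob directory under
// <base>/<epoch>/<slot>/<root> instead of a flat <base>/<root>.
func byEpochPath(base string, slot uint64, root [32]byte) string {
	epoch := slot / slotsPerEpoch
	return filepath.Join(base,
		fmt.Sprintf("%d", epoch),
		fmt.Sprintf("%d", slot),
		fmt.Sprintf("%#x", root))
}

func main() {
	fmt.Println(byEpochPath("/data/blobs", 10_000_000, [32]byte{0xab, 0xcd}))
}
```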

Updating to this release is **required** for Holesky and Sepolia operators and **recommended** for mainnet users, as there are a few bug fixes that apply to Deneb logic.

### Added

- Added an error field to the `Finished building block` log. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14696)
- Implemented a new `EmptyExecutionPayloadHeader` function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Added a proper gas limit check for headers from the builder. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14707)
- `Finished building block`: Display the error only if it is not nil. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14722)
- Added a light client feature flag check to RPC handlers. [PR](https://github.com/prysmaticlabs/prysm/pull/14736). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Added support for updating the target and max blob counts to different values per hard-fork config. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14678)
- Log before the blob filesystem cache warm-up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- New design for the attestation pool. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14324)
- Add a field param placeholder for the Electra blob target and max to pass spec tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14733)
- Light client: Add better error handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14749). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Add EIP-7691: Blob throughput increase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14750)
- Trace IDONTWANT messages in pubsub. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14778)
- Add Fulu fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14771)
- DB optimization for saving light client bootstraps (save unique sync committees only). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Separate type for unaggregated network attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14659)
- Remote signer Electra fork support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14477)
- Add an Electra test case to the rewards API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14816)
- Update `proto_test.go` to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14817)
- Update the slasher service to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14812)
- Builder API endpoint support for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14344)
- Added protoc toolchains at version v25.3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Add test cases for the eth_lightclient_bootstrap API SSZ support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14824)
- Handle `AttesterSlashingElectra` everywhere in the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14823)
- Add a Beacon DB pruning service to prune historical data older than MIN_EPOCHS_FOR_BLOCK_REQUESTS (roughly equivalent to the weak subjectivity period); a minimal cutoff sketch follows after this list. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14687)
- Nil consolidation request check for core processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14851)
- Updated the blob sidecar API endpoint for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Slashing pool service to convert slashings from Phase0 to Electra at the fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14844)
- Check to stop Eth1 voting after Electra, once Eth1 deposits stop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14835)
- WARN log message on node startup advising of the upcoming deprecation of the `--enable-historical-state-representation` feature flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14856)
- Beacon API event support for `SingleAttestation` and `SignedAggregateAttestationAndProofElectra`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14855)
- Added Electra tests for `TestLightClient_NewLightClientOptimisticUpdateFromBeaconState` and `TestLightClient_NewLightClientFinalityUpdateFromBeaconState`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14783)
- New option to select an alternate blob storage layout. Rather than a flat directory with a subdirectory for each block root, a multi-level scheme is used to organize blobs by epoch/slot/root, enabling leaner syscalls, indexing, and pruning. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14023)
- Send the pending attestation queue's attestations through the notification feed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14862)
- Prune all pending deposits and proofs post-Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14829)
- Add Pectra testnet dates (Sepolia and Holesky). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14884)
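
As referenced in the pruning entry above, here is a minimal sketch of the retention cutoff: data older than MIN_EPOCHS_FOR_BLOCK_REQUESTS behind the current epoch becomes prunable. The names are illustrative, and the mainnet constant (33024 epochs, from the consensus spec config) is an assumption on my part, not taken from this diff:

```go
package main

import "fmt"

// minEpochsForBlockRequests mirrors the spec's mainnet
// MIN_EPOCHS_FOR_BLOCK_REQUESTS (assumed 33024 epochs here, roughly
// the weak subjectivity period).
const minEpochsForBlockRequests uint64 = 33024

// pruneCutoff returns the highest epoch whose historical data is old
// enough to prune; zero means nothing is prunable yet.
func pruneCutoff(currentEpoch uint64) uint64 {
	if currentEpoch <= minEpochsForBlockRequests {
		return 0
	}
	return currentEpoch - minEpochsForBlockRequests
}

func main() {
	fmt.Println(pruneCutoff(350000)) // 316976
}
```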

### Changed

- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Refactor subnet subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14711)
- Refactor RPC handler subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14732)
- Go deps upgrade, from `ioutil` to `io`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14737)
- Moved the "successfully registered validator(s)" builder log to debug level. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- Update some test files to use `crypto/rand` instead of `math/rand`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14747)
- Re-organize the content of the `*.proto` files (no functional change). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14755)
- SSZ file generation: Remove the `// Hash: ...` header. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14760)
- Updated the Electra spec definition for `process_epoch`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14768)
- Update our `go-libp2p-pubsub` dependency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14770)
- Re-organize the content of files to ease the creation of a new fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14761)
- Updated the Electra spec definition for `process_registry_updates`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14767)
- Fixed metadata errors for peers connected via QUIC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14776)
- Updated spec definitions for `process_slashings` in godocs. Simplified the `ProcessSlashings` API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14766)
- Update spec tests to v1.5.0-beta.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14788)
- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14718)
- Update the blobs-by-RPC topics from V2 to V1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14785)
- Updated geth to 1.14~. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- E2e tests start from Bellatrix. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Version pinning for unclog after making some UX improvements. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14802)
- Remove helpers that check for execution/compounding withdrawal credentials and expose them as methods. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14808)
- Refactor `2006-01-02 15:04:05` to `time.DateTime`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14792)
- Updated Prysm to Go v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated Bazel to v7.4.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated rules_go to v0.46.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated golang.org/x/tools to be compatible with Go v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- CI now requires proto files to be properly formatted with clang-format. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14831)
- Improved test coverage of beacon-chain/core/electra/churn.go. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14837)
- Update Electra spec tests to beta1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14841)
- Move the deposit request nil check to apply to all requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14849)
- Do not mark blocks as invalid on context deadlines during state transition. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14838)
- Update Electra core processing to not mark a block bad on execution request errors. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14826)
- Dependency: Updated go-ethereum to v1.14.13. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14872)
- Improved readability of the proposer settings loader. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14868)
- Removed the existing validator.processSlot span and added a validator.processSlot span to slotCtx. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14874)
- DownloadFinalizedData has moved from the api/client package to beacon-chain/sync/checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14871)
- Updated the blob batch limit to 192 for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated the blob batch limit burst factor to 3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Changed the derived batch limit when serving blobs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated go-libp2p-pubsub to v0.13.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14890)
- Renamed the light client flag from `enable-lightclient` to `enable-light-client`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14887)
- Update Electra spec tests to beta2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14901)

### Removed

- Cleaned up the ProcessSlashings method to remove an unnecessary argument. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14762)
- Remove the `/proto/eth/v2` directory. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14765)
- Remove the `/memsize/` pprof endpoint, as it is no longer supported in Go 1.23. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Clean up the `TestCanUpgrade*` tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14791)
- Remove `Copy()` from the `ReadOnlyBeaconBlock` interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14811)
- Removed a tracing span on signature requests. These requests usually took less than 5 nanoseconds and are generally not worth tracing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14864)

### Fixed

- Added a check to prevent a nil pointer dereference or out-of-bounds array access when validating a BLSToExecutionChange on an impossibly nil validator. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14705)
- EIP-7691: Ensure new blob subnets are subscribed one epoch in advance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14759)
- Fix the KZG commitment inclusion proof depth value for the minimal preset. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14787)
- Replace exampleIP with `96.7.129.13`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14795)
- Fixed a p2p test to reliably return a static IP through DNS resolution. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14800)
- `ToBlinded`: Use the Fulu struct for Fulu (instead of Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14797)
- Fix a panic with a type cast on pbgenericblock(). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14801)
- Prysmctl generate genesis state: fix truncation of ExtraData to 32 bytes to satisfy SSZ marshaling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14803)
- Added conditional evaluators to fix scenario e2e tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14798)
- Use `SingleAttestation` for Fulu in the p2p attestation map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14809)
- `UpgradeToFulu`: Respect the specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14821)
- `nodeFilter`: Implement `filterPeerForBlobSubnet` to avoid error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14822)
- Fixed deposit packing for post-Electra: early return if EIP-6110 is applied. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14697)
- Fix batch processing of new pending deposits by getting validators from state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14827)
- Fix handling of a block not found at a slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Fixed an incorrect attester slashing length check. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14833)
- Fix the monitor service for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14853)
- Add more nil checks on ToConsensus functions for added safety. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14867)
- Fix the Electra state to safely share references on pending fields when appending. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14895)
- Add missing config values from the spec. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14903)
- Removed unused `rebuildTrie` assignments for fields that do not use them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14906)
- Fix the block API endpoint to handle blocks with the same structure but on different forks (i.e. Fulu and Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14897)
- We change how we track blob indexes during their reconstruction from the EL to prevent. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14909)
- We now use the correct maximum value when serving blobs for Electra blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14910)

### Security

- Go version upgrade to 1.22.10 for CVE-2024-34156. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14729)
- Update golang.org/x/crypto to v0.31.0 to address CVE-2024-45337. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14777)
- Update golang.org/x/net to v0.33.0 to address CVE-2024-45338. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14780)

## [v5.2.0](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...v5.2.0)

Updating to this release is highly recommended, especially for users running v5.1.1 or v5.1.2.

@@ -2987,4 +3122,4 @@ There are no security updates in this release.

# Older than v2.0.0

For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
@@ -55,7 +55,7 @@ bazel build //beacon-chain --config=release

## Adding / updating dependencies

1. Add your dependency as you would with Go modules, i.e. `go get ...`
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod` to update the Bazel-managed dependencies.
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true` to update the Bazel-managed dependencies.

Example:
WORKSPACE (10 changed lines)
@@ -255,7 +255,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.5.0-beta.2"
consensus_spec_version = "v1.5.0-alpha.10"

bls_test_version = "v0.1.1"

@@ -271,7 +271,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-X/bMxbKg1clo2aFEjBoeuFq/U+BF1eQopgRP/7nI3Qg=",
    integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -287,7 +287,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-WSxdri5OJGuNApW+odKle5UzToDyEOx+F3lMiqamJAg=",
    integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -303,7 +303,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-LYE8l3y/zSt4YVrehrJ3ralqtgeYNildiIp+HR6+xAI=",
    integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -318,7 +318,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-jvZQ90qcJMTOqMsPO7sgeEVQmewZTHcz7LVDkNqwTFQ=",
    integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -46,6 +46,10 @@ go_test(
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
        "//api:go_default_library",
        "//api/server/structs:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -154,6 +154,10 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
    if err != nil {
        return
    }
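    // Only requests that carry a body (POST here) need a Content-Type header.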
    if method == http.MethodPost {
        req.Header.Set("Content-Type", api.JsonMediaType)
    }
    req.Header.Set("Accept", api.JsonMediaType)
    req.Header.Add("User-Agent", version.BuildData())
    for _, o := range opts {
        o(req)
@@ -12,6 +12,7 @@ import (
    "testing"

    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/api"
    "github.com/prysmaticlabs/prysm/v5/api/server/structs"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -89,6 +90,8 @@ func TestClient_RegisterValidator(t *testing.T) {
    expectedPath := "/eth/v1/builder/validators"
    hc := &http.Client{
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            body, err := io.ReadAll(r.Body)
            defer func() {
                require.NoError(t, r.Body.Close())
@@ -364,8 +367,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
            require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
            require.Equal(t, "application/json", r.Header.Get("Content-Type"))
            require.Equal(t, "application/json", r.Header.Get("Accept"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            return &http.Response{
                StatusCode: http.StatusOK,
                Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -392,8 +395,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
            require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
            require.Equal(t, "application/json", r.Header.Get("Content-Type"))
            require.Equal(t, "application/json", r.Header.Get("Accept"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            return &http.Response{
                StatusCode: http.StatusOK,
                Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -423,8 +426,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
            require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
            require.Equal(t, "application/json", r.Header.Get("Content-Type"))
            require.Equal(t, "application/json", r.Header.Get("Accept"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            var req structs.SignedBlindedBeaconBlockDeneb
            err := json.NewDecoder(r.Body).Decode(&req)
            require.NoError(t, err)
@@ -579,14 +579,14 @@ type SignedBeaconBlockContentsFulu struct {
}

type BeaconBlockContentsFulu struct {
    Block *BeaconBlockFulu `json:"block"`
    KzgProofs []string `json:"kzg_proofs"`
    Blobs []string `json:"blobs"`
    Block *BeaconBlockElectra `json:"block"`
    KzgProofs []string `json:"kzg_proofs"`
    Blobs []string `json:"blobs"`
}

type SignedBeaconBlockFulu struct {
    Message *BeaconBlockFulu `json:"message"`
    Signature string `json:"signature"`
    Message *BeaconBlockElectra `json:"message"`
    Signature string `json:"signature"`
}

var _ SignedMessageJsoner = &SignedBeaconBlockFulu{}

@@ -599,36 +599,12 @@ func (s *SignedBeaconBlockFulu) SigString() string {
    return s.Signature
}

type BeaconBlockFulu struct {
    Slot string `json:"slot"`
    ProposerIndex string `json:"proposer_index"`
    ParentRoot string `json:"parent_root"`
    StateRoot string `json:"state_root"`
    Body *BeaconBlockBodyFulu `json:"body"`
}

type BeaconBlockBodyFulu struct {
    RandaoReveal string `json:"randao_reveal"`
    Eth1Data *Eth1Data `json:"eth1_data"`
    Graffiti string `json:"graffiti"`
    ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
    AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
    Attestations []*AttestationElectra `json:"attestations"`
    Deposits []*Deposit `json:"deposits"`
    VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
    SyncAggregate *SyncAggregate `json:"sync_aggregate"`
    ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
    BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
    BlobKzgCommitments []string `json:"blob_kzg_commitments"`
    ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}

type BlindedBeaconBlockFulu struct {
    Slot string `json:"slot"`
    ProposerIndex string `json:"proposer_index"`
    ParentRoot string `json:"parent_root"`
    StateRoot string `json:"state_root"`
    Body *BlindedBeaconBlockBodyFulu `json:"body"`
    Slot string `json:"slot"`
    ProposerIndex string `json:"proposer_index"`
    ParentRoot string `json:"parent_root"`
    StateRoot string `json:"state_root"`
    Body *BlindedBeaconBlockBodyElectra `json:"body"`
}

type SignedBlindedBeaconBlockFulu struct {

@@ -645,19 +621,3 @@ func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
    return s.Signature
}

type BlindedBeaconBlockBodyFulu struct {
    RandaoReveal string `json:"randao_reveal"`
    Eth1Data *Eth1Data `json:"eth1_data"`
    Graffiti string `json:"graffiti"`
    ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
    AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
    Attestations []*AttestationElectra `json:"attestations"`
    Deposits []*Deposit `json:"deposits"`
    VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
    SyncAggregate *SyncAggregate `json:"sync_aggregate"`
    ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
    BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
    BlobKzgCommitments []string `json:"blob_kzg_commitments"`
    ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
@@ -3365,285 +3365,6 @@ func (b *BeaconBlockContentsFulu) ToConsensus() (*eth.BeaconBlockContentsFulu, e
    }, nil
}

func (b *BeaconBlockFulu) ToConsensus() (*eth.BeaconBlockFulu, error) {
    if b == nil {
        return nil, errNilValue
    }
    if b.Body == nil {
        return nil, server.NewDecodeError(errNilValue, "Body")
    }
    if b.Body.Eth1Data == nil {
        return nil, server.NewDecodeError(errNilValue, "Body.Eth1Data")
    }
    if b.Body.SyncAggregate == nil {
        return nil, server.NewDecodeError(errNilValue, "Body.SyncAggregate")
    }
    if b.Body.ExecutionPayload == nil {
        return nil, server.NewDecodeError(errNilValue, "Body.ExecutionPayload")
    }

    slot, err := strconv.ParseUint(b.Slot, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Slot")
    }
    proposerIndex, err := strconv.ParseUint(b.ProposerIndex, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "ProposerIndex")
    }
    parentRoot, err := bytesutil.DecodeHexWithLength(b.ParentRoot, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "ParentRoot")
    }
    stateRoot, err := bytesutil.DecodeHexWithLength(b.StateRoot, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "StateRoot")
    }
    randaoReveal, err := bytesutil.DecodeHexWithLength(b.Body.RandaoReveal, fieldparams.BLSSignatureLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.RandaoReveal")
    }
    depositRoot, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.DepositRoot, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositRoot")
    }
    depositCount, err := strconv.ParseUint(b.Body.Eth1Data.DepositCount, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositCount")
    }
    blockHash, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.BlockHash, common.HashLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.Eth1Data.BlockHash")
    }
    graffiti, err := bytesutil.DecodeHexWithLength(b.Body.Graffiti, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.Graffiti")
    }
    proposerSlashings, err := ProposerSlashingsToConsensus(b.Body.ProposerSlashings)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ProposerSlashings")
    }
    attesterSlashings, err := AttesterSlashingsElectraToConsensus(b.Body.AttesterSlashings)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.AttesterSlashings")
    }
    atts, err := AttsElectraToConsensus(b.Body.Attestations)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.Attestations")
    }
    deposits, err := DepositsToConsensus(b.Body.Deposits)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.Deposits")
    }
    exits, err := SignedExitsToConsensus(b.Body.VoluntaryExits)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.VoluntaryExits")
    }
    syncCommitteeBits, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeBits, fieldparams.SyncAggregateSyncCommitteeBytesLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeBits")
    }
    syncCommitteeSig, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeSignature, fieldparams.BLSSignatureLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeSignature")
    }
    payloadParentHash, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.ParentHash, common.HashLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ParentHash")
    }
    payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.FeeRecipient, fieldparams.FeeRecipientLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.FeeRecipient")
    }
    payloadStateRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.StateRoot, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.StateRoot")
    }
    payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.ReceiptsRoot, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ReceiptsRoot")
    }
    payloadLogsBloom, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.LogsBloom, fieldparams.LogsBloomLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.LogsBloom")
    }
    payloadPrevRandao, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.PrevRandao, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.PrevRandao")
    }
    payloadBlockNumber, err := strconv.ParseUint(b.Body.ExecutionPayload.BlockNumber, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlockNumber")
    }
    payloadGasLimit, err := strconv.ParseUint(b.Body.ExecutionPayload.GasLimit, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.GasLimit")
    }
    payloadGasUsed, err := strconv.ParseUint(b.Body.ExecutionPayload.GasUsed, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.GasUsed")
    }
    payloadTimestamp, err := strconv.ParseUint(b.Body.ExecutionPayload.Timestamp, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayloadHeader.Timestamp")
    }
    payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(b.Body.ExecutionPayload.ExtraData, fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExtraData")
    }
    payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(b.Body.ExecutionPayload.BaseFeePerGas)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BaseFeePerGas")
    }
    payloadBlockHash, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.BlockHash, common.HashLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlockHash")
    }
    err = slice.VerifyMaxLength(b.Body.ExecutionPayload.Transactions, fieldparams.MaxTxsPerPayloadLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.Transactions")
    }
    txs := make([][]byte, len(b.Body.ExecutionPayload.Transactions))
    for i, tx := range b.Body.ExecutionPayload.Transactions {
        txs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Transactions[%d]", i))
        }
    }
    err = slice.VerifyMaxLength(b.Body.ExecutionPayload.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.Withdrawals")
    }
    withdrawals := make([]*enginev1.Withdrawal, len(b.Body.ExecutionPayload.Withdrawals))
    for i, w := range b.Body.ExecutionPayload.Withdrawals {
        withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
        }
        validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
        }
        address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
        }
        amount, err := strconv.ParseUint(w.Amount, 10, 64)
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].Amount", i))
        }
        withdrawals[i] = &enginev1.Withdrawal{
            Index: withdrawalIndex,
            ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
            Address: address,
            Amount: amount,
        }
    }

    payloadBlobGasUsed, err := strconv.ParseUint(b.Body.ExecutionPayload.BlobGasUsed, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlobGasUsed")
    }
    payloadExcessBlobGas, err := strconv.ParseUint(b.Body.ExecutionPayload.ExcessBlobGas, 10, 64)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExcessBlobGas")
    }

    if b.Body.ExecutionRequests == nil {
        return nil, server.NewDecodeError(errors.New("nil execution requests"), "Body.ExequtionRequests")
    }

    depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionRequests.Deposits))
    for i, d := range b.Body.ExecutionRequests.Deposits {
        depositRequests[i], err = d.ToConsensus()
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Deposits[%d]", i))
        }
    }

    withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionRequests.Withdrawals))
    for i, w := range b.Body.ExecutionRequests.Withdrawals {
        withdrawalRequests[i], err = w.ToConsensus()
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Withdrawals[%d]", i))
        }
    }

    consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionRequests.Consolidations))
    for i, c := range b.Body.ExecutionRequests.Consolidations {
        consolidationRequests[i], err = c.ToConsensus()
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Consolidations[%d]", i))
        }
    }

    blsChanges, err := SignedBLSChangesToConsensus(b.Body.BLSToExecutionChanges)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.BLSToExecutionChanges")
    }
    err = slice.VerifyMaxLength(b.Body.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
    if err != nil {
        return nil, server.NewDecodeError(err, "Body.BlobKzgCommitments")
    }
    blobKzgCommitments := make([][]byte, len(b.Body.BlobKzgCommitments))
    for i, b := range b.Body.BlobKzgCommitments {
        kzg, err := bytesutil.DecodeHexWithLength(b, fieldparams.BLSPubkeyLength)
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("Body.BlobKzgCommitments[%d]", i))
        }
        blobKzgCommitments[i] = kzg
    }
    return &eth.BeaconBlockFulu{
        Slot: primitives.Slot(slot),
        ProposerIndex: primitives.ValidatorIndex(proposerIndex),
        ParentRoot: parentRoot,
        StateRoot: stateRoot,
        Body: &eth.BeaconBlockBodyFulu{
            RandaoReveal: randaoReveal,
            Eth1Data: &eth.Eth1Data{
                DepositRoot: depositRoot,
                DepositCount: depositCount,
                BlockHash: blockHash,
            },
            Graffiti: graffiti,
            ProposerSlashings: proposerSlashings,
            AttesterSlashings: attesterSlashings,
            Attestations: atts,
            Deposits: deposits,
            VoluntaryExits: exits,
            SyncAggregate: &eth.SyncAggregate{
                SyncCommitteeBits: syncCommitteeBits,
                SyncCommitteeSignature: syncCommitteeSig,
            },
            ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
                ParentHash: payloadParentHash,
                FeeRecipient: payloadFeeRecipient,
                StateRoot: payloadStateRoot,
                ReceiptsRoot: payloadReceiptsRoot,
                LogsBloom: payloadLogsBloom,
                PrevRandao: payloadPrevRandao,
                BlockNumber: payloadBlockNumber,
                GasLimit: payloadGasLimit,
                GasUsed: payloadGasUsed,
                Timestamp: payloadTimestamp,
                ExtraData: payloadExtraData,
                BaseFeePerGas: payloadBaseFeePerGas,
                BlockHash: payloadBlockHash,
                Transactions: txs,
                Withdrawals: withdrawals,
                BlobGasUsed: payloadBlobGasUsed,
                ExcessBlobGas: payloadExcessBlobGas,
            },
            BlsToExecutionChanges: blsChanges,
            BlobKzgCommitments: blobKzgCommitments,
            ExecutionRequests: &enginev1.ExecutionRequests{
                Deposits: depositRequests,
                Withdrawals: withdrawalRequests,
                Consolidations: consolidationRequests,
            },
        },
    }, nil
}

func (b *SignedBeaconBlockFulu) ToConsensus() (*eth.SignedBeaconBlockFulu, error) {
    if b == nil {
        return nil, errNilValue
@@ -3898,7 +3619,7 @@ func (b *BlindedBeaconBlockFulu) ToConsensus() (*eth.BlindedBeaconBlockFulu, err
        ProposerIndex: primitives.ValidatorIndex(proposerIndex),
        ParentRoot: parentRoot,
        StateRoot: stateRoot,
        Body: &eth.BlindedBeaconBlockBodyFulu{
        Body: &eth.BlindedBeaconBlockBodyElectra{
            RandaoReveal: randaoReveal,
            Eth1Data: &eth.Eth1Data{
                DepositRoot: depositRoot,
@@ -4015,7 +3736,7 @@ func BlindedBeaconBlockFuluFromConsensus(b *eth.BlindedBeaconBlockFulu) (*Blinde
        ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
        ParentRoot: hexutil.Encode(b.ParentRoot),
        StateRoot: hexutil.Encode(b.StateRoot),
        Body: &BlindedBeaconBlockBodyFulu{
        Body: &BlindedBeaconBlockBodyElectra{
            RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
            Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
            Graffiti: hexutil.Encode(b.Body.Graffiti),
@@ -4047,42 +3768,6 @@ func SignedBlindedBeaconBlockFuluFromConsensus(b *eth.SignedBlindedBeaconBlockFu
    }, nil
}

func BeaconBlockFuluFromConsensus(b *eth.BeaconBlockFulu) (*BeaconBlockFulu, error) {
    payload, err := ExecutionPayloadFuluFromConsensus(b.Body.ExecutionPayload)
    if err != nil {
        return nil, err
    }
    blobKzgCommitments := make([]string, len(b.Body.BlobKzgCommitments))
    for i := range b.Body.BlobKzgCommitments {
        blobKzgCommitments[i] = hexutil.Encode(b.Body.BlobKzgCommitments[i])
    }

    return &BeaconBlockFulu{
        Slot: fmt.Sprintf("%d", b.Slot),
        ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
        ParentRoot: hexutil.Encode(b.ParentRoot),
        StateRoot: hexutil.Encode(b.StateRoot),
        Body: &BeaconBlockBodyFulu{
            RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
            Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
            Graffiti: hexutil.Encode(b.Body.Graffiti),
            ProposerSlashings: ProposerSlashingsFromConsensus(b.Body.ProposerSlashings),
            AttesterSlashings: AttesterSlashingsElectraFromConsensus(b.Body.AttesterSlashings),
            Attestations: AttsElectraFromConsensus(b.Body.Attestations),
            Deposits: DepositsFromConsensus(b.Body.Deposits),
            VoluntaryExits: SignedExitsFromConsensus(b.Body.VoluntaryExits),
            SyncAggregate: &SyncAggregate{
                SyncCommitteeBits: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeBits),
                SyncCommitteeSignature: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeSignature),
            },
            ExecutionPayload: payload,
            BLSToExecutionChanges: SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
            BlobKzgCommitments: blobKzgCommitments,
            ExecutionRequests: ExecutionRequestsFromConsensus(b.Body.ExecutionRequests),
        },
    }, nil
}

func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBeaconBlockFulu, error) {
    block, err := BeaconBlockFuluFromConsensus(b.Block)
    if err != nil {
@@ -4097,4 +3782,5 @@ func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBe
var (
    ExecutionPayloadFuluFromConsensus = ExecutionPayloadDenebFromConsensus
    ExecutionPayloadHeaderFuluFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
    BeaconBlockFuluFromConsensus = BeaconBlockElectraFromConsensus
)
@@ -250,3 +250,17 @@ type ChainHead struct {
    PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
    OptimisticStatus bool `json:"optimistic_status"`
}

type GetPendingDepositsResponse struct {
    Version string `json:"version"`
    ExecutionOptimistic bool `json:"execution_optimistic"`
    Finalized bool `json:"finalized"`
    Data []*PendingDeposit `json:"data"`
}

type GetPendingPartialWithdrawalsResponse struct {
    Version string `json:"version"`
    ExecutionOptimistic bool `json:"execution_optimistic"`
    Finalized bool `json:"finalized"`
    Data []*PendingPartialWithdrawal `json:"data"`
}
@@ -25,6 +25,7 @@ go_library(
    "receive_attestation.go",
    "receive_blob.go",
    "receive_block.go",
    "receive_data_column.go",
    "service.go",
    "tracked_proposer.go",
    "weak_subjectivity_checks.go",
@@ -49,6 +50,7 @@ go_library(
    "//beacon-chain/core/feed/state:go_default_library",
    "//beacon-chain/core/helpers:go_default_library",
    "//beacon-chain/core/light-client:go_default_library",
    "//beacon-chain/core/peerdas:go_default_library",
    "//beacon-chain/core/signing:go_default_library",
    "//beacon-chain/core/time:go_default_library",
    "//beacon-chain/core/transition:go_default_library",
@@ -158,6 +160,7 @@ go_test(
    "//beacon-chain/operations/slashings:go_default_library",
    "//beacon-chain/operations/voluntaryexits:go_default_library",
    "//beacon-chain/p2p:go_default_library",
    "//beacon-chain/p2p/testing:go_default_library",
    "//beacon-chain/startup:go_default_library",
    "//beacon-chain/state:go_default_library",
    "//beacon-chain/state/state-native:go_default_library",
@@ -33,6 +33,7 @@ var (
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

// An invalid block is a block that fails the state transition based on the core protocol rules.
// The beacon node shall neither accept nor build blocks that branch off from an invalid block.
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "kzg.go",
        "trusted_setup.go",
        "validation.go",
    ],
@@ -12,6 +13,9 @@ go_library(
    deps = [
        "//consensus-types/blocks:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
beacon-chain/blockchain/kzg/kzg.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package kzg

import (
    "errors"

    ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob

// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte

// Commitment represents a KZG commitment to a Blob.
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48

// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32

// CellsAndProofs represents the Cells and Proofs corresponding to
// a single blob.
type CellsAndProofs struct {
    Cells []Cell
    Proofs []Proof
}

func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
    kzgBlob := kzg4844.Blob(*blob)
    comm, err := kzg4844.BlobToCommitment(&kzgBlob)
    if err != nil {
        return Commitment{}, err
    }
    return Commitment(comm), nil
}

func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
    kzgBlob := kzg4844.Blob(*blob)
    proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment))
    if err != nil {
        return [48]byte{}, err
    }
    return Proof(proof), nil
}

func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
    ckzgBlob := (*ckzg4844.Blob)(blob)
    ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
    if err != nil {
        return CellsAndProofs{}, err
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
    // Convert the `Cell` type to `ckzg4844.Cell`.
    ckzgCells := make([]ckzg4844.Cell, len(cells))
    for i := range cells {
        ckzgCells[i] = ckzg4844.Cell(cells[i])
    }

    return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
    // Convert the `Cell` type to `ckzg4844.Cell`.
    ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
    for i := range partialCells {
        ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
    }

    ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
    if err != nil {
        return CellsAndProofs{}, err
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

// makeCellsAndProofs converts cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
    if len(ckzgCells) != len(ckzgProofs) {
        return CellsAndProofs{}, errors.New("different number of cells/proofs")
    }

    var cells []Cell
    var proofs []Proof
    for i := range ckzgCells {
        cells = append(cells, Cell(ckzgCells[i]))
        proofs = append(proofs, Proof(ckzgProofs[i]))
    }

    return CellsAndProofs{
        Cells: cells,
        Proofs: proofs,
    }, nil
}
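
A short usage sketch of the package API shown above. The helper name is hypothetical and error handling is kept minimal; it assumes the trusted setup has already been loaded via `Start()` from trusted_setup.go below. It computes a blob's commitment and cell proofs, then batch-verifies every cell against that single shared commitment:

```go
// exampleCellRoundTrip is a hypothetical check, not part of the file above.
func exampleCellRoundTrip(blob *Blob) (bool, error) {
	commitment, err := BlobToKZGCommitment(blob)
	if err != nil {
		return false, err
	}
	cp, err := ComputeCellsAndKZGProofs(blob)
	if err != nil {
		return false, err
	}
	commitments := make([]Bytes48, len(cp.Cells))
	indices := make([]uint64, len(cp.Cells))
	proofs := make([]Bytes48, len(cp.Cells))
	for i := range cp.Cells {
		commitments[i] = Bytes48(commitment) // every cell shares the blob's commitment
		indices[i] = uint64(i)
		proofs[i] = Bytes48(cp.Proofs[i])
	}
	return VerifyCellKZGProofBatch(commitments, indices, cp.Cells, proofs)
}
```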

@@ -5,6 +5,8 @@ import (
    "encoding/json"

    GoKZG "github.com/crate-crypto/go-kzg-4844"
    CKZG "github.com/ethereum/c-kzg-4844/v2/bindings/go"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
)

@@ -12,17 +14,53 @@ var (
    //go:embed trusted_setup.json
    embeddedTrustedSetup []byte // 1.2Mb
    kzgContext *GoKZG.Context
    kzgLoaded bool
)

type TrustedSetup struct {
    G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
    G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
    G2Monomial [65]GoKZG.G2CompressedHexStr `json:"g2_monomial"`
}

func Start() error {
    parsedSetup := GoKZG.JSONTrustedSetup{}
    err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
    trustedSetup := &TrustedSetup{}
    err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
    if err != nil {
        return errors.Wrap(err, "could not parse trusted setup JSON")
    }
    kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
    kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
        SetupG2: trustedSetup.G2Monomial[:],
        SetupG1Lagrange: trustedSetup.G1Lagrange})
    if err != nil {
        return errors.Wrap(err, "could not initialize go-kzg context")
    }

    // Length of a G1 point, converted from hex to binary.
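    // A compressed G1 point is 48 bytes, i.e. "0x" plus 96 hex characters, hence (len-2)/2.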
    g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
    for i, g1 := range &trustedSetup.G1Monomial {
        copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
    }
    // Length of a G1 point, converted from hex to binary.
    g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
    for i, g1 := range &trustedSetup.G1Lagrange {
        copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
    }
    // Length of a G2 point, converted from hex to binary.
    g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
    for i, g2 := range &trustedSetup.G2Monomial {
        copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
    }
    if !kzgLoaded {
        // TODO: Provide a configuration option for this.
        var precompute uint = 8

        // Free the current trusted setup before running this method. CKZG
        // panics if the same setup is run multiple times.
        if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
            panic(err)
        }
    }
    kzgLoaded = true
    return nil
}
File diff suppressed because it is too large
@@ -69,6 +69,22 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
            log = log.WithField("kzgCommitmentCount", len(kzgs))
        }
    }
    if b.Version() >= version.Electra {
        eReqs, err := b.Body().ExecutionRequests()
        if err != nil {
            log.WithError(err).Error("Failed to get execution requests")
        } else {
            if len(eReqs.Deposits) > 0 {
                log = log.WithField("depositRequestCount", len(eReqs.Deposits))
            }
            if len(eReqs.Consolidations) > 0 {
                log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
            }
            if len(eReqs.Withdrawals) > 0 {
                log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
            }
        }
    }
    log.Info("Finished applying state transition")
    return nil
}
@@ -126,9 +126,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
|
||||
func WithP2PBroadcaster(p p2p.Acceser) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.P2p = p
|
||||
s.cfg.P2P = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,11 +3,13 @@ package blockchain
import (
	"context"
	"fmt"
	"slices"
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"

@@ -234,7 +236,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
			return err
		}
	}
	if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {

	nodeID := s.cfg.P2P.NodeID()
	if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), b); err != nil {
		return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
	}
	args := &forkchoicetypes.BlockAndCheckpoints{Block: b,

@@ -522,12 +526,39 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
	return missing, nil
}

func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
	if len(expected) == 0 {
		return nil, nil
	}

	if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
		return nil, errMaxDataColumnsExceeded
	}

	// Get a summary of the data columns stored in the database.
	summary := bs.Summary(root)

	// Check all expected data columns against the summary.
	missing := make(map[uint64]bool)
	for column := range expected {
		if !summary.HasDataColumnIndex(column) {
			missing[column] = true
		}
	}

	return missing, nil
}
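A hypothetical call site for this helper, with store, root, and custodyColumns as stand-in names; it mirrors how the data availability check further down consumes the result:

	// Diff the columns this node must custody against what the blob storage
	// summary already has on disk for the given block root.
	missing, err := missingDataColumns(store, root, custodyColumns)
	if err != nil {
		return errors.Wrap(err, "missing data columns")
	}
	if len(missing) == 0 {
		return nil // every expected column is already persisted
	}
	// Otherwise, wait on storage notifications for the missing indices.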
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
	if coreTime.PeerDASIsActive(signed.Block().Slot()) {
		return s.areDataColumnsAvailable(ctx, root, signed)
	}

	if signed.Version() < version.Deneb {
		return nil
	}

@@ -557,7 +588,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
	// get a map of BlobSidecar indices that are not currently available.
	missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
	if err != nil {
		return err
		return errors.Wrap(err, "missing indices")
	}
	// If there are no missing indices, all BlobSidecars are available.
	if len(missing) == 0 {

@@ -576,8 +607,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
		if len(missing) == 0 {
			return
		}
		log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
			Error("Still waiting for DA check at slot end.")

		log.WithFields(logrus.Fields{
			"slot":          signed.Block().Slot(),
			"root":          fmt.Sprintf("%#x", root),
			"blobsExpected": expected,
			"blobsWaiting":  len(missing),
		}).Error("Still waiting for blobs DA check at slot end.")
	})
	defer nst.Stop()
}

@@ -599,12 +635,178 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
	}
}

func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
	return logrus.Fields{
		"slot":          slot,
		"root":          fmt.Sprintf("%#x", root),
		"blobsExpected": expected,
		"blobsWaiting":  missing,

// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
	output := make([]uint64, 0, len(input))
	for idx := range input {
		output = append(output, idx)
	}
	slices.Sort[[]uint64](output)
	return output
}

func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
	if signedBlock.Version() < version.Fulu {
		return nil
	}

	block := signedBlock.Block()
	if block == nil {
		return errors.New("invalid nil beacon block")
	}

	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS.
	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)

	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
		return nil
	}

	body := block.Body()
	if body == nil {
		return errors.New("invalid nil beacon block body")
	}

	kzgCommitments, err := body.BlobKzgCommitments()
	if err != nil {
		return errors.Wrap(err, "blob KZG commitments")
	}

	// If the block has no commitments, there is nothing to wait for.
	if len(kzgCommitments) == 0 {
		return nil
	}

	// All columns to sample need to be available for the block to be considered available.
	// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
	nodeID := s.cfg.P2P.NodeID()

	// Prevent the custody group count from changing during the rest of the function.
	peerdas.CustodyGroupCountMut.RLock()
	defer peerdas.CustodyGroupCountMut.RUnlock()

	// Get the custody group sampling size for the node.
	custodyGroupSamplingSize := peerdas.CustodyGroupSamplingSize(peerdas.Actual)
	peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
	if err != nil {
		return errors.Wrap(err, "peer info")
	}

	// Exit early if the node is not expected to custody any data columns.
	if len(peerInfo.CustodyColumns) == 0 {
		return nil
	}

	// Subscribe to newly stored data columns in the database.
	rootIndexChan := make(chan filesystem.RootIndexPair)
	subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
	defer subscription.Unsubscribe()

	// Get the count of data columns we already have in the store.
	summary := s.blobStorage.Summary(root)
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	retrievedDataColumnsCount := uint64(0)
	for column := range numberOfColumns {
		if summary.HasDataColumnIndex(column) {
			retrievedDataColumnsCount++
		}
	}

	// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
	// We don't need to wait for the rest of the data columns to declare the block as available.
	if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
		return nil
	}

	// Get a map of data column indices that are not currently available.
	missingMap, err := missingDataColumns(s.blobStorage, root, peerInfo.CustodyColumns)
	if err != nil {
		return err
	}

	// If there are no missing indices, all data column sidecars are available.
	// This is the happy path.
	if len(missingMap) == 0 {
		return nil
	}

	// Log for DA checks that cross over into the next slot; helpful for debugging.
	nextSlot := slots.BeginsAt(signedBlock.Block().Slot()+1, s.genesisTime)
	// Avoid logging if the DA check is called after the next slot has started.
	if nextSlot.After(time.Now()) {
		nst := time.AfterFunc(time.Until(nextSlot), func() {
			missingMapCount := uint64(len(missingMap))

			if missingMapCount == 0 {
				return
			}

			var (
				expected interface{} = "all"
				missing  interface{} = "all"
			)

			numberOfColumns := params.BeaconConfig().NumberOfColumns
			colMapCount := uint64(len(peerInfo.CustodyColumns))

			if colMapCount < numberOfColumns {
				expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
			}

			if missingMapCount < numberOfColumns {
				missing = uint64MapToSortedSlice(missingMap)
			}

			log.WithFields(logrus.Fields{
				"slot":            signedBlock.Block().Slot(),
				"root":            fmt.Sprintf("%#x", root),
				"columnsExpected": expected,
				"columnsWaiting":  missing,
			}).Error("Some data columns are still unavailable at slot end")
		})

		defer nst.Stop()
	}

	for {
		select {
		case rootIndex := <-rootIndexChan:
			if rootIndex.Root != root {
				// This is not the root we are looking for.
				continue
			}

			// This is a data column we are expecting.
			if _, ok := missingMap[rootIndex.Index]; ok {
				retrievedDataColumnsCount++
			}

			// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
			// We don't need to wait for the rest of the data columns to declare the block as available.
			if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
				return nil
			}

			// Remove the index from the missing map.
			delete(missingMap, rootIndex.Index)

			// Exit if there are no more missing data columns.
			if len(missingMap) == 0 {
				return nil
			}
		case <-ctx.Done():
			var missingIndices interface{} = "all"
			numberOfColumns := params.BeaconConfig().NumberOfColumns
			missingIndicesCount := uint64(len(missingMap))

			if missingIndicesCount < numberOfColumns {
				missingIndices = uint64MapToSortedSlice(missingMap)
			}

			return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
		}
	}
}
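The early exits above hinge on peerdas.CanSelfReconstruct: once at least half of the extended columns are on disk, the remainder can be rebuilt by erasure decoding, so the check can succeed before every custody column arrives. A rough sketch of that predicate, assuming the 50% reconstruction threshold of the extended matrix (the authoritative version lives in the peerdas package):

	// Sketch under the stated assumption: holding at least half of the
	// NumberOfColumns extended columns is enough to reconstruct the rest.
	func canSelfReconstruct(retrievedColumnCount uint64) bool {
		return 2*retrievedColumnCount >= params.BeaconConfig().NumberOfColumns
	}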
@@ -691,7 +893,7 @@ func (s *Service) waitForSync() error {
	}
}

func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
	if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
		return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
	}

@@ -52,6 +52,12 @@ type BlobReceiver interface {
	ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}

// DataColumnReceiver interface defines the methods of the chain service for receiving new
// data columns.
type DataColumnReceiver interface {
	ReceiveDataColumn(blocks.VerifiedRODataColumn) error
}

// SlashingReceiver interface defines the methods of the chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
	ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)

@@ -70,6 +76,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
		return nil
	}

	receivedTime := time.Now()
	s.blockBeingSynced.set(blockRoot)
	defer s.blockBeingSynced.unset(blockRoot)

@@ -78,6 +85,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	if err != nil {
		return err
	}

	preState, err := s.getBlockPreState(ctx, blockCopy.Block())
	if err != nil {
		return errors.Wrap(err, "could not get block's prestate")

@@ -93,10 +101,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	if err != nil {
		return err
	}

	daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
	if err != nil {
		return err
	}

	// Defragment the state before continuing block processing.
	s.defragmentState(postState)

@@ -232,12 +242,14 @@ func (s *Service) handleDA(
		if err != nil {
			return 0, err
		}
		if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {

		nodeID := s.cfg.P2P.NodeID()
		if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), rob); err != nil {
			return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
		}
	} else {
		if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
			return 0, errors.Wrap(err, "could not validate blob data availability")
			return 0, errors.Wrap(err, "is data available")
		}
	}
	daWaitedTime := time.Since(daStartTime)

14 beacon-chain/blockchain/receive_data_column.go (Normal file)
@@ -0,0 +1,14 @@
package blockchain

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ds blocks.VerifiedRODataColumn) error {
	if err := s.blobStorage.SaveDataColumn(ds); err != nil {
		return errors.Wrap(err, "save data column")
	}

	return nil
}
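ReceiveDataColumn is deliberately thin: persisting through blobStorage.SaveDataColumn is what publishes on DataColumnFeed, which in turn unblocks the rootIndexChan loop in areDataColumnsAvailable above. A hedged sketch of the hand-off from a sync-side caller (the verifiedSidecar name is illustrative):

	// A verified sidecar handed to the chain service both lands on disk and
	// wakes any in-flight data availability check for the same block root.
	if err := chainService.ReceiveDataColumn(verifiedSidecar); err != nil {
		return errors.Wrap(err, "receive data column")
	}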
@@ -33,6 +33,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/v5/config/features"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"

@@ -80,7 +81,7 @@ type config struct {
	ExitPool      voluntaryexits.PoolManager
	SlashingPool  slashings.PoolManager
	BLSToExecPool blstoexec.PoolManager
	P2p           p2p.Broadcaster
	P2P           p2p.Acceser
	MaxRoutines   int
	StateNotifier statefeed.Notifier
	ForkChoiceStore f.ForkChoicer

@@ -105,22 +106,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
	sync.RWMutex
	notifiers map[[32]byte]chan uint64
	seenIndex map[[32]byte][]bool
	// TODO: Separate blobs from data columns
	// seenIndex map[[32]byte][]bool
	seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	if idx >= uint64(maxBlobsPerBlock) {
		return
	}
	// TODO: Separate blobs from data columns
	// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	// if idx >= uint64(maxBlobsPerBlock) {
	// 	return
	// }

	bn.Lock()
	seen := bn.seenIndex[root]
	if seen == nil {
		seen = make([]bool, maxBlobsPerBlock)
	}
	// TODO: Separate blobs from data columns
	// if seen == nil {
	// 	seen = make([]bool, maxBlobsPerBlock)
	// }
	if seen[idx] {
		bn.Unlock()
		return

@@ -131,7 +136,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
	// Retrieve or create the notifier channel for the given root.
	c, ok := bn.notifiers[root]
	if !ok {
		c = make(chan uint64, maxBlobsPerBlock)
		// TODO: Separate blobs from data columns
		// c = make(chan uint64, maxBlobsPerBlock)
		c = make(chan uint64, fieldparams.NumberOfColumns)
		bn.notifiers[root] = c
	}

@@ -141,12 +148,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}

func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	// TODO: Separate blobs from data columns
	// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	bn.Lock()
	defer bn.Unlock()
	c, ok := bn.notifiers[root]
	if !ok {
		c = make(chan uint64, maxBlobsPerBlock)
		// TODO: Separate blobs from data columns
		// c = make(chan uint64, maxBlobsPerBlock)
		c = make(chan uint64, fieldparams.NumberOfColumns)
		bn.notifiers[root] = c
	}
	return c

@@ -172,7 +182,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	bn := &blobNotifierMap{
		notifiers: make(map[[32]byte]chan uint64),
		seenIndex: make(map[[32]byte][]bool),
		// TODO: Separate blobs from data columns
		// seenIndex: make(map[[32]byte][]bool),
		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
	}
	srv := &Service{
		ctx: ctx,

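With blobs and data columns sharing one notifier, the per-root channel and seen-array are now sized for the worst case, fieldparams.NumberOfColumns, instead of the slot's MaxBlobsPerBlock; a smaller buffer could block notifyIndex once column notifications outnumber blob slots. The invariant, stated as a sketch under the assumption NumberOfColumns = 128:

	// One buffered slot per possible index, so notifyIndex never blocks:
	// 128 columns exceeds any MaxBlobsPerBlock value in use.
	c := make(chan uint64, fieldparams.NumberOfColumns)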
@@ -97,13 +97,14 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
		WithAttestationPool(attestations.NewPool()),
		WithSlashingPool(slashings.NewPool()),
		WithExitPool(voluntaryexits.NewPool()),
		WithP2PBroadcaster(&mockBroadcaster{}),
		WithP2PBroadcaster(&mockAccesser{}),
		WithStateNotifier(&mockBeaconNode{}),
		WithForkChoiceStore(fc),
		WithAttestationService(attService),
		WithStateGen(stateGen),
		WithPayloadIDCache(cache.NewPayloadIDCache()),
		WithClockSynchronizer(startup.NewClockSynchronizer()),
		WithP2PBroadcaster(&mockAccesser{}),
	}

	chainService, err := NewService(ctx, opts...)

@@ -587,7 +588,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
	// Initialize a blobNotifierMap
	bn := &blobNotifierMap{
		seenIndex: make(map[[32]byte][]bool),
		// TODO: Separate blobs from data columns
		// seenIndex: make(map[[32]byte][]bool),
		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
		notifiers: make(map[[32]byte]chan uint64),
	}

@@ -19,8 +19,10 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
	p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"google.golang.org/protobuf/proto"

@@ -45,6 +47,11 @@ type mockBroadcaster struct {
	broadcastCalled bool
}

type mockAccesser struct {
	mockBroadcaster
	p2pTesting.MockPeerManager
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
	mb.broadcastCalled = true
	return nil

@@ -65,6 +72,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
	return nil
}

func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
	mb.broadcastCalled = true
	return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

@@ -122,6 +134,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
		WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
		WithSyncChecker(mock.MockChecker{}),
		WithExecutionEngineCaller(&mockExecution.EngineClient{}),
		WithP2PBroadcaster(&mockAccesser{}),
	}
	// append the variadic opts so they override the defaults by being processed afterwards
	opts = append(defOpts, opts...)

@@ -702,6 +702,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
	return nil
}

// ReceiveDataColumn implements the same method in the chain service.
func (*ChainService) ReceiveDataColumn(_ blocks.VerifiedRODataColumn) error {
	return nil
}

// TargetRootForEpoch mocks the same method in the chain service.
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
	return c.TargetRoot, nil
1 beacon-chain/cache/BUILD.bazel (vendored)
@@ -84,6 +84,7 @@ go_test(
        "sync_committee_head_state_test.go",
        "sync_committee_test.go",
        "sync_subnet_ids_test.go",
        "tracked_validators_test.go",
    ],
    embed = [":go_default_library"],
    deps = [

138 beacon-chain/cache/tracked_validators.go (vendored)
@@ -1,49 +1,139 @@
package cache

import (
	"sync"
	"strconv"
	"time"

	"github.com/patrickmn/go-cache"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/sirupsen/logrus"
)

type TrackedValidator struct {
	Active       bool
	FeeRecipient primitives.ExecutionAddress
	Index        primitives.ValidatorIndex
}
const (
	defaultExpiration = 1 * time.Hour
	cleanupInterval   = 15 * time.Minute
)

type TrackedValidatorsCache struct {
	sync.Mutex
	trackedValidators map[primitives.ValidatorIndex]TrackedValidator
}
type (
	TrackedValidator struct {
		Active       bool
		FeeRecipient primitives.ExecutionAddress
		Index        primitives.ValidatorIndex
	}

	TrackedValidatorsCache struct {
		trackedValidators cache.Cache
	}
)

var (
	// Metrics.
	trackedValidatorsCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "tracked_validators_cache_miss",
		Help: "The number of tracked validators requests that are not present in the cache.",
	})

	trackedValidatorsCacheTotal = promauto.NewCounter(prometheus.CounterOpts{
		Name: "tracked_validators_cache_total",
		Help: "The total number of tracked validators requests in the cache.",
	})

	trackedValidatorsCacheCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "tracked_validators_cache_count",
		Help: "The number of tracked validators in the cache.",
	})
)

// NewTrackedValidatorsCache creates a new cache for tracking validators.
func NewTrackedValidatorsCache() *TrackedValidatorsCache {
	return &TrackedValidatorsCache{
		trackedValidators: make(map[primitives.ValidatorIndex]TrackedValidator),
		trackedValidators: *cache.New(defaultExpiration, cleanupInterval),
	}
}

// Validator retrieves a tracked validator from the cache (if present).
func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (TrackedValidator, bool) {
	t.Lock()
	defer t.Unlock()
	val, ok := t.trackedValidators[index]
	return val, ok
	trackedValidatorsCacheTotal.Inc()

	key := toCacheKey(index)
	item, ok := t.trackedValidators.Get(key)
	if !ok {
		trackedValidatorsCacheMiss.Inc()
		return TrackedValidator{}, false
	}

	val, ok := item.(TrackedValidator)
	if !ok {
		logrus.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
		return TrackedValidator{}, false
	}

	return val, true
}

// Set adds a tracked validator to the cache.
func (t *TrackedValidatorsCache) Set(val TrackedValidator) {
	t.Lock()
	defer t.Unlock()
	t.trackedValidators[val.Index] = val
	key := toCacheKey(val.Index)
	t.trackedValidators.Set(key, val, cache.DefaultExpiration)
}

// Prune removes all tracked validators from the cache.
func (t *TrackedValidatorsCache) Prune() {
	t.Lock()
	defer t.Unlock()
	t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
	t.trackedValidators.Flush()
	trackedValidatorsCacheCount.Set(0)
}

// Validating returns true if there is at least one tracked validator in the cache.
func (t *TrackedValidatorsCache) Validating() bool {
	t.Lock()
	defer t.Unlock()
	return len(t.trackedValidators) > 0
	count := t.trackedValidators.ItemCount()
	trackedValidatorsCacheCount.Set(float64(count))

	return count > 0
}

// ItemCount returns the number of tracked validators in the cache.
func (t *TrackedValidatorsCache) ItemCount() int {
	count := t.trackedValidators.ItemCount()
	trackedValidatorsCacheCount.Set(float64(count))

	return count
}

// Indices returns a map of validator indices that are being tracked.
func (t *TrackedValidatorsCache) Indices() map[primitives.ValidatorIndex]bool {
	items := t.trackedValidators.Items()
	count := len(items)
	trackedValidatorsCacheCount.Set(float64(count))

	indices := make(map[primitives.ValidatorIndex]bool, count)

	for cacheKey := range items {
		index, err := fromCacheKey(cacheKey)
		if err != nil {
			logrus.WithError(err).Error("Failed to get validator index from cache key")
			continue
		}

		indices[index] = true
	}

	return indices
}

// toCacheKey creates a cache key from the validator index.
func toCacheKey(validatorIndex primitives.ValidatorIndex) string {
	return strconv.FormatUint(uint64(validatorIndex), 10)
}

// fromCacheKey gets the validator index from the cache key.
func fromCacheKey(key string) (primitives.ValidatorIndex, error) {
	validatorIndex, err := strconv.ParseUint(key, 10, 64)
	if err != nil {
		return 0, errors.Wrapf(err, "parse Uint: %s", key)
	}

	return primitives.ValidatorIndex(validatorIndex), nil
}
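One behavioural difference worth flagging: go-cache entries expire defaultExpiration (one hour) after their last Set and are swept every cleanupInterval, so callers must re-Set tracked validators periodically, whereas the old map held entries until Prune. A minimal usage sketch:

	vc := NewTrackedValidatorsCache()
	vc.Set(TrackedValidator{Active: true, Index: 42})

	// Hit: the entry stays resident until the one-hour TTL lapses
	// (or until Prune flushes the whole cache).
	if val, ok := vc.Validator(42); ok {
		_ = val.FeeRecipient
	}

	// Re-Set refreshes the TTL; an entry not re-Set within the hour is evicted.
	vc.Set(TrackedValidator{Active: true, Index: 42})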
79 beacon-chain/cache/tracked_validators_test.go (vendored, Normal file)
@@ -0,0 +1,79 @@
package cache

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func mapEqual(a, b map[primitives.ValidatorIndex]bool) bool {
	if len(a) != len(b) {
		return false
	}

	for k, v := range a {
		if b[k] != v {
			return false
		}
	}

	return true
}

func TestTrackedValidatorsCache(t *testing.T) {
	vc := NewTrackedValidatorsCache()

	// No validators in cache.
	require.Equal(t, 0, vc.ItemCount())
	require.Equal(t, false, vc.Validating())
	require.Equal(t, 0, len(vc.Indices()))

	_, ok := vc.Validator(41)
	require.Equal(t, false, ok)

	// Add some validators (one twice).
	v42Expected := TrackedValidator{Active: true, FeeRecipient: [20]byte{1}, Index: 42}
	v43Expected := TrackedValidator{Active: false, FeeRecipient: [20]byte{2}, Index: 43}

	vc.Set(v42Expected)
	vc.Set(v43Expected)
	vc.Set(v42Expected)

	// Check if they are in the cache.
	v42Actual, ok := vc.Validator(42)
	require.Equal(t, true, ok)
	require.Equal(t, v42Expected, v42Actual)

	v43Actual, ok := vc.Validator(43)
	require.Equal(t, true, ok)
	require.Equal(t, v43Expected, v43Actual)

	expected := map[primitives.ValidatorIndex]bool{42: true, 43: true}
	actual := vc.Indices()
	require.Equal(t, true, mapEqual(expected, actual))

	// Check the item count and if the cache is validating.
	require.Equal(t, 2, vc.ItemCount())
	require.Equal(t, true, vc.Validating())

	// Check if a non-existing validator is in the cache.
	_, ok = vc.Validator(41)
	require.Equal(t, false, ok)

	// Prune the cache and test it.
	vc.Prune()

	_, ok = vc.Validator(41)
	require.Equal(t, false, ok)

	_, ok = vc.Validator(42)
	require.Equal(t, false, ok)

	_, ok = vc.Validator(43)
	require.Equal(t, false, ok)

	require.Equal(t, 0, vc.ItemCount())
	require.Equal(t, false, vc.Validating())
	require.Equal(t, 0, len(vc.Indices()))
}
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockHeaderSignatureUsingCurrentFork verifies the proposer signature of a signed beacon
// block header, retrieving fork data via the header's epoch rather than from the state.
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
	currentEpoch := slots.ToEpoch(header.Header.Slot)
	fork, err := forks.Fork(currentEpoch)
	if err != nil {
		return err
	}
	domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
	if err != nil {
		return err
	}
	proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
	if err != nil {
		return err
	}
	proposerPubKey := proposer.PublicKey
	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.

@@ -35,6 +35,9 @@ const (

	// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
	SingleAttReceived = 9

	// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
	DataColumnSidecarReceived = 10
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.

@@ -85,3 +88,6 @@ type AttesterSlashingReceivedData struct {
type SingleAttReceivedData struct {
	Attestation ethpb.Att
}

// DataColumnSidecarReceivedData is the data sent with DataColumnSidecarReceived events.
type DataColumnSidecarReceivedData struct {
	DataColumn *blocks.VerifiedRODataColumn
}

@@ -102,7 +102,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
		return nil, err
	}

	s := &ethpb.BeaconStateFulu{
	s := &ethpb.BeaconStateElectra{
		GenesisTime:           beaconState.GenesisTime(),
		GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
		Slot:                  beaconState.Slot(),

@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
	helpers.ClearCache()
	params.SetupTestConfigCleanup(t)

	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{

@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	helpers.ClearCache()

	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

67 beacon-chain/core/peerdas/BUILD.bazel (Normal file)
@@ -0,0 +1,67 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "das_core.go",
        "info.go",
        "metrics.go",
        "p2p_interface.go",
        "peer_sampling.go",
        "reconstruction.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "das_core_test.go",
        "info_test.go",
        "p2p_interface_test.go",
        "peer_sampling_test.go",
        "reconstruction_test.go",
        "utils_test.go",
    ],
    deps = [
        ":go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
399 beacon-chain/core/peerdas/das_core.go (Normal file)
@@ -0,0 +1,399 @@
package peerdas

import (
	"context"
	"encoding/binary"
	"math"
	"slices"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	beaconState "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"golang.org/x/sync/errgroup"
)

var (
	// Custom errors.
	errCustodyGroupCountTooLarge      = errors.New("custody group count too large")
	errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")

	// maxUint256 is the maximum value of a uint256.
	maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)

type CustodyType int

const (
	Target CustodyType = iota
	Actual
)

// CustodyGroups computes the custody groups the node should participate in for custody.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_custody_groups
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) (map[uint64]bool, error) {
	numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups

	// Check if the custody group count is larger than the number of custody groups.
	if custodyGroupCount > numberOfCustodyGroup {
		return nil, errCustodyGroupCountTooLarge
	}

	custodyGroups := make(map[uint64]bool, custodyGroupCount)
	one := uint256.NewInt(1)

	for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(custodyGroups)) < custodyGroupCount; currentId.Add(currentId, one) {
		// Convert to big endian bytes.
		currentIdBytesBigEndian := currentId.Bytes32()

		// Convert to little endian.
		currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])

		// Hash the result.
		hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)

		// Get the custody group ID.
		custodyGroupId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup

		// Add the custody group to the map.
		custodyGroups[custodyGroupId] = true

		// Overflow prevention.
		if currentId.Cmp(maxUint256) == 0 {
			currentId = uint256.NewInt(0)
		}
	}

	// Final check.
	if uint64(len(custodyGroups)) != custodyGroupCount {
		return nil, errWrongComputedCustodyGroupCount
	}

	return custodyGroups, nil
}
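A hypothetical caller, chaining the two lookups this file provides (CustodyColumns is defined further down); the walk is deterministic in the node ID, so every peer can recompute any other peer's custody set:

	// Derive this node's custody groups from its node ID, then expand
	// them into the concrete column indices to custody.
	groups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount)
	if err != nil {
		return errors.Wrap(err, "custody groups")
	}
	columns, err := peerdas.CustodyColumns(groups)
	if err != nil {
		return errors.Wrap(err, "custody columns")
	}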
// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#compute_columns_for_custody_group
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
	beaconConfig := params.BeaconConfig()
	numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups

	if custodyGroup > numberOfCustodyGroup {
		return nil, errCustodyGroupCountTooLarge
	}

	numberOfColumns := beaconConfig.NumberOfColumns

	columnsPerGroup := numberOfColumns / numberOfCustodyGroup

	columns := make([]uint64, 0, columnsPerGroup)
	for i := range columnsPerGroup {
		column := numberOfCustodyGroup*i + custodyGroup
		columns = append(columns, column)
	}

	return columns, nil
}
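A worked example of the layout, under the assumption that NumberOfColumns = 128 (the fieldparams value used throughout this PR) and NumberOfCustodyGroups = 128:

	// columnsPerGroup = 128 / 128 = 1, so group g custodies exactly column g:
	cols, _ := peerdas.ComputeColumnsForCustodyGroup(7) // -> [7]
	// With 64 groups instead, columnsPerGroup would be 2 and the stride equal
	// to the group count, so group g would custody columns g and g+64.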
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#get_data_column_sidecars
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
	startTime := time.Now()
	blobsCount := len(blobs)
	if blobsCount == 0 {
		return nil, nil
	}

	// Get the signed block header.
	signedBlockHeader, err := signedBlock.Header()
	if err != nil {
		return nil, errors.Wrap(err, "signed block header")
	}

	// Get the block body.
	block := signedBlock.Block()
	blockBody := block.Body()

	// Get the blob KZG commitments.
	blobKzgCommitments, err := blockBody.BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	// Compute the KZG commitments inclusion proof.
	kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
	if err != nil {
		return nil, errors.Wrap(err, "merkle proof KZG commitments")
	}

	// Compute cells and proofs.
	cellsAndProofs := make([]kzg.CellsAndProofs, blobsCount)

	eg, _ := errgroup.WithContext(context.Background())
	for i := range blobs {
		blobIndex := i
		eg.Go(func() error {
			blob := &blobs[blobIndex]
			blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
			if err != nil {
				return errors.Wrap(err, "compute cells and KZG proofs")
			}

			cellsAndProofs[blobIndex] = blobCellsAndProofs
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}

	// Get the column sidecars.
	sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
	for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
		column := make([]kzg.Cell, 0, blobsCount)
		kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

		for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
			cellsForRow := cellsAndProofs[rowIndex].Cells
			proofsForRow := cellsAndProofs[rowIndex].Proofs

			cell := cellsForRow[columnIndex]
			column = append(column, cell)

			kzgProof := proofsForRow[columnIndex]
			kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
		}

		columnBytes := make([][]byte, 0, blobsCount)
		for i := range column {
			columnBytes = append(columnBytes, column[i][:])
		}

		kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
		for _, kzgProof := range kzgProofOfColumn {
			copiedProof := kzgProof
			kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
		}

		sidecar := &ethpb.DataColumnSidecar{
			ColumnIndex:                  columnIndex,
			DataColumn:                   columnBytes,
			KzgCommitments:               blobKzgCommitments,
			KzgProof:                     kzgProofOfColumnBytes,
			SignedBlockHeader:            signedBlockHeader,
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		}

		sidecars = append(sidecars, sidecar)
	}
	dataColumnComputationTime.Observe(float64(time.Since(startTime).Milliseconds()))
	return sidecars, nil
}
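In matrix terms, cellsAndProofs[row].Cells[col] is cell col of blob row, and sidecar col stacks that column across every row, so the sidecars are the transpose of the per-blob cell matrix. A hypothetical shape check, as a test fragment:

	sidecars, err := peerdas.DataColumnSidecars(signedBlock, blobs)
	require.NoError(t, err)
	require.Equal(t, fieldparams.NumberOfColumns, len(sidecars))
	for _, sidecar := range sidecars {
		// Every column sidecar carries one cell and one proof per blob.
		require.Equal(t, len(blobs), len(sidecar.DataColumn))
		require.Equal(t, len(blobs), len(sidecar.KzgProof))
	}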
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
func CustodyGroupSamplingSize(ct CustodyType) uint64 {
	custodyGroupCount := TargetCustodyGroupCount.Get()

	if ct == Actual {
		custodyGroupCount = ActualCustodyGroupCount()
	}

	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
	return max(samplesPerSlot, custodyGroupCount)
}
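Worked values, assuming the spec default SAMPLES_PER_SLOT = 8: sampling never drops below the protocol floor, so a node custodying few groups still samples the minimum, while a heavily subscribed node samples everything it custodies.

	// max(samplesPerSlot, custodyGroupCount), assuming samplesPerSlot = 8:
	//   custodyGroupCount = 4   -> sampling size = max(8, 4)   = 8
	//   custodyGroupCount = 100 -> sampling size = max(8, 100) = 100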
// CustodyColumns computes the custody columns from the custody groups.
func CustodyColumns(custodyGroups map[uint64]bool) (map[uint64]bool, error) {
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups

	custodyGroupCount := len(custodyGroups)

	// Compute the columns for each custody group.
	columns := make(map[uint64]bool, custodyGroupCount)
	for group := range custodyGroups {
		if group >= numberOfCustodyGroups {
			return nil, errCustodyGroupCountTooLarge
		}

		groupColumns, err := ComputeColumnsForCustodyGroup(group)
		if err != nil {
			return nil, errors.Wrap(err, "compute columns for custody group")
		}

		for _, column := range groupColumns {
			columns[column] = true
		}
	}

	return columns, nil
}

// ValidatorsCustodyRequirement returns the number of custody groups required for the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.BeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
	totalNodeBalance := uint64(0)
	for index := range validatorsIndex {
		balance, err := state.BalanceAtIndex(index)
		if err != nil {
			return 0, errors.Wrapf(err, "balance at index for validator index %v", index)
		}

		totalNodeBalance += balance
	}

	beaconConfig := params.BeaconConfig()
	numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
	validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
	balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup

	count := totalNodeBalance / balancePerAdditionalCustodyGroup
	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
}
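The arithmetic lines up with TestValidatorsCustodyRequirement in the test file below, assuming 32 ETH of balance per validator and BalancePerAdditionalCustodyGroup equal to 32 ETH:

	// count = totalNodeBalance / balancePerAdditionalCustodyGroup, clamped to
	// [ValidatorCustodyRequirement, NumberOfCustodyGroups] = [8, 128]:
	//   1 validator:     count = 1    -> min(max(1, 8), 128)    = 8
	//   100 validators:  count = 100  -> min(max(100, 8), 128)  = 100
	//   1000 validators: count = 1000 -> min(max(1000, 8), 128) = 128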
// Blobs extracts blobs from `dataColumnsSidecar`.
// This can be seen as the reciprocal function of DataColumnSidecars.
// `dataColumnsSidecar` needs to contain the data columns corresponding to the non-extended matrix,
// else an error will be returned.
// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.)
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
	columnCount := fieldparams.NumberOfColumns

	neededColumnCount := columnCount / 2

	// Check if all needed columns are present.
	sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
	for i := range dataColumnsSidecar {
		dataColumnSideCar := dataColumnsSidecar[i]
		columnIndex := dataColumnSideCar.ColumnIndex

		if columnIndex < uint64(neededColumnCount) {
			sliceIndexFromColumnIndex[columnIndex] = i
		}
	}

	actualColumnCount := len(sliceIndexFromColumnIndex)

	// Get missing columns.
	if actualColumnCount < neededColumnCount {
		missingColumns := make(map[int]bool, neededColumnCount-actualColumnCount)
		for i := range neededColumnCount {
			if _, ok := sliceIndexFromColumnIndex[uint64(i)]; !ok {
				missingColumns[i] = true
			}
		}

		missingColumnsSlice := make([]int, 0, len(missingColumns))
		for i := range missingColumns {
			missingColumnsSlice = append(missingColumnsSlice, i)
		}

		slices.Sort[[]int](missingColumnsSlice)
		return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
	}

	// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
	firstDataColumnSidecar := dataColumnsSidecar[0]

	blobCount := uint64(len(firstDataColumnSidecar.DataColumn))

	// Check that all columns have the same length.
	for i := range dataColumnsSidecar {
		if uint64(len(dataColumnsSidecar[i].DataColumn)) != blobCount {
			return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].DataColumn))
		}
	}

	// Reconstruct verified RO blobs from columns.
	verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)

	// Populate and filter indices.
	indicesSlice := populateAndFilterIndices(indices, blobCount)

	for _, blobIndex := range indicesSlice {
		var blob kzg.Blob

		// Compute the content of the blob.
		for columnIndex := range neededColumnCount {
			sliceIndex, ok := sliceIndexFromColumnIndex[uint64(columnIndex)]
			if !ok {
				return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
			}

			dataColumnSideCar := dataColumnsSidecar[sliceIndex]
			cell := dataColumnSideCar.DataColumn[blobIndex]

			for i := 0; i < len(cell); i++ {
				blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
			}
		}

		// Retrieve the blob KZG commitment.
		blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])

		// Compute the blob KZG proof.
		blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
		if err != nil {
			return nil, errors.Wrap(err, "compute blob KZG proof")
		}

		blobSidecar := &ethpb.BlobSidecar{
			Index:                    blobIndex,
			Blob:                     blob[:],
			KzgCommitment:            blobKZGCommitment[:],
			KzgProof:                 blobKzgProof[:],
			SignedBlockHeader:        firstDataColumnSidecar.SignedBlockHeader,
			CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
		}

		roBlob, err := blocks.NewROBlob(blobSidecar)
		if err != nil {
			return nil, errors.Wrap(err, "new RO blob")
		}

		verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
		verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
	}

	return verifiedROBlobs, nil
}

// populateAndFilterIndices returns a sorted slice of indices, setting all indices if none are provided,
// and filtering out indices higher than the blob count.
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
	// If no indices are provided, include all blobs.
	if len(indices) == 0 {
		for i := range blobCount {
			indices[i] = true
		}
	}

	// Filter out indices higher than the blob count.
	filteredIndices := make(map[uint64]bool, len(indices))
	for i := range indices {
		if i < blobCount {
			filteredIndices[i] = true
		}
	}

	// Transform the set into a slice.
	indicesSlice := make([]uint64, 0, len(filteredIndices))
	for i := range filteredIndices {
		indicesSlice = append(indicesSlice, i)
	}

	// Sort the indices.
	slices.Sort[[]uint64](indicesSlice)

	return indicesSlice
}
244 beacon-chain/core/peerdas/das_core_test.go (Normal file)
@@ -0,0 +1,244 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.Blob{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestBlobs(t *testing.T) {
|
||||
blobsIndice := map[uint64]bool{}
|
||||
|
||||
almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
|
||||
for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
|
||||
almostAllColumns = append(almostAllColumns, ðpb.DataColumnSidecar{
|
||||
ColumnIndex: uint64(i),
|
||||
})
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
input []*ethpb.DataColumnSidecar
|
||||
expected []*blocks.VerifiedROBlob
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "empty input",
|
||||
input: []*ethpb.DataColumnSidecar{},
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
|
||||
},
|
||||
{
|
||||
name: "missing columns",
|
||||
input: almostAllColumns,
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := peerdas.Blobs(blobsIndice, tc.input)
|
||||
if tc.err != nil {
|
||||
require.Equal(t, tc.err.Error(), err.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.DeepSSZEqual(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
	const blobCount = 5
	blobsIndex := map[uint64]bool{}

	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	// Create a protobuf signed beacon block.
	signedBeaconBlockPb := util.NewBeaconBlockDeneb()

	// Generate random blobs and their corresponding commitments and proofs.
	blobs := make([]kzg.Blob, 0, blobCount)
	blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
	blobKzgProofs := make([]*kzg.Proof, 0, blobCount)

	for blobIndex := range blobCount {
		// Create a random blob.
		blob := getRandBlob(int64(blobIndex))
		blobs = append(blobs, blob)

		// Generate a blobKZGCommitment for the blob.
		blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
		require.NoError(t, err)

		blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
		blobKzgProofs = append(blobKzgProofs, proof)
	}

	// Set the commitments into the block.
	blobKzgCommitmentsBytes := make([][]byte, 0, blobCount)
	for _, blobKZGCommitment := range blobKzgCommitments {
		blobKzgCommitmentsBytes = append(blobKzgCommitmentsBytes, blobKZGCommitment[:])
	}

	signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobKzgCommitmentsBytes

	// Generate verified RO blobs.
	verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)

	// Create a signed beacon block from the protobuf.
	signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
	require.NoError(t, err)

	commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
	require.NoError(t, err)

	for blobIndex := range blobCount {
		blob := blobs[blobIndex]
		blobKZGCommitment := blobKzgCommitments[blobIndex]
		blobKzgProof := blobKzgProofs[blobIndex]

		// Get the signed beacon block header.
		signedBeaconBlockHeader, err := signedBeaconBlock.Header()
		require.NoError(t, err)

		blobSidecar := &ethpb.BlobSidecar{
			Index:                    uint64(blobIndex),
			Blob:                     blob[:],
			KzgCommitment:            blobKZGCommitment[:],
			KzgProof:                 blobKzgProof[:],
			SignedBlockHeader:        signedBeaconBlockHeader,
			CommitmentInclusionProof: commitmentInclusionProof,
		}

		roBlob, err := blocks.NewROBlob(blobSidecar)
		require.NoError(t, err)

		verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
		verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
	}

	// Compute data columns sidecars from the signed beacon block and from the blobs.
	dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
	require.NoError(t, err)

	// Compute the blobs from the data columns sidecar.
	roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
	require.NoError(t, err)

	// Check that the blobs are the same.
	require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
}

func TestValidatorsCustodyRequirement(t *testing.T) {
	testCases := []struct {
		name     string
		count    uint64
		expected uint64
	}{
		{name: "0 validators", count: 0, expected: 8},
		{name: "1 validator", count: 1, expected: 8},
		{name: "8 validators", count: 8, expected: 8},
		{name: "9 validators", count: 9, expected: 9},
		{name: "100 validators", count: 100, expected: 100},
		{name: "128 validators", count: 128, expected: 128},
		{name: "129 validators", count: 129, expected: 128},
		{name: "1000 validators", count: 1000, expected: 128},
	}

	const balance = uint64(32_000_000_000)

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			balances := make([]uint64, 0, tc.count)
			for range tc.count {
				balances = append(balances, balance)
			}

			validatorsIndex := make(map[primitives.ValidatorIndex]bool)
			for i := range tc.count {
				validatorsIndex[primitives.ValidatorIndex(i)] = true
			}

			beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateElectra{Balances: balances})
			require.NoError(t, err)

			actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex)
			require.NoError(t, err)
			require.Equal(t, tc.expected, actual)
		})
	}
}

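The table above pins down the behaviour of ValidatorsCustodyRequirement: the tracked-validator count is clamped between 8 and 128. A minimal sketch of that clamp, assuming the two bounds correspond to beacon config values; the function and parameter names here are illustrative, not Prysm identifiers:

// validatorsCustodyRequirementSketch reproduces the clamping the test vectors
// above exercise: counts below minRequirement (8 in the vectors) round up,
// counts above maxGroups (128) round down.
func validatorsCustodyRequirementSketch(validatorCount, minRequirement, maxGroups uint64) uint64 {
	return min(max(validatorCount, minRequirement), maxGroups)
}
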
func TestCustodyGroupSamplingSize(t *testing.T) {
	testCases := []struct {
		name                         string
		custodyType                  peerdas.CustodyType
		validatorsCustodyRequirement uint64
		toAdvertiseCustodyGroupCount uint64
		expected                     uint64
	}{
		{
			name:                         "target, lower than samples per slot",
			custodyType:                  peerdas.Target,
			validatorsCustodyRequirement: 2,
			expected:                     8,
		},
		{
			name:                         "target, higher than samples per slot",
			custodyType:                  peerdas.Target,
			validatorsCustodyRequirement: 100,
			expected:                     100,
		},
		{
			name:                         "actual, lower than samples per slot",
			custodyType:                  peerdas.Actual,
			validatorsCustodyRequirement: 3,
			toAdvertiseCustodyGroupCount: 4,
			expected:                     8,
		},
		{
			name:                         "actual, higher than samples per slot",
			custodyType:                  peerdas.Actual,
			validatorsCustodyRequirement: 100,
			toAdvertiseCustodyGroupCount: 101,
			expected:                     100,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Set the validators custody requirement for target custody group count.
			peerdas.TargetCustodyGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)

			// Set the to advertise custody group count.
			peerdas.ToAdvertiseCustodyGroupCount.Set(tc.toAdvertiseCustodyGroupCount)

			// Compute the custody group sampling size.
			actual := peerdas.CustodyGroupSamplingSize(tc.custodyType)

			// Check the result.
			require.Equal(t, tc.expected, actual)
		})
	}
}
beacon-chain/core/peerdas/info.go (new file, 184 lines)
@@ -0,0 +1,184 @@
package peerdas

import (
	"encoding/binary"
	"sync"

	"github.com/ethereum/go-ethereum/p2p/enode"
	lru "github.com/hashicorp/golang-lru"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

// info contains all the useful peerDAS-related information regarding a peer.
type (
	info struct {
		CustodyGroups      map[uint64]bool
		CustodyColumns     map[uint64]bool
		DataColumnsSubnets map[uint64]bool
	}

	targetCustodyGroupCount struct {
		mut                          sync.RWMutex
		validatorsCustodyRequirement uint64
	}

	toAdvertiseCustodyGroupCount struct {
		mut   sync.RWMutex
		value uint64
	}
)

const (
	nodeInfoCacheSize    = 200
	nodeInfoCacheKeySize = 32 + 8
)

var (
	// CustodyGroupCountMut is a mutex callers can use to ensure that neither
	// TargetCustodyGroupCount nor ToAdvertiseCustodyGroupCount is being modified.
	// (Using this mutex is not required for data protection.)
	CustodyGroupCountMut sync.RWMutex

	// TargetCustodyGroupCount represents the target number of custody groups we should custody,
	// given the validators we are tracking.
	TargetCustodyGroupCount targetCustodyGroupCount

	// ToAdvertiseCustodyGroupCount represents the number of custody groups to advertise to the network.
	ToAdvertiseCustodyGroupCount toAdvertiseCustodyGroupCount

	nodeInfoCacheMut sync.Mutex
	nodeInfoCache    *lru.Cache
)

// Info returns the peerDAS information for a given nodeID and custodyGroupCount.
// It returns a boolean indicating if the peer info was already in the cache and an error if any.
func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
	// Create a new cache if it doesn't exist.
	if err := createInfoCacheIfNeeded(); err != nil {
		return nil, false, errors.Wrap(err, "create cache if needed")
	}

	// Compute the key.
	key := computeInfoCacheKey(nodeID, custodyGroupCount)

	// If the value is already in the cache, return it.
	if value, ok := nodeInfoCache.Get(key); ok {
		peerInfo, ok := value.(*info)
		if !ok {
			return nil, false, errors.New("failed to cast peer info (should never happen)")
		}

		return peerInfo, true, nil
	}

	// The peer info is not in the cache, compute it.
	// Compute custody groups.
	custodyGroups, err := CustodyGroups(nodeID, custodyGroupCount)
	if err != nil {
		return nil, false, errors.Wrap(err, "custody groups")
	}

	// Compute custody columns.
	custodyColumns, err := CustodyColumns(custodyGroups)
	if err != nil {
		return nil, false, errors.Wrap(err, "custody columns")
	}

	// Compute data columns subnets.
	dataColumnsSubnets := DataColumnSubnets(custodyColumns)

	result := &info{
		CustodyGroups:      custodyGroups,
		CustodyColumns:     custodyColumns,
		DataColumnsSubnets: dataColumnsSubnets,
	}

	// Add the result to the cache.
	nodeInfoCache.Add(key, result)

	return result, false, nil
}

// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
func createInfoCacheIfNeeded() error {
	nodeInfoCacheMut.Lock()
	defer nodeInfoCacheMut.Unlock()

	if nodeInfoCache == nil {
		c, err := lru.New(nodeInfoCacheSize)
		if err != nil {
			return errors.Wrap(err, "lru new")
		}

		nodeInfoCache = c
	}

	return nil
}

// computeInfoCacheKey returns a unique key for a node and its custodyGroupCount.
func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCacheKeySize]byte {
	var key [nodeInfoCacheKeySize]byte

	copy(key[:32], nodeID[:])
	binary.BigEndian.PutUint64(key[32:], custodyGroupCount)

	return key
}

// SetValidatorsCustodyRequirement sets the validators custody requirement.
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
	tcgc.mut.Lock()
	defer tcgc.mut.Unlock()

	tcgc.validatorsCustodyRequirement = value
}

// Get returns the number of groups we should participate in for custody.
func (tcgc *targetCustodyGroupCount) Get() uint64 {
	// If subscribed to all subnets, return the number of custody groups.
	if flags.Get().SubscribeToAllSubnets {
		return params.BeaconConfig().NumberOfCustodyGroups
	}

	tcgc.mut.RLock()
	defer tcgc.mut.RUnlock()

	// If no validators are tracked, return the default custody requirement.
	if tcgc.validatorsCustodyRequirement == 0 {
		return params.BeaconConfig().CustodyRequirement
	}

	// Return the validators custody requirement.
	return tcgc.validatorsCustodyRequirement
}

// Set sets the to advertise custody group count.
func (tacgc *toAdvertiseCustodyGroupCount) Set(value uint64) {
	tacgc.mut.Lock()
	defer tacgc.mut.Unlock()

	tacgc.value = value
}

// Get returns the to advertise custody group count.
func (tacgc *toAdvertiseCustodyGroupCount) Get() uint64 {
	// If subscribed to all subnets, return the number of custody groups.
	if flags.Get().SubscribeToAllSubnets {
		return params.BeaconConfig().NumberOfCustodyGroups
	}

	custodyRequirement := params.BeaconConfig().CustodyRequirement

	tacgc.mut.RLock()
	defer tacgc.mut.RUnlock()

	return max(tacgc.value, custodyRequirement)
}

// ActualCustodyGroupCount returns the actual custody group count.
func ActualCustodyGroupCount() uint64 {
	return min(TargetCustodyGroupCount.Get(), ToAdvertiseCustodyGroupCount.Get())
}
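The 40-byte cache key above is simply the 32-byte node ID concatenated with the big-endian custody group count. A self-contained sketch of the same layout:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Mirrors computeInfoCacheKey: bytes [0:32) hold the node ID,
	// bytes [32:40) hold the custody group count, big-endian.
	var nodeID [32]byte
	nodeID[0] = 0xab

	var key [32 + 8]byte
	copy(key[:32], nodeID[:])
	binary.BigEndian.PutUint64(key[32:], 7)

	fmt.Printf("%x\n", key[32:]) // 0000000000000007
}
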
beacon-chain/core/peerdas/info_test.go (new file, 128 lines)
@@ -0,0 +1,128 @@
package peerdas_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestInfo(t *testing.T) {
	nodeID := enode.ID{}
	custodyGroupCount := uint64(7)

	expectedCustodyGroup := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
	expectedCustodyColumns := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
	expectedDataColumnsSubnets := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}

	for _, cached := range []bool{false, true} {
		actual, ok, err := peerdas.Info(nodeID, custodyGroupCount)
		require.NoError(t, err)
		require.Equal(t, cached, ok)
		require.DeepEqual(t, expectedCustodyGroup, actual.CustodyGroups)
		require.DeepEqual(t, expectedCustodyColumns, actual.CustodyColumns)
		require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
	}
}

func TestTargetCustodyGroupCount(t *testing.T) {
	testCases := []struct {
		name                         string
		subscribeToAllSubnets        bool
		validatorsCustodyRequirement uint64
		expected                     uint64
	}{
		{
			name:                         "subscribed to all subnets",
			subscribeToAllSubnets:        true,
			validatorsCustodyRequirement: 100,
			expected:                     128,
		},
		{
			name:                         "no validators attached",
			subscribeToAllSubnets:        false,
			validatorsCustodyRequirement: 0,
			expected:                     4,
		},
		{
			name:                         "some validators attached",
			subscribeToAllSubnets:        false,
			validatorsCustodyRequirement: 100,
			expected:                     100,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Subscribe to all subnets if needed.
			if tc.subscribeToAllSubnets {
				resetFlags := flags.Get()
				gFlags := new(flags.GlobalFlags)
				gFlags.SubscribeToAllSubnets = true
				flags.Init(gFlags)
				defer flags.Init(resetFlags)
			}

			// Set the validators custody requirement.
			peerdas.TargetCustodyGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)

			// Get the target custody group count.
			actual := peerdas.TargetCustodyGroupCount.Get()

			// Compare the expected and actual values.
			require.Equal(t, tc.expected, actual)
		})
	}
}

func TestToAdvertiseCustodyGroupCount(t *testing.T) {
	testCases := []struct {
		name                         string
		subscribeToAllSubnets        bool
		toAdvertiseCustodyGroupCount uint64
		expected                     uint64
	}{
		{
			name:                         "subscribed to all subnets",
			subscribeToAllSubnets:        true,
			toAdvertiseCustodyGroupCount: 100,
			expected:                     128,
		},
		{
			name:                         "higher than custody requirement",
			subscribeToAllSubnets:        false,
			toAdvertiseCustodyGroupCount: 100,
			expected:                     100,
		},
		{
			name:                         "lower than custody requirement",
			subscribeToAllSubnets:        false,
			toAdvertiseCustodyGroupCount: 1,
			expected:                     4,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Subscribe to all subnets if needed.
			if tc.subscribeToAllSubnets {
				resetFlags := flags.Get()
				gFlags := new(flags.GlobalFlags)
				gFlags.SubscribeToAllSubnets = true
				flags.Init(gFlags)
				defer flags.Init(resetFlags)
			}

			// Set the to advertise custody group count.
			peerdas.ToAdvertiseCustodyGroupCount.Set(tc.toAdvertiseCustodyGroupCount)

			// Get the to advertise custody group count.
			actual := peerdas.ToAdvertiseCustodyGroupCount.Get()

			// Compare the expected and actual values.
			require.Equal(t, tc.expected, actual)
		})
	}
}
beacon-chain/core/peerdas/metrics.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package peerdas

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
	prometheus.HistogramOpts{
		Name:    "beacon_data_column_sidecar_computation_milliseconds",
		Help:    "Captures the time taken to compute data column sidecars from blobs.",
		Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
	},
)
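The call site feeding this histogram is not part of this diff; presumably the sidecar-computation path observes elapsed milliseconds along these lines. A sketch under that assumption, with an illustrative metric of the same shape:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleComputationTime mirrors the shape of dataColumnComputationTime above.
var exampleComputationTime = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_computation_milliseconds",
	Buckets: []float64{100, 250, 500},
})

func compute() {
	start := time.Now()
	// Observe the elapsed wall time in milliseconds once the work is done.
	defer func() {
		exampleComputationTime.Observe(float64(time.Since(start).Milliseconds()))
	}()
	// ... compute data column sidecars ...
}

func main() { compute() }
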
beacon-chain/core/peerdas/p2p_interface.go (new file, 126 lines)
@@ -0,0 +1,126 @@
package peerdas

import (
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

const (
	CustodyGroupCountEnrKey = "cgc"
)

var (
	// Custom errors
	errRecordNil                   = errors.New("record is nil")
	errCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
	errIndexTooLarge               = errors.New("column index is larger than the specified columns count")
	errMismatchLength              = errors.New("mismatch in the length of the commitments and proofs")
)

// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#the-discovery-domain-discv5
type Cgc uint64

func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }

// VerifyDataColumnsSidecarKZGProofs verifies the provided KZG proofs of data columns.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, error) {
	// Retrieve the number of columns.
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	// Compute the total count.
	count := 0
	for _, sidecar := range sidecars {
		count += len(sidecar.DataColumn)
	}

	commitments := make([]kzg.Bytes48, 0, count)
	indices := make([]uint64, 0, count)
	cells := make([]kzg.Cell, 0, count)
	proofs := make([]kzg.Bytes48, 0, count)

	for _, sidecar := range sidecars {
		// Check if the column index is not too large.
		if sidecar.ColumnIndex >= numberOfColumns {
			return false, errIndexTooLarge
		}

		// Check if the KZG commitments size and data column size match.
		if len(sidecar.DataColumn) != len(sidecar.KzgCommitments) {
			return false, errMismatchLength
		}

		// Check if the KZG proofs size and data column size match.
		if len(sidecar.DataColumn) != len(sidecar.KzgProof) {
			return false, errMismatchLength
		}

		for i := range sidecar.DataColumn {
			commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
			indices = append(indices, sidecar.ColumnIndex)
			cells = append(cells, kzg.Cell(sidecar.DataColumn[i]))
			proofs = append(proofs, kzg.Bytes48(sidecar.KzgProof[i]))
		}
	}

	// Verify the whole batch at once.
	verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
	if err != nil {
		return false, errors.Wrap(err, "verify cell KZG proof batch")
	}

	return verified, nil
}

// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
	return columnIndex % dataColumnSidecarSubnetCount
}

// DataColumnSubnets computes the subnets for the data columns.
func DataColumnSubnets(dataColumns map[uint64]bool) map[uint64]bool {
	subnets := make(map[uint64]bool, len(dataColumns))

	for column := range dataColumns {
		subnet := ComputeSubnetForDataColumnSidecar(column)
		subnets[subnet] = true
	}

	return subnets
}

// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
	beaconConfig := params.BeaconConfig()
	numberOfColumns := beaconConfig.NumberOfColumns

	if columnIndex >= numberOfColumns {
		return 0, errIndexTooLarge
	}

	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups

	return columnIndex / columnsPerGroup, nil
}

// CustodyGroupCountFromRecord extracts the custody group count from an ENR record.
func CustodyGroupCountFromRecord(record *enr.Record) (uint64, error) {
	if record == nil {
		return 0, errRecordNil
	}

	// Load the `cgc` entry.
	var cgc Cgc
	if err := record.Load(&cgc); err != nil {
		return 0, errCannotLoadCustodyGroupCount
	}

	return uint64(cgc), nil
}
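For illustration, writing and then reading the `cgc` entry on an ENR record uses the go-ethereum enr API exactly as the test file below does. A self-contained sketch, where the local cgc type stands in for peerdas.Cgc:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enr"
)

// cgc mirrors peerdas.Cgc for this standalone example.
type cgc uint64

func (cgc) ENRKey() string { return "cgc" }

func main() {
	record := &enr.Record{}
	record.Set(cgc(7)) // advertise custody of 7 groups

	var got cgc
	if err := record.Load(&got); err != nil {
		panic(err)
	}
	fmt.Println(uint64(got)) // 7
}
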
beacon-chain/core/peerdas/p2p_interface_test.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package peerdas_test

import (
	"fmt"
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
	dbBlock := util.NewBeaconBlockDeneb()
	require.NoError(t, kzg.Start())

	var (
		comms [][]byte
		blobs []kzg.Blob
	)
	for i := int64(0); i < 6; i++ {
		blob := getRandBlob(i)
		commitment, _, err := generateCommitmentAndProof(&blob)
		require.NoError(t, err)
		comms = append(comms, commitment[:])
		blobs = append(blobs, blob)
	}

	dbBlock.Block.Body.BlobKzgCommitments = comms
	sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
	require.NoError(t, err)
	sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
	require.NoError(t, err)

	for i, sidecar := range sCars {
		roCol, err := blocks.NewRODataColumn(sidecar)
		require.NoError(t, err)
		verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
		require.NoError(t, err)
		require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
	}
}

func TestCustodyGroupCountFromRecord(t *testing.T) {
	const expected uint64 = 7

	// Create an Ethereum record.
	record := &enr.Record{}
	record.Set(peerdas.Cgc(expected))

	actual, err := peerdas.CustodyGroupCountFromRecord(record)
	require.NoError(t, err)
	require.Equal(t, expected, actual)
}
beacon-chain/core/peerdas/peer_sampling.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package peerdas

import (
	"math/big"

	"github.com/prysmaticlabs/prysm/v5/config/params"
)

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
	// Retrieve the columns count.
	columnsCount := params.BeaconConfig().NumberOfColumns

	// If half of the columns are missing, we are able to reconstruct the data.
	// If half of the columns + 1 are missing, we are not able to reconstruct the data.
	// This is the smallest worst case.
	worstCaseMissing := columnsCount/2 + 1

	// Compute the false positive threshold.
	falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

	var sampleCount uint64

	// Finally, compute the extended sample count.
	for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
		if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
			break
		}
	}

	return sampleCount
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
	denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
	denominator := new(big.Float).SetInt(denominatorInt)

	rBig := big.NewFloat(0)

	for i := uint64(0); i < k+1; i++ {
		a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
		b := new(big.Int).Binomial(int64(M-n), int64(N-i))
		numeratorInt := new(big.Int).Mul(a, b)
		numerator := new(big.Float).SetInt(numeratorInt)
		item := new(big.Float).Quo(numerator, denominator)
		rBig.Add(rBig, item)
	}

	r, _ := rBig.Float64()

	return r
}
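In standard notation, HypergeomCDF sums the hypergeometric mass function term by term, where M is the population size, n the number of successes in the population, and N the number of draws:

P(X \le k) = \sum_{i=0}^{k} \frac{\binom{n}{i}\,\binom{M-n}{N-i}}{\binom{M}{N}}

ExtendedSampleCount then grows the sample size until the probability of seeing at most allowedFailures missing columns, in the worst case where just over half of all columns are missing, drops to the zero-failure baseline.
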
beacon-chain/core/peerdas/peer_sampling_test.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package peerdas_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestExtendedSampleCount(t *testing.T) {
	const samplesPerSlot = 16

	testCases := []struct {
		name                string
		allowedMissings     uint64
		extendedSampleCount uint64
	}{
		{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
		{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
		{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
		{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
		{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
		{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
		{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
		{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
		{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
		{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
		{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
		{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
		{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
		{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
		{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
		{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
		{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
		{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
		{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
		{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
		{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
			require.Equal(t, tc.extendedSampleCount, result)
		})
	}
}

func TestHypergeomCDF(t *testing.T) {
	// Hypergeometric CDF with population size 128, 65 successes in the
	// population, sample size 16, and at most 5 successes in the sample.
	const (
		expected = 0.0796665913283742
		margin   = 0.000001
	)

	actual := peerdas.HypergeomCDF(5, 128, 65, 16)
	require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}
beacon-chain/core/peerdas/reconstruction.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package peerdas

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"golang.org/x/sync/errgroup"
)

// CanSelfReconstruct returns true if the node can self-reconstruct all the data columns from its custody group count.
func CanSelfReconstruct(custodyGroupCount uint64) bool {
	total := params.BeaconConfig().NumberOfCustodyGroups
	// If total is odd, then we need total / 2 + 1 groups to reconstruct.
	// If total is even, then we need total / 2 groups to reconstruct.
	custodyGroupsNeeded := total/2 + total%2
	return custodyGroupCount >= custodyGroupsNeeded
}

// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
func RecoverCellsAndProofs(
	dataColumnSideCars []*ethpb.DataColumnSidecar,
	blockRoot [fieldparams.RootLength]byte,
) ([]kzg.CellsAndProofs, error) {
	var wg errgroup.Group

	dataColumnSideCarsCount := len(dataColumnSideCars)

	if dataColumnSideCarsCount == 0 {
		return nil, errors.New("no data column sidecars")
	}

	// Check if all columns have the same length.
	blobCount := len(dataColumnSideCars[0].DataColumn)
	for _, sidecar := range dataColumnSideCars {
		length := len(sidecar.DataColumn)

		if length != blobCount {
			return nil, errors.New("columns do not have the same length")
		}
	}

	// Recover cells and compute proofs in parallel.
	recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)

	for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
		bIndex := blobIndex
		wg.Go(func() error {
			cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
			cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)

			for _, sidecar := range dataColumnSideCars {
				// Build the cell indices.
				cellsIndices = append(cellsIndices, sidecar.ColumnIndex)

				// Get the cell.
				column := sidecar.DataColumn
				cell := column[bIndex]

				cells = append(cells, kzg.Cell(cell))
			}

			// Recover the cells and proofs for the corresponding blob.
			cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
			if err != nil {
				return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
			}

			recoveredCellsAndProofs[bIndex] = cellsAndProofs
			return nil
		})
	}

	if err := wg.Wait(); err != nil {
		return nil, err
	}

	return recoveredCellsAndProofs, nil
}

// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
// It is scheduled for deletion.
func DataColumnSidecarsForReconstruct(
	blobKzgCommitments [][]byte,
	signedBlockHeader *ethpb.SignedBeaconBlockHeader,
	kzgCommitmentsInclusionProof [][]byte,
	cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
	// Each CellsAndProofs corresponds to a blob,
	// so we can get the blob count from the length of cellsAndProofs.
	blobsCount := len(cellsAndProofs)
	if blobsCount == 0 {
		return nil, nil
	}

	// Get the column sidecars.
	sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
	for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
		column := make([]kzg.Cell, 0, blobsCount)
		kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

		for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
			cellsForRow := cellsAndProofs[rowIndex].Cells
			proofsForRow := cellsAndProofs[rowIndex].Proofs

			cell := cellsForRow[columnIndex]
			column = append(column, cell)

			kzgProof := proofsForRow[columnIndex]
			kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
		}

		columnBytes := make([][]byte, 0, blobsCount)
		for i := range column {
			columnBytes = append(columnBytes, column[i][:])
		}

		kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
		for _, kzgProof := range kzgProofOfColumn {
			copiedProof := kzgProof
			kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
		}

		sidecar := &ethpb.DataColumnSidecar{
			ColumnIndex:                  columnIndex,
			DataColumn:                   columnBytes,
			KzgCommitments:               blobKzgCommitments,
			KzgProof:                     kzgProofOfColumnBytes,
			SignedBlockHeader:            signedBlockHeader,
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		}

		sidecars = append(sidecars, sidecar)
	}

	return sidecars, nil
}
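With the 128 custody groups used by the tests elsewhere in this change, the reconstruction threshold works out to 64. A standalone sketch of the ceiling division in CanSelfReconstruct:

package main

import "fmt"

// custodyGroupsNeeded mirrors the total/2 + total%2 arithmetic above,
// i.e. ceil(total / 2).
func custodyGroupsNeeded(total uint64) uint64 {
	return total/2 + total%2
}

func main() {
	fmt.Println(custodyGroupsNeeded(128)) // 64
	fmt.Println(custodyGroupsNeeded(65))  // 33: an odd total rounds up
}
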
beacon-chain/core/peerdas/reconstruction_test.go (new file, 208 lines)
@@ -0,0 +1,208 @@
package peerdas_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestCanSelfReconstruct(t *testing.T) {
	testCases := []struct {
		name                       string
		totalNumberOfCustodyGroups uint64
		custodyNumberOfGroups      uint64
		expected                   bool
	}{
		{
			name:                       "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31",
			totalNumberOfCustodyGroups: 64,
			custodyNumberOfGroups:      31,
			expected:                   false,
		},
		{
			name:                       "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32",
			totalNumberOfCustodyGroups: 64,
			custodyNumberOfGroups:      32,
			expected:                   true,
		},
		{
			name:                       "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32",
			totalNumberOfCustodyGroups: 65,
			custodyNumberOfGroups:      32,
			expected:                   false,
		},
		{
			name:                       "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=33",
			totalNumberOfCustodyGroups: 65,
			custodyNumberOfGroups:      33,
			expected:                   true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Set the total number of custody groups.
			params.SetupTestConfigCleanup(t)
			cfg := params.BeaconConfig().Copy()
			cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups
			params.OverrideBeaconConfig(cfg)

			// Check if reconstruction is possible.
			actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups)
			require.Equal(t, tc.expected, actual)
		})
	}
}

func TestReconstructionRoundTrip(t *testing.T) {
	params.SetupTestConfigCleanup(t)

	const blobCount = 5

	var blockRoot [fieldparams.RootLength]byte

	signedBeaconBlockPb := util.NewBeaconBlockDeneb()
	require.NoError(t, kzg.Start())

	// Generate random blobs and their corresponding commitments.
	var (
		blobsKzgCommitments [][]byte
		blobs               []kzg.Blob
	)
	for i := range blobCount {
		blob := getRandBlob(int64(i))
		commitment, _, err := generateCommitmentAndProof(&blob)
		require.NoError(t, err)

		blobsKzgCommitments = append(blobsKzgCommitments, commitment[:])
		blobs = append(blobs, blob)
	}

	// Generate a signed beacon block.
	signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobsKzgCommitments
	signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
	require.NoError(t, err)

	// Get the signed beacon block header.
	signedBeaconBlockHeader, err := signedBeaconBlock.Header()
	require.NoError(t, err)

	// Convert data columns sidecars from signed block and blobs.
	dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
	require.NoError(t, err)

	// Create verified RO data columns.
	verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, blobCount)
	for _, dataColumnSidecar := range dataColumnSidecars {
		roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
		require.NoError(t, err)

		verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
	}

	verifiedRoDataColumn := verifiedRoDataColumns[0]

	numberOfColumns := params.BeaconConfig().NumberOfColumns

	var noDataColumns []*ethpb.DataColumnSidecar
	dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
		{DataColumn: [][]byte{{}, {}}},
		{DataColumn: [][]byte{{}}},
	}
	notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
	originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
	extendedDataColumns := dataColumnSidecars[numberOfColumns/2:]
	evenDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
	oddDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
	allDataColumns := dataColumnSidecars

	for i, dataColumn := range dataColumnSidecars {
		if i%2 == 0 {
			evenDataColumns = append(evenDataColumns, dataColumn)
		} else {
			oddDataColumns = append(oddDataColumns, dataColumn)
		}
	}

	testCases := []struct {
		name               string
		dataColumnsSidecar []*ethpb.DataColumnSidecar
		isError            bool
	}{
		{
			name:               "No data columns sidecars",
			dataColumnsSidecar: noDataColumns,
			isError:            true,
		},
		{
			name:               "Data columns sidecar with different lengths",
			dataColumnsSidecar: dataColumnsWithDifferentLengths,
			isError:            true,
		},
		{
			name:               "All columns are present (no actual need to reconstruct)",
			dataColumnsSidecar: allDataColumns,
			isError:            false,
		},
		{
			name:               "Only original columns are present",
			dataColumnsSidecar: originalDataColumns,
			isError:            false,
		},
		{
			name:               "Only extended columns are present",
			dataColumnsSidecar: extendedDataColumns,
			isError:            false,
		},
		{
			name:               "Only even columns are present",
			dataColumnsSidecar: evenDataColumns,
			isError:            false,
		},
		{
			name:               "Only odd columns are present",
			dataColumnsSidecar: oddDataColumns,
			isError:            false,
		},
		{
			name:               "Not enough columns to reconstruct",
			dataColumnsSidecar: notEnoughDataColumns,
			isError:            true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Recover cells and proofs from available data columns sidecars.
			cellsAndProofs, err := peerdas.RecoverCellsAndProofs(tc.dataColumnsSidecar, blockRoot)
			isError := (err != nil)
			require.Equal(t, tc.isError, isError)

			if isError {
				return
			}

			// Recover all data columns sidecars from cells and proofs.
			reconstructedDataColumnsSideCars, err := peerdas.DataColumnSidecarsForReconstruct(
				blobsKzgCommitments,
				signedBeaconBlockHeader,
				verifiedRoDataColumn.KzgCommitmentsInclusionProof,
				cellsAndProofs,
			)
			require.NoError(t, err)

			expected := dataColumnSidecars
			actual := reconstructedDataColumnsSideCars
			require.DeepSSZEqual(t, expected, actual)
		})
	}
}
beacon-chain/core/peerdas/utils_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package peerdas_test

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
	GoKZG "github.com/crate-crypto/go-kzg-4844"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/sirupsen/logrus"
)

func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
	commitment, err := kzg.BlobToKZGCommitment(blob)
	if err != nil {
		return nil, nil, err
	}
	proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
	if err != nil {
		return nil, nil, err
	}
	return &commitment, &proof, err
}

// Returns a random blob using the passed seed as entropy.
func getRandBlob(seed int64) kzg.Blob {
	var blob kzg.Blob
	bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
	for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
		fieldElementBytes := getRandFieldElement(seed + int64(i))
		copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
	}
	return blob
}

// Returns a serialized random field element in big-endian.
func getRandFieldElement(seed int64) [32]byte {
	bytes := deterministicRandomness(seed)
	var r fr.Element
	r.SetBytes(bytes[:])

	return GoKZG.SerializeScalar(r)
}

func deterministicRandomness(seed int64) [32]byte {
	// Converts an int64 to a byte slice.
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.BigEndian, seed)
	if err != nil {
		logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
		return [32]byte{}
	}
	bytes := buf.Bytes()

	return sha256.Sum256(bytes)
}
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
	return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}

// PeerDASIsActive checks whether peerDAS is active at the provided slot.
func PeerDASIsActive(slot primitives.Slot) bool {
	return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
}

// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
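PeerDASIsActive gates on the slot's epoch reaching the Fulu fork epoch. A self-contained sketch of the same gate; the constants are illustrative, not mainnet values:

package main

import "fmt"

const (
	slotsPerEpoch = 32  // illustrative
	fuluForkEpoch = 100 // illustrative
)

// peerDASIsActiveSketch mirrors the epoch comparison above, assuming the
// Fulu fork is enabled.
func peerDASIsActiveSketch(slot uint64) bool {
	return slot/slotsPerEpoch >= fuluForkEpoch
}

func main() {
	fmt.Println(peerDASIsActiveSketch(3199)) // false: epoch 99
	fmt.Println(peerDASIsActiveSketch(3200)) // true: epoch 100
}
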
beacon-chain/das/BUILD.bazel
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "availability.go",
        "availability_columns.go",
        "cache.go",
        "iface.go",
        "mock.go",
@@ -11,14 +12,17 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/das",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//runtime/logging:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -27,6 +31,7 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "availability_columns_test.go",
        "availability_test.go",
        "cache_test.go",
    ],
@@ -34,6 +39,7 @@ go_test(
    deps = [
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -41,6 +47,7 @@ go_test(
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
beacon-chain/das/availability.go
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
@@ -80,7 +81,7 @@ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.RO

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
-func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
+func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
	blockCommitments, err := commitmentsToCheck(b, current)
	if err != nil {
		return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
beacon-chain/das/availability_columns.go (new file, 171 lines)
@@ -0,0 +1,171 @@
package das

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any columns passed to PersistColumns until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
	store *filesystem.BlobStorage
	cache *cache
}

func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store: store,
		cache: newCache(),
	}
}

// Persist does nothing at the moment.
// TODO: Very ugly, change the interface to allow for columns and blobs.
func (*LazilyPersistentStoreColumn) Persist(_ primitives.Slot, _ ...blocks.ROBlob) error {
	return nil
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
	if len(sc) == 0 {
		return nil
	}
	if len(sc) > 1 {
		first := sc[0].BlockRoot()
		for i := 1; i < len(sc); i++ {
			if first != sc[i].BlockRoot() {
				return errMixedRoots
			}
		}
	}
	if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
		return nil
	}
	key := keyFromColumn(sc[0])
	entry := s.cache.ensure(key)
	for i := range sc {
		if err := entry.stashColumns(&sc[i]); err != nil {
			return err
		}
	}
	return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(
	ctx context.Context,
	nodeID enode.ID,
	currentSlot primitives.Slot,
	block blocks.ROBlock,
) error {
	blockCommitments, err := fullCommitmentsToCheck(nodeID, block, currentSlot)
	if err != nil {
		return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
	}

	// Return early for blocks that do not have any commitments.
	if blockCommitments.count() == 0 {
		return nil
	}

	// Build the cache key for the block.
	key := keyFromBlock(block)

	// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
	entry := s.cache.ensure(key)

	// Delete the cache entry for the block at the end.
	defer s.cache.delete(key)

	// Get the root of the block.
	blockRoot := block.Root()

	// Set the disk summary for the block in the cache entry.
	entry.setDiskSummary(s.store.Summary(blockRoot))

	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
	// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
	// ignore their response and decrease their peer score.
	roDataColumns, err := entry.filterColumns(blockRoot, blockCommitments)
	if err != nil {
		return errors.Wrap(err, "incomplete DataColumnSidecar batch")
	}

	// Create verified RO data columns from RO data columns.
	verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumns))

	for _, roDataColumn := range roDataColumns {
		verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
	}

	// Ensure that each column sidecar is written to disk.
	for _, verifiedRODataColumn := range verifiedRODataColumns {
		if err := s.store.SaveDataColumn(verifiedRODataColumn); err != nil {
			return errors.Wrapf(err, "save data columns for index `%d` for block `%#x`", verifiedRODataColumn.ColumnIndex, blockRoot)
		}
	}

	// All ColumnSidecars are persisted - data availability check succeeds.
	return nil
}

// fullCommitmentsToCheck returns the commitments to check for a given block.
func fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
	// Return early for blocks that are pre-Fulu.
	if block.Version() < version.Fulu {
		return &safeCommitmentsArray{}, nil
	}

	// Compute the block epoch.
	blockSlot := block.Block().Slot()
	blockEpoch := slots.ToEpoch(blockSlot)

	// Compute the current epoch.
	currentEpoch := slots.ToEpoch(currentSlot)

	// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the KZG commitments for the block.
	kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	// Return early if there are no commitments in the block.
	if len(kzgCommitments) == 0 {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the groups count.
	custodyGroupCount := peerdas.ActualCustodyGroupCount()

	// Retrieve peer info.
	peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		return nil, errors.Wrap(err, "peer info")
	}

	// Create a safe commitments array for the custody columns.
	commitmentsArray := &safeCommitmentsArray{}
	for column := range peerInfo.CustodyColumns {
		commitmentsArray[column] = kzgCommitments
	}

	return commitmentsArray, nil
}
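The final loop in fullCommitmentsToCheck gives every custodied column the block's full commitment list, so the availability check later expects one complete sidecar per custodied column. A toy illustration of that fan-out; the fixed-size array here is a stand-in for safeCommitmentsArray, whose definition is outside this diff:

package main

import "fmt"

func main() {
	// Stand-in for safeCommitmentsArray: one commitment list per column.
	// Eight columns keep the example small.
	var commitmentsArray [8][][]byte

	kzgCommitments := [][]byte{{0xaa}, {0xbb}} // two commitments in the block
	custodyColumns := map[uint64]bool{1: true, 5: true}

	// Each custodied column is expected to carry all the block's commitments.
	for column := range custodyColumns {
		commitmentsArray[column] = kzgCommitments
	}

	for i, c := range commitmentsArray {
		fmt.Printf("column %d: expecting %d commitments\n", i, len(c))
	}
}
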
beacon-chain/das/availability_columns_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package das

import (
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestFullCommitmentsToCheck(t *testing.T) {
	windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
	require.NoError(t, err)
	commits := [][]byte{
		bytesutil.PadTo([]byte("a"), 48),
		bytesutil.PadTo([]byte("b"), 48),
		bytesutil.PadTo([]byte("c"), 48),
		bytesutil.PadTo([]byte("d"), 48),
	}
	cases := []struct {
		name    string
		commits [][]byte
		block   func(*testing.T) blocks.ROBlock
		slot    primitives.Slot
		err     error
	}{
		{
			name: "pre fulu",
			block: func(t *testing.T) blocks.ROBlock {
				bb := util.NewBeaconBlockElectra()
				sb, err := blocks.NewSignedBeaconBlock(bb)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
		},
		{
			name: "commitments within da",
			block: func(t *testing.T) blocks.ROBlock {
				d := util.NewBeaconBlockFulu()
				d.Block.Body.BlobKzgCommitments = commits
				d.Block.Slot = 100
				sb, err := blocks.NewSignedBeaconBlock(d)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
			commits: commits,
			slot:    100,
		},
		{
			name: "commitments outside da",
			block: func(t *testing.T) blocks.ROBlock {
				d := util.NewBeaconBlockElectra()
				// The block is from slot 0; the "current slot" is the window size + 1, so outside the window.
				d.Block.Body.BlobKzgCommitments = commits
				sb, err := blocks.NewSignedBeaconBlock(d)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
			slot: windowSlots + 1,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			resetFlags := flags.Get()
			gFlags := new(flags.GlobalFlags)
			gFlags.SubscribeToAllSubnets = true
			flags.Init(gFlags)
			defer flags.Init(resetFlags)

			b := c.block(t)
			co, err := fullCommitmentsToCheck(enode.ID{}, b, c.slot)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
			} else {
				require.NoError(t, err)
			}
			for i := 0; i < len(co); i++ {
				require.DeepEqual(t, c.commits, co[i])
			}
		})
	}
}
@@ -5,6 +5,7 @@ import (
	"context"
	"testing"

+	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
@@ -123,18 +124,18 @@ func TestLazilyPersistent_Missing(t *testing.T) {

	// Only one commitment persisted, should return error with other indices
	require.NoError(t, as.Persist(1, scs[2]))
-	err := as.IsDataAvailable(ctx, 1, blk)
+	err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
	require.ErrorIs(t, err, errMissingSidecar)

	// All but one persisted, return missing idx
	require.NoError(t, as.Persist(1, scs[0]))
-	err = as.IsDataAvailable(ctx, 1, blk)
+	err = as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
	require.ErrorIs(t, err, errMissingSidecar)

	// All persisted, return nil
	require.NoError(t, as.Persist(1, scs...))

-	require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
+	require.NoError(t, as.IsDataAvailable(ctx, enode.ID{}, 1, blk))
}

func TestLazilyPersistent_Mismatch(t *testing.T) {
@@ -149,7 +150,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {

	// Only one commitment persisted, should return error with other indices
	require.NoError(t, as.Persist(1, scs[0]))
-	err := as.IsDataAvailable(ctx, 1, blk)
+	err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
	require.NotNil(t, err)
	require.ErrorIs(t, err, errCommitmentMismatch)
}
|
||||
@@ -2,9 +2,11 @@ package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -38,6 +40,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
func keyFromColumn(sc blocks.RODataColumn) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
|
||||
func keyFromBlock(b blocks.ROBlock) cacheKey {
|
||||
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
|
||||
@@ -61,6 +67,7 @@ func (c *cache) delete(key cacheKey) {
|
||||
// cacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type cacheEntry struct {
|
||||
scs []*blocks.ROBlob
|
||||
colScs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.BlobStorageSummary
|
||||
}
|
||||
|
||||
@@ -86,6 +93,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
|
||||
if sc.ColumnIndex >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
|
||||
}
|
||||
if e.colScs[sc.ColumnIndex] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
|
||||
}
|
||||
e.colScs[sc.ColumnIndex] = sc
|
||||
return nil
|
||||
}
|
||||
|
||||
// filter evicts sidecars that are not committed to by the block and returns custom
|
||||
// errors if the cache is missing any of the commitments, or if the commitments in
|
||||
// the cache do not match those found in the block. If err is nil, then all expected
|
||||
@@ -121,3 +139,66 @@ func (e *cacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([
|
||||
|
||||
return scs, nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) filterColumns(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
nonEmptyIndices := commitmentsArray.nonEmptyIndices()
|
||||
if e.diskSummary.AllDataColumnsAvailable(nonEmptyIndices) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
commitmentsCount := commitmentsArray.count()
|
||||
sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
|
||||
|
||||
for i := range uint64(fieldparams.NumberOfColumns) {
|
||||
// Skip if we already store this data column.
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
|
||||
if commitmentsArray[i] == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.colScs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(commitmentsArray[i], e.colScs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, commitmentsArray[i])
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, *e.colScs[i])
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// safeCommitmentsArray is a fixed size array of commitments.
|
||||
// This is helpful for avoiding gratuitous bounds checks.
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
// count returns the number of commitments in the array.
|
||||
func (s *safeCommitmentsArray) count() int {
|
||||
count := 0
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// nonEmptyIndices returns a map of indices that are non-nil in the array.
|
||||
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
|
||||
columns := make(map[uint64]bool)
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
columns[uint64(i)] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
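A toy illustration of the two safeCommitmentsArray helpers above: count walks the fixed array and tallies non-nil entries, while nonEmptyIndices turns the same walk into a set. The array size here is an illustrative assumption, not the real column count.

package main

import "fmt"

type arr [8][][]byte // illustrative size; the real array uses fieldparams.NumberOfColumns

func count(s *arr) int {
	n := 0
	for i := range s {
		if s[i] != nil {
			n++
		}
	}
	return n
}

func nonEmpty(s *arr) map[uint64]bool {
	out := make(map[uint64]bool)
	for i := range s {
		if s[i] != nil {
			out[uint64(i)] = true
		}
	}
	return out
}

func main() {
	var s arr
	s[1] = [][]byte{[]byte("a")}
	s[5] = [][]byte{[]byte("b")}
	fmt.Println(count(&s), nonEmpty(&s)) // 2 map[1:true 5:true]
}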
@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
	entry := &cacheEntry{}
	if len(onDisk) > 0 {
		od := map[[32]byte][]int{blk.Root(): onDisk}
-		sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
+		sumz := filesystem.NewMockBlobStorageSummarizer(t, od, 0)
		sum := sumz.Summary(blk.Root())
		entry.setDiskSummary(sum)
	}
@@ -3,6 +3,7 @@ package das
import (
	"context"

+	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -14,6 +15,6 @@
// IsDataAvailable guarantees that all blobs committed to in the block have been
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
-	IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
+	IsDataAvailable(ctx context.Context, nodeID enode.ID, current primitives.Slot, b blocks.ROBlock) error
	Persist(current primitives.Slot, sc ...blocks.ROBlob) error
}
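The interface change above threads the local node ID through so the store can derive its custody columns. A compile-only sketch of the shape of the widened method, using simplified stand-in types rather than the real consensus types:

package main

import (
	"context"
	"fmt"
)

type nodeID [32]byte // stand-in for enode.ID
type slot uint64     // stand-in for primitives.Slot
type roBlock struct{ root [32]byte }

type availabilityStore interface {
	isDataAvailable(ctx context.Context, id nodeID, current slot, b roBlock) error
}

type noopStore struct{}

func (noopStore) isDataAvailable(_ context.Context, _ nodeID, _ slot, _ roBlock) error {
	// A real store would block until every sidecar committed to by the block is persisted.
	return nil
}

func main() {
	var s availabilityStore = noopStore{}
	fmt.Println(s.isDataAvailable(context.Background(), nodeID{}, 1, roBlock{}))
}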
@@ -3,6 +3,7 @@ package das
import (
	"context"

+	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -16,7 +17,7 @@ type MockAvailabilityStore struct {
var _ AvailabilityStore = &MockAvailabilityStore{}

// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
-func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
+func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
	if m.VerifyAvailabilityCallback != nil {
		return m.VerifyAvailabilityCallback(ctx, current, b)
	}
@@ -17,6 +17,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
    visibility = ["//visibility:public"],
    deps = [
+        "//async/event:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -50,6 +51,7 @@ go_test(
    deps = [
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/verification:go_default_library",
+        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
@@ -8,7 +8,9 @@ import (
	"time"

	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v5/async/event"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
+	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -28,8 +30,15 @@ var (
	errNoBasePath = errors.New("BlobStorage base path not specified in init")
)

-// BlobStorageOption is a functional option for configuring a BlobStorage.
-type BlobStorageOption func(*BlobStorage) error
+type (
+	// BlobStorageOption is a functional option for configuring a BlobStorage.
+	BlobStorageOption func(*BlobStorage) error
+
+	RootIndexPair struct {
+		Root  [fieldparams.RootLength]byte
+		Index uint64
+	}
+)

// WithBasePath is a required option that sets the base path of blob storage.
func WithBasePath(base string) BlobStorageOption {
@@ -76,7 +85,10 @@ func WithLayout(name string) BlobStorageOption {
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
-	b := &BlobStorage{}
+	b := &BlobStorage{
+		DataColumnFeed: new(event.Feed),
+	}

	for _, o := range opts {
		if err := o(b); err != nil {
			return nil, errors.Wrap(err, "failed to create blob storage")
@@ -115,6 +127,7 @@ type BlobStorage struct {
	fs     afero.Fs
	layout fsLayout
	cache  *blobStorageSummaryCache
+	DataColumnFeed *event.Feed
}

// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
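The diff above initializes the feed inside the constructor before any functional option runs, so every configured storage carries a usable feed. A minimal sketch of that ordering, with stand-in types (the names below are illustrative, not the package's real API):

package main

import "fmt"

type feed struct{} // stand-in for event.Feed

type storage struct {
	base string
	feed *feed
}

type option func(*storage) error

func withBasePath(p string) option {
	return func(s *storage) error {
		s.base = p
		return nil
	}
}

func newStorage(opts ...option) (*storage, error) {
	s := &storage{feed: &feed{}} // the feed exists before any option runs
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newStorage(withBasePath("/tmp/blobs"))
	fmt.Println(s.base, s.feed != nil, err) // /tmp/blobs true <nil>
}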
@@ -201,6 +214,51 @@ func (bs *BlobStorage) writePart(sidecar blocks.VerifiedROBlob) (ppath string, e
	return ppath, nil
}

+func (bs *BlobStorage) writeDataColumnPart(sidecar blocks.VerifiedRODataColumn) (ppath string, err error) {
+	ident := identForDataColumnSidecar(sidecar)
+	sidecarData, err := sidecar.MarshalSSZ()
+	if err != nil {
+		return "", errors.Wrap(err, "failed to serialize sidecar data")
+	}
+	if len(sidecarData) == 0 {
+		return "", errSidecarEmptySSZData
+	}
+
+	if err := bs.fs.MkdirAll(bs.layout.dir(ident), directoryPermissions()); err != nil {
+		return "", err
+	}
+	ppath = bs.layout.partPath(ident, fmt.Sprintf("%p", sidecarData))
+
+	// Create a partial file and write the serialized data to it.
+	partialFile, err := bs.fs.Create(ppath)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to create partial file")
+	}
+	defer func() {
+		cerr := partialFile.Close()
+		// The close error is probably less important than any existing error, so only overwrite nil err.
+		if cerr != nil && err == nil {
+			err = cerr
+		}
+	}()
+
+	n, err := partialFile.Write(sidecarData)
+	if err != nil {
+		return ppath, errors.Wrap(err, "failed to write to partial file")
+	}
+	if bs.fsync {
+		if err := partialFile.Sync(); err != nil {
+			return ppath, err
+		}
+	}
+
+	if n != len(sidecarData) {
+		return ppath, fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
+	}
+
+	return ppath, nil
+}
+
// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
	startTime := time.Now()
@@ -251,6 +309,65 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
	return nil
}

+// SaveDataColumn saves the given data column sidecar.
+func (bs *BlobStorage) SaveDataColumn(verifiedRODataColumns blocks.VerifiedRODataColumn) error {
+	startTime := time.Now()
+
+	ident := identForDataColumnSidecar(verifiedRODataColumns)
+	sszPath := bs.layout.sszPath(ident)
+	exists, err := afero.Exists(bs.fs, sszPath)
+	if err != nil {
+		return errors.Wrap(err, "afero exists")
+	}
+
+	if exists {
+		return nil
+	}
+
+	partialMoved := false
+	partPath, err := bs.writeDataColumnPart(verifiedRODataColumns)
+
+	// Ensure the partial file is deleted.
+	defer func() {
+		if partialMoved || partPath == "" {
+			return
+		}
+
+		// It's expected to error if the save is successful.
+		if err := bs.fs.Remove(partPath); err == nil {
+			log.WithField("partPath", partPath).Debug("Removed partial file")
+		}
+	}()
+
+	if err != nil {
+		return err
+	}
+
+	// Atomically rename the partial file to its final name.
+	err = bs.fs.Rename(partPath, sszPath)
+	if err != nil {
+		return errors.Wrap(err, "rename")
+	}
+	partialMoved = true
+
+	if err := bs.layout.notify(ident); err != nil {
+		return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", verifiedRODataColumns.BlockRoot())
+	}
+
+	// Notify the data column notifier that a new data column has been saved.
+	if bs.DataColumnFeed != nil {
+		bs.DataColumnFeed.Send(RootIndexPair{
+			Root:  verifiedRODataColumns.BlockRoot(),
+			Index: verifiedRODataColumns.ColumnIndex,
+		})
+	}
+
+	blobsWrittenCounter.Inc()
+	blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
+
+	return nil
+}
+
// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
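SaveDataColumn relies on the write-to-partial-file-then-rename pattern: readers can never observe a half-written final file because the rename is atomic on POSIX filesystems. A minimal, self-contained sketch of the same pattern against the standard library (the real code goes through afero and the storage layout instead):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func saveAtomically(dir, name string, data []byte) error {
	part := filepath.Join(dir, name+".part")
	final := filepath.Join(dir, name)

	// Write everything into a temporary "partial" file first.
	if err := os.WriteFile(part, data, 0o600); err != nil {
		return err
	}
	// Atomically publish the file; on failure, clean up the partial file.
	if err := os.Rename(part, final); err != nil {
		_ = os.Remove(part)
		return err
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "sidecars")
	defer os.RemoveAll(dir)
	fmt.Println(saveAtomically(dir, "0.ssz", []byte{1, 2, 3})) // <nil>
}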
@@ -266,6 +383,24 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
	return verification.VerifiedROBlobFromDisk(bs.fs, root, bs.layout.sszPath(ident))
}

+// GetColumn retrieves a single DataColumnSidecar by its root and index.
+// Since BlobStorage only writes blobs that have undergone full verification, the return
+// value is always a VerifiedRODataColumn.
+func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (blocks.VerifiedRODataColumn, error) {
+	startTime := time.Now()
+
+	ident, err := bs.layout.ident(root, idx)
+	if err != nil {
+		return verification.VerifiedRODataColumnError(err)
+	}
+
+	defer func() {
+		blobFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
+	}()
+
+	return verification.VerifiedRODataColumnFromDisk(bs.fs, root, bs.layout.sszPath(ident))
+}
+
// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
	dirIdent, err := bs.layout.dirIdent(root)
@@ -11,8 +11,11 @@ import (
)

// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
+// TODO: Separate blobs from data columns
type blobIndexMask []bool

+// type blobIndexMask [fieldparams.NumberOfColumns]bool
+
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
	epoch primitives.Epoch
@@ -27,6 +30,21 @@ func (s BlobStorageSummary) HasIndex(idx uint64) bool {
	return s.mask[idx]
}

+// HasDataColumnIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
+func (s BlobStorageSummary) HasDataColumnIndex(idx uint64) bool {
+	// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
+	numberOfColumns := params.BeaconConfig().NumberOfColumns
+	if idx >= numberOfColumns {
+		return false
+	}
+
+	if idx >= uint64(len(s.mask)) {
+		return false
+	}
+
+	return s.mask[idx]
+}
+
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
	if count > len(s.mask) {
@@ -40,6 +58,21 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
	return true
}

+// AllDataColumnsAvailable returns true if we have all data columns for the corresponding indices.
+func (s BlobStorageSummary) AllDataColumnsAvailable(indices map[uint64]bool) bool {
+	if len(indices) > len(s.mask) {
+		return false
+	}
+
+	for indice := range indices {
+		if !s.mask[indice] {
+			return false
+		}
+	}
+
+	return true
+}
+
func (s BlobStorageSummary) MaxBlobsForEpoch() uint64 {
	return uint64(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(s.epoch))
}
@@ -85,16 +118,23 @@ func (s *blobStorageSummaryCache) Summary(root [32]byte) BlobStorageSummary {
}

func (s *blobStorageSummaryCache) ensure(ident blobIdent) error {
-	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(ident.epoch)
-	if ident.index >= uint64(maxBlobsPerBlock) {
+	maskSize := uint64(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(ident.epoch))
+
+	fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
+	if ident.epoch >= fuluForkEpoch {
+		maskSize = params.BeaconConfig().NumberOfColumns
+	}
+
+	if ident.index >= maskSize {
		return errIndexOutOfBounds
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	v := s.cache[ident.root]
	v.epoch = ident.epoch
	if v.mask == nil {
-		v.mask = make(blobIndexMask, maxBlobsPerBlock)
+		v.mask = make(blobIndexMask, maskSize)
	}
	if !v.mask[ident.index] {
		s.updateMetrics(1)
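The ensure change above sizes the index mask by fork: before Fulu the mask spans blob indices, from Fulu onward it spans data column indices. A toy sketch of that fork-dependent sizing, with all constants as illustrative assumptions:

package main

import "fmt"

const (
	maxBlobsPerBlock = 6    // illustrative pre-Fulu bound
	numberOfColumns  = 128  // illustrative Fulu bound
	fuluForkEpoch    = 1000 // assumed fork epoch
)

func maskSize(epoch uint64) uint64 {
	if epoch >= fuluForkEpoch {
		return numberOfColumns // data column indices
	}
	return maxBlobsPerBlock // blob indices
}

func main() {
	fmt.Println(maskSize(999), maskSize(1000)) // 6 128
}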
@@ -3,6 +3,7 @@ package filesystem
import (
	"testing"

+	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -152,3 +153,124 @@ func TestAllAvailable(t *testing.T) {
		})
	}
}
+func TestHasDataColumnIndex(t *testing.T) {
+	storedIndices := map[uint64]bool{
+		1: true,
+		3: true,
+		5: true,
+	}
+
+	cases := []struct {
+		name     string
+		idx      uint64
+		expected bool
+	}{
+		{
+			name:     "index is too high",
+			idx:      fieldparams.NumberOfColumns,
+			expected: false,
+		},
+		{
+			name:     "non existing index",
+			idx:      2,
+			expected: false,
+		},
+		{
+			name:     "existing index",
+			idx:      3,
+			expected: true,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			// Get the maximum index that is stored.
+			maxIndex := uint64(0)
+			for index := range storedIndices {
+				if index > maxIndex {
+					maxIndex = index
+				}
+			}
+
+			mask := make(blobIndexMask, maxIndex+1)
+
+			for idx := range storedIndices {
+				mask[idx] = true
+			}
+
+			sum := BlobStorageSummary{mask: mask}
+			require.Equal(t, c.expected, sum.HasDataColumnIndex(c.idx))
+		})
+	}
+}
+
+func TestAllDataColumnAvailable(t *testing.T) {
+	tooManyColumns := make(map[uint64]bool, fieldparams.NumberOfColumns+1)
+	for i := uint64(0); i < fieldparams.NumberOfColumns+1; i++ {
+		tooManyColumns[i] = true
+	}
+
+	columns346 := map[uint64]bool{
+		3: true,
+		4: true,
+		6: true,
+	}
+
+	columns36 := map[uint64]bool{
+		3: true,
+		6: true,
+	}
+
+	cases := []struct {
+		name          string
+		storedIndices map[uint64]bool
+		testedIndices map[uint64]bool
+		expected      bool
+	}{
+		{
+			name:          "no tested indices",
+			storedIndices: columns346,
+			testedIndices: map[uint64]bool{},
+			expected:      true,
+		},
+		{
+			name:          "too many tested indices",
+			storedIndices: columns346,
+			testedIndices: tooManyColumns,
+			expected:      false,
+		},
+		{
+			name:          "not all tested indices are stored",
+			storedIndices: columns36,
+			testedIndices: columns346,
+			expected:      false,
+		},
+		{
+			name:          "all tested indices are stored",
+			storedIndices: columns346,
+			testedIndices: columns36,
+			expected:      true,
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			// Get the maximum index that is stored.
+			maxIndex := uint64(0)
+			for index := range c.storedIndices {
+				if index > maxIndex {
+					maxIndex = index
+				}
+			}
+
+			mask := make(blobIndexMask, maxIndex+1)
+
+			for idx := range c.storedIndices {
+				mask[idx] = true
+			}
+
+			sum := BlobStorageSummary{mask: mask}
+			require.Equal(t, c.expected, sum.AllDataColumnsAvailable(c.testedIndices))
+		})
+	}
+}
@@ -3,7 +3,6 @@ package filesystem
import (
	"fmt"
	"io"
-	"path"
	"path/filepath"
	"strconv"
	"strings"
@@ -193,7 +192,7 @@ func rootFromPath(p string) ([32]byte, error) {
}

func idxFromPath(p string) (uint64, error) {
-	p = path.Base(p)
+	p = filepath.Base(p)

	if !isSszFile(p) {
		return 0, errors.Wrap(errNotBlobSSZ, "does not have .ssz extension")

@@ -56,6 +56,10 @@ func identForSidecar(sc blocks.VerifiedROBlob) blobIdent {
	return newBlobIdent(sc.BlockRoot(), slots.ToEpoch(sc.Slot()), sc.Index)
}

+func identForDataColumnSidecar(sc blocks.VerifiedRODataColumn) blobIdent {
+	return newBlobIdent(sc.BlockRoot(), slots.ToEpoch(sc.Slot()), sc.ColumnIndex)
+}
+
func (n blobIdent) sszFname() string {
	return fmt.Sprintf("%d.%s", n.index, sszExt)
}
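The path.Base to filepath.Base switch above makes the basename extraction OS-aware: path always treats "/" as the separator, while filepath uses the host's separator (backslash on Windows). A small demonstration:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	p := `0x1234\5.ssz` // a Windows-style separator
	fmt.Println(path.Base(p))     // 0x1234\5.ssz (path splits on "/" only)
	fmt.Println(filepath.Base(p)) // 5.ssz on Windows; unchanged on Unix
}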
@@ -64,11 +64,11 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
	return &BlobMocker{fs: fs, bs: bs}, bs
}

-func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
+func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int, epoch primitives.Epoch) BlobStorageSummarizer {
	c := newBlobStorageCache()
	for k, v := range set {
		for i := range v {
-			if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
+			if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
				t.Fatal(err)
			}
		}
@@ -215,7 +215,7 @@ func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod u
	if err != nil {
		return nil, err
	}
-	return updates, err
+	return updates, nil
}

func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
@@ -518,9 +518,9 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [

	switch {
	case hasFuluKey(enc):
-		protoState := &ethpb.BeaconStateFulu{}
+		protoState := &ethpb.BeaconStateElectra{}
		if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
-			return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
+			return nil, errors.Wrap(err, "failed to unmarshal encoding for Fulu")
		}
		ok, err := s.isStateValidatorMigrationOver()
		if err != nil {
@@ -690,7 +690,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
	}
	return snappy.Encode(nil, append(ElectraKey, rawObj...)), nil
	case version.Fulu:
-		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateFulu)
+		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
		if !ok {
			return nil, errors.New("non valid inner state")
		}
@@ -36,17 +36,19 @@ var (
		NewPayloadMethod,
		NewPayloadMethodV2,
		NewPayloadMethodV3,
-		NewPayloadMethodV4,
		ForkchoiceUpdatedMethod,
		ForkchoiceUpdatedMethodV2,
		ForkchoiceUpdatedMethodV3,
		GetPayloadMethod,
		GetPayloadMethodV2,
		GetPayloadMethodV3,
-		GetPayloadMethodV4,
		GetPayloadBodiesByHashV1,
		GetPayloadBodiesByRangeV1,
	}
+	electraEngineEndpoints = []string{
+		NewPayloadMethodV4,
+		GetPayloadMethodV4,
+	}
)

const (
@@ -296,6 +298,10 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
	defer span.End()

+	// Only check for electra related engine methods if it has been activated.
+	if params.ElectraEnabled() {
+		supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
+	}
	var result []string
	err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
	if err != nil {
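The change above splits the V4 engine methods into a fork-gated list that is appended only once Electra is active. A minimal sketch of the same idea; note it copies the base slice before appending, which avoids the list growing again if the function is called repeatedly (the diff appends to a package-level slice instead). Method names here are examples only.

package main

import "fmt"

var base = []string{"engine_newPayloadV3", "engine_getPayloadV3"}
var electraOnly = []string{"engine_newPayloadV4", "engine_getPayloadV4"}

func capabilities(electraEnabled bool) []string {
	out := append([]string{}, base...) // copy so the base list stays untouched
	if electraEnabled {
		out = append(out, electraOnly...)
	}
	return out
}

func main() {
	fmt.Println(len(capabilities(false)), len(capabilities(true))) // 2 4
}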
@@ -166,6 +166,23 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
	}
}

+// processSingleAttestation logs when the beacon node observes a single attestation from a tracked validator.
+func (s *Service) processSingleAttestation(att ethpb.Att) {
+	s.RLock()
+	defer s.RUnlock()
+
+	single, ok := att.(*ethpb.SingleAttestation)
+	if !ok {
+		log.Errorf("Wrong attestation type (expected %T, got %T)", &ethpb.SingleAttestation{}, att)
+		return
+	}
+
+	if s.canUpdateAttestedValidator(single.AttesterIndex, single.GetData().Slot) {
+		logFields := logMessageTimelyFlagsForIndex(single.AttesterIndex, att.GetData())
+		log.WithFields(logFields).Info("Processed unaggregated attestation")
+	}
+}
+
// processUnaggregatedAttestation logs when the beacon node observes an unaggregated attestation from a tracked validator.
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att ethpb.Att) {
	s.RLock()

@@ -241,7 +241,7 @@ func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.S
	if !ok {
		log.Error("Event feed data is not of type *operation.SingleAttReceivedData")
	} else {
-		s.processUnaggregatedAttestation(s.ctx, data.Attestation)
+		s.processSingleAttestation(data.Attestation)
	}
	case operation.ExitReceived:
		data, ok := e.Data.(*operation.ExitReceivedData)
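The handler above narrows an interface value to the concrete SingleAttestation type before touching type-specific fields, and logs rather than panics on a mismatch. A stripped-down sketch of that guard, with stand-in types:

package main

import "fmt"

type att interface{ slot() uint64 }

type singleAttestation struct{ s uint64 }

func (a *singleAttestation) slot() uint64 { return a.s }

func process(a att) {
	single, ok := a.(*singleAttestation)
	if !ok {
		// Wrong concrete type: report and bail out instead of panicking.
		fmt.Printf("wrong attestation type (expected %T, got %T)\n", &singleAttestation{}, a)
		return
	}
	fmt.Println("processed single attestation at slot", single.slot())
}

func main() {
	process(&singleAttestation{s: 42})
}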
@@ -14,6 +14,7 @@ import (

func configureTracing(cliCtx *cli.Context) error {
	return tracing.Setup(
+		cliCtx.Context,
		"beacon-chain", // service name
		cliCtx.String(cmd.TracingProcessNameFlag.Name),
		cliCtx.String(cmd.TracingEndpointFlag.Name),
@@ -859,6 +859,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
		regularsync.WithBlobStorage(b.BlobStorage),
		regularsync.WithVerifierWaiter(b.verifyInitWaiter),
		regularsync.WithAvailableBlocker(bFillStore),
+		regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
	)
	return b.services.RegisterService(rs)
}
@@ -976,6 +977,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
		FinalizationFetcher: chainService,
		BlockReceiver:       chainService,
		BlobReceiver:        chainService,
+		DataColumnReceiver:  chainService,
		AttestationReceiver: chainService,
		GenesisTimeFetcher:  chainService,
		GenesisFetcher:      chainService,
@@ -6,8 +6,8 @@ go_library(
        "doc.go",
        "log.go",
        "metrics.go",
        "pool.go",
        "service.go",
        "service_new.go",
        "types.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings",
beacon-chain/operations/slashings/pool.go (new file, 324 lines)
@@ -0,0 +1,324 @@
package slashings

import (
	"context"
	"fmt"
	"sort"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	coretime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/container/slice"
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/trailofbits/go-mutexasserts"
)

// NewPool returns an initialized attester slashing and proposer slashing pool.
func NewPool() *Pool {
	return &Pool{
		pendingProposerSlashing: make([]*ethpb.ProposerSlashing, 0),
		pendingAttesterSlashing: make([]*PendingAttesterSlashing, 0),
		included:                make(map[primitives.ValidatorIndex]bool),
	}
}

// PendingAttesterSlashings returns attester slashings that are able to be included into a block.
// This method will return the amount of pending attester slashings for a block transition unless parameter `noLimit` is true
// to indicate the request is for noLimit pending items.
func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []ethpb.AttSlashing {
	p.lock.Lock()
	defer p.lock.Unlock()
	_, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
	defer span.End()

	// Update prom metric.
	numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))

	included := make(map[primitives.ValidatorIndex]bool)

	// Allocate pending slice with a capacity of maxAttesterSlashings or len(p.pendingAttesterSlashing) depending on the request.
	maxSlashings := params.BeaconConfig().MaxAttesterSlashings

	// EIP-7549: Beginning from Electra, the maximum number of attester slashings is reduced to 1.
	if state.Version() >= version.Electra {
		maxSlashings = params.BeaconConfig().MaxAttesterSlashingsElectra
	}
	if noLimit {
		maxSlashings = uint64(len(p.pendingAttesterSlashing))
	}
	pending := make([]ethpb.AttSlashing, 0, maxSlashings)
	for i := 0; i < len(p.pendingAttesterSlashing); i++ {
		if uint64(len(pending)) >= maxSlashings {
			break
		}
		slashing := p.pendingAttesterSlashing[i]
		valid, err := p.validatorSlashingPreconditionCheck(state, slashing.validatorToSlash)
		if err != nil {
			log.WithError(err).Error("could not validate attester slashing")
			continue
		}
		if included[slashing.validatorToSlash] || !valid {
			p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
			i--
			continue
		}
		attSlashing := slashing.attesterSlashing
		slashedVal := slice.IntersectionUint64(
			attSlashing.FirstAttestation().GetAttestingIndices(),
			attSlashing.SecondAttestation().GetAttestingIndices(),
		)
		for _, idx := range slashedVal {
			included[primitives.ValidatorIndex(idx)] = true
		}

		pending = append(pending, attSlashing)
	}

	return pending
}

// PendingProposerSlashings returns proposer slashings that are able to be included into a block.
// This method will return the amount of pending proposer slashings for a block transition unless the `noLimit` parameter
// is set to true to indicate the request is for noLimit pending items.
func (p *Pool) PendingProposerSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.ProposerSlashing {
	p.lock.Lock()
	defer p.lock.Unlock()
	_, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
	defer span.End()

	// Update prom metric.
	numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))

	// Allocate pending slice with a capacity of len(p.pendingProposerSlashing) or maxProposerSlashings depending on the request.
	maxSlashings := params.BeaconConfig().MaxProposerSlashings
	if noLimit {
		maxSlashings = uint64(len(p.pendingProposerSlashing))
	}
	pending := make([]*ethpb.ProposerSlashing, 0, maxSlashings)
	for i := 0; i < len(p.pendingProposerSlashing); i++ {
		if uint64(len(pending)) >= maxSlashings {
			break
		}
		slashing := p.pendingProposerSlashing[i]
		valid, err := p.validatorSlashingPreconditionCheck(state, slashing.Header_1.Header.ProposerIndex)
		if err != nil {
			log.WithError(err).Error("could not validate proposer slashing")
			continue
		}
		if !valid {
			p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
			i--
			continue
		}

		pending = append(pending, slashing)
	}
	return pending
}

// InsertAttesterSlashing into the pool. This method is a no-op if the attester slashing already exists in the pool,
// has been included into a block recently, or the validator is already exited.
func (p *Pool) InsertAttesterSlashing(
	ctx context.Context,
	state state.ReadOnlyBeaconState,
	slashing ethpb.AttSlashing,
) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	ctx, span := trace.StartSpan(ctx, "operations.InsertAttesterSlashing")
	defer span.End()

	if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
		return errors.Wrap(err, "could not verify attester slashing")
	}

	slashedVal := slice.IntersectionUint64(slashing.FirstAttestation().GetAttestingIndices(), slashing.SecondAttestation().GetAttestingIndices())
	cantSlash := make([]uint64, 0, len(slashedVal))
	slashingReason := ""
	for _, val := range slashedVal {
		// Has this validator index been included recently?
		ok, err := p.validatorSlashingPreconditionCheck(state, primitives.ValidatorIndex(val))
		if err != nil {
			return err
		}
		// If the validator has already exited, has already been slashed, or if its index
		// has been recently included in the pool of slashings, skip this index.
		if !ok {
			slashingReason = "validator already exited/slashed or already recently included in slashings pool"
			cantSlash = append(cantSlash, val)
			continue
		}

		// Check if the validator already exists in the list of slashings.
		// Use binary search to find the answer.
		found := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
			return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
		})
		if found != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[found].validatorToSlash) == val {
			slashingReason = "validator already exist in list of pending slashings, no need to attempt to slash again"
			cantSlash = append(cantSlash, val)
			continue
		}

		pendingSlashing := &PendingAttesterSlashing{
			attesterSlashing: slashing,
			validatorToSlash: primitives.ValidatorIndex(val),
		}
		// Insert into pending list and sort again.
		p.pendingAttesterSlashing = append(p.pendingAttesterSlashing, pendingSlashing)
		sort.Slice(p.pendingAttesterSlashing, func(i, j int) bool {
			return p.pendingAttesterSlashing[i].validatorToSlash < p.pendingAttesterSlashing[j].validatorToSlash
		})
		numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
	}
	if len(cantSlash) == len(slashedVal) {
		return fmt.Errorf(
			"could not slash any of %d validators in submitted slashing: %s",
			len(slashedVal),
			slashingReason,
		)
	}
	return nil
}

// InsertProposerSlashing into the pool. This method is a no-op if the pending slashing already exists,
// has been included recently, the validator is already exited, or the validator was already slashed.
func (p *Pool) InsertProposerSlashing(
	ctx context.Context,
	state state.ReadOnlyBeaconState,
	slashing *ethpb.ProposerSlashing,
) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	_, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
	defer span.End()

	if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
		return errors.Wrap(err, "could not verify proposer slashing")
	}

	idx := slashing.Header_1.Header.ProposerIndex
	ok, err := p.validatorSlashingPreconditionCheck(state, idx)
	if err != nil {
		return err
	}
	// If the validator has already exited, has already been slashed, or if its index
	// has been recently included in the pool of slashings, do not process this new
	// slashing.
	if !ok {
		return fmt.Errorf("validator at index %d cannot be slashed", idx)
	}

	// Check if the validator already exists in the list of slashings.
	// Use binary search to find the answer.
	found := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
		return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= slashing.Header_1.Header.ProposerIndex
	})
	if found != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[found].Header_1.Header.ProposerIndex ==
		slashing.Header_1.Header.ProposerIndex {
		return errors.New("slashing object already exists in pending proposer slashings")
	}

	// Insert into pending list and sort again.
	p.pendingProposerSlashing = append(p.pendingProposerSlashing, slashing)
	sort.Slice(p.pendingProposerSlashing, func(i, j int) bool {
		return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex < p.pendingProposerSlashing[j].Header_1.Header.ProposerIndex
	})
	numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))

	return nil
}

// MarkIncludedAttesterSlashing is used when an attester slashing has been included in a beacon block.
// Every block seen by this node that contains attester slashings should call this method to include
// the attester slashings.
func (p *Pool) MarkIncludedAttesterSlashing(as ethpb.AttSlashing) {
	p.lock.Lock()
	defer p.lock.Unlock()
	slashedVal := slice.IntersectionUint64(as.FirstAttestation().GetAttestingIndices(), as.SecondAttestation().GetAttestingIndices())
	for _, val := range slashedVal {
		i := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
			return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
		})
		if i != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[i].validatorToSlash) == val {
			p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
		}
		p.included[primitives.ValidatorIndex(val)] = true
		numAttesterSlashingsIncluded.Inc()
	}
}

// MarkIncludedProposerSlashing is used when a proposer slashing has been included in a beacon block.
// Every block seen by this node that contains proposer slashings should call this method to include
// the proposer slashings.
func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) {
	p.lock.Lock()
	defer p.lock.Unlock()
	i := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
		return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= ps.Header_1.Header.ProposerIndex
	})
	if i != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex == ps.Header_1.Header.ProposerIndex {
		p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
	}
	p.included[ps.Header_1.Header.ProposerIndex] = true
	numProposerSlashingsIncluded.Inc()
}

// ConvertToElectra converts all Phase0 attester slashings to Electra attester slashings.
// This functionality is needed at the time of the Electra fork.
func (p *Pool) ConvertToElectra() {
	p.lock.Lock()
	defer p.lock.Unlock()

	for _, pas := range p.pendingAttesterSlashing {
		if pas.attesterSlashing.Version() == version.Phase0 {
			first := pas.attesterSlashing.FirstAttestation()
			second := pas.attesterSlashing.SecondAttestation()
			pas.attesterSlashing = &ethpb.AttesterSlashingElectra{
				Attestation_1: &ethpb.IndexedAttestationElectra{
					AttestingIndices: first.GetAttestingIndices(),
					Data:             first.GetData(),
					Signature:        first.GetSignature(),
				},
				Attestation_2: &ethpb.IndexedAttestationElectra{
					AttestingIndices: second.GetAttestingIndices(),
					Data:             second.GetData(),
					Signature:        second.GetSignature(),
				},
			}
		}
	}
}

// this function checks a few items about a validator before proceeding with inserting
// a proposer/attester slashing into the pool. First, it checks if the validator
// has been recently included in the pool, then it checks if the validator is slashable.
// Note: this method requires the caller to hold the lock.
func (p *Pool) validatorSlashingPreconditionCheck(
	state state.ReadOnlyBeaconState,
	valIdx primitives.ValidatorIndex,
) (bool, error) {
	if !mutexasserts.RWMutexLocked(&p.lock) && !mutexasserts.RWMutexRLocked(&p.lock) {
		return false, errors.New("pool.validatorSlashingPreconditionCheck: caller must hold read/write lock")
	}

	// Check if the validator index has been included recently.
	if p.included[valIdx] {
		return false, nil
	}
	validator, err := state.ValidatorAtIndexReadOnly(valIdx)
	if err != nil {
		return false, err
	}
	// Check if the validator is slashable.
	if !helpers.IsSlashableValidatorUsingTrie(validator, coretime.CurrentEpoch(state)) {
		return false, nil
	}
	return true, nil
}
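The pool keeps both pending lists sorted by validator index and uses sort.Search both to deduplicate and to remove included entries. The pool itself appends and then re-sorts; an equivalent in-place binary-search insertion, shown here as a small self-contained sketch, makes the invariant explicit:

package main

import (
	"fmt"
	"sort"
)

// insertSorted keeps xs ascending and refuses duplicates, mirroring the
// search-then-insert pattern the pool applies to pending slashings.
func insertSorted(xs []uint64, v uint64) ([]uint64, bool) {
	i := sort.Search(len(xs), func(i int) bool { return xs[i] >= v })
	if i < len(xs) && xs[i] == v {
		return xs, false // already present, nothing to do
	}
	xs = append(xs, 0)
	copy(xs[i+1:], xs[i:])
	xs[i] = v
	return xs, true
}

func main() {
	xs := []uint64{2, 5, 9}
	xs, ok := insertSorted(xs, 5)
	fmt.Println(xs, ok) // [2 5 9] false
	xs, ok = insertSorted(xs, 7)
	fmt.Println(xs, ok) // [2 5 7 9] true
}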
@@ -2,323 +2,102 @@ package slashings
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
coretime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// NewPool returns an initialized attester slashing and proposer slashing pool.
|
||||
func NewPool() *Pool {
|
||||
return &Pool{
|
||||
pendingProposerSlashing: make([]*ethpb.ProposerSlashing, 0),
|
||||
pendingAttesterSlashing: make([]*PendingAttesterSlashing, 0),
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
// WithElectraTimer includes functional options for the blockchain service related to CLI flags.
|
||||
func WithElectraTimer(cw startup.ClockWaiter, currentSlotFn func() primitives.Slot) Option {
|
||||
return func(p *PoolService) error {
|
||||
p.runElectraTimer = true
|
||||
p.cw = cw
|
||||
p.currentSlotFn = currentSlotFn
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PendingAttesterSlashings returns attester slashings that are able to be included into a block.
|
||||
// This method will return the amount of pending attester slashings for a block transition unless parameter `noLimit` is true
|
||||
// to indicate the request is for noLimit pending items.
|
||||
func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []ethpb.AttSlashing {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
// Update prom metric.
|
||||
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
|
||||
|
||||
included := make(map[primitives.ValidatorIndex]bool)
|
||||
|
||||
// Allocate pending slice with a capacity of maxAttesterSlashings or len(p.pendingAttesterSlashing)) depending on the request.
|
||||
maxSlashings := params.BeaconConfig().MaxAttesterSlashings
|
||||
|
||||
// EIP-7549: Beginning from Electra, the max attester slashings is reduced to 1.
|
||||
if state.Version() >= version.Electra {
|
||||
maxSlashings = params.BeaconConfig().MaxAttesterSlashingsElectra
|
||||
}
|
||||
if noLimit {
|
||||
maxSlashings = uint64(len(p.pendingAttesterSlashing))
|
||||
}
|
||||
pending := make([]ethpb.AttSlashing, 0, maxSlashings)
|
||||
for i := 0; i < len(p.pendingAttesterSlashing); i++ {
|
||||
if uint64(len(pending)) >= maxSlashings {
|
||||
break
|
||||
}
|
||||
slashing := p.pendingAttesterSlashing[i]
|
||||
valid, err := p.validatorSlashingPreconditionCheck(state, slashing.validatorToSlash)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not validate attester slashing")
|
||||
continue
|
||||
}
|
||||
if included[slashing.validatorToSlash] || !valid {
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
attSlashing := slashing.attesterSlashing
|
||||
slashedVal := slice.IntersectionUint64(
|
||||
attSlashing.FirstAttestation().GetAttestingIndices(),
|
||||
attSlashing.SecondAttestation().GetAttestingIndices(),
|
||||
)
|
||||
for _, idx := range slashedVal {
|
||||
included[primitives.ValidatorIndex(idx)] = true
|
||||
}
|
||||
|
||||
pending = append(pending, attSlashing)
|
||||
// NewPoolService returns a service that manages the Pool.
|
||||
func NewPoolService(ctx context.Context, pool PoolManager, opts ...Option) *PoolService {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
p := &PoolService{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
poolManager: pool,
|
||||
}
|
||||
|
||||
return pending
|
||||
for _, opt := range opts {
|
||||
if err := opt(p); err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// PendingProposerSlashings returns proposer slashings that are able to be included into a block.
|
||||
// This method will return the amount of pending proposer slashings for a block transition unless the `noLimit` parameter
|
||||
// is set to true to indicate the request is for noLimit pending items.
|
||||
func (p *Pool) PendingProposerSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.ProposerSlashing {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
// Update prom metric.
|
||||
numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
|
||||
|
||||
// Allocate pending slice with a capacity of len(p.pendingProposerSlashing) or maxProposerSlashings depending on the request.
|
||||
maxSlashings := params.BeaconConfig().MaxProposerSlashings
|
||||
if noLimit {
|
||||
maxSlashings = uint64(len(p.pendingProposerSlashing))
|
||||
}
|
||||
pending := make([]*ethpb.ProposerSlashing, 0, maxSlashings)
|
||||
for i := 0; i < len(p.pendingProposerSlashing); i++ {
|
||||
if uint64(len(pending)) >= maxSlashings {
|
||||
break
|
||||
}
|
||||
slashing := p.pendingProposerSlashing[i]
|
||||
valid, err := p.validatorSlashingPreconditionCheck(state, slashing.Header_1.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not validate proposer slashing")
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
|
||||
pending = append(pending, slashing)
|
||||
}
|
||||
return pending
|
||||
// Start the slashing pool service.
|
||||
func (p *PoolService) Start() {
|
||||
go p.run()
|
||||
}
|
||||
|
||||
// InsertAttesterSlashing into the pool. This method is a no-op if the attester slashing already exists in the pool,
|
||||
// has been included into a block recently, or the validator is already exited.
|
||||
func (p *Pool) InsertAttesterSlashing(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
slashing ethpb.AttSlashing,
|
||||
) error {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
ctx, span := trace.StartSpan(ctx, "operations.InsertAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
|
||||
return errors.Wrap(err, "could not verify attester slashing")
|
||||
func (p *PoolService) run() {
|
||||
if !p.runElectraTimer {
|
||||
return
|
||||
}
|
||||
|
||||
slashedVal := slice.IntersectionUint64(slashing.FirstAttestation().GetAttestingIndices(), slashing.SecondAttestation().GetAttestingIndices())
|
||||
cantSlash := make([]uint64, 0, len(slashedVal))
|
||||
slashingReason := ""
|
||||
for _, val := range slashedVal {
|
||||
// Has this validator index been included recently?
|
||||
ok, err := p.validatorSlashingPreconditionCheck(state, primitives.ValidatorIndex(val))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the validator has already exited, has already been slashed, or if its index
|
||||
// has been recently included in the pool of slashings, skip including this indice.
|
||||
if !ok {
|
||||
slashingReason = "validator already exited/slashed or already recently included in slashings pool"
|
||||
cantSlash = append(cantSlash, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the validator already exists in the list of slashings.
|
||||
// Use binary search to find the answer.
|
||||
found := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
|
||||
return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
|
||||
})
|
||||
if found != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[found].validatorToSlash) == val {
|
||||
slashingReason = "validator already exist in list of pending slashings, no need to attempt to slash again"
|
||||
cantSlash = append(cantSlash, val)
|
||||
continue
|
||||
}
|
||||
|
||||
pendingSlashing := &PendingAttesterSlashing{
|
||||
attesterSlashing: slashing,
|
||||
validatorToSlash: primitives.ValidatorIndex(val),
|
||||
}
|
||||
// Insert into pending list and sort again.
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing, pendingSlashing)
|
||||
sort.Slice(p.pendingAttesterSlashing, func(i, j int) bool {
|
||||
return p.pendingAttesterSlashing[i].validatorToSlash < p.pendingAttesterSlashing[j].validatorToSlash
|
||||
})
|
||||
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
|
||||
electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(cantSlash) == len(slashedVal) {
|
||||
return fmt.Errorf(
|
||||
"could not slash any of %d validators in submitted slashing: %s",
|
||||
len(slashedVal),
|
||||
slashingReason,
|
||||
)
|
||||
|
||||
// If run() is executed after the transition to Electra has already happened,
|
||||
// there is nothing to convert because the slashing pool is empty at startup.
|
||||
if p.currentSlotFn() >= electraSlot {
|
||||
return
|
||||
}
|
||||
|
||||
p.waitForChainInitialization()
|
||||
|
||||
electraTime, err := slots.ToTime(uint64(p.clock.GenesisTime().Unix()), electraSlot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t := time.NewTimer(electraTime.Sub(p.clock.Now()))
|
||||
defer t.Stop()
|
||||
|
||||
select {
|
||||
case <-t.C:
|
||||
log.Info("Converting Phase0 slashings to Electra slashings")
|
||||
p.poolManager.ConvertToElectra()
|
||||
return
|
||||
case <-p.ctx.Done():
|
||||
log.Warn("Context cancelled, ConvertToElectra timer will not execute")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PoolService) waitForChainInitialization() {
|
||||
clock, err := p.cw.WaitForClock(p.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not receive chain start notification")
|
||||
}
|
||||
p.clock = clock
|
||||
log.WithField("genesisTime", clock.GenesisTime()).Info(
|
||||
"Slashing pool service received chain initialization event",
|
||||
)
|
||||
}
|
||||
|
||||
// Stop the slashing pool service.
|
||||
func (p *PoolService) Stop() error {
|
||||
p.cancel()
|
||||
return nil
|
||||
}
	// InsertProposerSlashing into the pool. This method is a no-op if the pending slashing already exists,
	// has been included recently, the validator has already exited, or the validator was already slashed.
	func (p *Pool) InsertProposerSlashing(
		ctx context.Context,
		state state.ReadOnlyBeaconState,
		slashing *ethpb.ProposerSlashing,
	) error {
		p.lock.Lock()
		defer p.lock.Unlock()
		_, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
		defer span.End()

		if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
			return errors.Wrap(err, "could not verify proposer slashing")
		}

		idx := slashing.Header_1.Header.ProposerIndex
		ok, err := p.validatorSlashingPreconditionCheck(state, idx)
		if err != nil {
			return err
		}
		// If the validator has already exited, has already been slashed, or if its index
		// has been recently included in the pool of slashings, do not process this new
		// slashing.
		if !ok {
			return fmt.Errorf("validator at index %d cannot be slashed", idx)
		}

		// Check if the validator already exists in the list of slashings.
		// Use binary search to find the answer.
		found := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
			return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= slashing.Header_1.Header.ProposerIndex
		})
		if found != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[found].Header_1.Header.ProposerIndex ==
			slashing.Header_1.Header.ProposerIndex {
			return errors.New("slashing object already exists in pending proposer slashings")
		}

		// Insert into pending list and sort again.
		p.pendingProposerSlashing = append(p.pendingProposerSlashing, slashing)
		sort.Slice(p.pendingProposerSlashing, func(i, j int) bool {
			return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex < p.pendingProposerSlashing[j].Header_1.Header.ProposerIndex
		})
		numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
		return nil
	}

	// Status of the slashing pool service.
	func (p *PoolService) Status() error {
		return nil
	}
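Both Insert* methods above probe a sorted slice with sort.Search before appending, then restore order with sort.Slice. A self-contained sketch of the same membership-check-plus-insert idiom; shifting in place avoids the O(n log n) re-sort, and all names here are illustrative:

	package main

	import (
		"fmt"
		"sort"
	)

	// insertSorted inserts v into the ascending slice s and reports whether
	// v was already present. sort.Search returns the lower bound of v.
	func insertSorted(s []uint64, v uint64) ([]uint64, bool) {
		i := sort.Search(len(s), func(j int) bool { return s[j] >= v })
		if i < len(s) && s[i] == v {
			return s, true // already present, nothing to do
		}
		s = append(s, 0)     // grow by one element
		copy(s[i+1:], s[i:]) // shift the tail one slot to the right
		s[i] = v             // place the new value at its lower bound
		return s, false
	}

	func main() {
		s := []uint64{2, 5, 9}
		s, dup := insertSorted(s, 5)
		fmt.Println(s, dup) // [2 5 9] true
		s, dup = insertSorted(s, 7)
		fmt.Println(s, dup) // [2 5 7 9] false
	}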
	// MarkIncludedAttesterSlashing is used when an attester slashing has been included in a beacon block.
	// Every block seen by this node that contains attester slashings should call this method to mark
	// the attester slashings as included.
	func (p *Pool) MarkIncludedAttesterSlashing(as ethpb.AttSlashing) {
		p.lock.Lock()
		defer p.lock.Unlock()
		slashedVal := slice.IntersectionUint64(as.FirstAttestation().GetAttestingIndices(), as.SecondAttestation().GetAttestingIndices())
		for _, val := range slashedVal {
			i := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
				return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
			})
			if i != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[i].validatorToSlash) == val {
				p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
			}
			p.included[primitives.ValidatorIndex(val)] = true
			numAttesterSlashingsIncluded.Inc()
		}
	}

	// MarkIncludedProposerSlashing is used when a proposer slashing has been included in a beacon block.
	// Every block seen by this node that contains proposer slashings should call this method to mark
	// the proposer slashings as included.
	func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) {
		p.lock.Lock()
		defer p.lock.Unlock()
		i := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
			return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= ps.Header_1.Header.ProposerIndex
		})
		if i != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex == ps.Header_1.Header.ProposerIndex {
			p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
		}
		p.included[ps.Header_1.Header.ProposerIndex] = true
		numProposerSlashingsIncluded.Inc()
	}
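The MarkIncluded* methods are the mirror of insertion: sort.Search locates the entry and the append splice deletes it while keeping the slice sorted, after which the included map permanently rejects re-insertion for that index. A minimal sketch of the ordered-removal half, with illustrative names:

	package main

	import (
		"fmt"
		"sort"
	)

	// removeSorted deletes v from the ascending slice s, if present,
	// without disturbing the order of the remaining elements.
	func removeSorted(s []uint64, v uint64) []uint64 {
		i := sort.Search(len(s), func(j int) bool { return s[j] >= v })
		if i < len(s) && s[i] == v {
			s = append(s[:i], s[i+1:]...)
		}
		return s
	}

	func main() {
		s := []uint64{2, 5, 7, 9}
		s = removeSorted(s, 7)
		fmt.Println(s) // [2 5 9]
		s = removeSorted(s, 3) // 3 is absent: no-op
		fmt.Println(s) // [2 5 9]
	}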
	// ConvertToElectra converts all Phase0 attester slashings to Electra attester slashings.
	// This functionality is needed at the time of the Electra fork.
	func (p *Pool) ConvertToElectra() {
		p.lock.Lock()
		defer p.lock.Unlock()

		for _, pas := range p.pendingAttesterSlashing {
			if pas.attesterSlashing.Version() == version.Phase0 {
				first := pas.attesterSlashing.FirstAttestation()
				second := pas.attesterSlashing.SecondAttestation()
				pas.attesterSlashing = &ethpb.AttesterSlashingElectra{
					Attestation_1: &ethpb.IndexedAttestationElectra{
						AttestingIndices: first.GetAttestingIndices(),
						Data:             first.GetData(),
						Signature:        first.GetSignature(),
					},
					Attestation_2: &ethpb.IndexedAttestationElectra{
						AttestingIndices: second.GetAttestingIndices(),
						Data:             second.GetData(),
						Signature:        second.GetSignature(),
					},
				}
			}
		}
	}
	// validatorSlashingPreconditionCheck verifies a few things about a validator before a
	// proposer/attester slashing is inserted into the pool: first, whether the validator
	// has recently been included in the pool, and then whether the validator is slashable.
	// Note: this method requires the caller to hold the lock.
	func (p *Pool) validatorSlashingPreconditionCheck(
		state state.ReadOnlyBeaconState,
		valIdx primitives.ValidatorIndex,
	) (bool, error) {
		if !mutexasserts.RWMutexLocked(&p.lock) && !mutexasserts.RWMutexRLocked(&p.lock) {
			return false, errors.New("pool.validatorSlashingPreconditionCheck: caller must hold read/write lock")
		}

		// Check if the validator index has been included recently.
		if p.included[valIdx] {
			return false, nil
		}
		validator, err := state.ValidatorAtIndexReadOnly(valIdx)
		if err != nil {
			return false, err
		}
		// Check if the validator is slashable.
		if !helpers.IsSlashableValidatorUsingTrie(validator, coretime.CurrentEpoch(state)) {
			return false, nil
		}
		return true, nil
	}
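Since validatorSlashingPreconditionCheck only asserts (via mutexasserts) that p.lock is held rather than taking it, every caller must acquire the lock first, as both Insert* methods do. A reduced sketch of that calling convention using a plain sync.RWMutex and invented names:

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	// pool demonstrates the "caller must hold the lock" convention: the
	// public method takes the lock, and the private helper assumes it.
	type pool struct {
		mu       sync.RWMutex
		included map[uint64]bool
	}

	// precheckLocked assumes p.mu is held by the caller.
	func (p *pool) precheckLocked(idx uint64) (bool, error) {
		if p.included == nil {
			return false, errors.New("pool not initialized")
		}
		return !p.included[idx], nil
	}

	func (p *pool) insert(idx uint64) error {
		p.mu.Lock()
		defer p.mu.Unlock()
		ok, err := p.precheckLocked(idx)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("validator %d already included", idx)
		}
		p.included[idx] = true
		return nil
	}

	func main() {
		p := &pool{included: map[uint64]bool{}}
		fmt.Println(p.insert(7)) // <nil>
		fmt.Println(p.insert(7)) // validator 7 already included
	}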
@@ -1,103 +0,0 @@
	package slashings

	import (
		"context"
		"time"

		"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
		"github.com/prysmaticlabs/prysm/v5/config/params"
		"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
		"github.com/prysmaticlabs/prysm/v5/time/slots"
	)

	// WithElectraTimer includes functional options for the slashing pool service related to CLI flags.
	func WithElectraTimer(cw startup.ClockWaiter, currentSlotFn func() primitives.Slot) Option {
		return func(p *PoolService) error {
			p.runElectraTimer = true
			p.cw = cw
			p.currentSlotFn = currentSlotFn
			return nil
		}
	}

	// NewPoolService returns a service that manages the Pool.
	func NewPoolService(ctx context.Context, pool PoolManager, opts ...Option) *PoolService {
		ctx, cancel := context.WithCancel(ctx)
		p := &PoolService{
			ctx:         ctx,
			cancel:      cancel,
			poolManager: pool,
		}

		for _, opt := range opts {
			if err := opt(p); err != nil {
				return nil
			}
		}

		return p
	}

	// Start the slashing pool service.
	func (p *PoolService) Start() {
		go p.run()
	}

	func (p *PoolService) run() {
		if !p.runElectraTimer {
			return
		}

		electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
		if err != nil {
			return
		}

		// If run() is executed after the transition to Electra has already happened,
		// there is nothing to convert because the slashing pool is empty at startup.
		if p.currentSlotFn() >= electraSlot {
			return
		}

		p.waitForChainInitialization()

		electraTime, err := slots.ToTime(uint64(p.clock.GenesisTime().Unix()), electraSlot)
		if err != nil {
			return
		}

		t := time.NewTimer(electraTime.Sub(p.clock.Now()))
		defer t.Stop()

		select {
		case <-t.C:
			log.Info("Converting Phase0 slashings to Electra slashings")
			p.poolManager.ConvertToElectra()
			return
		case <-p.ctx.Done():
			log.Warn("Context cancelled, ConvertToElectra timer will not execute")
			return
		}
	}

	func (p *PoolService) waitForChainInitialization() {
		clock, err := p.cw.WaitForClock(p.ctx)
		if err != nil {
			log.WithError(err).Error("Could not receive chain start notification")
		}
		p.clock = clock
		log.WithField("genesisTime", clock.GenesisTime()).Info(
			"Slashing pool service received chain initialization event",
		)
	}

	// Stop the slashing pool service.
	func (p *PoolService) Stop() error {
		p.cancel()
		return nil
	}

	// Status of the slashing pool service.
	func (p *PoolService) Status() error {
		return nil
	}
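The deleted file above wires the service through the functional-options pattern: each Option is a func(*PoolService) error applied in order by the constructor. A compact sketch of the same wiring with simplified stand-in types; note that, unlike NewPoolService above, this version surfaces a failing option as an error instead of silently returning nil:

	package main

	import (
		"context"
		"fmt"
	)

	type service struct {
		ctx      context.Context
		cancel   context.CancelFunc
		runTimer bool
	}

	// option mirrors the Option type above: it configures a *service
	// and may fail.
	type option func(*service) error

	func withTimer() option {
		return func(s *service) error {
			s.runTimer = true
			return nil
		}
	}

	func newService(ctx context.Context, opts ...option) (*service, error) {
		ctx, cancel := context.WithCancel(ctx)
		s := &service{ctx: ctx, cancel: cancel}
		for _, opt := range opts {
			if err := opt(s); err != nil {
				cancel() // release the context before reporting failure
				return nil, err
			}
		}
		return s, nil
	}

	func main() {
		s, err := newService(context.Background(), withTimer())
		fmt.Println(s.runTimer, err) // true <nil>
	}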
@@ -7,6 +7,7 @@ go_library(
        "broadcaster.go",
        "config.go",
        "connection_gater.go",
        "custody.go",
        "dial_relay_node.go",
        "discovery.go",
        "doc.go",
@@ -45,6 +46,7 @@ go_library(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
@@ -55,6 +57,7 @@ go_library(
        "//beacon-chain/startup:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
@@ -76,6 +79,7 @@ go_library(
        "//time/slots:go_default_library",
        "@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//log:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
@@ -116,6 +120,7 @@ go_test(
        "addr_factory_test.go",
        "broadcaster_test.go",
        "connection_gater_test.go",
        "custody_test.go",
        "dial_relay_node_test.go",
        "discovery_test.go",
        "fork_test.go",
@@ -137,9 +142,11 @@ go_test(
    flaky = True,
    tags = ["requires-network"],
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
@@ -152,6 +159,7 @@ go_test(
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
        "//container/leaky-bucket:go_default_library",
@@ -162,6 +170,7 @@ go_test(
        "//network/forks:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//proto/testing:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
@@ -11,6 +11,7 @@ import (
	ssz "github.com/prysmaticlabs/fastssz"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -96,7 +97,12 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
	return nil
}

func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [4]byte) {
func (s *Service) internalBroadcastAttestation(
	ctx context.Context,
	subnet uint64,
	att ethpb.Att,
	forkDigest [fieldparams.VersionLength]byte,
) {
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -152,7 +158,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
	}
}

func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
	_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -228,7 +234,12 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
	return nil
}

func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
func (s *Service) internalBroadcastBlob(
	ctx context.Context,
	subnet uint64,
	blobSidecar *ethpb.BlobSidecar,
	forkDigest [fieldparams.VersionLength]byte,
) {
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -243,7 +254,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
	s.subnetLocker(wrappedSubIdx).RUnlock()

	if !hasPeer {
		blobSidecarCommitteeBroadcastAttempts.Inc()
		blobSidecarBroadcastAttempts.Inc()
		if err := func() error {
			s.subnetLocker(wrappedSubIdx).Lock()
			defer s.subnetLocker(wrappedSubIdx).Unlock()
@@ -252,7 +263,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
				return err
			}
			if ok {
				blobSidecarCommitteeBroadcasts.Inc()
				blobSidecarBroadcasts.Inc()
				return nil
			}
			return errors.New("failed to find peers for subnet")
@@ -268,6 +279,120 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
	}
}

// BroadcastDataColumn broadcasts a data column sidecar to the p2p network. The message is
// assumed to be broadcast on the current fork and on the input column subnet.
// TODO: Add tests
func (s *Service) BroadcastDataColumn(
	ctx context.Context,
	root [fieldparams.RootLength]byte,
	columnSubnet uint64,
	dataColumnSidecar *ethpb.DataColumnSidecar,
) error {
	// Add tracing to the function.
	ctx, span := trace.StartSpan(ctx, "p2p.BroadcastDataColumn")
	defer span.End()

	// Ensure the data column sidecar is not nil.
	if dataColumnSidecar == nil {
		return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", columnSubnet)
	}

	// Retrieve the current fork digest.
	forkDigest, err := s.currentForkDigest()
	if err != nil {
		err := errors.Wrap(err, "current fork digest")
		tracing.AnnotateError(span, err)
		return err
	}

	// Non-blocking broadcast, with attempts to discover a column subnet peer if none is available.
	go s.internalBroadcastDataColumn(ctx, root, columnSubnet, dataColumnSidecar, forkDigest)

	return nil
}

func (s *Service) internalBroadcastDataColumn(
	ctx context.Context,
	root [fieldparams.RootLength]byte,
	columnSubnet uint64,
	dataColumnSidecar *ethpb.DataColumnSidecar,
	forkDigest [fieldparams.VersionLength]byte,
) {
	// Add tracing to the function.
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
	defer span.End()

	// Increase the number of broadcast attempts.
	dataColumnSidecarBroadcastAttempts.Inc()

	// Clear parent context / deadline.
	ctx = trace.NewContext(context.Background(), span)

	// Define a one-slot-long context timeout.
	oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	ctx, cancel := context.WithTimeout(ctx, oneSlot)
	defer cancel()

	// Build the topic corresponding to this column subnet and this fork digest.
	topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)

	// Compute the wrapped subnet index.
	wrappedSubIdx := columnSubnet + dataColumnSubnetVal

	// Check if we have peers with this subnet.
	hasPeer := func() bool {
		s.subnetLocker(wrappedSubIdx).RLock()
		defer s.subnetLocker(wrappedSubIdx).RUnlock()

		return s.hasPeerWithSubnet(topic)
	}()

	// If no peers are found, attempt to find peers with this subnet.
	if !hasPeer {
		if err := func() error {
			s.subnetLocker(wrappedSubIdx).Lock()
			defer s.subnetLocker(wrappedSubIdx).Unlock()

			ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
			if err != nil {
				return errors.Wrap(err, "find peers for subnet")
			}

			if ok {
				return nil
			}
			return errors.New("failed to find peers for subnet")
		}(); err != nil {
			log.WithError(err).Error("Failed to find peers")
			tracing.AnnotateError(span, err)
		}
	}

	// Broadcast the data column sidecar to the network.
	if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
		log.WithError(err).Error("Failed to broadcast data column sidecar")
		tracing.AnnotateError(span, err)
	}

	header := dataColumnSidecar.SignedBlockHeader.GetHeader()
	slot := header.GetSlot()

	slotStartTime, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
	if err != nil {
		log.WithError(err).Error("Failed to convert slot to time")
	}

	log.WithFields(logrus.Fields{
		"slot":               slot,
		"timeSinceSlotStart": time.Since(slotStartTime),
		"root":               fmt.Sprintf("%#x", root),
		"columnSubnet":       columnSubnet,
	}).Debug("Broadcasted data column sidecar")

	// Increase the number of successful broadcasts.
	dataColumnSidecarBroadcasts.Inc()
}
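internalBroadcastBlob and internalBroadcastDataColumn share a double-checked pattern around the per-subnet lock: a cheap read-locked peer check first, then a write-locked re-check plus discovery only when no peer was found. A minimal sketch of that shape; the peer table and discovery call are stand-ins, not the p2p service's real API:

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	type subnetBroadcaster struct {
		mu       sync.RWMutex
		hasPeers map[uint64]bool // stand-in for a real peer table
	}

	func (b *subnetBroadcaster) ensurePeer(subnet uint64) error {
		// Fast path: read lock only.
		b.mu.RLock()
		ok := b.hasPeers[subnet]
		b.mu.RUnlock()
		if ok {
			return nil
		}

		// Slow path: take the write lock and search for a peer. State may
		// have changed between the two acquisitions, so the slow path
		// re-establishes the condition instead of trusting the earlier read.
		b.mu.Lock()
		defer b.mu.Unlock()
		if !b.discover(subnet) { // stand-in for FindPeersWithSubnet
			return errors.New("failed to find peers for subnet")
		}
		b.hasPeers[subnet] = true
		return nil
	}

	func (b *subnetBroadcaster) discover(subnet uint64) bool {
		return subnet%2 == 0 // toy rule so the example is deterministic
	}

	func main() {
		b := &subnetBroadcaster{hasPeers: map[uint64]bool{}}
		fmt.Println(b.ensurePeer(4)) // <nil>
		fmt.Println(b.ensurePeer(3)) // failed to find peers for subnet
	}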
// broadcastObject broadcasts a message to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
	ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -297,14 +422,18 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
	return nil
}

func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}
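Each *SubnetTopicFormat above is a printf template parameterized by the fork digest and the subnet index. A tiny sketch of how such a template expands; the format string here is an assumption for illustration, not copied from the repo:

	package main

	import "fmt"

	func main() {
		// Hypothetical format: the real DataColumnSubnetTopicFormat lives
		// in the p2p package; this one only illustrates the expansion.
		const topicFormat = "/eth2/%x/data_column_sidecar_%d"
		forkDigest := [4]byte{0xde, 0xad, 0xbe, 0xef}
		subnet := uint64(3)
		// %x on a byte array prints the concatenated hex of its elements.
		fmt.Printf(topicFormat+"\n", forkDigest, subnet)
		// Output: /eth2/deadbeef/data_column_sidecar_3
	}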
@@ -12,11 +12,16 @@ import (
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/prysmaticlabs/go-bitfield"
	"google.golang.org/protobuf/proto"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
	p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -24,7 +29,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"google.golang.org/protobuf/proto"
)

func TestService_Broadcast(t *testing.T) {
@@ -516,3 +520,71 @@ func TestService_BroadcastBlob(t *testing.T) {
	require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

func TestService_BroadcastDataColumn(t *testing.T) {
	require.NoError(t, kzg.Start())
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")

	p := &Service{
		host:                  p1.BHost,
		pubsub:                p1.PubSub(),
		joinedTopics:          map[string]*pubsub.Topic{},
		cfg:                   &Config{},
		genesisTime:           time.Now(),
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
		subnetsLock:           make(map[uint64]*sync.RWMutex),
		subnetsLockLock:       sync.Mutex{},
		peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
			ScorerParams: &scorers.Config{},
		}),
	}

	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
	require.NoError(t, err)
	blobs := make([]kzg.Blob, 6)
	sidecars, err := peerdas.DataColumnSidecars(b, blobs)
	require.NoError(t, err)

	sidecar := sidecars[0]
	subnet := uint64(0)
	topic := DataColumnSubnetTopicFormat
	GossipTypeMapping[reflect.TypeOf(sidecar)] = topic
	digest, err := p.currentForkDigest()
	require.NoError(t, err)
	topic = fmt.Sprintf(topic, digest, subnet)

	// External peer subscribes to the topic.
	topic += p.Encoding().ProtocolSuffix()
	sub, err := p2.SubscribeToTopic(topic)
	require.NoError(t, err)

	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...

	// Async listen for the pubsub, must be before the broadcast.
	var wg sync.WaitGroup
	wg.Add(1)
	go func(tt *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		defer cancel()

		msg, err := sub.Next(ctx)
		require.NoError(t, err)

		result := &ethpb.DataColumnSidecar{}
		require.NoError(t, p.Encoding().DecodeGossip(msg.Data, result))
		require.DeepEqual(t, result, sidecar)
	}(t)

	// Attempting to broadcast a nil object should fail.
	ctx := context.Background()
	var root [fieldparams.RootLength]byte
	require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastDataColumn(ctx, root, subnet, nil))

	// Broadcast to peers and wait.
	require.NoError(t, p.BroadcastDataColumn(ctx, root, subnet, sidecar))
	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

beacon-chain/p2p/custody.go (new file)
@@ -0,0 +1,152 @@
package p2p

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

// AdmissibleCustodyGroupsPeers returns a list of peers that custody a superset of the local node's custody groups.
func (s *Service) AdmissibleCustodyGroupsPeers(peers []peer.ID) ([]peer.ID, error) {
	localCustodyGroupCount := peerdas.ActualCustodyGroupCount()
	return s.custodyGroupsAdmissiblePeers(peers, localCustodyGroupCount)
}

// AdmissibleCustodySamplingPeers returns a list of peers that custody a superset of the local node's sampling columns.
func (s *Service) AdmissibleCustodySamplingPeers(peers []peer.ID) ([]peer.ID, error) {
	localSubnetSamplingSize := peerdas.CustodyGroupSamplingSize(peerdas.Actual)
	return s.custodyGroupsAdmissiblePeers(peers, localSubnetSamplingSize)
}

// custodyGroupsAdmissiblePeers filters out `peers` that do not custody a superset of our own custody groups.
func (s *Service) custodyGroupsAdmissiblePeers(peers []peer.ID, custodyGroupCount uint64) ([]peer.ID, error) {
	// Get the total number of custody groups.
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups

	// Retrieve the local node ID.
	localNodeId := s.NodeID()

	// Retrieve the local node info.
	localNodeInfo, _, err := peerdas.Info(localNodeId, custodyGroupCount)
	if err != nil {
		return nil, errors.Wrap(err, "peer info")
	}

	// Retrieve the needed custody groups.
	neededCustodyGroups := localNodeInfo.CustodyGroups

	// Find the valid peers.
	validPeers := make([]peer.ID, 0, len(peers))

loop:
	for _, pid := range peers {
		// Get the custody group count of the remote peer.
		remoteCustodyGroupCount := s.CustodyGroupCountFromPeer(pid)

		// If the remote peer custodies fewer groups than we do, skip it.
		if remoteCustodyGroupCount < custodyGroupCount {
			continue
		}

		// Get the remote node ID from the peer ID.
		remoteNodeID, err := ConvertPeerIDToNodeID(pid)
		if err != nil {
			return nil, errors.Wrap(err, "convert peer ID to node ID")
		}

		// Retrieve the remote peer info.
		remotePeerInfo, _, err := peerdas.Info(remoteNodeID, remoteCustodyGroupCount)
		if err != nil {
			return nil, errors.Wrap(err, "peer info")
		}

		// Retrieve the custody groups of the remote peer.
		remoteCustodyGroups := remotePeerInfo.CustodyGroups
		remoteCustodyGroupsCount := uint64(len(remoteCustodyGroups))

		// If the remote peer custodies all possible groups, add it to the list.
		if remoteCustodyGroupsCount == numberOfCustodyGroups {
			validPeers = append(validPeers, pid)
			continue
		}

		// Skip the peer if it does not custody every one of our needed groups.
		for custodyGroup := range neededCustodyGroups {
			if !remoteCustodyGroups[custodyGroup] {
				continue loop
			}
		}

		// Add the valid peer to the list.
		validPeers = append(validPeers, pid)
	}

	return validPeers, nil
}

// custodyGroupCountFromPeerENR retrieves the custody count from the peer ENR.
// If the ENR is not available, it defaults to the minimum number of custody groups
// an honest node custodies and serves samples from.
func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
	// By default, we assume the peer custodies the minimum number of groups.
	custodyRequirement := params.BeaconConfig().CustodyRequirement

	// Retrieve the ENR of the peer.
	record, err := s.peers.ENR(pid)
	if err != nil {
		log.WithError(err).WithFields(logrus.Fields{
			"peerID":       pid,
			"defaultValue": custodyRequirement,
		}).Debug("Failed to retrieve ENR for peer, falling back to the default value")

		return custodyRequirement
	}

	// Retrieve the custody group count from the ENR.
	custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
	if err != nil {
		log.WithError(err).WithFields(logrus.Fields{
			"peerID":       pid,
			"defaultValue": custodyRequirement,
		}).Debug("Failed to retrieve custody group count from ENR for peer, falling back to the default value")

		return custodyRequirement
	}

	return custodyGroupCount
}

// CustodyGroupCountFromPeer retrieves the custody group count from a peer.
// It first tries to get the custody group count from the peer's metadata,
// then falls back to the ENR value if the metadata is not available, then
// falls back to the minimum number of custody groups an honest node should
// custody and serve samples from if the ENR is not available either.
func (s *Service) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
	// Try to get the custody group count from the peer's metadata.
	metadata, err := s.peers.Metadata(pid)
	if err != nil {
		// On error, default to the ENR value.
		log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
		return s.custodyGroupCountFromPeerENR(pid)
	}

	// If the metadata is nil, default to the ENR value.
	if metadata == nil {
		log.WithField("peerID", pid).Debug("Metadata is nil, defaulting to the ENR value")
		return s.custodyGroupCountFromPeerENR(pid)
	}

	// Get the custody group count from the metadata.
	custodyCount := metadata.CustodyGroupCount()

	// If the custody count is zero, default to the ENR value.
	if custodyCount == 0 {
		log.WithField("peerID", pid).Debug("The custody count extracted from the metadata equals 0, defaulting to the ENR value")
		return s.custodyGroupCountFromPeerENR(pid)
	}

	return custodyCount
}
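The labeled continue in custodyGroupsAdmissiblePeers implements a subset test: a peer is admissible only if its custody-group set contains every group we need. The same check in isolation, using map-of-bool sets as the code above suggests:

	package main

	import "fmt"

	// isSuperset reports whether have contains every group present in need.
	func isSuperset(have, need map[uint64]bool) bool {
		for g, wanted := range need {
			if wanted && !have[g] {
				return false // one needed group is missing
			}
		}
		return true
	}

	func main() {
		need := map[uint64]bool{1: true, 4: true}
		peerA := map[uint64]bool{1: true, 2: true, 4: true}
		peerB := map[uint64]bool{1: true, 3: true}
		fmt.Println(isSuperset(peerA, need)) // true
		fmt.Println(isSuperset(peerB, need)) // false
	}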

beacon-chain/p2p/custody_test.go (new file)
@@ -0,0 +1,201 @@
package p2p

import (
	"context"
	"crypto/ecdsa"
	"net"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
	ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
	prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
	pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func createPeer(t *testing.T, privateKeyOffset int, custodyCount uint64) (*enr.Record, peer.ID, *ecdsa.PrivateKey) {
	privateKeyBytes := make([]byte, 32)
	for i := 0; i < 32; i++ {
		privateKeyBytes[i] = byte(privateKeyOffset + i)
	}

	unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
	require.NoError(t, err)

	privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
	require.NoError(t, err)

	peerID, err := peer.IDFromPrivateKey(unmarshalledPrivateKey)
	require.NoError(t, err)

	record := &enr.Record{}
	record.Set(peerdas.Cgc(custodyCount))
	record.Set(enode.Secp256k1(privateKey.PublicKey))

	return record, peerID, privateKey
}

func TestAdmissibleCustodyGroupsPeers(t *testing.T) {
	genesisValidatorRoot := make([]byte, 32)

	for i := 0; i < 32; i++ {
		genesisValidatorRoot[i] = byte(i)
	}

	service := &Service{
		cfg:                   &Config{},
		genesisTime:           time.Now(),
		genesisValidatorsRoot: genesisValidatorRoot,
		peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
			ScorerParams: &scorers.Config{},
		}),
	}

	ipAddrString, err := prysmNetwork.ExternalIPv4()
	require.NoError(t, err)
	ipAddr := net.ParseIP(ipAddrString)

	custodyRequirement := params.BeaconConfig().CustodyRequirement
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

	// Peer 1 custodies exactly the same groups as we do.
	// (We use the same key pair as ours for simplicity.)
	peer1Record, peer1ID, localPrivateKey := createPeer(t, 1, custodyRequirement)

	// Peer 2 custodies all the groups.
	peer2Record, peer2ID, _ := createPeer(t, 2, dataColumnSidecarSubnetCount)

	// Peer 3 custodies different groups than we do (but the same count).
	// (We use the same public key as peer 2 for simplicity.)
	peer3Record, peer3ID, _ := createPeer(t, 3, custodyRequirement)

	// Peer 4 custodies fewer groups than we do.
	peer4Record, peer4ID, _ := createPeer(t, 4, custodyRequirement-1)

	createListener := func() (*discover.UDPv5, error) {
		return service.createListener(ipAddr, localPrivateKey)
	}

	listener, err := newListener(createListener)
	require.NoError(t, err)

	service.dv5Listener = listener

	service.peers.Add(peer1Record, peer1ID, nil, network.DirOutbound)
	service.peers.Add(peer2Record, peer2ID, nil, network.DirOutbound)
	service.peers.Add(peer3Record, peer3ID, nil, network.DirOutbound)
	service.peers.Add(peer4Record, peer4ID, nil, network.DirOutbound)

	actual, err := service.AdmissibleCustodyGroupsPeers([]peer.ID{peer1ID, peer2ID, peer3ID, peer4ID})
	require.NoError(t, err)

	expected := []peer.ID{peer1ID, peer2ID}
	require.DeepSSZEqual(t, expected, actual)
}

func TestCustodyGroupCountFromPeer(t *testing.T) {
	const (
		expectedENR      uint64 = 7
		expectedMetadata uint64 = 8
		pid                     = "test-id"
	)

	cgc := peerdas.Cgc(expectedENR)

	// Define a nil record.
	var nilRecord *enr.Record = nil

	// Define an empty record (record with no `cgc` entry).
	emptyRecord := &enr.Record{}

	// Define a nominal record.
	nominalRecord := &enr.Record{}
	nominalRecord.Set(cgc)

	// Define a metadata with zero custody.
	zeroMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
		CustodyGroupCount: 0,
	})

	// Define a nominal metadata.
	nominalMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
		CustodyGroupCount: expectedMetadata,
	})

	testCases := []struct {
		name     string
		record   *enr.Record
		metadata metadata.Metadata
		expected uint64
	}{
		{
			name:     "No metadata - No ENR",
			record:   nilRecord,
			expected: params.BeaconConfig().CustodyRequirement,
		},
		{
			name:     "No metadata - Empty ENR",
			record:   emptyRecord,
			expected: params.BeaconConfig().CustodyRequirement,
		},
		{
			name:     "No Metadata - ENR",
			record:   nominalRecord,
			expected: expectedENR,
		},
		{
			name:     "Metadata with 0 value - ENR",
			record:   nominalRecord,
			metadata: zeroMetadata,
			expected: expectedENR,
		},
		{
			name:     "Metadata - ENR",
			record:   nominalRecord,
			metadata: nominalMetadata,
			expected: expectedMetadata,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create peers status.
			peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
				ScorerParams: &scorers.Config{},
			})

			// Set the metadata.
			if tc.metadata != nil {
				peers.SetMetadata(pid, tc.metadata)
			}

			// Add a new peer with the record.
			peers.Add(tc.record, pid, nil, network.DirOutbound)

			// Create a new service.
			service := &Service{
				peers:    peers,
				metaData: tc.metadata,
			}

			// Retrieve the custody count from the remote peer.
			actual := service.CustodyGroupCountFromPeer(pid)

			// Verify the result.
			require.Equal(t, tc.expected, actual)
		})
	}
}
@@ -3,10 +3,12 @@ package p2p
import (
	"bytes"
	"crypto/ecdsa"
	"io"
	"net"
	"sync"
	"time"

	glog "github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
@@ -16,6 +18,7 @@ import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/features"
	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -219,18 +222,51 @@ func (s *Service) RefreshPersistentSubnets() {
	// Get the sync subnet bitfield in our metadata.
	currentBitSInMetadata := s.Metadata().SyncnetsBitfield()

	// Is our sync bitvector record up to date?
	isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)

	if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
	// Compare the current epoch with the Fulu fork epoch.
	fuluForkEpoch := params.BeaconConfig().FuluForkEpoch

	if currentEpoch < fuluForkEpoch {
		// Altair behaviour.
		if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
			// Nothing to do, return early.
			return
		}

		// Some data have changed, update our record and metadata.
		s.updateSubnetRecordWithMetadataV2(bitV, bitS)

		// Ping all peers to inform them of new metadata.
		s.pingPeersAndLogEnr()

		return
	}

	// Get the current custody group count.
	custodyGroupCount := peerdas.ActualCustodyGroupCount()

	// Get the custody group count we store in our record.
	inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
	if err != nil {
		log.WithError(err).Error("Could not retrieve custody group count")
		return
	}

	// Get the custody group count in our metadata.
	inMetadataCustodyGroupCount := s.Metadata().CustodyGroupCount()

	isCustodyGroupCountUpToDate := (custodyGroupCount == inRecordCustodyGroupCount && custodyGroupCount == inMetadataCustodyGroupCount)

	if isBitVUpToDate && isBitSUpToDate && isCustodyGroupCountUpToDate {
		// Nothing to do, return early.
		return
	}

	// Some data have changed, update our record and metadata.
	s.updateSubnetRecordWithMetadataV2(bitV, bitS)
	// Some data changed. Update the record and the metadata.
	s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodyGroupCount)

	// Ping all peers to inform them of new metadata
	// Ping all peers.
	s.pingPeersAndLogEnr()
}

@@ -422,6 +458,7 @@ func (s *Service) createListener(
	Bootnodes:               bootNodes,
	PingInterval:            s.cfg.PingInterval,
	NoFindnodeLivenessCheck: s.cfg.DisableLivenessCheck,
	Log:                     glog.NewLogger(glog.NewTerminalHandlerWithLevel(io.Discard, -12, true)),
}

listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
@@ -457,6 +494,11 @@ func (s *Service) createLocalNode(
	localNode.Set(quicEntry)
}

if params.FuluEnabled() {
	custodyGroupCount := peerdas.ActualCustodyGroupCount()
	localNode.Set(peerdas.Cgc(custodyGroupCount))
}

localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
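createLocalNode only advertises the custody-group-count entry once Fulu is enabled, so pre-fork ENRs stay unchanged. A sketch of gating an ENR entry on a flag with go-ethereum's enr package; the "cgc" key and the values are illustrative:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/p2p/enr"
	)

	func main() {
		fuluEnabled := true
		custodyGroupCount := uint64(4)

		record := &enr.Record{}
		if fuluEnabled {
			// WithEntry pairs a key with a value that enr serializes
			// via RLP; Set adds (or replaces) the entry in the record.
			record.Set(enr.WithEntry("cgc", custodyGroupCount))
		}

		var got uint64
		if err := record.Load(enr.WithEntry("cgc", &got)); err != nil {
			fmt.Println("entry absent:", err)
			return
		}
		fmt.Println("cgc =", got)
	}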

@@ -25,6 +25,7 @@ import (
	"github.com/prysmaticlabs/go-bitfield"
	mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -140,6 +141,15 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {

func TestCreateLocalNode(t *testing.T) {
	params.SetupTestConfigCleanup(t)

	// Set the Fulu fork epoch to something other than the far future epoch.
	initFuluForkEpoch := params.BeaconConfig().FuluForkEpoch
	params.BeaconConfig().FuluForkEpoch = 42

	defer func() {
		params.BeaconConfig().FuluForkEpoch = initFuluForkEpoch
	}()

	testCases := []struct {
		name string
		cfg  *Config
@@ -171,6 +181,7 @@ func TestCreateLocalNode(t *testing.T) {
			expectedError: false,
		},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			// Define ports.
@@ -236,6 +247,11 @@ func TestCreateLocalNode(t *testing.T) {
			syncSubnets := new([]byte)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
			require.DeepSSZEqual(t, []byte{0}, *syncSubnets)

			// Check the cgc config.
			custodyGroupCount := new(uint64)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, custodyGroupCount)))
			require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount)
		})
	}
}
@@ -535,7 +551,7 @@ type check struct {
	metadataSequenceNumber uint64
	attestationSubnets     []uint64
	syncSubnets            []uint64
	custodySubnetCount     *uint64
	custodyGroupCount      *uint64
}

func checkPingCountCacheMetadataRecord(
@@ -601,6 +617,18 @@ func checkPingCountCacheMetadataRecord(
		actualBitSMetadata := service.metaData.SyncnetsBitfield()
		require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata)
	}

	if expected.custodyGroupCount != nil {
		// Check the custody group count in the ENR.
		var actualCustodyGroupCount uint64
		err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &actualCustodyGroupCount))
		require.NoError(t, err)
		require.Equal(t, *expected.custodyGroupCount, actualCustodyGroupCount)

		// Check the custody group count in the metadata.
		actualGroupCountMetadata := service.metaData.CustodyGroupCount()
		require.Equal(t, *expected.custodyGroupCount, actualGroupCountMetadata)
	}
}

func TestRefreshPersistentSubnets(t *testing.T) {
@@ -610,12 +638,18 @@ func TestRefreshPersistentSubnets(t *testing.T) {
	defer cache.SubnetIDs.EmptyAllCaches()
	defer cache.SyncSubnetIDs.EmptyAllCaches()

	const altairForkEpoch = 5
	const (
		altairForkEpoch = 5
		fuluForkEpoch   = 10
	)

	custodyGroupCount := params.BeaconConfig().CustodyRequirement

	// Set up epochs.
	defaultCfg := params.BeaconConfig()
	cfg := defaultCfg.Copy()
	cfg.AltairForkEpoch = altairForkEpoch
	cfg.FuluForkEpoch = fuluForkEpoch
	params.OverrideBeaconConfig(cfg)

	// Compute the number of seconds per epoch.
@@ -684,6 +718,39 @@ func TestRefreshPersistentSubnets(t *testing.T) {
			},
		},
	},
	{
		name:              "Fulu",
		epochSinceGenesis: fuluForkEpoch,
		checks: []check{
			{
				pingCount:              0,
				metadataSequenceNumber: 0,
				attestationSubnets:     []uint64{},
				syncSubnets:            nil,
			},
			{
				pingCount:              1,
				metadataSequenceNumber: 1,
				attestationSubnets:     []uint64{40, 41},
				syncSubnets:            nil,
				custodyGroupCount:      &custodyGroupCount,
			},
			{
				pingCount:              2,
				metadataSequenceNumber: 2,
				attestationSubnets:     []uint64{40, 41},
				syncSubnets:            []uint64{1, 2},
				custodyGroupCount:      &custodyGroupCount,
			},
			{
				pingCount:              2,
				metadataSequenceNumber: 2,
				attestationSubnets:     []uint64{40, 41},
				syncSubnets:            []uint64{1, 2},
				custodyGroupCount:      &custodyGroupCount,
			},
		},
	},
}

for _, tc := range testCases {
@@ -121,7 +121,7 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
		return defaultAttesterSlashingTopicParams(), nil
	case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
		return defaultBlsToExecutionChangeTopicParams(), nil
	case strings.Contains(topic, GossipBlobSidecarMessage):
	case strings.Contains(topic, GossipBlobSidecarMessage), strings.Contains(topic, GossipDataColumnSidecarMessage):
		// TODO(Deneb): Using the default block scoring. But this should be updated.
		return defaultBlockTopicParams(), nil
	default:
@@ -22,6 +22,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
	SyncCommitteeSubnetTopicFormat:        func() proto.Message { return &ethpb.SyncCommitteeMessage{} },
	BlsToExecutionChangeSubnetTopicFormat: func() proto.Message { return &ethpb.SignedBLSToExecutionChange{} },
	BlobSubnetTopicFormat:                 func() proto.Message { return &ethpb.BlobSidecar{} },
	DataColumnSubnetTopicFormat:           func() proto.Message { return &ethpb.DataColumnSidecar{} },
}
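The map above lets subscription code construct a fresh protobuf message for whichever topic a gossip message arrived on. A stripped-down sketch of the same constructor-dispatch idea; the message types and topic strings are placeholders:

	package main

	import "fmt"

	type message interface{ Name() string }

	type blobSidecar struct{}

	func (blobSidecar) Name() string { return "BlobSidecar" }

	type dataColumnSidecar struct{}

	func (dataColumnSidecar) Name() string { return "DataColumnSidecar" }

	// topicMappings mirrors gossipTopicMappings: topic format -> constructor.
	var topicMappings = map[string]func() message{
		"/eth2/%x/blob_sidecar_%d":        func() message { return &blobSidecar{} },
		"/eth2/%x/data_column_sidecar_%d": func() message { return &dataColumnSidecar{} },
	}

	func main() {
		if newMsg, ok := topicMappings["/eth2/%x/data_column_sidecar_%d"]; ok {
			fmt.Println(newMsg().Name()) // DataColumnSidecar
		}
	}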

// GossipTopicMappings is a function to return the assigned data type
@@ -22,7 +22,9 @@ const (
)

func peerMultiaddrString(conn network.Conn) string {
	return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
	remoteMultiaddr := conn.RemoteMultiaddr().String()
	remotePeerID := conn.RemotePeer().String()
	return fmt.Sprintf("%s/p2p/%s", remoteMultiaddr, remotePeerID)
}

func (s *Service) connectToPeer(conn network.Conn) {
@@ -3,6 +3,7 @@ package p2p
import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/connmgr"
@@ -12,6 +13,7 @@ import (
	"github.com/multiformats/go-multiaddr"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
	"google.golang.org/protobuf/proto"
@@ -28,6 +30,12 @@ type P2P interface {
	ConnectionHandler
	PeersProvider
	MetadataProvider
	DataColumnsHandler
}

type Acceser interface {
	Broadcaster
	PeerManager
}

// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
@@ -36,6 +44,7 @@ type Broadcaster interface {
	BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
	BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
	BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
	BroadcastDataColumn(ctx context.Context, root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -81,6 +90,7 @@ type PeerManager interface {
	PeerID() peer.ID
	Host() host.Host
	ENR() *enr.Record
	NodeID() enode.ID
	DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
	RefreshPersistentSubnets()
	FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
@@ -102,3 +112,9 @@ type MetadataProvider interface {
	Metadata() metadata.Metadata
	MetadataSeq() uint64
}

type DataColumnsHandler interface {
	CustodyGroupCountFromPeer(peer.ID) uint64
	AdmissibleCustodyGroupsPeers([]peer.ID) ([]peer.ID, error)
	AdmissibleCustodySamplingPeers([]peer.ID) ([]peer.ID, error)
}
@@ -60,17 +60,25 @@ var (
			"the subnet. The beacon node increments this counter when the broadcast is blocked " +
			"until a subnet peer can be found.",
	})
	blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
	blobSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_blob_sidecar_committee_broadcasts",
		Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
		Help: "The number of blob sidecar messages that were broadcast with no peer on.",
	})
	syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_sync_committee_subnet_attempted_broadcasts",
		Help: "The number of sync committee messages that were attempted to be broadcast.",
	})
	blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
	blobSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
		Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
		Help: "The number of blob sidecar messages that were attempted to be broadcast.",
	})
	dataColumnSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_data_column_sidecar_broadcasts",
		Help: "The number of data column sidecar messages that were broadcast.",
	})
	dataColumnSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_data_column_sidecar_attempted_broadcasts",
		Help: "The number of data column sidecar messages that were attempted to be broadcast.",
	})
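The counters above follow the usual promauto pattern: NewCounter registers the metric with the default registry at package initialization, and call sites only ever call Inc(). A minimal sketch (the metric name is invented for the example):

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promauto"
	)

	// promauto.NewCounter registers the counter with the default registry
	// as a side effect of construction; duplicate names panic at init.
	var dataColumnBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "example_data_column_broadcasts_total",
		Help: "The number of data column sidecar messages broadcast by this toy program.",
	})

	func main() {
		// Each successful broadcast would bump the counter exactly once.
		dataColumnBroadcasts.Inc()
		fmt.Println("counter registered and incremented")
	}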

// Gossip Tracer Metrics
@@ -4,7 -4,6 @@ import (
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
)

@@ -126,13 +125,14 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {

// isBadPeerNoLock is the lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
	if peerData, ok := s.store.PeerData(pid); ok {
		if peerData.BadResponses >= s.config.Threshold {
			return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
		}
	// if peerData, ok := s.store.PeerData(pid); ok {
	// TODO: Remove this once out of devnet
	// if peerData.BadResponses >= s.config.Threshold {
	// 	return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
	// }

		return nil
	}
	// return nil
	// }

	return nil
}

@@ -2,7 +2,6 @@ package scorers_test

import (
	"context"
	"sort"
	"testing"

	"github.com/libp2p/go-libp2p/core/network"
@@ -14,40 +13,41 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestScorers_BadResponses_Score(t *testing.T) {
	const pid = "peer1"
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_Score(t *testing.T) {
// 	const pid = "peer1"

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
// 	ctx, cancel := context.WithCancel(context.Background())
// 	defer cancel()

	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 4,
			},
		},
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()
// 	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// 		PeerLimit: 30,
// 		ScorerParams: &scorers.Config{
// 			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// 				Threshold: 4,
// 			},
// 		},
// 	})
// 	scorer := peerStatuses.Scorers().BadResponsesScorer()

	assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
// 	assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

	scorer.Increment(pid)
	assert.NoError(t, scorer.IsBadPeer(pid))
	assert.Equal(t, -2.5, scorer.Score(pid))
// 	scorer.Increment(pid)
// 	assert.NoError(t, scorer.IsBadPeer(pid))
// 	assert.Equal(t, -2.5, scorer.Score(pid))

	scorer.Increment(pid)
	assert.NoError(t, scorer.IsBadPeer(pid))
	assert.Equal(t, float64(-5), scorer.Score(pid))
// 	scorer.Increment(pid)
// 	assert.NoError(t, scorer.IsBadPeer(pid))
// 	assert.Equal(t, float64(-5), scorer.Score(pid))

	scorer.Increment(pid)
	assert.NoError(t, scorer.IsBadPeer(pid))
	assert.Equal(t, float64(-7.5), scorer.Score(pid))
// 	scorer.Increment(pid)
// 	assert.NoError(t, scorer.IsBadPeer(pid))
// 	assert.Equal(t, float64(-7.5), scorer.Score(pid))

	scorer.Increment(pid)
	assert.NotNil(t, scorer.IsBadPeer(pid))
	assert.Equal(t, -100.0, scorer.Score(pid))
}
// 	scorer.Increment(pid)
// 	assert.NotNil(t, scorer.IsBadPeer(pid))
// 	assert.Equal(t, -100.0, scorer.Score(pid))
// }

func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
@@ -142,58 +142,60 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
	assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
}

func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
// 	ctx, cancel := context.WithCancel(context.Background())
// 	defer cancel()

	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
		PeerLimit:    30,
		ScorerParams: &scorers.Config{},
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()
	pid := peer.ID("peer1")
	assert.NoError(t, scorer.IsBadPeer(pid))
// 	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// 		PeerLimit:    30,
// 		ScorerParams: &scorers.Config{},
// 	})
// 	scorer := peerStatuses.Scorers().BadResponsesScorer()
// 	pid := peer.ID("peer1")
// 	assert.NoError(t, scorer.IsBadPeer(pid))

	peerStatuses.Add(nil, pid, nil, network.DirUnknown)
	assert.NoError(t, scorer.IsBadPeer(pid))
// 	peerStatuses.Add(nil, pid, nil, network.DirUnknown)
// 	assert.NoError(t, scorer.IsBadPeer(pid))

	for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
		scorer.Increment(pid)
		if i == scorers.DefaultBadResponsesThreshold-1 {
			assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
		} else {
			assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
		}
	}
}
// 	for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// 		scorer.Increment(pid)
// 		if i == scorers.DefaultBadResponsesThreshold-1 {
// 			assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// 		} else {
// 			assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// 		}
// 	}
// }

func TestScorers_BadResponses_BadPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
// 	ctx, cancel := context.WithCancel(context.Background())
// 	defer cancel()

	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
		PeerLimit:    30,
		ScorerParams: &scorers.Config{},
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()
	pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
	for i := 0; i < len(pids); i++ {
		peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
	}
	for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
		scorer.Increment(pids[1])
		scorer.Increment(pids[2])
		scorer.Increment(pids[4])
	}
	assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
	assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
	assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
	assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
	assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
	want := []peer.ID{pids[1], pids[2], pids[4]}
	badPeers := scorer.BadPeers()
	sort.Slice(badPeers, func(i, j int) bool {
		return badPeers[i] < badPeers[j]
	})
	assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
}
// 	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// 		PeerLimit:    30,
// 		ScorerParams: &scorers.Config{},
// 	})
// 	scorer := peerStatuses.Scorers().BadResponsesScorer()
// 	pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
// 	for i := 0; i < len(pids); i++ {
// 		peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
// 	}
// 	for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pids[1])
|
||||
// scorer.Increment(pids[2])
|
||||
// scorer.Increment(pids[4])
|
||||
// }
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
// want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
// badPeers := scorer.BadPeers()
|
||||
// sort.Slice(badPeers, func(i, j int) bool {
|
||||
// return badPeers[i] < badPeers[j]
|
||||
// })
|
||||
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
// }
|
||||
|
||||
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -212,99 +212,102 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestScorers_Service_loop(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_loop(t *testing.T) {
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 5,
|
||||
DecayInterval: 50 * time.Millisecond,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
DecayInterval: 25 * time.Millisecond,
|
||||
Decay: 64,
|
||||
},
|
||||
},
|
||||
})
|
||||
s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 5,
|
||||
// DecayInterval: 50 * time.Millisecond,
|
||||
// },
|
||||
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
// DecayInterval: 25 * time.Millisecond,
|
||||
// Decay: 64,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
|
||||
pid1 := peer.ID("peer1")
|
||||
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
s1.Increment(pid1)
|
||||
}
|
||||
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
// pid1 := peer.ID("peer1")
|
||||
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
// for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
// s1.Increment(pid1)
|
||||
// }
|
||||
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
|
||||
s2.IncrementProcessedBlocks("peer1", 221)
|
||||
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
// s2.IncrementProcessedBlocks("peer1", 221)
|
||||
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- struct{}{}
|
||||
}()
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Error("Timed out")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// done := make(chan struct{}, 1)
|
||||
// go func() {
|
||||
// defer func() {
|
||||
// done <- struct{}{}
|
||||
// }()
|
||||
// ticker := time.NewTicker(50 * time.Millisecond)
|
||||
// defer ticker.Stop()
|
||||
// for {
|
||||
// select {
|
||||
// case <-ticker.C:
|
||||
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
// return
|
||||
// }
|
||||
// case <-ctx.Done():
|
||||
// t.Error("Timed out")
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
|
||||
<-done
|
||||
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
}
|
||||
// <-done
|
||||
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
// }
|
||||
|
||||
func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// }
|
||||
|
||||
func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
// for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
// }
|
||||
|
||||
@@ -705,31 +705,47 @@ func (p *Status) deprecatedPrune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
|
||||
// upon by the majority of peers. This method may not return the absolute highest finalized, but
|
||||
// the finalized epoch in which most peers can serve blocks (plurality voting).
|
||||
// Ideally, all peers would be reporting the same finalized epoch but some may be behind due to their
|
||||
// own latency, or because of their finalized epoch at the time we queried them.
|
||||
// Returns epoch number and list of peers that are at or beyond that epoch.
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
|
||||
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
|
||||
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
|
||||
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
|
||||
// finalized epoch but some may be behind due to their own latency, or because of their finalized
|
||||
// epoch at the time we queried them. Returns epoch number and list of peers that are at or beyond
|
||||
// that epoch.
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
|
||||
// key: finalized epoch, value: number of peers that support this finalized epoch.
|
||||
finalizedEpochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: finalized epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch >= ourFinalizedEpoch {
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
|
||||
// lower than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which is the epoch most peers agree upon.
|
||||
var targetEpoch primitives.Epoch
|
||||
var mostVotes uint64
|
||||
// If there is a tie, select the highest epoch.
|
||||
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
|
||||
for epoch, count := range finalizedEpochVotes {
|
||||
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
|
||||
mostVotes = count
|
||||
@@ -737,11 +753,12 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort PIDs by finalized epoch, in decreasing order.
|
||||
// Sort PIDs by finalized (epoch, head), in decreasing order.
|
||||
sort.Slice(potentialPIDs, func(i, j int) bool {
|
||||
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
|
||||
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
|
||||
}
|
||||
|
||||
return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
|
||||
})
|
||||
|
||||
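As an illustrative aside (not part of the diff): the selection loop above is plurality voting with ties broken toward the higher epoch. A minimal self-contained sketch, with plain uint64 epochs standing in for primitives.Epoch:

package main

import "fmt"

// pickTargetEpoch returns the epoch with the most votes, breaking ties
// in favor of the higher epoch, mirroring the loop in BestFinalized.
func pickTargetEpoch(votes map[uint64]uint64) uint64 {
	targetEpoch, mostVotes := uint64(0), uint64(0)
	for epoch, count := range votes {
		if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
			mostVotes = count
			targetEpoch = epoch
		}
	}
	return targetEpoch
}

func main() {
	// Three peers vote for epoch 10 and three for epoch 12: the tie resolves to 12.
	fmt.Println(pickTargetEpoch(map[uint64]uint64{10: 3, 12: 3})) // prints 12
}
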
@@ -764,26 +781,42 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
// BestNonFinalized returns the highest known epoch, higher than ours,
// that is shared by at least minPeers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
// Retrieve all connected peers.
connected := p.Connected()

// Calculate our head slot.
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))

// key: head epoch, value: number of peers that support this epoch.
epochVotes := make(map[primitives.Epoch]uint64)

// key: peer ID, value: head epoch of the peer.
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))

// key: peer ID, value: head slot of the peer.
pidHead := make(map[peer.ID]primitives.Slot, len(connected))

potentialPIDs := make([]peer.ID, 0, len(connected))

ourHeadSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(ourHeadEpoch))
for _, pid := range connected {
peerChainState, err := p.ChainState(pid)
if err == nil && peerChainState != nil && peerChainState.HeadSlot > ourHeadSlot {
epoch := slots.ToEpoch(peerChainState.HeadSlot)
epochVotes[epoch]++
pidEpoch[pid] = epoch
pidHead[pid] = peerChainState.HeadSlot
potentialPIDs = append(potentialPIDs, pid)
// Skip if the peer's head epoch is not defined, or if the peer's head slot is
// lower than or equal to ours.
if err != nil || peerChainState == nil || peerChainState.HeadSlot <= ourHeadSlot {
continue
}

epoch := slots.ToEpoch(peerChainState.HeadSlot)

epochVotes[epoch]++
pidEpoch[pid] = epoch
pidHead[pid] = peerChainState.HeadSlot
potentialPIDs = append(potentialPIDs, pid)
}

// Select the target epoch, which has enough peers' votes (>= minPeers).
var targetEpoch primitives.Epoch
targetEpoch := primitives.Epoch(0)
for epoch, votes := range epochVotes {
if votes >= uint64(minPeers) && targetEpoch < epoch {
targetEpoch = epoch
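Another aside, under the same plain-uint64 convention as the previous sketch: the non-finalized variant swaps plurality voting for a quorum, so the highest epoch with at least minPeers votes wins.

package main

import "fmt"

// pickBestNonFinalized returns the highest epoch whose vote count reaches
// minPeers, or 0 when no epoch has enough support, mirroring the loop in
// BestNonFinalized.
func pickBestNonFinalized(votes map[uint64]uint64, minPeers uint64) uint64 {
	targetEpoch := uint64(0)
	for epoch, count := range votes {
		if count >= minPeers && targetEpoch < epoch {
			targetEpoch = epoch
		}
	}
	return targetEpoch
}

func main() {
	votes := map[uint64]uint64{20: 1, 18: 3, 15: 5}
	fmt.Println(pickBestNonFinalized(votes, 3)) // prints 18: epoch 20 lacks the quorum
}
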
@@ -1012,16 +1045,23 @@ func (p *Status) isfromBadIP(pid peer.ID) error {
return nil
}

ip, err := manet.ToIP(peerData.Address)
if err != nil {
return errors.Wrap(err, "to ip")
}
// ip, err := manet.ToIP(peerData.Address)
// if err != nil {
// return errors.Wrap(err, "to ip")
// }

if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
}
}
// if val, ok := p.ipTracker[ip.String()]; ok {
// if val > CollocationLimit {
// TODO: Remove this once out of devnet.
// return errors.Errorf("colocation limit exceeded: got %d - limit %d", val, CollocationLimit)
// log.WithFields(logrus.Fields{
// "pid": pid,
// "ip": ip.String(),
// "colocationCount": val,
// "colocationLimit": CollocationLimit,
// }).Debug("Colocation limit exceeded. Peer should be banned.")
// }
// }

return nil
}

@@ -3,7 +3,6 @@ package peers_test
import (
"context"
"crypto/rand"
"strconv"
"testing"
"time"

@@ -329,55 +328,56 @@ func TestPeerWithNilChainState(t *testing.T) {
require.Equal(t, resChainState, nothing)
}

func TestPeerBadResponses(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPeerBadResponses(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })

id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
require.NoError(t, err)
{
_, err := id.MarshalBinary()
require.NoError(t, err)
}
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
// require.NoError(t, err)
// {
// _, err := id.MarshalBinary()
// require.NoError(t, err)
// }

assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
direction := network.DirInbound
p.Add(new(enr.Record), id, address, direction)
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
// require.NoError(t, err, "Failed to create address")
// direction := network.DirInbound
// p.Add(new(enr.Record), id, address, direction)

scorer := p.Scorers().BadResponsesScorer()
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// scorer := p.Scorers().BadResponsesScorer()
// resBadResponses, err := scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
}
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
// }

func TestAddMetaData(t *testing.T) {
maxBadResponses := 2
@@ -496,100 +496,102 @@ func TestPeerValidTime(t *testing.T) {
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
}

func TestPrune(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPrune(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })

for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.Disconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.Connected)
}
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// if i%7 == 0 {
// // Peer added as disconnected.
// _ = addPeer(t, p, peers.PeerDisconnected)
// }
// // Peer added to peer handler.
// _ = addPeer(t, p, peers.PeerConnected)
// }

disPeers := p.Disconnected()
firstPID := disPeers[0]
secondPID := disPeers[1]
thirdPID := disPeers[2]
// disPeers := p.Disconnected()
// firstPID := disPeers[0]
// secondPID := disPeers[1]
// thirdPID := disPeers[2]

scorer := p.Scorers().BadResponsesScorer()
// scorer := p.Scorers().BadResponsesScorer()

// Make first peer a bad peer
scorer.Increment(firstPID)
scorer.Increment(firstPID)
// // Make first peer a bad peer
// scorer.Increment(firstPID)
// scorer.Increment(firstPID)

// Add bad response for p2.
scorer.Increment(secondPID)
// // Add bad response for p2.
// scorer.Increment(secondPID)

// Prune peers
p.Prune()
// // Prune peers
// p.Prune()

// Bad peer is expected to still be kept in handler.
badRes, err := scorer.Count(firstPID)
assert.NoError(t, err, "error is supposed to be nil")
assert.Equal(t, 2, badRes, "Did not get expected amount")
// // Bad peer is expected to still be kept in handler.
// badRes, err := scorer.Count(firstPID)
// assert.NoError(t, err, "error is supposed to be nil")
// assert.Equal(t, 2, badRes, "Did not get expected amount")

// Not so good peer is pruned away so that we can reduce the
// total size of the handler.
_, err = scorer.Count(secondPID)
assert.ErrorContains(t, "peer unknown", err)
// // Not so good peer is pruned away so that we can reduce the
// // total size of the handler.
// _, err = scorer.Count(secondPID)
// assert.ErrorContains(t, "peer unknown", err)

// Last peer has been removed.
_, err = scorer.Count(thirdPID)
assert.ErrorContains(t, "peer unknown", err)
}
// // Last peer has been removed.
// _, err = scorer.Count(thirdPID)
// assert.ErrorContains(t, "peer unknown", err)
// }

func TestPeerIPTracker(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// TODO: Uncomment when out of devnet
// func TestPeerIPTracker(t *testing.T) {
// resetCfg := features.InitWithReset(&features.Flags{
// EnablePeerScorer: false,
// })
// defer resetCfg()
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })

badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
}
// badIP := "211.227.218.116"
// var badPeers []peer.ID
// for i := 0; i < peers.CollocationLimit+10; i++ {
// port := strconv.Itoa(3000 + i)
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
// if err != nil {
// t.Fatal(err)
// }
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
// }
// for _, pr := range badPeers {
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
// }

// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.Disconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()
// // Add in bad peers, so that our records are trimmed out
// // from the peer store.
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// // Peer added to peer handler.
// pid := addPeer(t, p, peers.PeerDisconnected)
// p.Scorers().BadResponsesScorer().Increment(pid)
// }
// p.Prune()

for _, pr := range badPeers {
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
// for _, pr := range badPeers {
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
// }
// }

func TestTrimmedOrderedPeers(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{

@@ -26,10 +26,11 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
// -> SyncContributionAndProof * 2 = 2
// -> 4 SyncCommitteeSubnets * 2 = 8
// -> BlsToExecutionChange * 2 = 2
// -> 6 BlobSidecar * 2 = 12
// -> 128 DataColumnSidecar * 2 = 256
// -------------------------------------
// TOTAL = 162
const pubsubSubscriptionRequestLimit = 200
// TOTAL = 406
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {

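For the totals above, the arithmetic works out as follows: the previous budget summed to 162, with BlobSidecar contributing 6 * 2 = 12; dropping that entry and adding 128 DataColumnSidecar subscriptions * 2 = 256 gives 162 - 12 + 256 = 406, which the new limit of 500 covers with headroom, just as the old limit of 200 covered 162.
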
@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
formatting := []interface{}{digest}

// Special case for attestation subnets which have a second formatting placeholder.
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */)
}

@@ -10,11 +10,16 @@ import (
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// SchemaVersionV1 specifies the schema version for our rpc protocol ID.
const SchemaVersionV1 = "/1"
const (
// SchemaVersionV1 specifies the schema version for our rpc protocol ID.
SchemaVersionV1 = "/1"

// SchemaVersionV2 specifies the next schema version for our rpc protocol ID.
const SchemaVersionV2 = "/2"
// SchemaVersionV2 specifies the next schema version for our rpc protocol ID.
SchemaVersionV2 = "/2"

// SchemaVersionV3 specifies the next schema version for our rpc protocol ID.
SchemaVersionV3 = "/3"
)

// Specifies the protocol prefix for all our Req/Resp topics.
const protocolPrefix = "/eth2/beacon_chain/req"
@@ -43,6 +48,12 @@ const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"

// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"

// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
const DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"

const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -60,11 +71,17 @@ const (

// RPCBlobSidecarsByRangeTopicV1 is a topic for requesting blob sidecars
// in the slot range [start_slot, start_slot + count), leading up to the current head block as selected by fork choice.
// Protocol ID: /eth2/beacon_chain/req/blob_sidecars_by_range/1/ - New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_range/1/ - New in deneb.
RPCBlobSidecarsByRangeTopicV1 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV1
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/ - New in deneb.
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root.
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1 - New in Fulu.
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1

// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
@@ -73,6 +90,10 @@ const (
RPCBlocksByRootTopicV2 = protocolPrefix + BeaconBlocksByRootsMessageName + SchemaVersionV2
// RPCMetaDataTopicV2 defines the v2 topic for the metadata rpc method.
RPCMetaDataTopicV2 = protocolPrefix + MetadataMessageName + SchemaVersionV2

// V3 RPC Topics
// RPCMetaDataTopicV3 defines the v3 topic for the metadata rpc method.
RPCMetaDataTopicV3 = protocolPrefix + MetadataMessageName + SchemaVersionV3
)

// RPC errors for topic parsing.
@@ -97,10 +118,15 @@ var RPCTopicMappings = map[string]interface{}{
// RPC Metadata Message
RPCMetaDataTopicV1: new(interface{}),
RPCMetaDataTopicV2: new(interface{}),
RPCMetaDataTopicV3: new(interface{}),
// BlobSidecarsByRange v1 Message
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
// BlobSidecarsByRoot v1 Message
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
// DataColumnSidecarsByRange v1 Message
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnSidecarsByRootReq),
}

// Maps all registered protocol prefixes.
@@ -119,6 +145,8 @@ var messageMapping = map[string]bool{
MetadataMessageName: true,
BlobSidecarsByRangeName: true,
BlobSidecarsByRootName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
}

// Maps all the RPC messages which are to be updated in altair.
@@ -128,9 +156,15 @@ var altairMapping = map[string]bool{
MetadataMessageName: true,
}

// Maps all the RPC messages which are to be updated in fulu.
var fuluMapping = map[string]bool{
MetadataMessageName: true,
}

var versionMapping = map[string]bool{
SchemaVersionV1: true,
SchemaVersionV2: true,
SchemaVersionV3: true,
}

// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
@@ -258,13 +292,25 @@ func (r RPCTopic) Version() string {
// TopicFromMessage constructs the rpc topic from the provided message
// type and epoch.
func TopicFromMessage(msg string, epoch primitives.Epoch) (string, error) {
// Check if the topic is known.
if !messageMapping[msg] {
return "", errors.Errorf("%s: %s", invalidRPCMessageType, msg)
}

// Base version is version 1.
version := SchemaVersionV1

// Check if the message is to be updated in altair.
isAltair := epoch >= params.BeaconConfig().AltairForkEpoch
if isAltair && altairMapping[msg] {
version = SchemaVersionV2
}

// Check if the message is to be updated in fulu.
isFulu := epoch >= params.BeaconConfig().FuluForkEpoch
if isFulu && fuluMapping[msg] {
version = SchemaVersionV3
}

return protocolPrefix + msg + version, nil
}

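To make the fork-aware versioning concrete, a standalone sketch of the same selection logic with the fork epochs passed in explicitly (illustrative only: the mainnet Altair fork epoch is 74240, the Fulu epoch below is a made-up placeholder, and "/metadata" assumes Prysm's MetadataMessageName resolves to that string):

package main

import "fmt"

// topicVersion reproduces TopicFromMessage's version selection for a
// message that is remapped in both altair and fulu (e.g. metadata).
func topicVersion(epoch, altairEpoch, fuluEpoch uint64) string {
	version := "/1"
	if epoch >= altairEpoch {
		version = "/2"
	}
	if epoch >= fuluEpoch {
		version = "/3"
	}
	return version
}

func main() {
	const prefix = "/eth2/beacon_chain/req/metadata"
	altair, fulu := uint64(74240), uint64(500000) // fulu value is a placeholder
	for _, epoch := range []uint64{0, 74240, 500000} {
		fmt.Println(prefix + topicVersion(epoch, altair, fulu))
	}
	// Output:
	// /eth2/beacon_chain/req/metadata/1
	// /eth2/beacon_chain/req/metadata/2
	// /eth2/beacon_chain/req/metadata/3
}
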
@@ -42,7 +42,7 @@ func (s *Service) Send(ctx context.Context, message interface{}, baseTopic strin
return nil, err
}
// do not encode anything if we are sending a metadata request
if baseTopic != RPCMetaDataTopicV1 && baseTopic != RPCMetaDataTopicV2 {
if baseTopic != RPCMetaDataTopicV1 && baseTopic != RPCMetaDataTopicV2 && baseTopic != RPCMetaDataTopicV3 {
castedMsg, ok := message.(ssz.Marshaler)
if !ok {
return nil, errors.Errorf("%T does not support the ssz marshaller interface", message)

@@ -363,6 +363,15 @@ func (s *Service) ENR() *enr.Record {
return s.dv5Listener.Self().Record()
}

// NodeID returns the local node's node ID
// for discovery.
func (s *Service) NodeID() enode.ID {
if s.dv5Listener == nil {
return [32]byte{}
}
return s.dv5Listener.Self().ID()
}

// DiscoveryAddresses represents our enr addresses as multiaddresses.
func (s *Service) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
if s.dv5Listener == nil {

@@ -16,8 +16,6 @@ import (
"github.com/multiformats/go-multiaddr"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -358,48 +356,49 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
return fd
}

func TestService_connectWithPeer(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
peers *peers.Status
info peer.AddrInfo
wantErr string
}{
{
name: "bad peer",
peers: func() *peers.Status {
ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
for i := 0; i < 10; i++ {
ps.Scorers().BadResponsesScorer().Increment("bad")
}
return ps
}(),
info: peer.AddrInfo{ID: "bad"},
wantErr: "refused to connect to bad peer",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h, _, _ := createHost(t, 34567)
defer func() {
if err := h.Close(); err != nil {
t.Fatal(err)
}
}()
ctx := context.Background()
s := &Service{
host: h,
peers: tt.peers,
}
err := s.connectWithPeer(ctx, tt.info)
if len(tt.wantErr) > 0 {
require.ErrorContains(t, tt.wantErr, err)
} else {
require.NoError(t, err)
}
})
}
}
// TODO: Uncomment when out of devnet
// func TestService_connectWithPeer(t *testing.T) {
// params.SetupTestConfigCleanup(t)
// tests := []struct {
// name string
// peers *peers.Status
// info peer.AddrInfo
// wantErr string
// }{
// {
// name: "bad peer",
// peers: func() *peers.Status {
// ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
// ScorerParams: &scorers.Config{},
// })
// for i := 0; i < 10; i++ {
// ps.Scorers().BadResponsesScorer().Increment("bad")
// }
// return ps
// }(),
// info: peer.AddrInfo{ID: "bad"},
// wantErr: "refused to connect to bad peer",
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// h, _, _ := createHost(t, 34567)
// defer func() {
// if err := h.Close(); err != nil {
// t.Fatal(err)
// }
// }()
// ctx := context.Background()
// s := &Service{
// host: h,
// peers: tt.peers,
// }
// err := s.connectWithPeer(ctx, tt.info)
// if len(tt.wantErr) > 0 {
// require.ErrorContains(t, tt.wantErr, err)
// } else {
// require.NoError(t, err)
// }
// })
// }
// }

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -29,8 +30,9 @@ var (
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount

attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
)

// The value used with the subnet, in order
@@ -47,7 +49,14 @@ const syncLockerVal = 100
// chosen more than sync and attestation subnet combined.
const blobSubnetLockerVal = 110

// nodeFilter return a function that filters nodes based on the subnet topic and subnet index.
// The value used with the data column sidecar subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// data column subnets from others. This is deliberately
// chosen more than sync, attestation and blob subnet (6) combined.
const dataColumnSubnetVal = 150

// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node) bool, error) {
switch {
case strings.Contains(topic, GossipAttestationMessage):
@@ -56,6 +65,8 @@ func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node)
return s.filterPeerForSyncSubnet(index), nil
case strings.Contains(topic, GossipBlobSidecarMessage):
return s.filterPeerForBlobSubnet(), nil
case strings.Contains(topic, GossipDataColumnSidecarMessage):
return s.filterPeerForDataColumnsSubnet(index), nil
default:
return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
}
@@ -276,6 +287,22 @@ func (s *Service) filterPeerForBlobSubnet() func(_ *enode.Node) bool {
}
}

// returns a method that filters peers specifically for a particular data column subnet.
func (s *Service) filterPeerForDataColumnsSubnet(index uint64) func(node *enode.Node) bool {
return func(node *enode.Node) bool {
if !s.filterPeer(node) {
return false
}

subnets, err := dataColumnSubnets(node.ID(), node.Record())
if err != nil {
return false
}

return subnets[index]
}
}

// lower threshold to broadcast object compared to searching
// for a subnet. So that even in the event of poor peer
// connectivity, we can still broadcast an attestation.
@@ -321,6 +348,35 @@ func (s *Service) updateSubnetRecordWithMetadataV2(bitVAtt bitfield.Bitvector64,
})
}

// updateSubnetRecordWithMetadataV3 updates:
// - attestation subnets tracked,
// - sync subnets tracked, and
// - custody group count
// both in the node's record and in the node's metadata.
func (s *Service) updateSubnetRecordWithMetadataV3(
bitVAtt bitfield.Bitvector64,
bitVSync bitfield.Bitvector4,
custodyGroupCount uint64,
) {
attSubnetsEntry := enr.WithEntry(attSubnetEnrKey, &bitVAtt)
syncSubnetsEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync)
custodyGroupCountEntry := enr.WithEntry(custodyGroupCountEnrKey, custodyGroupCount)

localNode := s.dv5Listener.LocalNode()
localNode.Set(attSubnetsEntry)
localNode.Set(syncSubnetsEntry)
localNode.Set(custodyGroupCountEntry)

newSeqNumber := s.metaData.SequenceNumber() + 1

s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{
SeqNumber: newSeqNumber,
Attnets: bitVAtt,
Syncnets: bitVSync,
CustodyGroupCount: custodyGroupCount,
})
}

func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
_, ok, expTime := cache.SubnetIDs.GetPersistentSubnets()
if ok && expTime.After(time.Now()) {
@@ -458,6 +514,25 @@ func syncSubnets(record *enr.Record) ([]uint64, error) {
return committeeIdxs, nil
}

// Retrieve the data columns subnets from a node's ENR and node ID.
// TODO: Add tests
func dataColumnSubnets(nodeID enode.ID, record *enr.Record) (map[uint64]bool, error) {
// Retrieve the custody count from the ENR.
custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
if err != nil {
return nil, errors.Wrap(err, "custody group count from record")
}

// Retrieve the peer info.
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}

// Get custody columns subnets from the columns.
return peerInfo.DataColumnsSubnets, nil
}

// Parses the attestation subnets ENR entry in a node and extracts its value
// as a bitvector for further manipulation.
func attBitvector(record *enr.Record) (bitfield.Bitvector64, error) {
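The TODO on dataColumnSubnets above asks for tests; one plausible shape, sketched under assumptions (that an enr.Record carrying only the custody-group-count entry is accepted by peerdas.CustodyGroupCountFromRecord, and that the derived subnets are deterministic for a given node ID and count):

func TestDataColumnSubnets(t *testing.T) {
	// Hypothetical custody group count advertised in the record.
	record := new(enr.Record)
	record.Set(enr.WithEntry(custodyGroupCountEnrKey, uint64(4)))

	var nodeID enode.ID // the zero ID is enough for a determinism check

	subnets, err := dataColumnSubnets(nodeID, record)
	require.NoError(t, err)

	// The mapping must be deterministic for a fixed (node ID, custody count).
	again, err := dataColumnSubnets(nodeID, record)
	require.NoError(t, err)
	require.DeepEqual(t, subnets, again)
}
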
@@ -487,6 +562,7 @@ func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {
// between both the attestation, sync and blob subnets.
// Sync subnets are stored by (subnet+syncLockerVal).
// Blob subnets are stored by (subnet+blobSubnetLockerVal).
// Data column subnets are stored by (subnet+dataColumnSubnetVal).
// This is to prevent conflicts while allowing subnets
// to use a single locker.
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {

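The offsets keep every subnet family in a disjoint key range, so a single locker map can serve all of them. A tiny illustrative sketch of the resulting keys, using the constants from the diff:

package main

import "fmt"

const (
	syncLockerVal       = 100 // sync subnets: keys 100+
	blobSubnetLockerVal = 110 // blob subnets: keys 110+
	dataColumnSubnetVal = 150 // data column subnets: keys 150+
)

func main() {
	// Subnet index 3 in each family resolves to a distinct locker key,
	// while attestation subnets use the raw index.
	fmt.Println(3)                       // attestation: 3
	fmt.Println(3 + syncLockerVal)       // sync: 103
	fmt.Println(3 + blobSubnetLockerVal) // blob: 113
	fmt.Println(3 + dataColumnSubnetVal) // data column: 153
}
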
@@ -17,9 +17,12 @@ go_library(
"//beacon-chain:__subpackages__",
],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//testing/require:go_default_library",

@@ -3,6 +3,7 @@ package testing
import (
"context"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/control"
@@ -12,6 +13,7 @@ import (
"github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
"google.golang.org/protobuf/proto"
@@ -55,6 +57,11 @@ func (*FakeP2P) ENR() *enr.Record {
return new(enr.Record)
}

// NodeID returns the node id of the local peer.
func (*FakeP2P) NodeID() enode.ID {
return [32]byte{}
}

// DiscoveryAddresses -- fake
func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
@@ -148,6 +155,11 @@ func (*FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar)
return nil
}

// BroadcastDataColumn -- fake.
func (*FakeP2P) BroadcastDataColumn(_ context.Context, _ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
return nil
}

// InterceptPeerDial -- fake.
func (*FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
return true
@@ -172,3 +184,15 @@ func (*FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiad
func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
return true, 0
}

func (*FakeP2P) CustodyGroupCountFromPeer(peer.ID) uint64 {
return 0
}

func (*FakeP2P) AdmissibleCustodyGroupsPeers(peers []peer.ID) ([]peer.ID, error) {
return peers, nil
}

func (*FakeP2P) AdmissibleCustodySamplingPeers(peers []peer.ID) ([]peer.ID, error) {
return peers, nil
}

Some files were not shown because too many files have changed in this diff.