Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00

Compare commits: master...peerdas-de (183 commits)
Commits in this range (SHA1 only; author and date columns were empty):

5960a6bbab 725dda51c3 6a44fef25e 654f927fc2 08bb7c72fb 9f91957ec3 9a80f043de 8192e653e7
613c9c5de9 731a6fec5c ffc794182f c7bd2d11b2 02a6c034a7 584ef60ab0 bc017ed493 22b3327a97
aa84fbc597 c6e8c1a380 7b9eef4a45 5dfef04a6b 6d16ed4a89 799ed429ca 30ee3a7225 07f73857f9
5c76dc6c3a fa81334983 7fd169da5f ee4e09effe 91738a24fa 8abc5e159a b1ac53c4dd 27ab68c856
ddf5a3953b 92d2fc101d 8996000d2b a2fcba2349 abe8638991 0b5064b474 da9d4cf5b9 56208aa84d
b866a2c744 a77234e637 a62cca15dd e0e7354708 0f86a16915 972c22b02f 93c27340e4 c3edb32558
3baaa732df 8ceb7e76ea 4d5dddd302 55efccb07f 961d8e1481 d396a9931e e3f8f121f4 80f29e9eda
8995d8133a 31044206b8 ac04246a2a 0923145bd7 3a1702e56f 501ec74a48 c248fe0bb3 215fbcb2e4
e39f44b529 a216cb4105 9eff6ae476 3eec5a5cb6 66878deb2c 0b6e1711e4 15025837bb 0229a2055e
eb9af15c7a 0584746815 01705d1f3d 14f93b4e9d ad11036c36 632a06076b 242c2b0268 19662da905
7faee5af35 805ee1bf31 bea46fdfa1 f6b1fb1c88 6fb349ea76 e5a425f5c7 f157d37e4c 5f08559bef
a082d2aecd bcfaff8504 d8e09c346f 876519731b de05b83aca 56c73e7193 859ac008a8 f882bd27c8
361e5759c1 34ef0da896 726e8b962f 453ea01deb 6537f8011e 5f17317c1c 3432ffa4a3 9dac67635b
9be69fbd07 e21261e893 da53a8fc48 a14634e656 43761a8066 01dbc337c0 92f9b55fcb f65f12f58b
f2b61a3dcf 77a6d29a2e 31d16da3a0 19221b77bd 83df293647 c20c09ce36 2191faaa3f 2de1e6f3e4
db44df3964 f92eb44c89 a26980b64d f58cf7e626 68da7dabe2 d1e43a2c02 3652bec2f8 81b7a1725f
0c917079c4 a732fe7021 d75a7aae6a e788a46e82 199543125a ca63efa770 345e6edd9c 6403064126
0517d76631 000d480f77 b40a8ed37e d21c2bd63e 7a256e93f7 07fe76c2da 54affa897f ac4c5fae3c
2845d87077 dc2c90b8ed b469157e1f 2697794e58 48cf24edb4 78f90db90b d0a3b9bc1d bfdb6dab86
7dd2fd52af b6bad9331b 6e2122085d 7a847292aa 81f4db0afa a7dc2e6c8b 0a010b5088 1e335e2cf2
42f4c0f14e d3c12abe25 b0ba05b4f4 e206506489 013cb28663 496914cb39 c032e78888 5e4deff6fd
6daa91c465 32ce6423eb b0ea450df5 8bd10df423 dcbb543be2 be0580e1a9 1355178115 b78c3485b9
f503efc6ed 1bfbd3980e 3e722ea1bc d844026433 9ffc19d5ef 3e23f6e879 c688c84393
.bazelrc (1 changed line)
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true
CHANGELOG.md (137 changed lines)
@@ -4,6 +4,141 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v5.3.0](https://github.com/prysmaticlabs/prysm/compare/v5.2.0...v5.3.0) - 2025-02-12

This release includes support for Pectra activation in the [Holesky](https://github.com/eth-clients/holesky) and [Sepolia](https://github.com/eth-clients/sepolia) testnets! The release contains many fixes for Electra that have been found in rigorous testing through devnets in the last few months.

For mainnet, we have a few nice features for you to try:

- [PR #14023](https://github.com/prysmaticlabs/prysm/pull/14023) introduces a new file layout structure for storing blobs. Rather than storing all blob root directories in one parent directory, blob root directories are organized in subdirectories by epoch. This should vastly decrease the blob cache warmup time when Prysm is starting. Try this feature with `--blob-storage-layout=by-epoch`.

Updating to this release is **required** for Holesky and Sepolia operators and it is **recommended** for mainnet users as there are a few bug fixes that apply to deneb logic.

### Added

- Added an error field to log `Finished building block`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14696)
- Implemented a new `EmptyExecutionPayloadHeader` function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Added proper gas limit check for header from the builder. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14707)
- `Finished building block`: Display error only if not nil. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14722)
- Added light client feature flag check to RPC handlers. [PR](https://github.com/prysmaticlabs/prysm/pull/14736). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Added support to update target and max blob count to different values per hard fork config. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14678)
- Log before blob filesystem cache warm-up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- New design for the attestation pool. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14324)
- Add field param placeholder for Electra blob target and max to pass spec tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14733)
- Light client: Add better error handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14749). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Add EIP-7691: Blob throughput increase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14750)
- Trace IDONTWANT Messages in Pubsub. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14778)
- Add Fulu fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14771)
- DB optimization for saving light client bootstraps (save unique sync committees only). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Separate type for unaggregated network attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14659)
- Remote signer electra fork support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14477)
- Add Electra test case to rewards API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14816)
- Update `proto_test.go` to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14817)
- Update slasher service to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14812)
- Builder API endpoint to support Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14344)
- Added protoc toolchains with a version of v25.3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Add test cases for the eth_lightclient_bootstrap API SSZ support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14824)
- Handle `AttesterSlashingElectra` everywhere in the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14823)
- Add Beacon DB pruning service to prune historical data older than MIN_EPOCHS_FOR_BLOCK_REQUESTS (roughly equivalent to the weak subjectivity period). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14687)
- Nil consolidation request check for core processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14851)
- Updated blob sidecar api endpoint for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Slashing pool service to convert slashings from Phase0 to Electra at the fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14844)
- Check to stop eth1 voting after Electra once eth1 deposits stop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14835)
- WARN log message on node startup advising of the upcoming deprecation of the --enable-historical-state-representation feature flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14856)
- Beacon API event support for `SingleAttestation` and `SignedAggregateAttestationAndProofElectra`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14855)
- Added Electra tests for `TestLightClient_NewLightClientOptimisticUpdateFromBeaconState` and `TestLightClient_NewLightClientFinalityUpdateFromBeaconState`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14783)
- New option to select an alternate blob storage layout. Rather than a flat directory with a subdir for each block root, a multi-level scheme is used to organize blobs by epoch/slot/root, enabling leaner syscalls, indexing and pruning. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14023)
- Send pending att queue's attestations through the notification feed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14862)
- Prune all pending deposits and proofs in post-Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14829)
- Add Pectra testnet dates (Sepolia and Holesky). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14884)

### Changed

- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Refactor subnets subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14711)
- Refactor RPC handlers subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14732)
- Go deps upgrade, from `ioutil` to `io`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14737)
- Move successfully registered validator(s) on builder log to debug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- Update some test files to use `crypto/rand` instead of `math/rand`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14747)
- Re-organize the content of the `*.proto` files (No functional change). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14755)
- SSZ files generation: Remove the `// Hash: ...` header. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14760)
- Updated Electra spec definition for `process_epoch`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14768)
- Update our `go-libp2p-pubsub` dependency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14770)
- Re-organize the content of files to ease the creation of a new fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14761)
- Updated spec definition electra `process_registry_updates`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14767)
- Fixed Metadata errors for peers connected via QUIC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14776)
- Updated spec definitions for `process_slashings` in godocs. Simplified `ProcessSlashings` API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14766)
- Update spec tests to v1.5.0-beta.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14788)
- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14718)
- Update blobs by rpc topics from V2 to V1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14785)
- Updated geth to 1.14~. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- E2e tests start from bellatrix. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Version pinning unclog after making some ux improvements. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14802)
- Remove helpers to check for execution/compounding withdrawal credentials and expose them as methods. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14808)
- Refactor `2006-01-02 15:04:05` to `time.DateTime`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14792)
- Updated Prysm to Go v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated Bazel version to v7.4.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated rules_go to v0.46.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated golang.org/x/tools to be compatible with v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- CI now requires proto files to be properly formatted with clang-format. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14831)
- Improved test coverage of beacon-chain/core/electra/churn.go. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14837)
- Update electra spec test to beta1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14841)
- Move deposit request nil check to apply all. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14849)
- Do not mark blocks as invalid on context deadlines during state transition. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14838)
- Update electra core processing to not mark block bad if execution request error. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14826)
- Dependency: Updated go-ethereum to v1.14.13. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14872)
- Improved readability of the proposer settings loader. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14868)
- Removes existing validator.processSlot span and adds validator.processSlot span to slotCtx. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14874)
- DownloadFinalizedData has moved from the api/client package to beacon-chain/sync/checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14871)
- Updated Blob-Batch-Limit to increase to 192 for electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated Blob-Batch-Limit-Burst-Factor to increase to 3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Changed the derived batch limit when serving blobs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated go-libp2p-pubsub to v0.13.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14890)
- Rename light client flag from `enable-lightclient` to `enable-light-client`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14887)
- Update electra spec test to beta2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14901)

### Removed

- Cleanup ProcessSlashings method to remove unnecessary argument. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14762)
- Remove `/proto/eth/v2` directory. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14765)
- Remove `/memsize/` pprof endpoint as it will no longer be supported in go 1.23. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Clean `TestCanUpgrade*` tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14791)
- Remove `Copy()` from the `ReadOnlyBeaconBlock` interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14811)
- Removed a tracing span on signature requests. These requests usually took less than 5 nanoseconds and are generally not worth tracing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14864)

### Fixed

- Added check to prevent nil pointer dereference or out of bounds array access when validating the BLSToExecutionChange on an impossibly nil validator. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14705)
- EIP-7691: Ensure new blobs subnets are subscribed on epoch in advance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14759)
- Fix kzg commitment inclusion proof depth minimal value. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14787)
- Replace exampleIP to `96.7.129.13`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14795)
- Fixed a p2p test to reliably return a static IP through DNS resolution. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14800)
- `ToBlinded`: Use Fulu struct for Fulu (instead of Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14797)
- Fix panic with type cast on pbgenericblock(). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14801)
- Prysmctl generate genesis state: fix truncation of ExtraData to 32 bytes to satisfy SSZ marshaling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14803)
- Added conditional evaluators to fix scenario e2e tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14798)
- Use `SingleAttestation` for Fulu in p2p attestation map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14809)
- `UpgradeToFulu`: Respect the specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14821)
- `nodeFilter`: Implement `filterPeerForBlobSubnet` to avoid error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14822)
- Fixed deposit packing for post-Electra: early return if EIP-6110 is applied. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14697)
- Fix batch process new pending deposits by getting validators from state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14827)
- Fix handling unfound block at slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Fixed incorrect attester slashing length check. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14833)
- Fix monitor service for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14853)
- Add more nil checks on ToConsensus functions for added safety. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14867)
- Fix Electra state to safely share references on pending fields when appending. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14895)
- Add missing config values from the spec. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14903)
- We remove the unused `rebuildTrie` assignments for fields which do not use them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14906)
- Fix block API endpoint to handle blocks with the same structure but on different forks (i.e. Fulu and Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14897)
- We change how we track blob indexes during their reconstruction from the EL to prevent. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14909)
- We now use the correct maximum value when serving blobs for electra blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14910)

### Security

- Go version upgrade to 1.22.10 for CVE-2024-34156. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14729)
- Update golang.org/x/crypto to v0.31.0 to address CVE-2024-45337. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14777)
- Update golang.org/x/net to v0.33.0 to address CVE-2024-45338. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14780)

## [v5.2.0](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...v5.2.0)

Updating to this release is highly recommended, especially for users running v5.1.1 or v5.1.2.
@@ -2987,4 +3122,4 @@ There are no security updates in this release.

# Older than v2.0.0

For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
@@ -55,7 +55,7 @@ bazel build //beacon-chain --config=release
## Adding / updating dependencies

1. Add your dependency as you would with go modules. I.e. `go get ...`
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod` to update the bazel managed dependencies.
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true` to update the bazel managed dependencies.

Example:

WORKSPACE (10 changed lines)
@@ -255,7 +255,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.5.0-beta.2"
consensus_spec_version = "v1.5.0-alpha.10"

bls_test_version = "v0.1.1"

@@ -271,7 +271,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-X/bMxbKg1clo2aFEjBoeuFq/U+BF1eQopgRP/7nI3Qg=",
integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -287,7 +287,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-WSxdri5OJGuNApW+odKle5UzToDyEOx+F3lMiqamJAg=",
integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -303,7 +303,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-LYE8l3y/zSt4YVrehrJ3ralqtgeYNildiIp+HR6+xAI=",
integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -318,7 +318,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-jvZQ90qcJMTOqMsPO7sgeEVQmewZTHcz7LVDkNqwTFQ=",
integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -46,6 +46,7 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

@@ -154,6 +154,10 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
if err != nil {
return
}
if method == http.MethodPost {
req.Header.Set("Content-Type", api.JsonMediaType)
}
req.Header.Set("Accept", api.JsonMediaType)
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
@@ -12,6 +12,7 @@ import (
"testing"

"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -89,6 +90,8 @@ func TestClient_RegisterValidator(t *testing.T) {
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
body, err := io.ReadAll(r.Body)
defer func() {
require.NoError(t, r.Body.Close())
@@ -364,8 +367,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -392,8 +395,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -423,8 +426,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)
@@ -579,14 +579,14 @@ type SignedBeaconBlockContentsFulu struct {
}

type BeaconBlockContentsFulu struct {
Block *BeaconBlockFulu `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
Block *BeaconBlockElectra `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}

type SignedBeaconBlockFulu struct {
Message *BeaconBlockFulu `json:"message"`
Signature string `json:"signature"`
Message *BeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}

var _ SignedMessageJsoner = &SignedBeaconBlockFulu{}
@@ -599,36 +599,12 @@ func (s *SignedBeaconBlockFulu) SigString() string {
return s.Signature
}

type BeaconBlockFulu struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BeaconBlockBodyFulu `json:"body"`
}

type BeaconBlockBodyFulu struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}

type BlindedBeaconBlockFulu struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyFulu `json:"body"`
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyElectra `json:"body"`
}

type SignedBlindedBeaconBlockFulu struct {
@@ -645,19 +621,3 @@ func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
return s.Signature
}

type BlindedBeaconBlockBodyFulu struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
@@ -3365,285 +3365,6 @@ func (b *BeaconBlockContentsFulu) ToConsensus() (*eth.BeaconBlockContentsFulu, e
}, nil
}

func (b *BeaconBlockFulu) ToConsensus() (*eth.BeaconBlockFulu, error) {
if b == nil {
return nil, errNilValue
}
if b.Body == nil {
return nil, server.NewDecodeError(errNilValue, "Body")
}
if b.Body.Eth1Data == nil {
return nil, server.NewDecodeError(errNilValue, "Body.Eth1Data")
}
if b.Body.SyncAggregate == nil {
return nil, server.NewDecodeError(errNilValue, "Body.SyncAggregate")
}
if b.Body.ExecutionPayload == nil {
return nil, server.NewDecodeError(errNilValue, "Body.ExecutionPayload")
}

slot, err := strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Slot")
}
proposerIndex, err := strconv.ParseUint(b.ProposerIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ProposerIndex")
}
parentRoot, err := bytesutil.DecodeHexWithLength(b.ParentRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ParentRoot")
}
stateRoot, err := bytesutil.DecodeHexWithLength(b.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "StateRoot")
}
randaoReveal, err := bytesutil.DecodeHexWithLength(b.Body.RandaoReveal, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.RandaoReveal")
}
depositRoot, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.DepositRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositRoot")
}
depositCount, err := strconv.ParseUint(b.Body.Eth1Data.DepositCount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositCount")
}
blockHash, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.Eth1Data.BlockHash")
}
graffiti, err := bytesutil.DecodeHexWithLength(b.Body.Graffiti, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.Graffiti")
}
proposerSlashings, err := ProposerSlashingsToConsensus(b.Body.ProposerSlashings)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ProposerSlashings")
}
attesterSlashings, err := AttesterSlashingsElectraToConsensus(b.Body.AttesterSlashings)
if err != nil {
return nil, server.NewDecodeError(err, "Body.AttesterSlashings")
}
atts, err := AttsElectraToConsensus(b.Body.Attestations)
if err != nil {
return nil, server.NewDecodeError(err, "Body.Attestations")
}
deposits, err := DepositsToConsensus(b.Body.Deposits)
if err != nil {
return nil, server.NewDecodeError(err, "Body.Deposits")
}
exits, err := SignedExitsToConsensus(b.Body.VoluntaryExits)
if err != nil {
return nil, server.NewDecodeError(err, "Body.VoluntaryExits")
}
syncCommitteeBits, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeBits, fieldparams.SyncAggregateSyncCommitteeBytesLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeBits")
}
syncCommitteeSig, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeSignature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeSignature")
}
payloadParentHash, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.ParentHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ParentHash")
}
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.FeeRecipient")
}
payloadStateRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.StateRoot")
}
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.ReceiptsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ReceiptsRoot")
}
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.LogsBloom, fieldparams.LogsBloomLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.LogsBloom")
}
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.PrevRandao")
}
payloadBlockNumber, err := strconv.ParseUint(b.Body.ExecutionPayload.BlockNumber, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlockNumber")
}
payloadGasLimit, err := strconv.ParseUint(b.Body.ExecutionPayload.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.GasLimit")
}
payloadGasUsed, err := strconv.ParseUint(b.Body.ExecutionPayload.GasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.GasUsed")
}
payloadTimestamp, err := strconv.ParseUint(b.Body.ExecutionPayload.Timestamp, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayloadHeader.Timestamp")
}
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(b.Body.ExecutionPayload.ExtraData, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExtraData")
}
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(b.Body.ExecutionPayload.BaseFeePerGas)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BaseFeePerGas")
}
payloadBlockHash, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.BlockHash, common.HashLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlockHash")
}
err = slice.VerifyMaxLength(b.Body.ExecutionPayload.Transactions, fieldparams.MaxTxsPerPayloadLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.Transactions")
}
txs := make([][]byte, len(b.Body.ExecutionPayload.Transactions))
for i, tx := range b.Body.ExecutionPayload.Transactions {
txs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Transactions[%d]", i))
}
}
err = slice.VerifyMaxLength(b.Body.ExecutionPayload.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.Withdrawals")
}
withdrawals := make([]*enginev1.Withdrawal, len(b.Body.ExecutionPayload.Withdrawals))
for i, w := range b.Body.ExecutionPayload.Withdrawals {
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
}
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
}
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].Amount", i))
}
withdrawals[i] = &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
Address: address,
Amount: amount,
}
}

payloadBlobGasUsed, err := strconv.ParseUint(b.Body.ExecutionPayload.BlobGasUsed, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlobGasUsed")
}
payloadExcessBlobGas, err := strconv.ParseUint(b.Body.ExecutionPayload.ExcessBlobGas, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExcessBlobGas")
}

if b.Body.ExecutionRequests == nil {
return nil, server.NewDecodeError(errors.New("nil execution requests"), "Body.ExequtionRequests")
}

depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionRequests.Deposits))
for i, d := range b.Body.ExecutionRequests.Deposits {
depositRequests[i], err = d.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Deposits[%d]", i))
}
}

withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionRequests.Withdrawals))
for i, w := range b.Body.ExecutionRequests.Withdrawals {
withdrawalRequests[i], err = w.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Withdrawals[%d]", i))
}
}

consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionRequests.Consolidations))
for i, c := range b.Body.ExecutionRequests.Consolidations {
consolidationRequests[i], err = c.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Consolidations[%d]", i))
}
}

blsChanges, err := SignedBLSChangesToConsensus(b.Body.BLSToExecutionChanges)
if err != nil {
return nil, server.NewDecodeError(err, "Body.BLSToExecutionChanges")
}
err = slice.VerifyMaxLength(b.Body.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
if err != nil {
return nil, server.NewDecodeError(err, "Body.BlobKzgCommitments")
}
blobKzgCommitments := make([][]byte, len(b.Body.BlobKzgCommitments))
for i, b := range b.Body.BlobKzgCommitments {
kzg, err := bytesutil.DecodeHexWithLength(b, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.BlobKzgCommitments[%d]", i))
}
blobKzgCommitments[i] = kzg
}
return &eth.BeaconBlockFulu{
Slot: primitives.Slot(slot),
ProposerIndex: primitives.ValidatorIndex(proposerIndex),
ParentRoot: parentRoot,
StateRoot: stateRoot,
Body: &eth.BeaconBlockBodyFulu{
RandaoReveal: randaoReveal,
Eth1Data: &eth.Eth1Data{
DepositRoot: depositRoot,
DepositCount: depositCount,
BlockHash: blockHash,
},
Graffiti: graffiti,
ProposerSlashings: proposerSlashings,
AttesterSlashings: attesterSlashings,
Attestations: atts,
Deposits: deposits,
VoluntaryExits: exits,
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: syncCommitteeSig,
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
Transactions: txs,
Withdrawals: withdrawals,
BlobGasUsed: payloadBlobGasUsed,
ExcessBlobGas: payloadExcessBlobGas,
},
BlsToExecutionChanges: blsChanges,
BlobKzgCommitments: blobKzgCommitments,
ExecutionRequests: &enginev1.ExecutionRequests{
Deposits: depositRequests,
Withdrawals: withdrawalRequests,
Consolidations: consolidationRequests,
},
},
}, nil
}

func (b *SignedBeaconBlockFulu) ToConsensus() (*eth.SignedBeaconBlockFulu, error) {
if b == nil {
return nil, errNilValue
@@ -3898,7 +3619,7 @@ func (b *BlindedBeaconBlockFulu) ToConsensus() (*eth.BlindedBeaconBlockFulu, err
ProposerIndex: primitives.ValidatorIndex(proposerIndex),
ParentRoot: parentRoot,
StateRoot: stateRoot,
Body: &eth.BlindedBeaconBlockBodyFulu{
Body: &eth.BlindedBeaconBlockBodyElectra{
RandaoReveal: randaoReveal,
Eth1Data: &eth.Eth1Data{
DepositRoot: depositRoot,
@@ -4015,7 +3736,7 @@ func BlindedBeaconBlockFuluFromConsensus(b *eth.BlindedBeaconBlockFulu) (*Blinde
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: hexutil.Encode(b.ParentRoot),
StateRoot: hexutil.Encode(b.StateRoot),
Body: &BlindedBeaconBlockBodyFulu{
Body: &BlindedBeaconBlockBodyElectra{
RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
Graffiti: hexutil.Encode(b.Body.Graffiti),
@@ -4047,42 +3768,6 @@ func SignedBlindedBeaconBlockFuluFromConsensus(b *eth.SignedBlindedBeaconBlockFu
}, nil
}

func BeaconBlockFuluFromConsensus(b *eth.BeaconBlockFulu) (*BeaconBlockFulu, error) {
payload, err := ExecutionPayloadFuluFromConsensus(b.Body.ExecutionPayload)
if err != nil {
return nil, err
}
blobKzgCommitments := make([]string, len(b.Body.BlobKzgCommitments))
for i := range b.Body.BlobKzgCommitments {
blobKzgCommitments[i] = hexutil.Encode(b.Body.BlobKzgCommitments[i])
}

return &BeaconBlockFulu{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: hexutil.Encode(b.ParentRoot),
StateRoot: hexutil.Encode(b.StateRoot),
Body: &BeaconBlockBodyFulu{
RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
Graffiti: hexutil.Encode(b.Body.Graffiti),
ProposerSlashings: ProposerSlashingsFromConsensus(b.Body.ProposerSlashings),
AttesterSlashings: AttesterSlashingsElectraFromConsensus(b.Body.AttesterSlashings),
Attestations: AttsElectraFromConsensus(b.Body.Attestations),
Deposits: DepositsFromConsensus(b.Body.Deposits),
VoluntaryExits: SignedExitsFromConsensus(b.Body.VoluntaryExits),
SyncAggregate: &SyncAggregate{
SyncCommitteeBits: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeBits),
SyncCommitteeSignature: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeSignature),
},
ExecutionPayload: payload,
BLSToExecutionChanges: SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
BlobKzgCommitments: blobKzgCommitments,
ExecutionRequests: ExecutionRequestsFromConsensus(b.Body.ExecutionRequests),
},
}, nil
}

func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBeaconBlockFulu, error) {
block, err := BeaconBlockFuluFromConsensus(b.Block)
if err != nil {
@@ -4097,4 +3782,5 @@ func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFu
var (
ExecutionPayloadFuluFromConsensus = ExecutionPayloadDenebFromConsensus
ExecutionPayloadHeaderFuluFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
BeaconBlockFuluFromConsensus = BeaconBlockElectraFromConsensus
)
@@ -250,3 +250,17 @@ type ChainHead struct {
PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
OptimisticStatus bool `json:"optimistic_status"`
}

type GetPendingDepositsResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*PendingDeposit `json:"data"`
}

type GetPendingPartialWithdrawalsResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*PendingPartialWithdrawal `json:"data"`
}
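To make the new response types concrete, the sketch below shows the JSON they marshal to. It is a hypothetical illustration placed in the same package as the types above: the "electra" version string is a placeholder, and `Data` is left empty because the `PendingDeposit` fields are not part of this diff.

```go
package structs // assumed package name for the response types shown above

import (
	"encoding/json"
	"fmt"
)

// ExamplePendingDepositsResponseShape is a hypothetical example test that
// prints the wire shape of GetPendingDepositsResponse. Field names come from
// the struct tags above; the version value and the empty Data are placeholders.
func ExamplePendingDepositsResponseShape() {
	resp := GetPendingDepositsResponse{
		Version:             "electra",
		ExecutionOptimistic: false,
		Finalized:           true,
		Data:                []*PendingDeposit{},
	}
	out, _ := json.Marshal(resp)
	fmt.Println(string(out))
	// Output: {"version":"electra","execution_optimistic":false,"finalized":true,"data":[]}
}
```

GetPendingPartialWithdrawalsResponse follows the same shape, with `data` holding pending partial withdrawals instead.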
@@ -25,6 +25,7 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
@@ -49,6 +50,7 @@ go_library(
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -142,6 +144,7 @@ go_test(
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
@@ -158,10 +161,12 @@ go_test(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -33,6 +33,7 @@ var (
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -444,6 +444,9 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
// Blobs may not exist for some blocks, leading to deletion failures. Log such errors at debug level.
log.WithError(err).Debug("Could not remove blob from blob storage")
}
if err := s.dataColumnStorage.Remove(root); err != nil {
log.WithError(err).Debug("Could not remove data columns from data column storage")
}
}
return nil
}
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"kzg.go",
"trusted_setup.go",
"validation.go",
],
@@ -12,6 +13,9 @@ go_library(
deps = [
"//consensus-types/blocks:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -26,7 +30,8 @@ go_test(
deps = [
"//consensus-types/blocks:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
beacon-chain/blockchain/kzg/kzg.go (125 changed lines, new file)
@@ -0,0 +1,125 @@
package kzg

import (
    "github.com/pkg/errors"

    ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob

// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte

// Commitment represent a KZG commitment to a Blob.
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48

// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32

// CellsAndProofs represents the Cells and Proofs corresponding to
// a single blob.
type CellsAndProofs struct {
    Cells  []Cell
    Proofs []Proof
}

func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
    kzgBlob := kzg4844.Blob(*blob)
    comm, err := kzg4844.BlobToCommitment(&kzgBlob)
    if err != nil {
        return Commitment{}, err
    }
    return Commitment(comm), nil
}

func ComputeCells(blob *Blob) ([]Cell, error) {
    ckzgBlob := (*ckzg4844.Blob)(blob)
    ckzgCells, err := ckzg4844.ComputeCells(ckzgBlob)
    if err != nil {
        return nil, errors.Wrap(err, "compute cells")
    }

    cells := make([]Cell, len(ckzgCells))
    for i := range ckzgCells {
        cells[i] = Cell(ckzgCells[i])
    }
    return cells, nil
}

func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
    kzgBlob := kzg4844.Blob(*blob)
    proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment))
    if err != nil {
        return [48]byte{}, err
    }
    return Proof(proof), nil
}

func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
    ckzgBlob := (*ckzg4844.Blob)(blob)
    ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
    if err != nil {
        return CellsAndProofs{}, err
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
    // Convert `Cell` type to `ckzg4844.Cell`
    ckzgCells := make([]ckzg4844.Cell, len(cells))
    for i := range cells {
        ckzgCells[i] = ckzg4844.Cell(cells[i])
    }

    return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
    // Convert `Cell` type to `ckzg4844.Cell`
    ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
    for i := range partialCells {
        ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
    }

    ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
    if err != nil {
        return CellsAndProofs{}, errors.Wrap(err, "recover cells and KZG proofs")
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

// Convert cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
    if len(ckzgCells) != len(ckzgProofs) {
        return CellsAndProofs{}, errors.New("different number of cells/proofs")
    }

    var cells []Cell
    var proofs []Proof
    for i := range ckzgCells {
        cells = append(cells, Cell(ckzgCells[i]))
        proofs = append(proofs, Proof(ckzgProofs[i]))
    }

    return CellsAndProofs{
        Cells:  cells,
        Proofs: proofs,
    }, nil
}
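To make the new package's API concrete, here is a minimal usage sketch. It is not code from this branch: it assumes the package is importable at the path implied by the repository layout, that the trusted setup loads via the Start function shown in the trusted_setup.go hunk below, and it uses a zero-valued blob purely as a placeholder input.

```go
package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg" // assumed import path
)

func main() {
	// Load the embedded trusted setup once before any KZG operation.
	if err := kzg.Start(); err != nil {
		log.Fatal(err)
	}

	// A zero-valued blob is a valid (if uninteresting) input; real callers fill
	// it with the serialized field elements of a blob sidecar.
	var blob kzg.Blob

	commitment, err := kzg.BlobToKZGCommitment(&blob)
	if err != nil {
		log.Fatal(err)
	}

	// Extend the blob into cells and per-cell proofs (PeerDAS-style sampling).
	cp, err := kzg.ComputeCellsAndKZGProofs(&blob)
	if err != nil {
		log.Fatal(err)
	}

	// Batch-verify every cell against the single blob commitment.
	commitments := make([]kzg.Bytes48, len(cp.Cells))
	proofs := make([]kzg.Bytes48, len(cp.Proofs))
	indices := make([]uint64, len(cp.Cells))
	for i := range cp.Cells {
		commitments[i] = kzg.Bytes48(commitment)
		proofs[i] = kzg.Bytes48(cp.Proofs[i])
		indices[i] = uint64(i)
	}
	ok, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cp.Cells, proofs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("all cells verify: %v", ok)

	// Given at least half of the cells, the full extended set can be recovered.
	half := len(cp.Cells) / 2
	recovered, err := kzg.RecoverCellsAndKZGProofs(indices[:half], cp.Cells[:half])
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("recovered %d cells", len(recovered.Cells))
}
```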
@@ -5,6 +5,8 @@ import (
    "encoding/json"

    GoKZG "github.com/crate-crypto/go-kzg-4844"
    CKZG "github.com/ethereum/c-kzg-4844/v2/bindings/go"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
)

@@ -12,17 +14,53 @@ var (
    //go:embed trusted_setup.json
    embeddedTrustedSetup []byte // 1.2Mb
    kzgContext *GoKZG.Context
    kzgLoaded bool
)

type TrustedSetup struct {
    G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
    G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
    G2Monomial [65]GoKZG.G2CompressedHexStr `json:"g2_monomial"`
}

func Start() error {
    parsedSetup := GoKZG.JSONTrustedSetup{}
    err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
    trustedSetup := &TrustedSetup{}
    err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
    if err != nil {
        return errors.Wrap(err, "could not parse trusted setup JSON")
    }
    kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
    kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
        SetupG2: trustedSetup.G2Monomial[:],
        SetupG1Lagrange: trustedSetup.G1Lagrange})
    if err != nil {
        return errors.Wrap(err, "could not initialize go-kzg context")
    }

    // Length of a G1 point, converted from hex to binary.
    g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
    for i, g1 := range &trustedSetup.G1Monomial {
        copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
    }
    // Length of a G1 point, converted from hex to binary.
    g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
    for i, g1 := range &trustedSetup.G1Lagrange {
        copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
    }
    // Length of a G2 point, converted from hex to binary.
    g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
    for i, g2 := range &trustedSetup.G2Monomial {
        copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
    }
    if !kzgLoaded {
        // TODO: Provide a configuration option for this.
        var precompute uint = 8

        // Free the current trusted setup before running this method. CKZG
        // panics if the same setup is run multiple times.
        if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
            panic(err)
        }
    }
    kzgLoaded = true
    return nil
}
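A note on the (len-2)/2 arithmetic in Start(): each setup point is stored as a "0x"-prefixed hex string, so dropping the two prefix characters and halving what remains gives the binary length (48 bytes for a compressed G1 point, 96 bytes for a compressed G2 point). A standalone illustration, not Prysm code:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // "0x" + 96 hex characters encodes a 48-byte compressed G1 point;
    // "0x" + 192 hex characters encodes a 96-byte compressed G2 point.
    g1Hex := "0x" + strings.Repeat("ab", 48)
    g2Hex := "0x" + strings.Repeat("cd", 96)
    fmt.Println((len(g1Hex)-2)/2, (len(g2Hex)-2)/2) // 48 96
}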
File diff suppressed because it is too large
@@ -1,12 +1,16 @@
|
||||
package kzg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
|
||||
@@ -37,7 +41,7 @@ func TestBytesToAny(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGenerateCommitmentAndProof(t *testing.T) {
|
||||
blob := util.GetRandBlob(123)
|
||||
blob := getRandBlob(123)
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
expectedCommitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
|
||||
@@ -45,3 +49,36 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
|
||||
require.Equal(t, expectedCommitment, commitment)
|
||||
require.Equal(t, expectedProof, proof)
|
||||
}
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func getRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func getRandBlob(seed int64) GoKZG.Blob {
|
||||
var blob GoKZG.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := getRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
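The helpers above exist so tests can generate reproducible blobs: the same seed always produces the same field elements, and therefore the same blob. A tiny hedged example of the kind of assertion this enables (illustrative only, not part of the change):

func TestGetRandBlobIsDeterministic(t *testing.T) {
    // Identical seeds must yield identical blobs, which is what keeps fixed
    // expected commitments and proofs (as in TestGenerateCommitmentAndProof) stable.
    require.Equal(t, getRandBlob(123), getRandBlob(123))
}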
@@ -69,6 +69,22 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Electra {
|
||||
eReqs, err := b.Body().ExecutionRequests()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get execution requests")
|
||||
} else {
|
||||
if len(eReqs.Deposits) > 0 {
|
||||
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
|
||||
}
|
||||
if len(eReqs.Consolidations) > 0 {
|
||||
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
|
||||
}
|
||||
if len(eReqs.Withdrawals) > 0 {
|
||||
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
@@ -126,9 +129,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
|
||||
func WithP2PBroadcaster(p p2p.Acceser) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.P2p = p
|
||||
s.cfg.P2P = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -207,9 +210,31 @@ func WithBlobStorage(b *filesystem.BlobStorage) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorage sets the data column storage backend for the blockchain service.
|
||||
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
|
||||
return func(s *Service) error {
|
||||
s.dataColumnStorage = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithSyncChecker(checker Checker) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.SyncChecker = checker
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.CustodyInfo = custodyInfo
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithGenesisTime(genesisTime time.Time) Option {
|
||||
return func(s *Service) error {
|
||||
s.genesisTime = genesisTime
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
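These new options follow the service's existing functional-option pattern, so the data column storage, custody info, sync checker, and genesis time are wired in at construction time. A hypothetical construction site (the serviceDeps bundle and its field names are placeholders, not from this change):

func newBlockchainService(ctx context.Context, deps serviceDeps) (*Service, error) {
    // deps is a hypothetical bundle of values created by the caller.
    return NewService(ctx,
        WithDataColumnStorage(deps.dataColumnStorage),
        WithCustodyInfo(deps.custodyInfo),
        WithSyncChecker(deps.checker),
        WithGenesisTime(deps.genesisTime),
    )
}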
@@ -3,11 +3,13 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
@@ -15,6 +17,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -234,8 +237,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
|
||||
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
|
||||
}
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
@@ -522,20 +526,206 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
func missingDataColumns(bs *filesystem.DataColumnStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
block := signed.Block()
|
||||
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
|
||||
return nil, errMaxDataColumnsExceeded
|
||||
}
|
||||
|
||||
// Get a summary of the data columns stored in the database.
|
||||
summary := bs.Summary(root)
|
||||
|
||||
// Check all expected data columns against the summary.
|
||||
missing := make(map[uint64]bool)
|
||||
for column := range expected {
|
||||
if !summary.HasIndex(column) {
|
||||
missing[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
// isDataAvailable blocks until all sidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
block := signedBlock.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
|
||||
blockVersion := block.Version()
|
||||
if blockVersion >= version.Fulu {
|
||||
return s.areDataColumnsAvailable(ctx, root, block)
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
return s.areBlobsAvailable(ctx, root, block)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
|
||||
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// If the block has no commitments, there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// All columns to sample need to be available for the block to be considered available.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
// Prevent the custody group count from changing during the rest of the function.
|
||||
s.cfg.CustodyInfo.Mut.RLock()
|
||||
defer s.cfg.CustodyInfo.Mut.RUnlock()
|
||||
|
||||
// Get the custody group sampling size for the node.
|
||||
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Subscribe to data columns newly stored in the database.
|
||||
identsChan := make(chan filesystem.DataColumnsIdent)
|
||||
subscription := s.dataColumnStorage.DataColumnFeed.Subscribe(identsChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Get the count of data columns we already have in the store.
|
||||
summary := s.dataColumnStorage.Summary(root)
|
||||
storedDataColumnsCount := summary.Count()
|
||||
|
||||
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if peerdas.CanSelfReconstruct(storedDataColumnsCount) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a map of data column indices that are not currently available.
|
||||
missingMap, err := missingDataColumns(s.dataColumnStorage, root, peerInfo.CustodyColumns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
// If there are no missing indices, all data column sidecars are available.
|
||||
// This is the happy path.
|
||||
if len(missingMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
timer := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
missingMapCount := uint64(len(missingMap))
|
||||
|
||||
if missingMapCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
expected interface{} = "all"
|
||||
missing interface{} = "all"
|
||||
)
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
colMapCount := uint64(len(peerInfo.CustodyColumns))
|
||||
|
||||
if colMapCount < numberOfColumns {
|
||||
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
|
||||
}
|
||||
|
||||
if missingMapCount < numberOfColumns {
|
||||
missing = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": expected,
|
||||
"columnsWaiting": missing,
|
||||
}).Warning("Data columns still missing at slot end")
|
||||
})
|
||||
defer timer.Stop()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case idents := <-identsChan:
|
||||
if idents.Root != root {
|
||||
// This is not the root we are looking for.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, index := range idents.Indices {
|
||||
// This is a data column we are expecting.
|
||||
if _, ok := missingMap[index]; ok {
|
||||
storedDataColumnsCount++
|
||||
}
|
||||
|
||||
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if peerdas.CanSelfReconstruct(storedDataColumnsCount) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the index from the missing map.
|
||||
delete(missingMap, index)
|
||||
|
||||
// Return if there are no more missing data columns.
|
||||
if len(missingMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
var missingIndices interface{} = "all"
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
missingIndicesCount := uint64(len(missingMap))
|
||||
|
||||
if missingIndicesCount < numberOfColumns {
|
||||
missingIndices = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
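The two early exits above lean on the erasure-coding property of PeerDAS extended blobs: holding at least half of the columns is enough to rebuild all of them, so there is no need to keep waiting. The real predicate is peerdas.CanSelfReconstruct; a sketch of the threshold it expresses (illustrative, not the package's actual implementation):

// Sketch only: a node storing at least half of the NUMBER_OF_COLUMNS extended
// columns can erasure-decode every missing column locally.
func canSelfReconstruct(storedColumnCount uint64) bool {
    numberOfColumns := params.BeaconConfig().NumberOfColumns
    // (numberOfColumns + 1) / 2 rounds up, so an odd column count still
    // requires a strict majority of the data.
    return storedColumnCount >= (numberOfColumns+1)/2
}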
// areBlobsAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
|
||||
blockSlot := block.Slot()
|
||||
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
|
||||
return nil
|
||||
@@ -557,7 +747,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
// get a map of BlobSidecar indices that are not currently available.
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "missing indices")
|
||||
}
|
||||
// If there are no missing indices, all BlobSidecars are available.
|
||||
if len(missing) == 0 {
|
||||
@@ -569,15 +759,20 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
nc := s.blobNotifiers.forRoot(root, block.Slot())
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
|
||||
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
|
||||
Error("Still waiting for DA check at slot end.")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blockSlot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": len(missing),
|
||||
}).Error("Still waiting for blobs DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
@@ -599,13 +794,14 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
}
|
||||
}
|
||||
|
||||
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": missing,
|
||||
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
|
||||
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
|
||||
output := make([]uint64, 0, len(input))
|
||||
for idx := range input {
|
||||
output = append(output, idx)
|
||||
}
|
||||
slices.Sort[[]uint64](output)
|
||||
return output
|
||||
}
|
||||
|
||||
// lateBlockTasks is called 4 seconds into the slot and performs tasks
|
||||
@@ -691,7 +887,7 @@ func (s *Service) waitForSync() error {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
|
||||
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
@@ -26,6 +28,7 @@ import (
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -3148,3 +3151,151 @@ func TestSaveLightClientBootstrap(t *testing.T) {
|
||||
|
||||
reset()
|
||||
}
|
||||
|
||||
type testIsAvailableParams struct {
|
||||
options []Option
|
||||
blobKzgCommitmentsCount uint64
|
||||
columnsToSave []uint64
|
||||
}
|
||||
|
||||
func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
options := append(params.options, WithDataColumnStorage(dataColumnStorage))
|
||||
service, _ := minimalTestService(t, options...)
|
||||
|
||||
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
|
||||
|
||||
err := service.saveGenesisData(ctx, genesisState)
|
||||
require.NoError(t, err)
|
||||
|
||||
conf := util.DefaultBlockGenConfig()
|
||||
conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
|
||||
|
||||
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := signedBeaconBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnsParams := make([]verification.DataColumnParams, 0, len(params.columnsToSave))
|
||||
for _, i := range params.columnsToSave {
|
||||
dataColumnParam := verification.DataColumnParams{ColumnIndex: i}
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParam)
|
||||
}
|
||||
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: dataColumnsParams}
|
||||
_, verifiedRODataColumns := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
err = dataColumnStorage.Save(verifiedRODataColumns)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ctx, cancel, service, root, signed
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
t.Run("Fulu - out of retention window", func(t *testing.T) {
|
||||
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
|
||||
halfNumberOfColumns := params.BeaconConfig().NumberOfColumns / 2
|
||||
indices := make([]uint64, 0, halfNumberOfColumns)
|
||||
for i := range halfNumberOfColumns {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - no missing data columns", func(t *testing.T) {
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - some initially missing data columns (no reconstruction)", func(t *testing.T) {
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(128)
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(128)
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&custodyInfo)},
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
|
||||
// If needed, generate a random root that is different from the block root.
|
||||
var randomRoot [fieldparams.RootLength]byte
|
||||
for randomRoot == root {
|
||||
randomRootSlice := make([]byte, fieldparams.RootLength)
|
||||
_, err := rand.Read(randomRootSlice)
|
||||
require.NoError(t, err)
|
||||
copy(randomRoot[:], randomRootSlice)
|
||||
}
|
||||
|
||||
// TODO: Achieve the same result without using time.AfterFunc.
|
||||
time.AfterFunc(10*time.Millisecond, func() {
|
||||
halfNumberOfColumns := params.BeaconConfig().NumberOfColumns / 2
|
||||
indices := make([]uint64, 0, halfNumberOfColumns)
|
||||
for i := range halfNumberOfColumns {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
withSomeRequiredColumns := filesystem.DataColumnsIdent{Root: root, Indices: indices}
|
||||
service.dataColumnStorage.DataColumnFeed.Send(withSomeRequiredColumns)
|
||||
})
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - some columns are definitively missing", func(t *testing.T) {
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, cancel, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
// TODO: Achieve the same result without using time.AfterFunc.
|
||||
time.AfterFunc(10*time.Millisecond, func() {
|
||||
cancel()
|
||||
})
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -52,6 +52,13 @@ type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// DataColumnReceiver interface defines the methods of chain service for receiving new
|
||||
// data columns
|
||||
type DataColumnReceiver interface {
|
||||
ReceiveDataColumn(blocks.VerifiedRODataColumn) error
|
||||
ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
@@ -70,6 +77,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
|
||||
return nil
|
||||
}
|
||||
|
||||
receivedTime := time.Now()
|
||||
s.blockBeingSynced.set(blockRoot)
|
||||
defer s.blockBeingSynced.unset(blockRoot)
|
||||
@@ -78,6 +86,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get block's prestate")
|
||||
@@ -93,10 +102,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Defragment the state before continuing block processing.
|
||||
s.defragmentState(postState)
|
||||
|
||||
@@ -225,24 +236,32 @@ func (s *Service) handleDA(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
avs das.AvailabilityStore,
|
||||
) (time.Duration, error) {
|
||||
daStartTime := time.Now()
|
||||
if avs != nil {
|
||||
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
) (elapsed time.Duration, err error) {
|
||||
defer func(start time.Time) {
|
||||
elapsed := time.Since(start)
|
||||
|
||||
if err == nil {
|
||||
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
|
||||
}
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
|
||||
}
|
||||
} else {
|
||||
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability")
|
||||
}(time.Now())
|
||||
|
||||
if avs == nil {
|
||||
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
daWaitedTime := time.Since(daStartTime)
|
||||
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
|
||||
return daWaitedTime, nil
|
||||
|
||||
var rob blocks.ROBlock
|
||||
rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
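The rewritten handleDA uses named return values plus a deferred function so the availability wait time is observed on every successful exit path, however early the return. The pattern in isolation (a generic sketch; observe stands in for the Prometheus histogram and doWork for the actual availability check, both hypothetical):

// Generic sketch of the defer-with-named-returns pattern used in handleDA.
func timedOperation(observe func(ms float64), doWork func() error) (elapsed time.Duration, err error) {
    defer func(start time.Time) {
        elapsed = time.Since(start)
        if err == nil {
            observe(float64(elapsed.Milliseconds()))
        }
    }(time.Now())

    err = doWork() // every return path, including early ones, runs the defer
    return
}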
func (s *Service) reportPostBlockProcessing(
|
||||
|
||||
25
beacon-chain/blockchain/receive_data_column.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
)
|
||||
|
||||
// ReceiveDataColumns receives a batch of data columns.
|
||||
func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
|
||||
if err := s.dataColumnStorage.Save(dataColumnSidecars); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn receives a single data column.
|
||||
// (It is only a wrapper around ReceiveDataColumns.)
|
||||
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
|
||||
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
@@ -33,6 +34,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -65,6 +67,7 @@ type Service struct {
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
blobStorage *filesystem.BlobStorage
|
||||
dataColumnStorage *filesystem.DataColumnStorage
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -80,7 +83,7 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
P2p p2p.Broadcaster
|
||||
P2P p2p.Acceser
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
@@ -92,6 +95,7 @@ type config struct {
|
||||
FinalizedStateAtStartUp state.BeaconState
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
SyncChecker Checker
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// Checker is an interface used to determine if a node is in initial sync
|
||||
@@ -105,22 +109,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][]bool
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex map[[32]byte][]bool
|
||||
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if idx >= uint64(maxBlobsPerBlock) {
|
||||
return
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// if idx >= uint64(maxBlobsPerBlock) {
|
||||
// return
|
||||
// }
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
if seen == nil {
|
||||
seen = make([]bool, maxBlobsPerBlock)
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// if seen == nil {
|
||||
// seen = make([]bool, maxBlobsPerBlock)
|
||||
// }
|
||||
if seen[idx] {
|
||||
bn.Unlock()
|
||||
return
|
||||
@@ -131,7 +139,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
@@ -141,12 +151,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
}
|
||||
|
||||
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
bn.Lock()
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
|
||||
@@ -172,7 +185,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
|
||||
@@ -97,13 +97,14 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithSlashingPool(slashings.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(fc),
|
||||
WithAttestationService(attService),
|
||||
WithStateGen(stateGen),
|
||||
WithPayloadIDCache(cache.NewPayloadIDCache()),
|
||||
WithClockSynchronizer(startup.NewClockSynchronizer()),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
}
|
||||
|
||||
chainService, err := NewService(ctx, opts...)
|
||||
@@ -587,7 +588,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
|
||||
@@ -19,8 +19,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -45,6 +47,11 @@ type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
type mockAccesser struct {
|
||||
mockBroadcaster
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
@@ -65,6 +72,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
@@ -120,8 +132,10 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithDepositCache(dc),
|
||||
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
|
||||
WithSyncChecker(mock.MockChecker{}),
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -74,6 +74,7 @@ type ChainService struct {
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
Blobs []blocks.VerifiedROBlob
|
||||
DataColumns []blocks.VerifiedRODataColumn
|
||||
TargetRoot [32]byte
|
||||
}
|
||||
|
||||
@@ -702,6 +703,17 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn implements the same method in chain service
|
||||
func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
|
||||
c.DataColumns = append(c.DataColumns, dc)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumns implements the same method in chain service
|
||||
func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
|
||||
1
beacon-chain/cache/BUILD.bazel
vendored
@@ -84,6 +84,7 @@ go_test(
|
||||
"sync_committee_head_state_test.go",
|
||||
"sync_committee_test.go",
|
||||
"sync_subnet_ids_test.go",
|
||||
"tracked_validators_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
138
beacon-chain/cache/tracked_validators.go
vendored
@@ -1,49 +1,139 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type TrackedValidator struct {
|
||||
Active bool
|
||||
FeeRecipient primitives.ExecutionAddress
|
||||
Index primitives.ValidatorIndex
|
||||
}
|
||||
const (
|
||||
defaultExpiration = 1 * time.Hour
|
||||
cleanupInterval = 15 * time.Minute
|
||||
)
|
||||
|
||||
type TrackedValidatorsCache struct {
|
||||
sync.Mutex
|
||||
trackedValidators map[primitives.ValidatorIndex]TrackedValidator
|
||||
}
|
||||
type (
|
||||
TrackedValidator struct {
|
||||
Active bool
|
||||
FeeRecipient primitives.ExecutionAddress
|
||||
Index primitives.ValidatorIndex
|
||||
}
|
||||
|
||||
TrackedValidatorsCache struct {
|
||||
trackedValidators cache.Cache
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
// Metrics.
|
||||
trackedValidatorsCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "tracked_validators_cache_miss",
|
||||
Help: "The number of tracked validators requests that are not present in the cache.",
|
||||
})
|
||||
|
||||
trackedValidatorsCacheTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "tracked_validators_cache_total",
|
||||
Help: "The total number of tracked validators requests in the cache.",
|
||||
})
|
||||
|
||||
trackedValidatorsCacheCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "tracked_validators_cache_count",
|
||||
Help: "The number of tracked validators in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// NewTrackedValidatorsCache creates a new cache for tracking validators.
|
||||
func NewTrackedValidatorsCache() *TrackedValidatorsCache {
|
||||
return &TrackedValidatorsCache{
|
||||
trackedValidators: make(map[primitives.ValidatorIndex]TrackedValidator),
|
||||
trackedValidators: *cache.New(defaultExpiration, cleanupInterval),
|
||||
}
|
||||
}
|
||||
|
||||
// Validator retrieves a tracked validator from the cache (if present).
|
||||
func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (TrackedValidator, bool) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
val, ok := t.trackedValidators[index]
|
||||
return val, ok
|
||||
trackedValidatorsCacheTotal.Inc()
|
||||
|
||||
key := toCacheKey(index)
|
||||
item, ok := t.trackedValidators.Get(key)
|
||||
if !ok {
|
||||
trackedValidatorsCacheMiss.Inc()
|
||||
return TrackedValidator{}, false
|
||||
}
|
||||
|
||||
val, ok := item.(TrackedValidator)
|
||||
if !ok {
|
||||
logrus.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
|
||||
return TrackedValidator{}, false
|
||||
}
|
||||
|
||||
return val, true
|
||||
}
|
||||
|
||||
// Set adds a tracked validator to the cache.
|
||||
func (t *TrackedValidatorsCache) Set(val TrackedValidator) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.trackedValidators[val.Index] = val
|
||||
key := toCacheKey(val.Index)
|
||||
t.trackedValidators.Set(key, val, cache.DefaultExpiration)
|
||||
}
|
||||
|
||||
// Prune removes all tracked validators from the cache.
|
||||
func (t *TrackedValidatorsCache) Prune() {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
|
||||
t.trackedValidators.Flush()
|
||||
trackedValidatorsCacheCount.Set(0)
|
||||
}
|
||||
|
||||
// Validating returns true if there is at least one tracked validator in the cache.
|
||||
func (t *TrackedValidatorsCache) Validating() bool {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
return len(t.trackedValidators) > 0
|
||||
count := t.trackedValidators.ItemCount()
|
||||
trackedValidatorsCacheCount.Set(float64(count))
|
||||
|
||||
return count > 0
|
||||
}
|
||||
|
||||
// ItemCount returns the number of tracked validators in the cache.
|
||||
func (t *TrackedValidatorsCache) ItemCount() int {
|
||||
count := t.trackedValidators.ItemCount()
|
||||
trackedValidatorsCacheCount.Set(float64(count))
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// Indices returns a map of validator indices that are being tracked.
|
||||
func (t *TrackedValidatorsCache) Indices() map[primitives.ValidatorIndex]bool {
|
||||
items := t.trackedValidators.Items()
|
||||
count := len(items)
|
||||
trackedValidatorsCacheCount.Set(float64(count))
|
||||
|
||||
indices := make(map[primitives.ValidatorIndex]bool, count)
|
||||
|
||||
for cacheKey := range items {
|
||||
index, err := fromCacheKey(cacheKey)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to get validator index from cache key")
|
||||
continue
|
||||
}
|
||||
|
||||
indices[index] = true
|
||||
}
|
||||
|
||||
return indices
|
||||
}
|
||||
|
||||
// toCacheKey creates a cache key from the validator index.
|
||||
func toCacheKey(validatorIndex primitives.ValidatorIndex) string {
|
||||
return strconv.FormatUint(uint64(validatorIndex), 10)
|
||||
}
|
||||
|
||||
// fromCacheKey gets the validator index from the cache key.
|
||||
func fromCacheKey(key string) (primitives.ValidatorIndex, error) {
|
||||
validatorIndex, err := strconv.ParseUint(key, 10, 64)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "parse Uint: %s", key)
|
||||
}
|
||||
|
||||
return primitives.ValidatorIndex(validatorIndex), nil
|
||||
}
|
||||
|
||||
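The move from a mutex-guarded map to patrickmn/go-cache gives tracked validators a TTL (1 hour here, with a 15 minute cleanup interval), so entries that are not refreshed by Set age out on their own. A minimal, standalone illustration of that expiry behaviour (demo values, not Prysm code):

package main

import (
    "fmt"
    "time"

    "github.com/patrickmn/go-cache"
)

func main() {
    // A 50ms TTL purely for demonstration; the cache above uses 1 hour.
    c := cache.New(50*time.Millisecond, time.Minute)
    c.Set("42", "tracked validator", cache.DefaultExpiration)

    _, ok := c.Get("42")
    fmt.Println(ok) // true: the entry is still fresh

    time.Sleep(100 * time.Millisecond)
    _, ok = c.Get("42")
    fmt.Println(ok) // false: the entry has expired
}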
79
beacon-chain/cache/tracked_validators_test.go
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func mapEqual(a, b map[primitives.ValidatorIndex]bool) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, v := range a {
|
||||
if b[k] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func TestTrackedValidatorsCache(t *testing.T) {
|
||||
vc := NewTrackedValidatorsCache()
|
||||
|
||||
// No validators in cache.
|
||||
require.Equal(t, 0, vc.ItemCount())
|
||||
require.Equal(t, false, vc.Validating())
|
||||
require.Equal(t, 0, len(vc.Indices()))
|
||||
|
||||
_, ok := vc.Validator(41)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// Add some validators (one twice).
|
||||
v42Expected := TrackedValidator{Active: true, FeeRecipient: [20]byte{1}, Index: 42}
|
||||
v43Expected := TrackedValidator{Active: false, FeeRecipient: [20]byte{2}, Index: 43}
|
||||
|
||||
vc.Set(v42Expected)
|
||||
vc.Set(v43Expected)
|
||||
vc.Set(v42Expected)
|
||||
|
||||
// Check if they are in the cache.
|
||||
v42Actual, ok := vc.Validator(42)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, v42Expected, v42Actual)
|
||||
|
||||
v43Actual, ok := vc.Validator(43)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, v43Expected, v43Actual)
|
||||
|
||||
expected := map[primitives.ValidatorIndex]bool{42: true, 43: true}
|
||||
actual := vc.Indices()
|
||||
require.Equal(t, true, mapEqual(expected, actual))
|
||||
|
||||
// Check the item count and if the cache is validating.
|
||||
require.Equal(t, 2, vc.ItemCount())
|
||||
require.Equal(t, true, vc.Validating())
|
||||
|
||||
// Check if a non-existing validator is in the cache.
|
||||
_, ok = vc.Validator(41)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// Prune the cache and test it.
|
||||
vc.Prune()
|
||||
|
||||
_, ok = vc.Validator(41)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
_, ok = vc.Validator(42)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
_, ok = vc.Validator(43)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
require.Equal(t, 0, vc.ItemCount())
|
||||
require.Equal(t, false, vc.Validating())
|
||||
require.Equal(t, 0, len(vc.Indices()))
|
||||
}
|
||||
@@ -230,14 +230,17 @@ func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBea
|
||||
if body.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
|
||||
kzgs, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if len(kzgs) > maxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
|
||||
|
||||
commitmentCount, maxBlobsPerBlock := len(kzgs), params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if commitmentCount > maxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: actual count %d - max allowed %d", commitmentCount, maxBlobsPerBlock)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -926,8 +926,10 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
|
||||
b = ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
|
||||
maxCommitmentsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())
|
||||
|
||||
b = ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, maxCommitmentsPerBlock+1)}}
|
||||
rb, err = consensusblocks.NewBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: actual count %d - max allowed %d", maxCommitmentsPerBlock+1, maxCommitmentsPerBlock), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
}
|
||||
|
||||
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
|
||||
currentEpoch := slots.ToEpoch(header.Header.Slot)
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
|
||||
// from the above method by not using fork data from the state and instead retrieving it
|
||||
// via the respective epoch.
|
||||
|
||||
@@ -35,6 +35,9 @@ const (
|
||||
|
||||
// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
|
||||
SingleAttReceived = 9
|
||||
|
||||
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
|
||||
DataColumnSidecarReceived = 10
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -85,3 +88,6 @@ type AttesterSlashingReceivedData struct {
|
||||
type SingleAttReceivedData struct {
|
||||
Attestation ethpb.Att
|
||||
}
|
||||
type DataColumnSidecarReceivedData struct {
|
||||
DataColumn *blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
@@ -69,6 +69,10 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depostiRequestsStartIndex, err := beaconState.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositBalanceToConsume, err := beaconState.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -102,7 +106,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateFulu{
|
||||
s := ðpb.BeaconStateElectra{
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
@@ -154,7 +158,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
HistoricalSummaries: summaries,
|
||||
|
||||
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
|
||||
DepositRequestsStartIndex: depostiRequestsStartIndex,
|
||||
DepositBalanceToConsume: depositBalanceToConsume,
|
||||
ExitBalanceToConsume: exitBalanceToConsume,
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
|
||||
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
69
beacon-chain/core/peerdas/BUILD.bazel
Normal file
@@ -0,0 +1,69 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "das_core.go",
        "info.go",
        "metrics.go",
        "p2p_interface.go",
        "peer_sampling.go",
        "reconstruction.go",
        "util.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "das_core_test.go",
        "info_test.go",
        "p2p_interface_test.go",
        "peer_sampling_test.go",
        "reconstruction_test.go",
        "utils_test.go",
    ],
    deps = [
        ":go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
375
beacon-chain/core/peerdas/das_core.go
Normal file
@@ -0,0 +1,375 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
beaconState "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errCustodyGroupCountTooLarge = errors.New("custody group count too large")
|
||||
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
|
||||
|
||||
// maxUint256 is the maximum value of a uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
type CustodyType int
|
||||
|
||||
const (
|
||||
Target CustodyType = iota
|
||||
Actual
|
||||
)
|
||||
|
||||
// CustodyGroups computes the custody groups the node should participate in for custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_custody_groups
|
||||
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) (map[uint64]bool, error) {
|
||||
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
// Check if the custody group count is larger than the number of custody groups.
|
||||
if custodyGroupCount > numberOfCustodyGroup {
|
||||
return nil, errCustodyGroupCountTooLarge
|
||||
}
|
||||
|
||||
custodyGroups := make(map[uint64]bool, custodyGroupCount)
|
||||
one := uint256.NewInt(1)
|
||||
|
||||
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(custodyGroups)) < custodyGroupCount; currentId.Add(currentId, one) {
|
||||
// Convert to big endian bytes.
|
||||
currentIdBytesBigEndian := currentId.Bytes32()
|
||||
|
||||
// Convert to little endian.
|
||||
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
|
||||
|
||||
// Hash the result.
|
||||
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
|
||||
|
||||
// Get the custody group ID.
|
||||
custodyGroupId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup
|
||||
|
||||
// Add the custody group to the map.
|
||||
custodyGroups[custodyGroupId] = true
|
||||
|
||||
// Overflow prevention.
|
||||
if currentId.Cmp(maxUint256) == 0 {
|
||||
currentId = uint256.NewInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Final check.
|
||||
if uint64(len(custodyGroups)) != custodyGroupCount {
|
||||
return nil, errWrongComputedCustodyGroupCount
|
||||
}
|
||||
|
||||
return custodyGroups, nil
|
||||
}
|
||||
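The function above gives a deterministic mapping from a node ID to custody groups, so any node can recompute another peer's custody set locally. A minimal caller-side sketch (not part of this diff; `peerID` and `custodyGroupCount` are assumed inputs), chaining it with CustodyColumns defined later in this file:

	// Sketch: derive which columns a peer is expected to custody from its node ID.
	groups, err := peerdas.CustodyGroups(peerID, custodyGroupCount)
	if err != nil {
		return err
	}
	columns, err := peerdas.CustodyColumns(groups)
	if err != nil {
		return err
	}
	_ = columns // columns[i] == true means the peer is expected to custody column i.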
|
||||
// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#compute_columns_for_custody_group
|
||||
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
|
||||
|
||||
if custodyGroup > numberOfCustodyGroup {
|
||||
return nil, errCustodyGroupCountTooLarge
|
||||
}
|
||||
|
||||
numberOfColumns := beaconConfig.NumberOfColumns
|
||||
|
||||
columnsPerGroup := numberOfColumns / numberOfCustodyGroup
|
||||
|
||||
columns := make([]uint64, 0, columnsPerGroup)
|
||||
for i := range columnsPerGroup {
|
||||
column := numberOfCustodyGroup*i + custodyGroup
|
||||
columns = append(columns, column)
|
||||
}
|
||||
|
||||
return columns, nil
|
||||
}
|
||||
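For intuition, a hedged worked example, assuming the 128 columns / 128 custody groups configuration implied by the tests in this package (so columnsPerGroup is 1):

	// Under 128 columns / 128 groups each group custodies exactly one column.
	cols, err := peerdas.ComputeColumnsForCustodyGroup(7)
	if err != nil {
		return err
	}
	_ = cols // [7]
	// With 64 groups and 128 columns, group 7 would instead map to columns {7, 71}.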
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
|
||||
start := time.Now()
|
||||
if signedBlock == nil || len(cellsAndProofs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
if len(blobKzgCommitments) != len(cellsAndProofs) {
|
||||
return nil, errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
|
||||
}
|
||||
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
blobsCount := len(cellsAndProofs)
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
Index: columnIndex,
|
||||
Column: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProofs: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
return sidecars, nil
|
||||
}
|
||||
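A hedged production-side sketch of how these sidecars might be built from a block's blobs. `kzg.ComputeCellsAndKZGProofs` is an assumed wrapper around the cell KZG API (the tests below use util.GenerateCellsAndProofs instead), so treat the exact call as illustrative:

	// Sketch: build one CellsAndProofs per blob, then fan out one sidecar per column.
	cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))
	for i := range blobs {
		cp, err := kzg.ComputeCellsAndKZGProofs(&blobs[i]) // assumed wrapper
		if err != nil {
			return err
		}
		cellsAndProofs = append(cellsAndProofs, cp)
	}
	sidecars, err := peerdas.DataColumnSidecars(signedBlock, cellsAndProofs)
	if err != nil {
		return err
	}
	_ = sidecars // one sidecar per column, each holding one cell per blob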
|
||||
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
|
||||
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
|
||||
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
if ct == Actual {
|
||||
custodyGroupCount = custodyInfo.ActualGroupCount()
|
||||
}
|
||||
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
return max(samplesPerSlot, custodyGroupCount)
|
||||
}
|
||||
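A worked example, assuming SamplesPerSlot = 8 as exercised by the tests in this package:

	// The sampling size never drops below SamplesPerSlot:
	//   target custody group count 2   -> sampling size 8  (floored at SamplesPerSlot)
	//   target custody group count 100 -> sampling size 100
	size := custodyInfo.CustodyGroupSamplingSize(peerdas.Target)
	_ = size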
|
||||
// CustodyColumns computes the custody columns from the custody groups.
|
||||
func CustodyColumns(custodyGroups map[uint64]bool) (map[uint64]bool, error) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
custodyGroupCount := len(custodyGroups)
|
||||
|
||||
// Compute the columns for each custody group.
|
||||
columns := make(map[uint64]bool, custodyGroupCount)
|
||||
for group := range custodyGroups {
|
||||
if group >= numberOfCustodyGroups {
|
||||
return nil, errCustodyGroupCountTooLarge
|
||||
}
|
||||
|
||||
groupColumns, err := ComputeColumnsForCustodyGroup(group)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute columns for custody group")
|
||||
}
|
||||
|
||||
for _, column := range groupColumns {
|
||||
columns[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns, nil
|
||||
}
|
||||
|
||||
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#validator-custody
|
||||
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
|
||||
totalNodeBalance := uint64(0)
|
||||
for index := range validatorsIndex {
|
||||
balance, err := state.BalanceAtIndex(index)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "balance at index for validator index %v", index)
|
||||
}
|
||||
|
||||
totalNodeBalance += balance
|
||||
}
|
||||
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
|
||||
validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
|
||||
balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
|
||||
|
||||
count := totalNodeBalance / balancePerAdditionalCustodyGroup
|
||||
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
|
||||
}
|
||||
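A hedged worked example of the clamping arithmetic, using the constants the tests below exercise (ValidatorCustodyRequirement = 8, BalancePerAdditionalCustodyGroup = 32 ETH in Gwei, NumberOfCustodyGroups = 128); `st` and `trackedValidators` are assumed inputs:

	//   1 attached validator  * 32 ETH ->    1 group, clamped up to 8
	//   9 attached validators * 32 ETH ->    9 groups, kept as is
	//   1000 attached validators       -> 1000 groups, clamped down to 128
	count, err := peerdas.ValidatorsCustodyRequirement(st, trackedValidators)
	if err != nil {
		return err
	}
	_ = count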
|
||||
// Blobs extracts blobs from `dataColumnsSidecar`.
// It can be seen as the reciprocal function of DataColumnSidecars.
// `dataColumnsSidecar` must contain the data columns corresponding to the non-extended matrix
// (the first NUMBER_OF_COLUMNS / 2 column indices), otherwise an error is returned.
// (`dataColumnsSidecar` may contain extra columns; they are ignored.)
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
|
||||
columnCount := fieldparams.NumberOfColumns
|
||||
|
||||
neededColumnCount := columnCount / 2
|
||||
|
||||
// Check if all needed columns are present.
|
||||
sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
|
||||
for i := range dataColumnsSidecar {
|
||||
dataColumnSideCar := dataColumnsSidecar[i]
|
||||
index := dataColumnSideCar.Index
|
||||
|
||||
if index < uint64(neededColumnCount) {
|
||||
sliceIndexFromColumnIndex[index] = i
|
||||
}
|
||||
}
|
||||
|
||||
actualColumnCount := len(sliceIndexFromColumnIndex)
|
||||
|
||||
// Get missing columns.
|
||||
if actualColumnCount < neededColumnCount {
|
||||
missingColumns := make(map[int]bool, neededColumnCount-actualColumnCount)
|
||||
for i := range neededColumnCount {
|
||||
if _, ok := sliceIndexFromColumnIndex[uint64(i)]; !ok {
|
||||
missingColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
missingColumnsSlice := make([]int, 0, len(missingColumns))
|
||||
for i := range missingColumns {
|
||||
missingColumnsSlice = append(missingColumnsSlice, i)
|
||||
}
|
||||
|
||||
slices.Sort[[]int](missingColumnsSlice)
|
||||
return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
|
||||
firstDataColumnSidecar := dataColumnsSidecar[0]
|
||||
|
||||
blobCount := uint64(len(firstDataColumnSidecar.Column))
|
||||
|
||||
// Check that all columns have the same length.
|
||||
for i := range dataColumnsSidecar {
|
||||
if uint64(len(dataColumnsSidecar[i].Column)) != blobCount {
|
||||
return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].Column))
|
||||
}
|
||||
}
|
||||
|
||||
// Reconstruct verified RO blobs from columns.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Populate and filter indices.
|
||||
indicesSlice := populateAndFilterIndices(indices, blobCount)
|
||||
|
||||
for _, blobIndex := range indicesSlice {
|
||||
var blob kzg.Blob
|
||||
|
||||
// Compute the content of the blob.
|
||||
for columnIndex := range neededColumnCount {
|
||||
sliceIndex, ok := sliceIndexFromColumnIndex[uint64(columnIndex)]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
|
||||
}
|
||||
|
||||
dataColumnSideCar := dataColumnsSidecar[sliceIndex]
|
||||
cell := dataColumnSideCar.Column[blobIndex]
|
||||
|
||||
for i := 0; i < len(cell); i++ {
|
||||
blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the blob KZG commitment.
|
||||
blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])
|
||||
|
||||
// Compute the blob KZG proof.
|
||||
blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute blob KZG proof")
|
||||
}
|
||||
|
||||
blobSidecar := ðpb.BlobSidecar{
|
||||
Index: blobIndex,
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: firstDataColumnSidecar.SignedBlockHeader,
|
||||
CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new RO blob")
|
||||
}
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
return verifiedROBlobs, nil
|
||||
}
|
||||
|
||||
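A minimal consumption-side sketch of Blobs; `sidecars` is assumed to hold at least the first NUMBER_OF_COLUMNS / 2 column sidecars for the block:

	// Sketch: rebuild only the requested blobs from custodied (non-extended) columns.
	wanted := map[uint64]bool{0: true, 2: true}
	blobs, err := peerdas.Blobs(wanted, sidecars)
	if err != nil {
		return errors.Wrap(err, "blobs")
	}
	_ = blobs // verified read-only blobs 0 and 2, sorted by index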
// populateAndFilterIndices returns a sorted slices of indices, setting all indices if none are provided,
|
||||
// and filtering out indices higher than the blob count.
|
||||
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
|
||||
// If no indices are provided, provide all blobs.
|
||||
if len(indices) == 0 {
|
||||
for i := range blobCount {
|
||||
indices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Filter blobs index higher than the blob count.
|
||||
filteredIndices := make(map[uint64]bool, len(indices))
|
||||
for i := range indices {
|
||||
if i < blobCount {
|
||||
filteredIndices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Transform set to slice.
|
||||
indicesSlice := make([]uint64, 0, len(filteredIndices))
|
||||
for i := range filteredIndices {
|
||||
indicesSlice = append(indicesSlice, i)
|
||||
}
|
||||
|
||||
// Sort the indices.
|
||||
slices.Sort[[]uint64](indicesSlice)
|
||||
|
||||
return indicesSlice
|
||||
}
|
||||
248
beacon-chain/core/peerdas/das_core_test.go
Normal file
@@ -0,0 +1,248 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestBlobs(t *testing.T) {
|
||||
blobsIndice := map[uint64]bool{}
|
||||
|
||||
almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
|
||||
for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
|
||||
almostAllColumns = append(almostAllColumns, ðpb.DataColumnSidecar{
|
||||
Index: uint64(i),
|
||||
})
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
input []*ethpb.DataColumnSidecar
|
||||
expected []*blocks.VerifiedROBlob
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "empty input",
|
||||
input: []*ethpb.DataColumnSidecar{},
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
|
||||
},
|
||||
{
|
||||
name: "missing columns",
|
||||
input: almostAllColumns,
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := peerdas.Blobs(blobsIndice, tc.input)
|
||||
if tc.err != nil {
|
||||
require.Equal(t, tc.err.Error(), err.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.DeepSSZEqual(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
|
||||
const blobCount = 5
|
||||
blobsIndex := map[uint64]bool{}
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Generate random blobs and their corresponding commitments and proofs.
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
|
||||
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
// Create a random blob.
|
||||
blob := getRandBlob(int64(blobIndex))
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
// Generate a blobKZGCommitment for the blob.
|
||||
blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
|
||||
blobKzgProofs = append(blobKzgProofs, proof)
|
||||
}
|
||||
|
||||
// Set the commitments into the block.
|
||||
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
|
||||
for _, blobKZGCommitment := range blobKzgCommitments {
|
||||
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
|
||||
}
|
||||
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
|
||||
|
||||
// Generate verified RO blobs.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
|
||||
require.NoError(t, err)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
blob := blobs[blobIndex]
|
||||
blobKZGCommitment := blobKzgCommitments[blobIndex]
|
||||
blobKzgProof := blobKzgProofs[blobIndex]
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
blobSidecar := ðpb.BlobSidecar{
|
||||
Index: uint64(blobIndex),
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: signedBeaconBlockHeader,
|
||||
CommitmentInclusionProof: commitmentInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
// Compute data columns sidecars from the signed beacon block and from the blobs.
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute the blobs from the data columns sidecar.
|
||||
roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the blobs are the same.
|
||||
require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
|
||||
}
|
||||
|
||||
func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
count uint64
|
||||
expected uint64
|
||||
}{
|
||||
{name: "0 validators", count: 0, expected: 8},
|
||||
{name: "1 validator", count: 1, expected: 8},
|
||||
{name: "8 validators", count: 8, expected: 8},
|
||||
{name: "9 validators", count: 9, expected: 9},
|
||||
{name: "100 validators", count: 100, expected: 100},
|
||||
{name: "128 validators", count: 128, expected: 128},
|
||||
{name: "129 validators", count: 129, expected: 128},
|
||||
{name: "1000 validators", count: 1000, expected: 128},
|
||||
}
|
||||
|
||||
const balance = uint64(32_000_000_000)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
balances := make([]uint64, 0, tc.count)
|
||||
for range tc.count {
|
||||
balances = append(balances, balance)
|
||||
}
|
||||
|
||||
validatorsIndex := make(map[primitives.ValidatorIndex]bool)
|
||||
for i := range tc.count {
|
||||
validatorsIndex[primitives.ValidatorIndex(i)] = true
|
||||
}
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoFulu(ðpb.BeaconStateElectra{Balances: balances})
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyGroupSamplingSize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
custodyType peerdas.CustodyType
|
||||
validatorsCustodyRequirement uint64
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "target, lower than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 2,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "target, higher than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "actual, lower than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 3,
|
||||
toAdvertiseCustodyGroupCount: 4,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "actual, higher than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 100,
|
||||
toAdvertiseCustodyGroupCount: 101,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create a custody info.
|
||||
custodyInfo := peerdas.CustodyInfo{}
|
||||
|
||||
// Set the validators custody requirement for target custody group count.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Compute the custody group sampling size.
|
||||
actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)
|
||||
|
||||
// Check the result.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
186
beacon-chain/core/peerdas/info.go
Normal file
@@ -0,0 +1,186 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
// info contains all useful peerDAS related information regarding a peer.
|
||||
type (
|
||||
info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
targetCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
validatorsCustodyRequirement uint64
|
||||
}
|
||||
|
||||
toAdverstiseCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
value uint64
|
||||
}
|
||||
|
||||
CustodyInfo struct {
|
||||
// Mut is a mutex the caller can use to ensure that neither
// TargetGroupCount nor ToAdvertiseGroupCount is being modified concurrently.
// (Holding this mutex is not required for data protection.)
Mut sync.RWMutex
|
||||
|
||||
// TargetGroupCount represents the target number of custody groups we should custody
|
||||
// regarding the validators we are tracking.
|
||||
TargetGroupCount targetCustodyGroupCount
|
||||
|
||||
// ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
|
||||
ToAdvertiseGroupCount toAdverstiseCustodyGroupCount
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
nodeInfoCacheSize = 200
|
||||
nodeInfoCachKeySize = 32 + 8
|
||||
)
|
||||
|
||||
var (
|
||||
nodeInfoCacheMut sync.Mutex
|
||||
nodeInfoCache *lru.Cache
|
||||
)
|
||||
|
||||
// Info returns the peerDAS information for a given nodeID and custodyGroupCount.
|
||||
// It returns a boolean indicating if the peer info was already in the cache and an error if any.
|
||||
func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
|
||||
// Create a new cache if it doesn't exist.
|
||||
if err := createInfoCacheIfNeeded(); err != nil {
|
||||
return nil, false, errors.Wrap(err, "create cache if needed")
|
||||
}
|
||||
|
||||
// Compute the key.
|
||||
key := computeInfoCacheKey(nodeID, custodyGroupCount)
|
||||
|
||||
// If the value is already in the cache, return it.
|
||||
if value, ok := nodeInfoCache.Get(key); ok {
|
||||
peerInfo, ok := value.(*info)
|
||||
if !ok {
|
||||
return nil, false, errors.New("failed to cast peer info (should never happen)")
|
||||
}
|
||||
|
||||
return peerInfo, true, nil
|
||||
}
|
||||
|
||||
// The peer info is not in the cache, compute it.
|
||||
// Compute custody groups.
|
||||
custodyGroups, err := CustodyGroups(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "custody groups")
|
||||
}
|
||||
|
||||
// Compute custody columns.
|
||||
custodyColumns, err := CustodyColumns(custodyGroups)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "custody columns")
|
||||
}
|
||||
|
||||
// Compute data columns subnets.
|
||||
dataColumnsSubnets := DataColumnSubnets(custodyColumns)
|
||||
|
||||
result := &info{
|
||||
CustodyGroups: custodyGroups,
|
||||
CustodyColumns: custodyColumns,
|
||||
DataColumnsSubnets: dataColumnsSubnets,
|
||||
}
|
||||
|
||||
// Add the result to the cache.
|
||||
nodeInfoCache.Add(key, result)
|
||||
|
||||
return result, false, nil
|
||||
}
|
||||
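A caller-side sketch (assumed inputs: the peer's `nodeID` and the `cgc` value read from its ENR):

	// Sketch: resolve and cache a peer's custody view.
	peerInfo, cached, err := peerdas.Info(nodeID, cgc)
	if err != nil {
		return err
	}
	_ = cached                      // true when the entry came from the LRU cache
	_ = peerInfo.DataColumnsSubnets // subnets this peer is expected to subscribe to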
|
||||
// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
|
||||
func createInfoCacheIfNeeded() error {
|
||||
nodeInfoCacheMut.Lock()
|
||||
defer nodeInfoCacheMut.Unlock()
|
||||
|
||||
if nodeInfoCache == nil {
|
||||
c, err := lru.New(nodeInfoCacheSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "lru new")
|
||||
}
|
||||
|
||||
nodeInfoCache = c
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// computeInfoCacheKey returns a unique key for a node and its custodyGroupCount.
|
||||
func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCachKeySize]byte {
|
||||
var key [nodeInfoCachKeySize]byte
|
||||
|
||||
copy(key[:32], nodeID[:])
|
||||
binary.BigEndian.PutUint64(key[32:], custodyGroupCount)
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// SetValidatorsCustodyRequirement sets the validators custody requirement.
|
||||
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
|
||||
tcgc.mut.Lock()
|
||||
defer tcgc.mut.Unlock()
|
||||
|
||||
tcgc.validatorsCustodyRequirement = value
|
||||
}
|
||||
|
||||
// Get returns the target number of custody groups the node should participate in for custody.
|
||||
func (tcgc *targetCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
tcgc.mut.RLock()
|
||||
defer tcgc.mut.RUnlock()
|
||||
|
||||
// If no validators are tracked, return the default custody requirement.
|
||||
if tcgc.validatorsCustodyRequirement == 0 {
|
||||
return params.BeaconConfig().CustodyRequirement
|
||||
}
|
||||
|
||||
// Return the validators custody requirement.
|
||||
return tcgc.validatorsCustodyRequirement
|
||||
}
|
||||
|
||||
// Set sets the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
|
||||
tacgc.mut.Lock()
|
||||
defer tacgc.mut.Unlock()
|
||||
|
||||
tacgc.value = value
|
||||
}
|
||||
|
||||
// Get returns the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
tacgc.mut.RLock()
|
||||
defer tacgc.mut.RUnlock()
|
||||
|
||||
return max(tacgc.value, custodyRequirement)
|
||||
}
|
||||
|
||||
// ActualGroupCount returns the actual custody group count.
|
||||
func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
|
||||
return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
|
||||
}
|
||||
133
beacon-chain/core/peerdas/info_test.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestInfo(t *testing.T) {
|
||||
nodeID := enode.ID{}
|
||||
custodyGroupCount := uint64(7)
|
||||
|
||||
expectedCustodyGroup := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
|
||||
expectedCustodyColumns := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
|
||||
expectedDataColumnsSubnets := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
|
||||
|
||||
for _, cached := range []bool{false, true} {
|
||||
actual, ok, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cached, ok)
|
||||
require.DeepEqual(t, expectedCustodyGroup, actual.CustodyGroups)
|
||||
require.DeepEqual(t, expectedCustodyColumns, actual.CustodyColumns)
|
||||
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllSubnets bool
|
||||
validatorsCustodyRequirement uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all subnets",
|
||||
subscribeToAllSubnets: true,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "no validators attached",
|
||||
subscribeToAllSubnets: false,
|
||||
validatorsCustodyRequirement: 0,
|
||||
expected: 4,
|
||||
},
|
||||
{
|
||||
name: "some validators attached",
|
||||
subscribeToAllSubnets: false,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllSubnets {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the validators custody requirement.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Get the target custody group count.
|
||||
actual := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToAdvertiseCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllSubnets bool
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all subnets",
|
||||
subscribeToAllSubnets: true,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "higher than custody requirement",
|
||||
subscribeToAllSubnets: false,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "lower than custody requirement",
|
||||
subscribeToAllSubnets: false,
|
||||
toAdvertiseCustodyGroupCount: 1,
|
||||
expected: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllSubnets {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
// Create a custody info.
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Get the to advertise custody group count.
|
||||
actual := custodyInfo.ToAdvertiseGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
14
beacon-chain/core/peerdas/metrics.go
Normal file
@@ -0,0 +1,14 @@
package peerdas

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
	prometheus.HistogramOpts{
		Name:    "beacon_data_column_sidecar_computation_milliseconds",
		Help:    "Captures the time taken to compute data column sidecars from blobs.",
		Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
	},
)
126
beacon-chain/core/peerdas/p2p_interface.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodyGroupCountEnrKey = "cgc"
|
||||
)
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errRecordNil = errors.New("record is nil")
|
||||
errCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
|
||||
errIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#the-discovery-domain-discv5
|
||||
type Cgc uint64
|
||||
|
||||
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
|
||||
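A hedged discovery-side sketch of how the entry might be advertised; `localNode` (*enode.LocalNode) and `custodyInfo` are assumed inputs:

	// Sketch: advertise our custody group count in the local ENR.
	localNode.Set(peerdas.Cgc(custodyInfo.ToAdvertiseGroupCount.Get()))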
|
||||
// VerifyDataColumnsSidecarKZGProofs verifies the provided KZG Proofs of data columns.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, error) {
|
||||
// Retrieve the number of columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Compute the total count.
|
||||
count := 0
|
||||
for _, sidecar := range sidecars {
|
||||
count += len(sidecar.Column)
|
||||
}
|
||||
|
||||
commitments := make([]kzg.Bytes48, 0, count)
|
||||
indices := make([]uint64, 0, count)
|
||||
cells := make([]kzg.Cell, 0, count)
|
||||
proofs := make([]kzg.Bytes48, 0, count)
|
||||
|
||||
for _, sidecar := range sidecars {
|
||||
// Check if the columns index is not too large
|
||||
if sidecar.Index >= numberOfColumns {
|
||||
return false, errIndexTooLarge
|
||||
}
|
||||
|
||||
// Check if the KZG commitments size and data column size match.
|
||||
if len(sidecar.Column) != len(sidecar.KzgCommitments) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
// Check if the KZG proofs size and data column size match.
|
||||
if len(sidecar.Column) != len(sidecar.KzgProofs) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
for i := range sidecar.Column {
|
||||
commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
|
||||
indices = append(indices, sidecar.Index)
|
||||
cells = append(cells, kzg.Cell(sidecar.Column[i]))
|
||||
proofs = append(proofs, kzg.Bytes48(sidecar.KzgProofs[i]))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify all the batch at once.
|
||||
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify cell KZG proof batch")
|
||||
}
|
||||
|
||||
return verified, nil
|
||||
}
|
||||
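A minimal verification sketch; `roColumns` ([]blocks.RODataColumn, e.g. received from gossip) is an assumed input:

	// Sketch: batch-verify all received column sidecars in a single KZG batch check.
	verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs(roColumns)
	if err != nil {
		return errors.Wrap(err, "verify data columns sidecar KZG proofs")
	}
	if !verified {
		return errors.New("invalid data column sidecar KZG proofs")
	}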
|
||||
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
return columnIndex % dataColumnSidecarSubnetCount
|
||||
}
|
||||
|
||||
// DataColumnSubnets computes the subnets for the data columns.
|
||||
func DataColumnSubnets(dataColumns map[uint64]bool) map[uint64]bool {
|
||||
subnets := make(map[uint64]bool, len(dataColumns))
|
||||
|
||||
for column := range dataColumns {
|
||||
subnet := ComputeSubnetForDataColumnSidecar(column)
|
||||
subnets[subnet] = true
|
||||
}
|
||||
|
||||
return subnets
|
||||
}
|
||||
|
||||
// ComputeCustodyGroupForColumn computes the custody group for a given column.
|
||||
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
|
||||
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfColumns := beaconConfig.NumberOfColumns
|
||||
|
||||
if columnIndex >= numberOfColumns {
|
||||
return 0, errIndexTooLarge
|
||||
}
|
||||
|
||||
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
|
||||
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
|
||||
|
||||
return columnIndex / columnsPerGroup, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCountFromRecord extracts the custody group count from an ENR record.
|
||||
func CustodyGroupCountFromRecord(record *enr.Record) (uint64, error) {
|
||||
if record == nil {
|
||||
return 0, errRecordNil
|
||||
}
|
||||
|
||||
// Load the `cgc` entry from the record.
var cgc Cgc
if err := record.Load(&cgc); err != nil {
return 0, errCannotLoadCustodyGroupCount
}

return uint64(cgc), nil
}
|
||||
57
beacon-chain/core/peerdas/p2p_interface_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
var (
|
||||
comms [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := int64(0); i < 6; i++ {
|
||||
blob := getRandBlob(i)
|
||||
commitment, _, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
comms = append(comms, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = comms
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
sCars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, sidecar := range sCars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromRecord(t *testing.T) {
|
||||
const expected uint64 = 7
|
||||
|
||||
// Create an Ethereum record.
|
||||
record := &enr.Record{}
|
||||
record.Set(peerdas.Cgc(expected))
|
||||
|
||||
actual, err := peerdas.CustodyGroupCountFromRecord(record)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
56
beacon-chain/core/peerdas/peer_sampling.go
Normal file
@@ -0,0 +1,56 @@
package peerdas

import (
	"math/big"

	"github.com/prysmaticlabs/prysm/v5/config/params"
)

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
	// Retrieve the columns count.
	columnsCount := params.BeaconConfig().NumberOfColumns

	// If half of the columns are missing, we are able to reconstruct the data.
	// If half of the columns + 1 are missing, we are not able to reconstruct the data.
	// This is the smallest worst case.
	worstCaseMissing := columnsCount/2 + 1

	// Compute the false positive threshold.
	falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

	var sampleCount uint64

	// Finally, compute the extended sample count.
	for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
		if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
			break
		}
	}

	return sampleCount
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
	denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
	denominator := new(big.Float).SetInt(denominatorInt)

	rBig := big.NewFloat(0)

	for i := uint64(0); i < k+1; i++ {
		a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
		b := new(big.Int).Binomial(int64(M-n), int64(N-i))
		numeratorInt := new(big.Int).Mul(a, b)
		numerator := new(big.Float).SetInt(numeratorInt)
		item := new(big.Float).Quo(numerator, denominator)
		rBig.Add(rBig, item)
	}

	r, _ := rBig.Float64()

	return r
}
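A usage example grounded in the test table that follows:

	// With 16 samples per slot, tolerating up to 2 failed samples requires querying 24 columns
	// to keep the same false-positive probability as the zero-failure case.
	extended := peerdas.ExtendedSampleCount(16, 2)
	_ = extended // 24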
60
beacon-chain/core/peerdas/peer_sampling_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestExtendedSampleCount(t *testing.T) {
|
||||
const samplesPerSlot = 16
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
allowedMissings uint64
|
||||
extendedSampleCount uint64
|
||||
}{
|
||||
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
|
||||
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
|
||||
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
|
||||
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
|
||||
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
|
||||
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
|
||||
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
|
||||
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
|
||||
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
|
||||
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
|
||||
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
|
||||
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
|
||||
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
|
||||
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
|
||||
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
|
||||
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
|
||||
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
|
||||
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
|
||||
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
|
||||
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
|
||||
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
|
||||
require.Equal(t, tc.extendedSampleCount, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHypergeomCDF(t *testing.T) {
|
||||
// Hypergeometric CDF, see https://en.wikipedia.org/wiki/Hypergeometric_distribution
// Parameters used below: population size 128, successes in population 65,
// sample size 16, successes in sample 5.
|
||||
const (
|
||||
expected = 0.0796665913283742
|
||||
margin = 0.000001
|
||||
)
|
||||
|
||||
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
|
||||
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
|
||||
}
|
||||
147
beacon-chain/core/peerdas/reconstruction.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// CanSelfReconstruct returns true if the node can self-reconstruct all the data columns from its custody group count.
|
||||
func CanSelfReconstruct(custodyGroupCount uint64) bool {
|
||||
total := params.BeaconConfig().NumberOfCustodyGroups
|
||||
// If total is odd, then we need total/2 + 1 custody groups to reconstruct.
// If total is even, then we need total/2 custody groups to reconstruct.
custodyGroupsNeeded := total/2 + total%2
|
||||
return custodyGroupCount >= custodyGroupsNeeded
|
||||
}
|
||||
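Worked values for the threshold above, mirrored from the tests below:

	// NumberOfCustodyGroups = 64 -> 32 custody groups are enough (64/2 + 0)
	// NumberOfCustodyGroups = 65 -> 33 custody groups are needed (65/2 + 1, integer division)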
|
||||
// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
|
||||
func RecoverCellsAndProofs(
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
) ([]kzg.CellsAndProofs, error) {
|
||||
var wg errgroup.Group
|
||||
|
||||
dataColumnSideCarsCount := len(dataColumnSideCars)
|
||||
|
||||
if dataColumnSideCarsCount == 0 {
|
||||
return nil, errors.New("no data column sidecars")
|
||||
}
|
||||
|
||||
// Check if all columns have the same length.
|
||||
blobCount := len(dataColumnSideCars[0].Column)
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
length := len(sidecar.Column)
|
||||
|
||||
if length != blobCount {
|
||||
return nil, errors.New("columns do not have the same length")
|
||||
}
|
||||
}
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
|
||||
for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
|
||||
bIndex := blobIndex
|
||||
wg.Go(func() error {
|
||||
cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
|
||||
cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)
|
||||
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
// Build the cell indices.
|
||||
cellsIndices = append(cellsIndices, sidecar.Index)
|
||||
|
||||
// Get the cell.
|
||||
column := sidecar.Column
|
||||
cell := column[bIndex]
|
||||
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
}
|
||||
|
||||
// Recover the cells and proofs for the corresponding blob
|
||||
cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
|
||||
}
|
||||
|
||||
recoveredCellsAndProofs[bIndex] = cellsAndProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return recoveredCellsAndProofs, nil
|
||||
}
|
||||
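A hedged reconstruction sketch: once a node holds at least half of the columns for a block, it can regenerate the full set. `stored` (the locally available sidecars for the block) and `blockRoot` are assumed inputs:

	// Sketch: recover every cell/proof, then rebuild all column sidecars.
	cellsAndProofs, err := peerdas.RecoverCellsAndProofs(stored, blockRoot)
	if err != nil {
		return err
	}
	first := stored[0]
	allSidecars, err := peerdas.DataColumnSidecarsForReconstruct(
		first.KzgCommitments,
		first.SignedBlockHeader,
		first.KzgCommitmentsInclusionProof,
		cellsAndProofs,
	)
	if err != nil {
		return err
	}
	_ = allSidecars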
|
||||
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
|
||||
// It is scheduled for deletion.
|
||||
func DataColumnSidecarsForReconstruct(
|
||||
blobKzgCommitments [][]byte,
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
cellsAndProofs []kzg.CellsAndProofs,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Each CellsAndProofs corresponds to a Blob
|
||||
// So we can get the BlobCount by checking the length of CellsAndProofs
|
||||
blobsCount := len(cellsAndProofs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := range fieldparams.NumberOfColumns {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := range blobsCount {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
if len(cellsForRow) != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("cells don't have the expected size: expected %d - actual %d", fieldparams.NumberOfColumns, len(cellsForRow))
|
||||
}
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
if len(proofsForRow) != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("proofs don't have the expected size: expected %d - actual %d", fieldparams.NumberOfColumns, len(proofsForRow))
|
||||
}
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
Index: uint64(columnIndex),
|
||||
Column: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProofs: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
209
beacon-chain/core/peerdas/reconstruction_test.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestCanSelfReconstruct(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
totalNumberOfCustodyGroups uint64
|
||||
custodyNumberOfGroups uint64
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31",
|
||||
totalNumberOfCustodyGroups: 64,
|
||||
custodyNumberOfGroups: 31,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32",
|
||||
totalNumberOfCustodyGroups: 64,
|
||||
custodyNumberOfGroups: 32,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32",
|
||||
totalNumberOfCustodyGroups: 65,
|
||||
custodyNumberOfGroups: 32,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=33",
totalNumberOfCustodyGroups: 65,
|
||||
custodyNumberOfGroups: 33,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set the total number of columns.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Check if reconstruction is possible.
|
||||
actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
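The cases above are consistent with a simple 50% threshold: a node can self-reconstruct once it custodies at least half of all custody groups, rounded up. A sketch of that check (an assumption inferred from these test cases, not the actual implementation):

func canSelfReconstructSketch(custodyGroupCount, totalCustodyGroups uint64) bool {
	// ceil(totalCustodyGroups / 2) without floating point.
	return custodyGroupCount >= (totalCustodyGroups+1)/2
}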
|
||||
|
||||
func TestReconstructionRoundTrip(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
const blobCount = 5
|
||||
|
||||
var blockRoot [fieldparams.RootLength]byte
|
||||
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Generate random blobs and their corresponding commitments.
|
||||
var (
|
||||
blobsKzgCommitments [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := range blobCount {
|
||||
blob := getRandBlob(int64(i))
|
||||
commitment, _, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobsKzgCommitments = append(blobsKzgCommitments, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
// Generate a signed beacon block.
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobsKzgCommitments
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Build data column sidecars from the signed block and blobs.
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create verified RO data columns.
|
||||
verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, blobCount)
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
|
||||
}
|
||||
|
||||
verifiedRoDataColumn := verifiedRoDataColumns[0]
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
var noDataColumns []*ethpb.DataColumnSidecar
|
||||
dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
|
||||
{Column: [][]byte{{}, {}}},
|
||||
{Column: [][]byte{{}}},
|
||||
}
|
||||
notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
|
||||
originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
|
||||
extendedDataColumns := dataColumnSidecars[numberOfColumns/2:]
|
||||
evenDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
oddDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
allDataColumns := dataColumnSidecars
|
||||
|
||||
for i, dataColumn := range dataColumnSidecars {
|
||||
if i%2 == 0 {
|
||||
evenDataColumns = append(evenDataColumns, dataColumn)
|
||||
} else {
|
||||
oddDataColumns = append(oddDataColumns, dataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
dataColumnsSidecar []*ethpb.DataColumnSidecar
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "No data columns sidecars",
|
||||
dataColumnsSidecar: noDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "Data columns sidecar with different lengths",
|
||||
dataColumnsSidecar: dataColumnsWithDifferentLengths,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "All columns are present (no actual need to reconstruct)",
|
||||
dataColumnsSidecar: allDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only original columns are present",
|
||||
dataColumnsSidecar: originalDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only extended columns are present",
|
||||
dataColumnsSidecar: extendedDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only even columns are present",
|
||||
dataColumnsSidecar: evenDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only odd columns are present",
|
||||
dataColumnsSidecar: oddDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Not enough columns to reconstruct",
|
||||
dataColumnsSidecar: notEnoughDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Recover cells and proofs from the available data column sidecars.
|
||||
cellsAndProofs, err := peerdas.RecoverCellsAndProofs(tc.dataColumnsSidecar, blockRoot)
|
||||
isError := (err != nil)
|
||||
require.Equal(t, tc.isError, isError)
|
||||
|
||||
if isError {
|
||||
return
|
||||
}
|
||||
|
||||
// Recover all data column sidecars from the cells and proofs.
|
||||
reconstructedDataColumnsSideCars, err := peerdas.DataColumnSidecarsForReconstruct(
|
||||
blobsKzgCommitments,
|
||||
signedBeaconBlockHeader,
|
||||
verifiedRoDataColumn.KzgCommitmentsInclusionProof,
|
||||
cellsAndProofs,
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := dataColumnSidecars
|
||||
actual := reconstructedDataColumnsSideCars
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/peerdas/util.go (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
|
||||
// ConstructDataColumnSidecars constructs data column sidecars from a block, blobs and their cell proofs.
|
||||
// This is a convenience method, since blobs and their cell proofs are the commonly available inputs.
|
||||
func ConstructDataColumnSidecars(block interfaces.SignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Check if the block is at least a Fulu block.
|
||||
if block.Version() < version.Fulu {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
cellsAndProofs, err := constructCellsAndProofs(blobs, cellProofs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return DataColumnSidecars(block, cellsAndProofs)
|
||||
}
|
||||
|
||||
func constructCellsAndProofs(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
|
||||
numColumns := int(params.BeaconConfig().NumberOfColumns)
|
||||
if len(blobs)*numColumns != len(cellProofs) {
|
||||
return nil, fmt.Errorf("number of blobs and cell proofs do not match: %d * %d != %d", len(blobs), numColumns, len(cellProofs))
|
||||
}
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))
|
||||
|
||||
for i, blob := range blobs {
|
||||
var b kzg.Blob
|
||||
copy(b[:], blob)
|
||||
cells, err := kzg.ComputeCells(&b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := i * numColumns; idx < (i+1)*numColumns; idx++ {
|
||||
proofs = append(proofs, kzg.Proof(cellProofs[idx]))
|
||||
}
|
||||
|
||||
cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{
|
||||
Cells: cells,
|
||||
Proofs: proofs,
|
||||
})
|
||||
}
|
||||
|
||||
return cellsAndProofs, nil
|
||||
}
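The flattened cellProofs slice is expected to be blob-major: the proofs for blob i occupy indices i*numColumns through (i+1)*numColumns-1, matching the inner loop above. A small, illustrative index helper (not part of the diff) makes that layout explicit:

// proofIndex returns the position of the proof for (blobIndex, columnIndex) in the
// flattened cellProofs slice, assuming the blob-major layout used by constructCellsAndProofs.
func proofIndex(blobIndex, columnIndex, numColumns int) int {
	return blobIndex*numColumns + columnIndex
}

// Example: with 128 columns, the proof for blob 2, column 5 sits at index 2*128+5 = 261.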
|
||||
beacon-chain/core/peerdas/utils_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func getRandBlob(seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := getRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func getRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
|
||||
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
|
||||
}
|
||||
|
||||
// PeerDASIsActive checks whether peerDAS is active at the provided slot.
|
||||
func PeerDASIsActive(slot primitives.Slot) bool {
|
||||
return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
|
||||
}
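A hypothetical call site for this helper, gating the sidecar type on the slot's fork (the surrounding names are placeholders, not code from this diff):

// signedBlock is assumed to be any interfaces.SignedBeaconBlock.
if PeerDASIsActive(signedBlock.Block().Slot()) {
	// Fulu and later: data availability is sampled via data column sidecars.
} else {
	// Pre-Fulu: data availability still relies on blob sidecars.
}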
|
||||
|
||||
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
|
||||
|
||||
@@ -4,21 +4,26 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"availability_columns.go",
|
||||
"cache.go",
|
||||
"data_column_cache.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -27,13 +32,18 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_columns_test.go",
|
||||
"availability_test.go",
|
||||
"cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -41,6 +51,7 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -53,10 +53,16 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
|
||||
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
|
||||
if len(sc) == 0 {
|
||||
func (s *LazilyPersistentStore) Persist(current primitives.Slot, scg ...blocks.ROSidecar) error {
|
||||
if len(scg) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sc, err := blocks.BlobSidecarsFromSidecars(scg)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
if len(sc) > 1 {
|
||||
first := sc[0].BlockRoot()
|
||||
for i := 1; i < len(sc); i++ {
|
||||
|
||||
beacon-chain/das/availability_columns.go (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
|
||||
// This implementation holds any data columns passed to Persist until IsDataAvailable is called for their
// block, at which point they undergo full verification and are saved to disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.DataColumnStorage
|
||||
nodeID enode.ID
|
||||
cache *dataColumnCache
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
nodeID: nodeID,
|
||||
cache: newDataColumnCache(),
|
||||
custodyInfo: custodyInfo,
|
||||
}
|
||||
}
|
||||
|
||||
// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first sidecar.
|
||||
firstSidecar := dataColumnSidecars[0]
|
||||
|
||||
if len(sidecars) > 1 {
|
||||
firstRoot := firstSidecar.BlockRoot()
|
||||
for _, sidecar := range dataColumnSidecars[1:] {
|
||||
if sidecar.BlockRoot() != firstRoot {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
|
||||
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := dataColumnCacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
for i := range sidecars {
|
||||
if err := entry.stash(&dataColumnSidecars[i]); err != nil {
|
||||
return errors.Wrap(err, "stash DataColumnSidecar")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(
|
||||
ctx context.Context,
|
||||
currentSlot primitives.Slot,
|
||||
block blocks.ROBlock,
|
||||
) error {
|
||||
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
|
||||
}
|
||||
|
||||
// Return early for blocks that do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the root of the block.
|
||||
blockRoot := block.Root()
|
||||
|
||||
// Build the cache key for the block.
|
||||
key := dataColumnCacheKey{slot: block.Block().Slot(), root: blockRoot}
|
||||
|
||||
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
// Delete the cache entry for the block at the end.
|
||||
defer s.cache.delete(key)
|
||||
|
||||
// Set the disk summary for the block in the cache entry.
|
||||
entry.setDiskSummary(s.store.Summary(blockRoot))
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "incomplete DataColumnSidecar batch")
|
||||
}
|
||||
|
||||
// Create verified RO data columns from RO data columns.
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumns))
|
||||
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
// Ensure that column sidecars are written to disk.
|
||||
if err := s.store.Save(verifiedRODataColumns); err != nil {
|
||||
return errors.Wrapf(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
// All ColumnSidecars are persisted - data availability check succeeds.
|
||||
return nil
|
||||
}
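Seen from the caller's side, the two methods above are used in sequence during batch sync: Persist stashes whatever columns arrived from the network, and IsDataAvailable later verifies and flushes the custodied subset to disk. A sketch of that sequence, with the surrounding variables assumed to exist:

store := NewLazilyPersistentStoreColumn(columnStorage, nodeID, custodyInfo)

// Stash the sidecars received for a block; nothing is written to disk yet.
if err := store.Persist(currentSlot, sidecars...); err != nil {
	return err
}

// At block import time, verify that every custodied column is present and persist it.
if err := store.IsDataAvailable(ctx, currentSlot, roBlock); err != nil {
	return err
}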
|
||||
|
||||
// fullCommitmentsToCheck returns the commitments to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
|
||||
// Return early for blocks that are pre-Fulu.
|
||||
if block.Version() < version.Fulu {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Compute the block epoch.
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
// Compute the current epoch.
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the KZG commitments for the block.
|
||||
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Return early if there are no commitments in the block.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the groups count.
|
||||
custodyGroupCount := s.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve peer info.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Create a safe commitments array for the custody columns.
|
||||
commitmentsArray := &safeCommitmentsArray{}
|
||||
for column := range peerInfo.CustodyColumns {
|
||||
commitmentsArray[column] = kzgCommitments
|
||||
}
|
||||
|
||||
return commitmentsArray, nil
|
||||
}
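The resulting safeCommitmentsArray is sparse: only the indices of custodied columns are populated, and each of those entries points at the block's full commitment list. An illustrative example, assuming the node custodies columns 3 and 70 of a block whose commitments are in kzgCommitments:

var arr safeCommitmentsArray
for _, column := range []uint64{3, 70} {
	arr[column] = kzgCommitments // the block's complete commitment list
}
// arr.count() == 2
// arr.nonEmptyIndices() == map[uint64]bool{3: true, 70: true}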
|
||||
beacon-chain/das/availability_columns_test.go (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, dataColumnParamsByBlockRoot verification.DataColumnsParamsByRoot) ([]blocks.ROSidecar, []blocks.RODataColumn) {
|
||||
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
|
||||
}
|
||||
|
||||
return roSidecars, roDataColumns
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("mixed roots", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
|
||||
{1}: {{ColumnIndex: 1}},
|
||||
{2}: {{ColumnIndex: 2}},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
|
||||
{1}: {{ColumnIndex: 1}},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
|
||||
{}: {{ColumnIndex: 1}, {ColumnIndex: 5}},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
|
||||
key := dataColumnCacheKey{slot: 0, root: [32]byte{}}
|
||||
entry := lazilyPersistentStoreColumns.cache.entries[key]
|
||||
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), entry.diskSummary.Count())
|
||||
|
||||
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
|
||||
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[int]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
t.Run("No commitments", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Some sidecars are not available", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("All sidecars are available", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
indices := [...]uint64{1, 17, 87, 102}
|
||||
dataColumnsParams := make([]verification.DataColumnParams, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := verification.DataColumnParams{
|
||||
ColumnIndex: index,
|
||||
KzgCommitments: commitments,
|
||||
}
|
||||
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
|
||||
}
|
||||
|
||||
dataColumnsParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: dataColumnsParams}
|
||||
_, verifiedRoDataColumns := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParamsByBlockRoot)
|
||||
|
||||
key := dataColumnCacheKey{root: root}
|
||||
entry := lazilyPersistentStoreColumns.cache.ensure(key)
|
||||
defer lazilyPersistentStoreColumns.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := dataColumnStorage.Get(root, indices[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary(root)
|
||||
require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = 100
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, commitments := range commitmentsArray {
|
||||
require.DeepEqual(t, tc.commitments, commitments)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -116,9 +116,11 @@ func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := filesystem.NewEphemeralBlobStorage(t)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: scs}
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
@@ -141,12 +143,14 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := filesystem.NewEphemeralBlobStorage(t)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
|
||||
scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
@@ -155,7 +159,10 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
|
||||
// stashes as expected
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
@@ -163,10 +170,13 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
|
||||
|
||||
// ignores index out of bound
|
||||
scs[0].Index = 6
|
||||
require.ErrorIs(t, as.Persist(1, scs[0]), errIndexOutOfBounds)
|
||||
blobSidecars[0].Index = 6
|
||||
require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
|
||||
|
||||
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
|
||||
more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
|
||||
|
||||
_, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
// ignores sidecars before the retention period
|
||||
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -10,12 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
|
||||
errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
|
||||
errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
|
||||
errMissingSidecar = errors.New("no sidecar in cache for block commitment")
|
||||
)
|
||||
var errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
|
||||
|
||||
// cacheKey includes the slot so that we can easily iterate through the cache and compare
|
||||
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
|
||||
|
||||
beacon-chain/das/data_column_cache.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
|
||||
errColumnIndexTooHigh = errors.New("column index too high")
|
||||
errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
|
||||
errMissingSidecar = errors.New("no sidecar in cache for block commitment")
|
||||
)
|
||||
|
||||
// dataColumnCacheKey includes the slot so that we can easily iterate through the cache and compare
|
||||
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
|
||||
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
|
||||
type dataColumnCacheKey struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
}
|
||||
|
||||
type dataColumnCache struct {
|
||||
entries map[dataColumnCacheKey]*dataColumnCacheEntry
|
||||
}
|
||||
|
||||
func newDataColumnCache() *dataColumnCache {
|
||||
return &dataColumnCache{entries: make(map[dataColumnCacheKey]*dataColumnCacheEntry)}
|
||||
}
|
||||
|
||||
// ensure returns the entry for the given key, creating it if it isn't already present.
|
||||
func (c *dataColumnCache) ensure(key dataColumnCacheKey) *dataColumnCacheEntry {
|
||||
entry, ok := c.entries[key]
|
||||
if !ok {
|
||||
entry = &dataColumnCacheEntry{}
|
||||
c.entries[key] = entry
|
||||
}
|
||||
|
||||
return entry
|
||||
}
|
||||
|
||||
// delete removes the cache entry from the cache.
|
||||
func (c *dataColumnCache) delete(key dataColumnCacheKey) {
|
||||
delete(c.entries, key)
|
||||
}
|
||||
|
||||
// dataColumnCacheEntry holds a fixed-length cache of DataColumnSidecars.
|
||||
type dataColumnCacheEntry struct {
|
||||
scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.DataColumnStorageSummary
|
||||
}
|
||||
|
||||
func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
|
||||
e.diskSummary = sum
|
||||
}
|
||||
|
||||
// stash adds an item to the in-memory cache of DataColumnSidecars.
|
||||
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
|
||||
// stash will return an error if the given data column is already in the cache, or if the Index is out of bounds.
|
||||
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
|
||||
if sc.Index >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
|
||||
}
|
||||
|
||||
if e.scs[sc.Index] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
|
||||
}
|
||||
|
||||
e.scs[sc.Index] = sc
|
||||
|
||||
return nil
|
||||
}
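In practice the first sidecar stored for a given column index wins, and repeats are rejected. A short behavior sketch (sc is assumed to be a valid *blocks.RODataColumn with an in-range index):

var entry dataColumnCacheEntry

err := entry.stash(sc) // nil: the sidecar is cached at entry.scs[sc.Index]
err = entry.stash(sc)  // ErrDuplicateSidecar: that index is already occupied
_ = err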
|
||||
|
||||
func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
nonEmptyIndices := commitmentsArray.nonEmptyIndices()
|
||||
if e.diskSummary.AllAvailable(nonEmptyIndices) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
commitmentsCount := commitmentsArray.count()
|
||||
sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
|
||||
|
||||
for i := range nonEmptyIndices {
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.scs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, *e.scs[i])
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// safeCommitmentsArray is a fixed size array of commitments.
|
||||
// This is helpful for avoiding gratuitous bounds checks.
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
// count returns the number of commitments in the array.
|
||||
func (s *safeCommitmentsArray) count() int {
|
||||
count := 0
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// nonEmptyIndices returns a map of indices that are non-nil in the array.
|
||||
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
|
||||
columns := make(map[uint64]bool)
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
columns[uint64(i)] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
beacon-chain/das/data_column_cache_test.go (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestEnsureDeleteSetDiskSummary(t *testing.T) {
|
||||
c := newDataColumnCache()
|
||||
key := dataColumnCacheKey{}
|
||||
entry := c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
|
||||
entry.setDiskSummary(diskSummary)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)
|
||||
|
||||
c.delete(key)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
}
|
||||
|
||||
func TestStash(t *testing.T) {
|
||||
t.Run("Index too high", func(t *testing.T) {
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 10_000}}}
|
||||
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal and already existing", func(t *testing.T) {
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 1}}}
|
||||
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
|
||||
|
||||
err = entry.stash(&roDataColumns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFilterDataColumns(t *testing.T) {
|
||||
t.Run("All available", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
|
||||
t.Run("Some scs missing", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Commitments not equal", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 1}}}
|
||||
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[1] = &roDataColumns[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 3, KzgCommitments: [][]byte{[]byte{3}}}}}
|
||||
expected, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[3] = &expected[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
require.Equal(t, 2, s.count())
|
||||
}
|
||||
|
||||
func TestNonEmptyIndices(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
|
||||
actual := s.nonEmptyIndices()
|
||||
require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
|
||||
}
|
||||
@@ -15,5 +15,5 @@ import (
|
||||
// durably persisted before returning a non-error value.
|
||||
type AvailabilityStore interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
Persist(current primitives.Slot, sc ...blocks.ROBlob) error
|
||||
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package das
|
||||
import (
|
||||
"context"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
@@ -24,9 +25,13 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
|
||||
}
|
||||
|
||||
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
|
||||
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
if m.PersistBlobsCallback != nil {
|
||||
return m.PersistBlobsCallback(current, sc...)
|
||||
return m.PersistBlobsCallback(current, blobSidecars...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -5,6 +5,9 @@ go_library(
|
||||
srcs = [
|
||||
"blob.go",
|
||||
"cache.go",
|
||||
"data_column.go",
|
||||
"data_column_cache.go",
|
||||
"doc.go",
|
||||
"iteration.go",
|
||||
"layout.go",
|
||||
"layout_by_epoch.go",
|
||||
@@ -17,6 +20,8 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -25,6 +30,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/logging:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
@@ -41,6 +47,8 @@ go_test(
|
||||
srcs = [
|
||||
"blob_test.go",
|
||||
"cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
"data_column_test.go",
|
||||
"iteration_test.go",
|
||||
"layout_test.go",
|
||||
"migration_test.go",
|
||||
@@ -50,6 +58,7 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
beacon-chain/db/filesystem/data_column.go (new file, 999 lines)
@@ -0,0 +1,999 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/async"
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/io/file"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
const (
|
||||
version = 0x01
|
||||
versionOffset = 0 // bytes
|
||||
versionSize = 1 // bytes
|
||||
maxSszEncodedDataColumnSidecarSize = 536_870_912 // 2**(4*8) / 8 bytes
|
||||
encodedSszEncodedDataColumnSidecarSizeOffset = versionOffset + versionSize
|
||||
encodedSszEncodedDataColumnSidecarSizeSize = 4 // bytes (size of the encoded size of the SSZ encoded data column sidecar)
|
||||
mandatoryNumberOfColumns = 128 // 2**7
|
||||
indicesOffset = encodedSszEncodedDataColumnSidecarSizeOffset + encodedSszEncodedDataColumnSidecarSizeSize
|
||||
indicesSize = mandatoryNumberOfColumns
|
||||
nonZeroOffset = mandatoryNumberOfColumns
|
||||
headerSize = versionSize + encodedSszEncodedDataColumnSidecarSizeSize + mandatoryNumberOfColumns
|
||||
dataColumnsFileExtension = "sszs"
|
||||
prunePeriod = 1 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
errWrongNumberOfColumns = errors.New("wrong number of data columns")
|
||||
errDataColumnIndexTooLarge = errors.New("data column index too large")
|
||||
errWrongBytesWritten = errors.New("wrong number of bytes written")
|
||||
errWrongVersion = errors.New("wrong version")
|
||||
errWrongBytesHeaderRead = errors.New("wrong number of bytes header read")
|
||||
errTooManyDataColumns = errors.New("too many data columns")
|
||||
errWrongSszEncodedDataColumnSidecarSize = errors.New("wrong SSZ encoded data column sidecar size")
|
||||
errDataColumnSidecarsFromDifferentSlots = errors.New("data column sidecars from different slots")
|
||||
)
|
||||
|
||||
type (
|
||||
// DataColumnStorage is the concrete implementation of the filesystem backend for saving and retrieving DataColumnSidecars.
|
||||
DataColumnStorage struct {
|
||||
base string
|
||||
retentionEpochs primitives.Epoch
|
||||
fs afero.Fs
|
||||
cache *dataColumnStorageSummaryCache
|
||||
DataColumnFeed *event.Feed
|
||||
muChans map[[fieldparams.RootLength]byte]*muChan
|
||||
mu sync.Mutex // protect muChans
|
||||
pruneMu sync.RWMutex
|
||||
}
|
||||
|
||||
// DataColumnStorageOption is a functional option for configuring a DataColumnStorage.
|
||||
DataColumnStorageOption func(*DataColumnStorage) error
|
||||
|
||||
// DataColumnsIdent identifies a set of data column sidecars by block root, epoch and column indices.
|
||||
DataColumnsIdent struct {
|
||||
Root [fieldparams.RootLength]byte
|
||||
Epoch primitives.Epoch
|
||||
Indices []uint64
|
||||
}
|
||||
|
||||
storageIndices struct {
|
||||
indices [mandatoryNumberOfColumns]byte
|
||||
}
|
||||
|
||||
metadata struct {
|
||||
indices *storageIndices
|
||||
savedDataColumnSidecarCount int64
|
||||
sszEncodedDataColumnSidecarSize uint32
|
||||
fileSize int64
|
||||
}
|
||||
|
||||
fileMetadata struct {
|
||||
period uint64
|
||||
epoch primitives.Epoch
|
||||
blockRoot [fieldparams.RootLength]byte
|
||||
}
|
||||
|
||||
muChan struct {
|
||||
mu *sync.RWMutex
|
||||
toStore chan []blocks.VerifiedRODataColumn
|
||||
}
|
||||
)
|
||||
|
||||
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
|
||||
func WithDataColumnBasePath(base string) DataColumnStorageOption {
|
||||
return func(b *DataColumnStorage) error {
|
||||
b.base = base
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnRetentionEpochs is an option that changes the number of epochs data columns will be persisted.
|
||||
func WithDataColumnRetentionEpochs(e primitives.Epoch) DataColumnStorageOption {
|
||||
return func(b *DataColumnStorage) error {
|
||||
b.retentionEpochs = e
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnFs allows the afero.Fs implementation to be customized.
|
||||
// Used by tests to substitute an in-memory filesystem.
|
||||
func WithDataColumnFs(fs afero.Fs) DataColumnStorageOption {
|
||||
return func(b *DataColumnStorage) error {
|
||||
b.fs = fs
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewDataColumnStorage creates a new instance of the DataColumnStorage object. Note that the implementation of DataColumnStorage may
|
||||
// attempt to hold a file lock to guarantee exclusive control of the data column storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewDataColumnStorage(ctx context.Context, opts ...DataColumnStorageOption) (*DataColumnStorage, error) {
|
||||
storage := &DataColumnStorage{
|
||||
DataColumnFeed: new(event.Feed),
|
||||
muChans: make(map[[fieldparams.RootLength]byte]*muChan),
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
if err := o(storage); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create data column storage")
|
||||
}
|
||||
}
|
||||
|
||||
// Allow tests to set up a different fs using WithDataColumnFs.
|
||||
if storage.fs == nil {
|
||||
if storage.base == "" {
|
||||
return nil, errNoBasePath
|
||||
}
|
||||
|
||||
storage.base = path.Clean(storage.base)
|
||||
if err := file.MkdirAll(storage.base); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create data column storage at %s", storage.base)
|
||||
}
|
||||
|
||||
storage.fs = afero.NewBasePathFs(afero.NewOsFs(), storage.base)
|
||||
}
|
||||
|
||||
storage.cache = newDataColumnStorageSummaryCache()
|
||||
|
||||
async.RunEvery(ctx, prunePeriod, func() {
|
||||
storage.pruneMu.Lock()
|
||||
defer storage.pruneMu.Unlock()
|
||||
|
||||
storage.prune()
|
||||
})
|
||||
|
||||
return storage, nil
|
||||
}
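A minimal construction sketch (not part of the diff); the directory and retention value are illustrative placeholders, not values taken from this change.

func exampleNewStorage(ctx context.Context) (*DataColumnStorage, error) {
	// Hypothetical base path and retention; real values come from node configuration.
	return NewDataColumnStorage(
		ctx,
		WithDataColumnBasePath("/var/lib/prysm/data-columns"),
		WithDataColumnRetentionEpochs(4096),
	)
}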
|
||||
|
||||
// WarmCache warms the cache of the data column filesystem.
|
||||
// It holds the prune (write) lock for the entire time it is running.
func (dcs *DataColumnStorage) WarmCache() {
|
||||
start := time.Now()
|
||||
log.Info("Data column filesystem cache warm-up started")
|
||||
|
||||
dcs.pruneMu.Lock()
|
||||
defer dcs.pruneMu.Unlock()
|
||||
|
||||
highestStoredEpoch := primitives.Epoch(0)
|
||||
|
||||
// Walk the data column filesystem to warm up the cache.
|
||||
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
|
||||
// If not a leaf, skip.
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract metadata from the file path.
|
||||
fileMetadata, err := extractFileMetadata(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while extracting file metadata")
|
||||
}
|
||||
|
||||
// Open the data column filesystem file.
|
||||
file, err := dcs.fs.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while opening data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
defer func() {
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
closeErr := file.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the metadata of the file.
|
||||
metadata, err := dcs.metadata(file)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the indices.
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
|
||||
|
||||
// Update the highest stored epoch.
|
||||
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
|
||||
|
||||
// Set the ident in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("Error encountered while walking data column filesystem.")
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem.
|
||||
dcs.prune()
|
||||
|
||||
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// Summary returns the DataColumnStorageSummary for the given root.
func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary {
|
||||
return dcs.cache.Summary(root)
|
||||
}
|
||||
|
||||
// Save saves data column sidecars into the database.
// Pruning is performed asynchronously by a background routine, not by this call.
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
|
||||
if len(dataColumnSidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check that the number of columns is the expected one.
// While implementing this, we expect the number of columns won't change.
|
||||
// If it does, we will need to create a new version of the data column sidecar file.
|
||||
if params.BeaconConfig().NumberOfColumns != mandatoryNumberOfColumns {
|
||||
return errWrongNumberOfColumns
|
||||
}
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
dataColumnSidecarsbyRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
|
||||
// Group data column sidecars by root.
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
// Check if the data column index is too large.
|
||||
if dataColumnSidecar.Index >= mandatoryNumberOfColumns {
|
||||
return errDataColumnIndexTooLarge
|
||||
}
|
||||
|
||||
// Group data column sidecars by root.
|
||||
root := dataColumnSidecar.BlockRoot()
|
||||
dataColumnSidecarsbyRoot[root] = append(dataColumnSidecarsbyRoot[root], dataColumnSidecar)
|
||||
}
|
||||
|
||||
for root, dataColumnSidecars := range dataColumnSidecarsbyRoot {
|
||||
// Safety check: all data column sidecars for this root must come from the same slot.
firstSlot := dataColumnSidecars[0].SignedBlockHeader.Header.Slot
|
||||
for _, dataColumnSidecar := range dataColumnSidecars[1:] {
|
||||
if dataColumnSidecar.SignedBlockHeader.Header.Slot != firstSlot {
|
||||
return errDataColumnSidecarsFromDifferentSlots
|
||||
}
|
||||
}
|
||||
|
||||
// Set the highest epoch.
|
||||
epoch := slots.ToEpoch(dataColumnSidecars[0].Slot())
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
// Save data columns in the filesystem.
|
||||
if err := dcs.saveFilesystem(root, epoch, dataColumnSidecars); err != nil {
|
||||
return errors.Wrap(err, "save filesystem")
|
||||
}
|
||||
|
||||
// Get all indices.
|
||||
indices := make([]uint64, 0, len(dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
indices = append(indices, dataColumnSidecar.Index)
|
||||
}
|
||||
|
||||
// Compute the data columns ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: slots.ToEpoch(dataColumnSidecars[0].Slot()), Indices: indices}
|
||||
|
||||
// Set data columns in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
return errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
// Notify the data column feed.
|
||||
dcs.DataColumnFeed.Send(dataColumnsIdent)
|
||||
}
|
||||
|
||||
dataColumnSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
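Illustrative only: after a successful Save, availability can be checked from the in-memory summary without touching disk. The helper name and arguments below are hypothetical.

func exampleSaveAndCheck(storage *DataColumnStorage, sidecars []blocks.VerifiedRODataColumn, blockRoot [fieldparams.RootLength]byte) error {
	if err := storage.Save(sidecars); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}
	available := storage.Summary(blockRoot).HasIndex(2)
	_ = available // true once column 2 for blockRoot has been written
	return nil
}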
|
||||
|
||||
// saveFilesystem saves data column sidecars into the database.
|
||||
// This function expects all data column sidecars to belong to the same block.
|
||||
func (dcs *DataColumnStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, dataColumnSidecars []blocks.VerifiedRODataColumn) error {
|
||||
// Compute the file path.
|
||||
filePath := filePath(root, epoch)
|
||||
|
||||
dcs.pruneMu.RLock()
|
||||
defer dcs.pruneMu.RUnlock()
|
||||
|
||||
fileMu, toStore := dcs.fileMutex(root)
|
||||
toStore <- dataColumnSidecars
|
||||
|
||||
fileMu.Lock()
|
||||
defer fileMu.Unlock()
|
||||
|
||||
// Check if the file exists.
|
||||
exists, err := afero.Exists(dcs.fs, filePath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "afero exists")
|
||||
}
|
||||
|
||||
if exists {
|
||||
if err := dcs.saveDataColumnSidecarsExistingFile(filePath, toStore); err != nil {
|
||||
return errors.Wrap(err, "save data column existing file")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := dcs.saveDataColumnSidecarsNewFile(filePath, toStore); err != nil {
|
||||
return errors.Wrap(err, "save data columns new file")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves data column sidecars from the database.
|
||||
// If one of the requested data column sidecars is not found, it is just skipped.
|
||||
// If indices is nil, then all stored data column sidecars are returned.
|
||||
// Since DataColumnStorage only writes data columns that have undergone full verification, the return
|
||||
// value is always a VerifiedRODataColumn.
|
||||
func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
dcs.pruneMu.RLock()
|
||||
defer dcs.pruneMu.RUnlock()
|
||||
|
||||
fileMu, _ := dcs.fileMutex(root)
|
||||
fileMu.RLock()
|
||||
defer fileMu.RUnlock()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
// Build all indices if none are provided.
|
||||
if indices == nil {
|
||||
indices = make([]uint64, mandatoryNumberOfColumns)
|
||||
for i := range indices {
|
||||
indices[i] = uint64(i)
|
||||
}
|
||||
}
|
||||
|
||||
summary, ok := dcs.cache.get(root)
|
||||
if !ok {
|
||||
// Nothing found in db. Exit early.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the file path.
|
||||
filePath := filePath(root, summary.epoch)
|
||||
|
||||
// Open the data column sidecars file.
|
||||
// We do not specially check if the file exists since we have already checked the cache.
|
||||
file, err := dcs.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars file path open")
|
||||
}
|
||||
|
||||
// Read file metadata.
|
||||
metadata, err := dcs.metadata(file)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "metadata")
|
||||
}
|
||||
|
||||
// Retrieve data column sidecars from the file.
|
||||
verifiedRODataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
ok, position, err := metadata.indices.get(index)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get index")
|
||||
}
|
||||
|
||||
// Skip if the data column is not saved.
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Compute the offset of the data column sidecar.
|
||||
offset := headerSize + position*int64(metadata.sszEncodedDataColumnSidecarSize)
|
||||
|
||||
// Seek to the beginning of the data column sidecar.
|
||||
_, err = file.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "seek")
|
||||
}
|
||||
|
||||
verifiedRODataColumn, err := verification.VerifiedRODataColumnFromDisk(file, root, metadata.sszEncodedDataColumnSidecarSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "verified RO data column from disk")
|
||||
}
|
||||
|
||||
// Append the verified RO data column to the data column sidecars.
|
||||
verifiedRODataColumnSidecars = append(verifiedRODataColumnSidecars, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
dataColumnFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
return verifiedRODataColumnSidecars, nil
|
||||
}
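A hedged usage sketch of Get; the helper name and block root are placeholders.

func exampleGet(storage *DataColumnStorage, blockRoot [fieldparams.RootLength]byte) error {
	// Request two specific columns; any column not stored is silently skipped.
	some, err := storage.Get(blockRoot, []uint64{2, 4})
	if err != nil {
		return errors.Wrap(err, "get data columns")
	}
	// A nil indices slice returns every stored column for the root.
	all, err := storage.Get(blockRoot, nil)
	if err != nil {
		return errors.Wrap(err, "get all data columns")
	}
	_, _ = some, all
	return nil
}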
|
||||
|
||||
// Remove deletes all data column sidecars for a given root.
|
||||
func (dcs *DataColumnStorage) Remove(blockRoot [fieldparams.RootLength]byte) error {
|
||||
dcs.pruneMu.RLock()
|
||||
defer dcs.pruneMu.RUnlock()
|
||||
|
||||
fileMu, _ := dcs.fileMutex(blockRoot)
|
||||
fileMu.Lock()
|
||||
defer fileMu.Unlock()
|
||||
|
||||
summary, ok := dcs.cache.get(blockRoot)
|
||||
if !ok {
|
||||
// Nothing found in db. Exit early.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the data column sidecars from the cache.
|
||||
dcs.cache.evict(blockRoot)
|
||||
|
||||
// Remove the data column sidecars file.
|
||||
filePath := filePath(blockRoot, summary.epoch)
|
||||
if err := dcs.fs.Remove(filePath); err != nil {
|
||||
return errors.Wrap(err, "remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clear deletes all files on the filesystem.
|
||||
func (dcs *DataColumnStorage) Clear() error {
|
||||
dcs.pruneMu.Lock()
|
||||
defer dcs.pruneMu.Unlock()
|
||||
|
||||
dirs, err := listDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "list dir")
|
||||
}
|
||||
|
||||
dcs.cache.clear()
|
||||
|
||||
for _, dir := range dirs {
|
||||
if err := dcs.fs.RemoveAll(dir); err != nil {
|
||||
return errors.Wrap(err, "remove all")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// prune cleans the cache, the filesystem, and the per-root mutexes.
func (dcs *DataColumnStorage) prune() {
|
||||
dcs.mu.Lock()
|
||||
defer dcs.mu.Unlock()
|
||||
|
||||
highestStoredEpoch := dcs.cache.HighestEpoch()
|
||||
|
||||
// Check if we need to prune.
|
||||
if highestStoredEpoch < dcs.retentionEpochs {
|
||||
return
|
||||
}
|
||||
|
||||
highestEpochToPrune := highestStoredEpoch - dcs.retentionEpochs
|
||||
highestPeriodToPrune := period(highestEpochToPrune)
|
||||
|
||||
// Prune the cache.
|
||||
prunedCount := dcs.cache.pruneUpTo(highestEpochToPrune)
|
||||
|
||||
if prunedCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
dataColumnPrunedCounter.Add(float64(prunedCount))
|
||||
|
||||
// Prune the filesystem.
|
||||
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading top directory")
|
||||
return
|
||||
}
|
||||
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
periodStr := periodFileInfo.Name()
|
||||
period, err := strconv.ParseUint(periodStr, 10, 64)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Error encountered while parsing period %s", periodStr)
|
||||
continue
|
||||
}
|
||||
|
||||
if period < highestPeriodToPrune {
|
||||
// Remove everything lower than the highest period to prune.
if err := dcs.fs.RemoveAll(periodStr); err != nil {
|
||||
log.WithError(err).Error("Error encountered while removing period directory")
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if period > highestPeriodToPrune {
|
||||
// Do not remove anything higher than highest period to prune.
|
||||
continue
|
||||
}
|
||||
|
||||
// if period == highestPeriodToPrune
|
||||
epochFileInfos, err := afero.ReadDir(dcs.fs, periodStr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading epoch directory")
|
||||
continue
|
||||
}
|
||||
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
epochStr := epochFileInfo.Name()
|
||||
epochDir := path.Join(periodStr, epochStr)
|
||||
|
||||
epoch, err := strconv.ParseUint(epochStr, 10, 64)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Error encountered while parsing epoch %s", epochStr)
|
||||
continue
|
||||
}
|
||||
|
||||
if primitives.Epoch(epoch) > highestEpochToPrune {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := dcs.fs.RemoveAll(epochDir); err != nil {
|
||||
log.WithError(err).Error("Error encountered while removing epoch directory")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
clear(dcs.muChans)
|
||||
}
|
||||
|
||||
// saveDataColumnSidecarsExistingFile saves data column sidecars into an existing file.
|
||||
// This function expects all data column sidecars to belong to the same block.
|
||||
func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string, inputDataColumnSidecars chan []blocks.VerifiedRODataColumn) (err error) {
|
||||
// Open the data column sidecars file.
|
||||
file, err := dcs.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "data column sidecars file path open")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := file.Close()
|
||||
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
metadata, err := dcs.metadata(file)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "metadata")
|
||||
}
|
||||
|
||||
// Create the SSZ encoded data column sidecars.
|
||||
var sszEncodedDataColumnSidecars []byte
|
||||
|
||||
for {
|
||||
dataColumnSidecars := pullChan(inputDataColumnSidecars)
|
||||
if len(dataColumnSidecars) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
// Extract the data columns index.
|
||||
dataColumnIndex := dataColumnSidecar.Index
|
||||
|
||||
ok, _, err := metadata.indices.get(dataColumnIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get index")
|
||||
}
|
||||
|
||||
// Skip if the data column is already saved.
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the number of saved data columns is too large.
// This cannot happen in practice, since this function is only reached via Save.
if metadata.savedDataColumnSidecarCount >= mandatoryNumberOfColumns {
|
||||
return errTooManyDataColumns
|
||||
}
|
||||
|
||||
// SSZ encode the data column sidecar.
|
||||
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "data column sidecar marshal SSZ")
|
||||
}
|
||||
|
||||
// Compute the size of the SSZ encoded data column sidecar.
|
||||
incomingSszEncodedDataColumnSidecarSize := uint32(len(sszEncodedDataColumnSidecar))
|
||||
|
||||
// Check if the incoming encoded data column sidecar size corresponds to the one read from the file.
|
||||
if incomingSszEncodedDataColumnSidecarSize != metadata.sszEncodedDataColumnSidecarSize {
|
||||
return errWrongSszEncodedDataColumnSidecarSize
|
||||
}
|
||||
|
||||
// Alter indices to mark the data column as saved.
|
||||
if err := metadata.indices.set(dataColumnIndex, uint8(metadata.savedDataColumnSidecarCount)); err != nil {
|
||||
return errors.Wrap(err, "set index")
|
||||
}
|
||||
|
||||
metadata.savedDataColumnSidecarCount++
|
||||
|
||||
// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
|
||||
}
|
||||
}
|
||||
|
||||
// Save indices to the file.
|
||||
indices := metadata.indices.raw()
|
||||
count, err := file.WriteAt(indices[:], int64(versionSize+encodedSszEncodedDataColumnSidecarSizeSize))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "write indices")
|
||||
}
|
||||
if count != mandatoryNumberOfColumns {
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
// Append the SSZ encoded data column sidecars to the end of the file.
|
||||
count, err = file.WriteAt(sszEncodedDataColumnSidecars, metadata.fileSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "write SSZ encoded data column sidecars")
|
||||
}
|
||||
if count != len(sszEncodedDataColumnSidecars) {
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveDataColumnSidecarsNewFile saves data column sidecars into a new file.
|
||||
// This function expects all data column sidecars to belong to the same block.
|
||||
func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inputDataColumnSidecars chan []blocks.VerifiedRODataColumn) (err error) {
|
||||
// Initialize the indices.
|
||||
var indices storageIndices
|
||||
|
||||
var (
|
||||
sszEncodedDataColumnSidecarRefSize int
|
||||
sszEncodedDataColumnSidecars []byte
|
||||
)
|
||||
|
||||
// Initialize the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount := uint8(0)
|
||||
|
||||
for {
|
||||
dataColumnSidecars := pullChan(inputDataColumnSidecars)
|
||||
if len(dataColumnSidecars) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
// Extract the data column index.
|
||||
dataColumnIndex := dataColumnSidecar.Index
|
||||
|
||||
// Skip if the data column is already stored.
|
||||
ok, _, err := indices.get(dataColumnIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get index")
|
||||
}
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Alter the indices to mark this data column sidecar as saved.
// storedCount is bounded by the number of columns, so it safely fits in a uint8.
if err := indices.set(dataColumnIndex, storedCount); err != nil {
|
||||
return errors.Wrap(err, "set index")
|
||||
}
|
||||
|
||||
// Increment the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount++
|
||||
|
||||
// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "data column sidecar marshal SSZ")
|
||||
}
|
||||
|
||||
// Check if the size of the SSZ encoded data column sidecar is correct.
|
||||
if sszEncodedDataColumnSidecarRefSize != 0 && len(sszEncodedDataColumnSidecar) != sszEncodedDataColumnSidecarRefSize {
|
||||
return errWrongSszEncodedDataColumnSidecarSize
|
||||
}
|
||||
|
||||
// Set the SSZ encoded data column sidecar reference size.
|
||||
sszEncodedDataColumnSidecarRefSize = len(sszEncodedDataColumnSidecar)
|
||||
|
||||
// Append the SSZ encoded data column sidecar to the accumulated SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
|
||||
}
|
||||
}
|
||||
|
||||
if storedCount == 0 {
|
||||
// Nothing to save.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create the data column sidecars file.
|
||||
dir := filepath.Dir(filePath)
|
||||
if err := dcs.fs.MkdirAll(dir, directoryPermissions()); err != nil {
|
||||
return errors.Wrapf(err, "mkdir all")
|
||||
}
|
||||
|
||||
file, err := dcs.fs.Create(filePath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "data column sidecars file path create")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := file.Close()
|
||||
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Encode the SSZ encoded data column sidecar size.
|
||||
var encodedSszEncodedDataColumnSidecarSize [encodedSszEncodedDataColumnSidecarSizeSize]byte
|
||||
binary.BigEndian.PutUint32(encodedSszEncodedDataColumnSidecarSize[:], uint32(sszEncodedDataColumnSidecarRefSize))
|
||||
|
||||
// Get the raw indices.
|
||||
rawIndices := indices.raw()
|
||||
|
||||
// Concatenate the version, the data column sidecar size, the data column indices and the SSZ encoded data column sidecar.
|
||||
countToWrite := headerSize + len(sszEncodedDataColumnSidecars)
|
||||
bytes := make([]byte, 0, countToWrite)
|
||||
bytes = append(bytes, byte(version))
|
||||
bytes = append(bytes, encodedSszEncodedDataColumnSidecarSize[:]...)
|
||||
bytes = append(bytes, rawIndices[:]...)
|
||||
bytes = append(bytes, sszEncodedDataColumnSidecars...)
|
||||
|
||||
countWritten, err := file.Write(bytes)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "write")
|
||||
}
|
||||
if countWritten != countToWrite {
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
return nil
|
||||
}
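For reference, the on-disk layout written above can be summarized as follows; the offsets are inferred from this file and the metadata reader below, not from an external spec.

// File layout (sketch):
//   [versionOffset]                                version byte
//   [encodedSszEncodedDataColumnSidecarSizeOffset] SSZ sidecar size, big endian uint32
//   [indicesOffset]                                indices, one byte per column (mandatoryNumberOfColumns bytes)
//   [headerSize + p*sidecarSize]                   SSZ encoded sidecar stored at position p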
|
||||
|
||||
// metadata runs file sanity checks and retrieves metadata of the file.
|
||||
// The file descriptor is left at the beginning of the first SSZ encoded data column sidecar.
|
||||
func (dcs *DataColumnStorage) metadata(file afero.File) (*metadata, error) {
|
||||
var header [headerSize]byte
|
||||
countRead, err := file.ReadAt(header[:], 0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read at")
|
||||
}
|
||||
if countRead != headerSize {
|
||||
return nil, errWrongBytesHeaderRead
|
||||
}
|
||||
|
||||
// Read the encoded file version.
|
||||
encodedFileVersion := header[versionOffset : versionOffset+versionSize]
|
||||
|
||||
// Convert the version to an int.
|
||||
fileVersion := int(encodedFileVersion[0])
|
||||
|
||||
// Check if the version is the expected one.
|
||||
if fileVersion != version {
|
||||
return nil, errWrongVersion
|
||||
}
|
||||
|
||||
// DataColumnSidecar is a variable sized ssz object, but all data columns for a block will be the same size.
|
||||
encodedSszEncodedDataColumnSidecarSize := header[encodedSszEncodedDataColumnSidecarSizeOffset : encodedSszEncodedDataColumnSidecarSizeOffset+encodedSszEncodedDataColumnSidecarSizeSize]
|
||||
|
||||
// Convert the SSZ encoded data column sidecar size to a uint32.
sszEncodedDataColumnSidecarSize := binary.BigEndian.Uint32(encodedSszEncodedDataColumnSidecarSize)
|
||||
|
||||
// Read the data column indices.
|
||||
indices, err := newStorageIndices(header[indicesOffset : indicesOffset+mandatoryNumberOfColumns])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new storage indices")
|
||||
}
|
||||
|
||||
// Compute the saved columns count.
|
||||
savedDataColumnSidecarCount := int64(indices.len())
|
||||
|
||||
// Compute the size of the file.
|
||||
// It is safe to cast the SSZ encoded data column sidecar size to int64 since it is less than 2**63.
|
||||
fileSize := int64(headerSize) + savedDataColumnSidecarCount*int64(sszEncodedDataColumnSidecarSize) // lint:ignore uintcast
|
||||
|
||||
metadata := &metadata{
|
||||
indices: indices,
|
||||
savedDataColumnSidecarCount: savedDataColumnSidecarCount,
|
||||
sszEncodedDataColumnSidecarSize: sszEncodedDataColumnSidecarSize,
|
||||
fileSize: fileSize,
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
func (dcs *DataColumnStorage) fileMutex(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []blocks.VerifiedRODataColumn) {
|
||||
dcs.mu.Lock()
|
||||
defer dcs.mu.Unlock()
|
||||
|
||||
mc, ok := dcs.muChans[root]
|
||||
if !ok {
|
||||
mc = &muChan{
|
||||
mu: new(sync.RWMutex),
|
||||
toStore: make(chan []blocks.VerifiedRODataColumn, 1),
|
||||
}
|
||||
dcs.muChans[root] = mc
|
||||
|
||||
return mc.mu, mc.toStore
|
||||
}
|
||||
|
||||
return mc.mu, mc.toStore
|
||||
}
|
||||
|
||||
func newStorageIndices(indices []byte) (*storageIndices, error) {
|
||||
if len(indices) != mandatoryNumberOfColumns {
|
||||
return nil, errWrongNumberOfColumns
|
||||
}
|
||||
|
||||
var storageIndices storageIndices
|
||||
copy(storageIndices.indices[:], indices)
|
||||
|
||||
return &storageIndices, nil
|
||||
}
|
||||
|
||||
// get returns a boolean indicating if the data column sidecar is saved,
|
||||
// and the position of the data column sidecar in the file.
|
||||
func (si *storageIndices) get(dataColumnIndex uint64) (bool, int64, error) {
|
||||
if dataColumnIndex >= mandatoryNumberOfColumns {
|
||||
return false, 0, errDataColumnIndexTooLarge
|
||||
}
|
||||
|
||||
if si.indices[dataColumnIndex] < nonZeroOffset {
|
||||
return false, 0, nil
|
||||
}
|
||||
|
||||
return true, int64(si.indices[dataColumnIndex] - nonZeroOffset), nil
|
||||
}
|
||||
|
||||
// len returns the number of saved data column sidecars.
|
||||
func (si *storageIndices) len() int {
|
||||
count := 0
|
||||
|
||||
for _, i := range si.indices {
|
||||
if i >= nonZeroOffset {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// all returns the indices of all saved data column sidecars.
func (si *storageIndices) all() []uint64 {
|
||||
indices := make([]uint64, 0, len(si.indices))
|
||||
|
||||
for index, i := range si.indices {
|
||||
if i >= nonZeroOffset {
|
||||
indices = append(indices, uint64(index))
|
||||
}
|
||||
}
|
||||
|
||||
return indices
|
||||
}
|
||||
|
||||
// raw returns the raw data column sidecar indices.
|
||||
// It can be safely modified by the caller.
|
||||
func (si *storageIndices) raw() [mandatoryNumberOfColumns]byte {
|
||||
var result [mandatoryNumberOfColumns]byte
|
||||
copy(result[:], si.indices[:])
|
||||
return result
|
||||
}
|
||||
|
||||
// set marks the data column sidecar at the given index as saved at the given position.
func (si *storageIndices) set(dataColumnIndex uint64, position uint8) error {
|
||||
if dataColumnIndex >= mandatoryNumberOfColumns || position >= mandatoryNumberOfColumns {
|
||||
return errDataColumnIndexTooLarge
|
||||
}
|
||||
|
||||
si.indices[dataColumnIndex] = nonZeroOffset + position
|
||||
|
||||
return nil
|
||||
}
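A short sketch of the encoding used by get and set: a byte of 0 marks the column as absent, while nonZeroOffset+position marks it as the position-th sidecar stored in the file. The helper below is hypothetical.

func exampleIndices() {
	var si storageIndices
	_ = si.set(12, 0) // indices[12] = nonZeroOffset: column 12 is the first sidecar in the file
	_ = si.set(11, 1) // indices[11] = nonZeroOffset + 1: column 11 is the second sidecar
	ok, pos, _ := si.get(11)
	_, _ = ok, pos // ok == true, pos == 1
}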
|
||||
|
||||
// pullChan pulls data column sidecars from the input channel until it is empty.
|
||||
func pullChan(inputRoDataColumns chan []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
dataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns)
|
||||
|
||||
for {
|
||||
select {
|
||||
case dataColumnSidecar := <-inputRoDataColumns:
|
||||
dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar...)
|
||||
default:
|
||||
return dataColumnSidecars
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// filePath builds the file path in database for a given root and epoch.
|
||||
func filePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string {
|
||||
return path.Join(
|
||||
fmt.Sprintf("%d", period(epoch)),
|
||||
fmt.Sprintf("%d", epoch),
|
||||
fmt.Sprintf("%#x.%s", root, dataColumnsFileExtension),
|
||||
)
|
||||
}
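For example (extension and values taken from the test fixtures later in this diff), a block root of 0x01…00 at epoch 0 in period 0 maps to:

// 0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs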
|
||||
|
||||
// extractFileMetadata extracts the metadata from a file path.
|
||||
// It returns an error if the path does not have the expected period/epoch/root layout.
func extractFileMetadata(path string) (*fileMetadata, error) {
|
||||
// Is this Windows friendly?
|
||||
parts := strings.Split(path, "/")
|
||||
if len(parts) != 3 {
|
||||
return nil, errors.Errorf("unexpected file %s", path)
|
||||
}
|
||||
|
||||
period, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse period from %s", path)
|
||||
}
|
||||
|
||||
epoch, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse epoch from %s", path)
|
||||
}
|
||||
|
||||
partsRoot := strings.Split(parts[2], ".")
|
||||
if len(partsRoot) != 2 {
|
||||
return nil, errors.Errorf("failed to parse root from %s", path)
|
||||
}
|
||||
|
||||
blockRootString := partsRoot[0]
|
||||
if len(blockRootString) != 2+2*fieldparams.RootLength || blockRootString[:2] != "0x" {
|
||||
return nil, errors.Errorf("unexpected file name %s", path)
|
||||
}
|
||||
|
||||
if partsRoot[1] != dataColumnsFileExtension {
|
||||
return nil, errors.Errorf("unexpected extension %s", path)
|
||||
}
|
||||
|
||||
blockRootSlice, err := hex.DecodeString(blockRootString[2:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "decode string from %s", path)
|
||||
}
|
||||
|
||||
var blockRoot [fieldparams.RootLength]byte
|
||||
copy(blockRoot[:], blockRootSlice)
|
||||
|
||||
result := &fileMetadata{period: period, epoch: primitives.Epoch(epoch), blockRoot: blockRoot}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// period computes the period of a given epoch.
|
||||
func period(epoch primitives.Epoch) uint64 {
|
||||
return uint64(epoch / params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
}
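As a worked example based on the test fixtures later in this diff (which imply MinEpochsForBlobsSidecarsRequest = 4096):

// period(4001) == 0 and period(4098) == 1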
|
||||
beacon-chain/db/filesystem/data_column_cache.go
@@ -0,0 +1,219 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
var errDataColumnIndexOutOfBounds = errors.New("data column index too high")
|
||||
|
||||
// DataColumnStorageSummary represents cached information about the DataColumnSidecars on disk for each root the cache knows about.
|
||||
type DataColumnStorageSummary struct {
|
||||
epoch primitives.Epoch
|
||||
mask [fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// NewDataColumnStorageSummary creates a new DataColumnStorageSummary for a given epoch and mask.
|
||||
func NewDataColumnStorageSummary(epoch primitives.Epoch, mask [fieldparams.NumberOfColumns]bool) DataColumnStorageSummary {
|
||||
return DataColumnStorageSummary{
|
||||
epoch: epoch,
|
||||
mask: mask,
|
||||
}
|
||||
}
|
||||
|
||||
// HasIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
|
||||
func (s DataColumnStorageSummary) HasIndex(index uint64) bool {
|
||||
if index >= uint64(fieldparams.NumberOfColumns) {
|
||||
return false
|
||||
}
|
||||
return s.mask[index]
|
||||
}
|
||||
|
||||
// Count returns the number of available data columns.
|
||||
func (s DataColumnStorageSummary) Count() uint64 {
|
||||
count := uint64(0)
|
||||
|
||||
for _, available := range s.mask {
|
||||
if available {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// AllAvailable returns true if we have all data columns for corresponding indices.
|
||||
func (s DataColumnStorageSummary) AllAvailable(indices map[uint64]bool) bool {
|
||||
if len(indices) > len(s.mask) {
|
||||
return false
|
||||
}
|
||||
|
||||
for index := range indices {
|
||||
if !s.mask[index] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
|
||||
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
|
||||
type DataColumnStorageSummarizer interface {
|
||||
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
|
||||
}
|
||||
|
||||
type dataColumnStorageSummaryCache struct {
|
||||
mu sync.RWMutex
|
||||
dataColumnCount float64
|
||||
lowestCachedEpoch primitives.Epoch
|
||||
highestCachedEpoch primitives.Epoch
|
||||
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
|
||||
}
|
||||
|
||||
var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
|
||||
|
||||
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
|
||||
return &dataColumnStorageSummaryCache{
|
||||
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
|
||||
lowestCachedEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Summary returns the DataColumnStorageSummary for `root`.
|
||||
// The DataColumnStorageSummary can be used to check for the presence of DataColumnSidecars based on Index.
|
||||
func (sc *dataColumnStorageSummaryCache) Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary {
|
||||
sc.mu.RLock()
|
||||
defer sc.mu.RUnlock()
|
||||
|
||||
return sc.cache[root]
|
||||
}
|
||||
|
||||
func (sc *dataColumnStorageSummaryCache) HighestEpoch() primitives.Epoch {
|
||||
sc.mu.RLock()
|
||||
defer sc.mu.RUnlock()
|
||||
return sc.highestCachedEpoch
|
||||
}
|
||||
|
||||
// set updates the cache.
|
||||
func (sc *dataColumnStorageSummaryCache) set(dataColumnsIdent DataColumnsIdent) error {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
|
||||
summary := sc.cache[dataColumnsIdent.Root]
|
||||
summary.epoch = dataColumnsIdent.Epoch
|
||||
|
||||
count := uint64(0)
|
||||
for _, index := range dataColumnsIdent.Indices {
|
||||
if index >= numberOfColumns {
|
||||
return errDataColumnIndexOutOfBounds
|
||||
}
|
||||
|
||||
if summary.mask[index] {
|
||||
continue
|
||||
}
|
||||
|
||||
count++
|
||||
|
||||
summary.mask[index] = true
|
||||
sc.cache[dataColumnsIdent.Root] = summary
|
||||
sc.lowestCachedEpoch = min(sc.lowestCachedEpoch, dataColumnsIdent.Epoch)
|
||||
sc.highestCachedEpoch = max(sc.highestCachedEpoch, dataColumnsIdent.Epoch)
|
||||
}
|
||||
|
||||
countFloat := float64(count)
|
||||
sc.dataColumnCount += countFloat
|
||||
dataColumnDiskCount.Set(sc.dataColumnCount)
|
||||
dataColumnWrittenCounter.Add(countFloat)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// get returns the DataColumnStorageSummary for the given block root.
|
||||
// If the root is not in the cache, the second return value will be false.
|
||||
func (sc *dataColumnStorageSummaryCache) get(blockRoot [fieldparams.RootLength]byte) (DataColumnStorageSummary, bool) {
|
||||
sc.mu.RLock()
|
||||
defer sc.mu.RUnlock()
|
||||
|
||||
v, ok := sc.cache[blockRoot]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
// evict removes the DataColumnStorageSummary for the given block root from the cache.
|
||||
func (s *dataColumnStorageSummaryCache) evict(blockRoot [fieldparams.RootLength]byte) int {
|
||||
deleted := 0
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
summary, ok := s.cache[blockRoot]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
for i := range summary.mask {
|
||||
if summary.mask[i] {
|
||||
deleted += 1
|
||||
}
|
||||
}
|
||||
|
||||
delete(s.cache, blockRoot)
|
||||
if deleted > 0 {
|
||||
s.dataColumnCount -= float64(deleted)
|
||||
dataColumnDiskCount.Set(s.dataColumnCount)
|
||||
}
|
||||
|
||||
// The lowest and highest cached epochs may no longer be valid here,
// but it is not worth the effort to recalculate them.
|
||||
|
||||
return deleted
|
||||
}
|
||||
|
||||
// pruneUpTo removes all entries from the cache up to and including the given target epoch.
func (sc *dataColumnStorageSummaryCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
|
||||
prunedCount := uint64(0)
|
||||
newLowestCachedEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
newHighestCachedEpoch := primitives.Epoch(0)
|
||||
|
||||
for blockRoot, summary := range sc.cache {
|
||||
epoch := summary.epoch
|
||||
|
||||
if epoch > targetEpoch {
|
||||
newLowestCachedEpoch = min(newLowestCachedEpoch, epoch)
|
||||
newHighestCachedEpoch = max(newHighestCachedEpoch, epoch)
|
||||
}
|
||||
|
||||
if epoch <= targetEpoch {
|
||||
for i := range summary.mask {
|
||||
if summary.mask[i] {
|
||||
prunedCount += 1
|
||||
}
|
||||
}
|
||||
|
||||
delete(sc.cache, blockRoot)
|
||||
}
|
||||
}
|
||||
|
||||
if prunedCount > 0 {
|
||||
sc.lowestCachedEpoch = newLowestCachedEpoch
|
||||
sc.highestCachedEpoch = newHighestCachedEpoch
|
||||
sc.dataColumnCount -= float64(prunedCount)
|
||||
dataColumnDiskCount.Set(sc.dataColumnCount)
|
||||
}
|
||||
|
||||
return prunedCount
|
||||
}
|
||||
|
||||
// clear removes all entries from the cache.
|
||||
func (sc *dataColumnStorageSummaryCache) clear() uint64 {
|
||||
return sc.pruneUpTo(params.BeaconConfig().FarFutureEpoch)
|
||||
}
|
||||
beacon-chain/db/filesystem/data_column_cache_test.go
@@ -0,0 +1,235 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestHasIndex(t *testing.T) {
|
||||
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
|
||||
hasIndex := summary.HasIndex(1_000_000)
|
||||
require.Equal(t, false, hasIndex)
|
||||
|
||||
hasIndex = summary.HasIndex(0)
|
||||
require.Equal(t, false, hasIndex)
|
||||
|
||||
hasIndex = summary.HasIndex(1)
|
||||
require.Equal(t, true, hasIndex)
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
count := summary.Count()
|
||||
require.Equal(t, uint64(2), count)
|
||||
}
|
||||
|
||||
func TestAllAvailableDataColumns(t *testing.T) {
|
||||
const count = uint64(1_000)
|
||||
|
||||
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
indices := make(map[uint64]bool, count)
|
||||
for i := range count {
|
||||
indices[i] = true
|
||||
}
|
||||
|
||||
allAvailable := summary.AllAvailable(indices)
|
||||
require.Equal(t, false, allAvailable)
|
||||
|
||||
indices = map[uint64]bool{1: true, 2: true}
|
||||
allAvailable = summary.AllAvailable(indices)
|
||||
require.Equal(t, false, allAvailable)
|
||||
|
||||
indices = map[uint64]bool{1: true, 3: true}
|
||||
allAvailable = summary.AllAvailable(indices)
|
||||
require.Equal(t, true, allAvailable)
|
||||
}
|
||||
|
||||
func TestSummary(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
expected := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{})
|
||||
actual := summaryCache.Summary(root)
|
||||
require.DeepEqual(t, expected, actual)
|
||||
|
||||
summaryCache = newDataColumnStorageSummaryCache()
|
||||
expected = NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{true, false, true, false})
|
||||
summaryCache.cache[root] = expected
|
||||
actual = summaryCache.Summary(root)
|
||||
require.DeepEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestHighestEpoch(t *testing.T) {
|
||||
root1 := [fieldparams.RootLength]byte{1}
|
||||
root2 := [fieldparams.RootLength]byte{2}
|
||||
root3 := [fieldparams.RootLength]byte{3}
|
||||
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
actual := summaryCache.HighestEpoch()
|
||||
require.Equal(t, primitives.Epoch(0), actual)
|
||||
|
||||
err := summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{1, 3}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(42), summaryCache.HighestEpoch())
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: root2, Epoch: 43, Indices: []uint64{1, 3}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(43), summaryCache.HighestEpoch())
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: root3, Epoch: 40, Indices: []uint64{1, 3}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(43), summaryCache.HighestEpoch())
|
||||
}
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
t.Run("Index out of bounds", func(t *testing.T) {
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
err := summaryCache.set(DataColumnsIdent{Indices: []uint64{1_000_000}})
|
||||
require.ErrorIs(t, err, errDataColumnIndexOutOfBounds)
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
|
||||
require.Equal(t, 0, len(summaryCache.cache))
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
root1 := [fieldparams.RootLength]byte{1}
|
||||
root2 := [fieldparams.RootLength]byte{2}
|
||||
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
|
||||
err := summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{1, 3}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
|
||||
require.Equal(t, 1, len(summaryCache.cache))
|
||||
expected := DataColumnStorageSummary{epoch: 42, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}
|
||||
actual := summaryCache.cache[root1]
|
||||
require.DeepEqual(t, expected, actual)
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{0, 1}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
|
||||
require.Equal(t, 1, len(summaryCache.cache))
|
||||
expected = DataColumnStorageSummary{epoch: 42, mask: [fieldparams.NumberOfColumns]bool{true, true, false, true}}
|
||||
actual = summaryCache.cache[root1]
|
||||
require.DeepEqual(t, expected, actual)
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: root2, Epoch: 43, Indices: []uint64{1}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch) // Epoch 42 is still the lowest
|
||||
require.Equal(t, 2, len(summaryCache.cache))
|
||||
expected = DataColumnStorageSummary{epoch: 43, mask: [fieldparams.NumberOfColumns]bool{false, true}}
|
||||
actual = summaryCache.cache[root2]
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
t.Run("Not in cache", func(t *testing.T) {
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
_, ok := summaryCache.get(root)
|
||||
require.Equal(t, false, ok)
|
||||
})
|
||||
|
||||
t.Run("In cache", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
summaryCache.cache[root] = NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
|
||||
actual, ok := summaryCache.get(root)
|
||||
require.Equal(t, true, ok)
|
||||
expected := NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEvict(t *testing.T) {
|
||||
t.Run("No eviction", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
|
||||
evicted := summaryCache.evict(root)
|
||||
require.Equal(t, 0, evicted)
|
||||
})
|
||||
|
||||
t.Run("Eviction", func(t *testing.T) {
|
||||
root1 := [fieldparams.RootLength]byte{1}
|
||||
root2 := [fieldparams.RootLength]byte{2}
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
summaryCache.cache[root1] = NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
|
||||
summaryCache.cache[root2] = NewDataColumnStorageSummary(43, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
evicted := summaryCache.evict(root1)
|
||||
require.Equal(t, 2, evicted)
|
||||
require.Equal(t, 1, len(summaryCache.cache))
|
||||
|
||||
_, ok := summaryCache.cache[root1]
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
_, ok = summaryCache.cache[root2]
|
||||
require.Equal(t, true, ok)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPruneUpTo(t *testing.T) {
|
||||
t.Run("No pruning", func(t *testing.T) {
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 43, Indices: []uint64{2, 4}})
|
||||
require.NoError(t, err)
|
||||
|
||||
count := summaryCache.pruneUpTo(41)
|
||||
require.Equal(t, uint64(0), count)
|
||||
require.Equal(t, 2, len(summaryCache.cache))
|
||||
require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
|
||||
})
|
||||
|
||||
t.Run("Pruning", func(t *testing.T) {
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 44, Indices: []uint64{2, 4}})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{3}, Epoch: 45, Indices: []uint64{2, 4}})
|
||||
require.NoError(t, err)
|
||||
|
||||
count := summaryCache.pruneUpTo(42)
|
||||
require.Equal(t, uint64(1), count)
|
||||
require.Equal(t, 2, len(summaryCache.cache))
|
||||
require.Equal(t, primitives.Epoch(44), summaryCache.lowestCachedEpoch)
|
||||
|
||||
count = summaryCache.pruneUpTo(45)
|
||||
require.Equal(t, uint64(4), count)
|
||||
require.Equal(t, 0, len(summaryCache.cache))
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
|
||||
require.Equal(t, primitives.Epoch(0), summaryCache.highestCachedEpoch)
|
||||
|
||||
})
|
||||
|
||||
t.Run("Clear", func(t *testing.T) {
|
||||
summaryCache := newDataColumnStorageSummaryCache()
|
||||
err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 44, Indices: []uint64{2, 4}})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{3}, Epoch: 45, Indices: []uint64{2, 4}})
|
||||
require.NoError(t, err)
|
||||
|
||||
count := summaryCache.clear()
|
||||
require.Equal(t, uint64(5), count)
|
||||
require.Equal(t, 0, len(summaryCache.cache))
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
|
||||
require.Equal(t, primitives.Epoch(0), summaryCache.highestCachedEpoch)
|
||||
})
|
||||
}
|
||||
beacon-chain/db/filesystem/data_column_test.go
@@ -0,0 +1,734 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func TestNewDataColumnStorage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("No base path", func(t *testing.T) {
|
||||
_, err := NewDataColumnStorage(ctx)
|
||||
require.ErrorIs(t, err, errNoBasePath)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
storage, err := NewDataColumnStorage(ctx, WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, dir, storage.base)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWarmCache(t *testing.T) {
|
||||
storage, err := NewDataColumnStorage(
|
||||
context.Background(),
|
||||
WithDataColumnBasePath(t.TempDir()),
|
||||
WithDataColumnRetentionEpochs(10_000),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 128_034, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4001
|
||||
{Slot: 128_034, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4001
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 131_138, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{6}: {
|
||||
{Slot: 131_168, ColumnIndex: 0, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
err = storage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
storage.retentionEpochs = 4_096
|
||||
|
||||
storage.WarmCache()
|
||||
require.Equal(t, primitives.Epoch(4_000), storage.cache.lowestCachedEpoch)
|
||||
require.Equal(t, 6, len(storage.cache.cache))
|
||||
|
||||
summary, ok := storage.cache.get([fieldparams.RootLength]byte{1})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{2})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{3})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_001, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{4})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{5})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{6})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_099, mask: [fieldparams.NumberOfColumns]bool{true}}, summary)
|
||||
}
|
||||
|
||||
func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("wrong numbers of columns", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfColumns = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errWrongNumberOfColumns)
|
||||
})
|
||||
|
||||
t.Run("one of the column index is too large", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errDataColumnIndexTooLarge)
|
||||
})
|
||||
|
||||
t.Run("different slots", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{Slot: 1, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 2, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errDataColumnSidecarsFromDifferentSlots)
|
||||
})
|
||||
|
||||
t.Run("new file - no data columns to save", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{{}: {}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("new file - different data column size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{1, 2, 3, 4}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errWrongSszEncodedDataColumnSidecarSize)
|
||||
})
|
||||
|
||||
t.Run("existing file - wrong incoming SSZ encoded size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}}},
|
||||
)
|
||||
|
||||
// Save data columns into a file.
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Build a data column sidecar for the same block but with a different
|
||||
// column index and a different SSZ encoded size.
_, verifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 13, DataColumn: []byte{1, 2, 3, 4}}}},
|
||||
)
|
||||
|
||||
// Try to rewrite the file.
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errWrongSszEncodedDataColumnSidecarSize)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
{2}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, inputVerifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{3}: {
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
err = dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
type fixture struct {
|
||||
fileName string
|
||||
blockRoot [fieldparams.RootLength]byte
|
||||
expectedIndices [mandatoryNumberOfColumns]byte
|
||||
dataColumnParams []verification.DataColumnParams
|
||||
}
|
||||
|
||||
fixtures := []fixture{
|
||||
{
|
||||
fileName: "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{1},
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, nonZeroOffset + 4, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, nonZeroOffset + 1, nonZeroOffset, nonZeroOffset + 2, 0, nonZeroOffset + 3,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []verification.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{2},
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, nonZeroOffset, nonZeroOffset + 1, 0, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []verification.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0300000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{3},
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, nonZeroOffset + 1, 0, 0, 0, nonZeroOffset, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []verification.DataColumnParams{
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, fixture := range fixtures {
|
||||
// Build expected data column sidecars.
|
||||
_, expectedDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{fixture.blockRoot: fixture.dataColumnParams},
|
||||
)
|
||||
|
||||
// Build expected bytes.
|
||||
firstSszEncodedDataColumnSidecar, err := expectedDataColumnSidecars[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnSidecarsCount := len(expectedDataColumnSidecars)
|
||||
sszEncodedDataColumnSidecarSize := len(firstSszEncodedDataColumnSidecar)
|
||||
|
||||
sszEncodedDataColumnSidecars := make([]byte, 0, dataColumnSidecarsCount*sszEncodedDataColumnSidecarSize)
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, firstSszEncodedDataColumnSidecar...)
|
||||
for _, dataColumnSidecar := range expectedDataColumnSidecars[1:] {
|
||||
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
|
||||
}
|
||||
|
||||
var encodedSszEncodedDataColumnSidecarSize [encodedSszEncodedDataColumnSidecarSizeSize]byte
|
||||
binary.BigEndian.PutUint32(encodedSszEncodedDataColumnSidecarSize[:], uint32(sszEncodedDataColumnSidecarSize))
|
||||
|
||||
expectedBytes := make([]byte, 0, headerSize+dataColumnSidecarsCount*sszEncodedDataColumnSidecarSize)
|
||||
expectedBytes = append(expectedBytes, []byte{0x01}...)
|
||||
expectedBytes = append(expectedBytes, encodedSszEncodedDataColumnSidecarSize[:]...)
|
||||
expectedBytes = append(expectedBytes, fixture.expectedIndices[:]...)
|
||||
expectedBytes = append(expectedBytes, sszEncodedDataColumnSidecars...)
|
||||
|
||||
// Check the actual content of the file.
|
||||
actualBytes, err := afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBytes, actualBytes)
|
||||
|
||||
// Check the summary.
|
||||
indices := map[uint64]bool{}
|
||||
for _, dataColumnParam := range fixture.dataColumnParams {
|
||||
indices[dataColumnParam.ColumnIndex] = true
|
||||
}
|
||||
|
||||
summary := dataColumnStorage.Summary(fixture.blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, indices[index], summary.HasIndex(index))
|
||||
}
|
||||
|
||||
err = dataColumnStorage.Remove(fixture.blockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary = dataColumnStorage.Summary(fixture.blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, false, summary.HasIndex(index))
|
||||
}
|
||||
|
||||
_, err = afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetDataColumnSidecars(t *testing.T) {
|
||||
t.Run("not found", func(t *testing.T) {
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(verifiedRODataColumnSidecars))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, expectedVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(expectedVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
|
||||
verifiedRODataColumnSidecars, err = dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
t.Run("not found", func(t *testing.T) {
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{Slot: 32, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 32, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 33, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 33, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
|
||||
require.Equal(t, primitives.Epoch(0), summary.epoch)
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
|
||||
summary = dataColumnStorage.Summary([fieldparams.RootLength]byte{2})
|
||||
require.Equal(t, primitives.Epoch(1), summary.epoch)
|
||||
require.Equal(t, uint64(2), summary.Count())
|
||||
|
||||
actual, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
|
||||
actual, err = dataColumnStorage.Get([fieldparams.RootLength]byte{2}, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(actual))
|
||||
})
|
||||
}
|
||||
|
||||
func TestClear(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
{2}: {{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}}},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
filePaths := []string{
|
||||
"0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
}
|
||||
|
||||
for _, filePath := range filePaths {
|
||||
_, err = afero.ReadFile(dataColumnStorage.fs, filePath)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = dataColumnStorage.Clear()
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, false, summary.HasIndex(index))
|
||||
}
|
||||
|
||||
for _, filePath := range filePaths {
|
||||
_, err = afero.ReadFile(dataColumnStorage.fs, filePath)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadata(t *testing.T) {
|
||||
t.Run("wrong version", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Save data columns into a file.
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Alter the version.
|
||||
const filePath = "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs"
|
||||
file, err := dataColumnStorage.fs.OpenFile(filePath, os.O_WRONLY, os.FileMode(0600))
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := file.Write([]byte{42})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, count)
|
||||
|
||||
// Try to read the metadata.
|
||||
_, err = dataColumnStorage.metadata(file)
|
||||
require.ErrorIs(t, err, errWrongVersion)
|
||||
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewStorageIndices(t *testing.T) {
|
||||
t.Run("wrong number of columns", func(t *testing.T) {
|
||||
_, err := newStorageIndices(nil)
|
||||
require.ErrorIs(t, err, errWrongNumberOfColumns)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
var indices [mandatoryNumberOfColumns]byte
|
||||
indices[0] = 1
|
||||
|
||||
storageIndices, err := newStorageIndices(indices[:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, indices, storageIndices.indices)
|
||||
})
|
||||
}
|
||||
|
||||
func TestStorageIndicesGet(t *testing.T) {
|
||||
t.Run("index too large", func(t *testing.T) {
|
||||
var indices storageIndices
|
||||
_, _, err := indices.get(1_000_000)
|
||||
require.ErrorIs(t, errDataColumnIndexTooLarge, err)
|
||||
})
|
||||
|
||||
t.Run("index not set", func(t *testing.T) {
|
||||
const expected = false
|
||||
var indices storageIndices
|
||||
actual, _, err := indices.get(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("index set", func(t *testing.T) {
|
||||
const (
|
||||
expectedOk = true
|
||||
expectedPosition = int64(3)
|
||||
)
|
||||
|
||||
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131}} // 131 = 0x80 (sentinel bit) + 3: column 1 is stored at position 3
|
||||
actualOk, actualPosition, err := indices.get(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedOk, actualOk)
|
||||
require.Equal(t, expectedPosition, actualPosition)
|
||||
})
|
||||
}
|
||||
|
||||
func TestStorageIndicesLen(t *testing.T) {
|
||||
const expectedLen = 2
|
||||
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131, 0, 128}}
|
||||
actualLen := indices.len()
|
||||
require.Equal(t, expectedLen, actualLen)
|
||||
}
|
||||
|
||||
func TestStorageIndicesAll(t *testing.T) {
|
||||
expectedIndices := []uint64{1, 3}
|
||||
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131, 0, 128}}
|
||||
actualIndices := indices.all()
|
||||
require.DeepEqual(t, expectedIndices, actualIndices)
|
||||
}
|
||||
|
||||
func TestStorageIndicesSet(t *testing.T) {
|
||||
t.Run("data column index too large", func(t *testing.T) {
|
||||
var indices storageIndices
|
||||
err := indices.set(1_000_000, 0)
|
||||
require.ErrorIs(t, errDataColumnIndexTooLarge, err)
|
||||
})
|
||||
|
||||
t.Run("position too large", func(t *testing.T) {
|
||||
var indices storageIndices
|
||||
err := indices.set(0, 255)
|
||||
require.ErrorIs(t, errDataColumnIndexTooLarge, err)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
expected := [mandatoryNumberOfColumns]byte{0, 0, 128, 0, 131}
|
||||
var indices storageIndices
|
||||
|
||||
err := indices.set(2, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = indices.set(4, 3)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, expected, indices.indices)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrune(t *testing.T) {
|
||||
t.Run(("nothing to prune"), func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnStorage.prune()
|
||||
})
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
var compareSlices = func(left, right []string) bool {
|
||||
if len(left) != len(right) {
|
||||
return false
|
||||
}
|
||||
|
||||
leftMap := make(map[string]bool, len(left))
|
||||
for _, leftItem := range left {
|
||||
leftMap[leftItem] = true
|
||||
}
|
||||
|
||||
for _, rightItem := range right {
|
||||
if _, ok := leftMap[rightItem]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_169, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
{Slot: 131_169, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 262_144, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
|
||||
{Slot: 262_144, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir), WithDataColumnRetentionEpochs(10_000))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
dirs, err := listDir(dataColumnStorage.fs, ".")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0", "1", "2"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"1", "4000"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"4099", "4098"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"8192"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/1")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0000000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/4000")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{
|
||||
"0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0300000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
|
||||
_, verifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{6}: {{Slot: 451_141, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}}, // Period 3 - Epoch 14_098
|
||||
},
|
||||
)
|
||||
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnStorage.prune()
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, ".")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"1", "2", "3"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"4099"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"8192"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "3")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"14098"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "3/14098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0600000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
})
|
||||
}
|
||||
104
beacon-chain/db/filesystem/doc.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package filesystem
|
||||
|
||||
// nolint:dupword
|
||||
/*
|
||||
Data column sidecars storage documentation
|
||||
==========================================
|
||||
|
||||
File organisation
|
||||
-----------------
|
||||
- The first byte represents the version of the file structure (up to 0xff = 255).
|
||||
We set it to 0x01.
|
||||
Note: This is not strictly needed, but it will help a lot if, in the future,
|
||||
we want to modify the file structure.
|
||||
- The next 4 bytes represent the size of an SSZ encoded data column sidecar.
  (See the `Computation of the maximum size of a DataColumnSidecar` section for a description
  of how this value is computed).
|
||||
- The next 128 bytes represent, for each possible column index, the position of that column's sidecar in the file.
  The first (most significant) bit of each byte is set to 0 if the corresponding column is not stored in the file,
  and set to 1 if it is.
  The remaining 7 bits (values from 0 to 127) represent the position of the sidecar in the file.
  This sentinel bit is needed to distinguish between a column stored at position 0 and a column that is not stored at all.
  Example: If the column with index 5 is stored at position 3 in the file, then indices[5] = 0x80 + 0x03 = 0x83.
  (A small encoding/decoding sketch is given after the layout table below.)
- The rest of the file is the sequence of SSZ encoded data column sidecars, stored one after the other.
|
||||
|
||||
|
||||
|-------------------------------------------|----------------------------------------------------------------------|
| Byte offset                               | Description                                                          |
|-------------------------------------------|----------------------------------------------------------------------|
| 0                                         | version (1 byte)                                                     |
| 1                                         | sszEncodedDataColumnSidecarSize (4 bytes)                            |
| 5                                         | indices (128 bytes)                                                  |
| 133 + 0*sszEncodedDataColumnSidecarSize   | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
| 133 + 1*sszEncodedDataColumnSidecarSize   | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
| 133 + 2*sszEncodedDataColumnSidecarSize   | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
| ...                                       | ...                                                                  |
| 133 + 127*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
|-------------------------------------------|----------------------------------------------------------------------|
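A minimal sketch (illustrative only, not the actual implementation) of how an `indices` entry is built
and decoded; 0x80 is the sentinel bit (the `nonZeroOffset` used in the tests):

    // Mark column `columnIndex` as stored at position `pos` (0..127) in the file.
    indices[columnIndex] = 0x80 | byte(pos)

    // Decode an entry back into (present, position).
    present := indices[columnIndex]&0x80 != 0
    position := int64(indices[columnIndex] & 0x7f)

For example, a column stored at position 3 yields 0x80 + 0x03 = 0x83 = 131, the value exercised by the storage indices tests.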
|
||||
|
||||
Each file is named after the block root the data columns it contains are committed to.
|
||||
Example: `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
|
||||
|
||||
Database organisation
|
||||
---------------------
|
||||
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
|
||||
- The first layer is a directory corresponding to the `period`, i.e. the epoch divided by 4096 (integer division).
- The second layer is a directory corresponding to the epoch.
- All files are then stored in the epoch directory.
(A sketch of the corresponding path construction is given after the example tree below.)
|
||||
|
||||
Example:
|
||||
data-columns
|
||||
├── 0
|
||||
│ ├── 3638
|
||||
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
|
||||
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
|
||||
│ │ ├── ...
|
||||
│ │ ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
|
||||
│ │ └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
|
||||
│ └── 3639
|
||||
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
|
||||
│ ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
|
||||
│ ├── ...
|
||||
│ ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
|
||||
│ └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
|
||||
└── 1
|
||||
├── 4096
|
||||
│ ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
|
||||
│ ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
|
||||
│ ├── ...
|
||||
│ ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
|
||||
│ └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
|
||||
└── 4097
|
||||
├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
|
||||
├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
|
||||
├── ...
|
||||
├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
|
||||
└── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
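As a minimal sketch of the path construction (the helper name and constant are illustrative; the real code may differ):

    // Illustrative only: derive the storage path of the file holding the data
    // column sidecars committed to by the block with the given root.
    func dataColumnFilePath(epoch primitives.Epoch, root [32]byte) string {
        const epochsPerPeriod = 4096 // epochs per `period` directory
        period := uint64(epoch) / epochsPerPeriod
        return fmt.Sprintf("%d/%d/%#x.sszs", period, epoch, root)
    }

For example, epoch 3638 falls in period 0, so its files live under `0/3638/`, as in the tree above.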
|
||||
|
||||
Computation of the maximum size of a DataColumnSidecar
|
||||
------------------------------------------------------
|
||||
https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#datacolumnsidecar
|
||||
|
||||
|
||||
class DataColumnSidecar(Container):
|
||||
index: ColumnIndex # Index of column in extended matrix
|
||||
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||
signed_block_header: SignedBeaconBlockHeader
|
||||
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
|
||||
|
||||
|
||||
- index: 8 bytes (ColumnIndex is a uint64)
- `column`: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 64 (FIELD_ELEMENTS_PER_CELL) * 32 bytes (BYTES_PER_FIELD_ELEMENT) = 8,388,608 bytes
- kzg_commitments: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGCommitment) = 196,608 bytes
- kzg_proofs: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGProof) = 196,608 bytes
- signed_block_header: 8 bytes (Slot) + 8 bytes (ValidatorIndex) + 3 * 32 bytes (Root) + 96 bytes (BLSSignature) = 208 bytes
- kzg_commitments_inclusion_proof: 4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) * 32 bytes = 128 bytes

TOTAL: 8,782,168 bytes
log(8,782,168) / log(2) ~= 23.07

==> 32 bits (4 bytes) are more than enough to store the maximum size of an SSZ encoded data column sidecar.

A 4-byte size field can represent sizes up to 2**32 - 1 = 4,294,967,295 bytes,
which leaves roughly 4 GB of headroom over the 8,782,168 bytes computed above for the extra data added by the SSZ encoding itself (offsets, etc.), which is more than enough.
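
Putting these pieces together, reading a single column back from a file could look like the following sketch
(illustrative only: the function name is hypothetical, error handling is minimal, and `encoding/binary` and `io` are assumed to be imported):

    // readColumn returns the SSZ encoded sidecar stored for `columnIndex`, if any.
    // The caller must ensure columnIndex < 128.
    func readColumn(f io.ReaderAt, columnIndex uint64) ([]byte, bool, error) {
        header := make([]byte, 133) // 1 (version) + 4 (size) + 128 (indices)
        if _, err := f.ReadAt(header, 0); err != nil {
            return nil, false, err
        }
        size := binary.BigEndian.Uint32(header[1:5]) // size of one SSZ encoded sidecar
        entry := header[5+columnIndex]
        if entry&0x80 == 0 { // sentinel bit unset: this column is not stored
            return nil, false, nil
        }
        position := int64(entry & 0x7f) // position of the sidecar in the file
        buf := make([]byte, size)
        if _, err := f.ReadAt(buf, 133+position*int64(size)); err != nil {
            return nil, false, err
        }
        return buf, true, nil
    }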
|
||||
*/
|
||||
@@ -3,7 +3,6 @@ package filesystem
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -193,7 +192,7 @@ func rootFromPath(p string) ([32]byte, error) {
|
||||
}
|
||||
|
||||
func idxFromPath(p string) (uint64, error) {
|
||||
p = path.Base(p)
|
||||
p = filepath.Base(p)
|
||||
|
||||
if !isSszFile(p) {
|
||||
return 0, errors.Wrap(errNotBlobSSZ, "does not have .ssz extension")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
@@ -220,28 +219,6 @@ func TestListDir(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlotFromBlob(t *testing.T) {
|
||||
cases := []struct {
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{slot: 0},
|
||||
{slot: 2},
|
||||
{slot: 1123581321},
|
||||
{slot: math.MaxUint64},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
|
||||
sc := sidecars[0]
|
||||
enc, err := sc.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
slot, err := slotFromBlob(bytes.NewReader(enc))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.slot, slot)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIterationComplete(t *testing.T) {
|
||||
targets := []migrationTestTarget{
|
||||
{
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
@@ -115,29 +114,25 @@ func (l *flatLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error)
|
||||
|
||||
// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
|
||||
func slotFromFile(name string, fs afero.Fs) (primitives.Slot, error) {
|
||||
f, err := fs.Open(name)
|
||||
// Read the whole file and try to unmarshal it as a data column sidecar; if that fails, try to unmarshal it as a blob sidecar.
|
||||
content, err := afero.ReadFile(fs, name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log.WithError(err).Errorf("Could not close blob file")
|
||||
}
|
||||
}()
|
||||
return slotFromBlob(f)
|
||||
}
|
||||
|
||||
// slotFromBlob reads the ssz data of a file at the specified offset (8 + 131072 + 48 + 48 = 131176 bytes),
|
||||
// which is calculated based on the size of the BlobSidecar struct and is based on the size of the fields
|
||||
// preceding the slot information within SignedBeaconBlockHeader.
|
||||
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
|
||||
b := make([]byte, 8)
|
||||
_, err := at.ReadAt(b, 131176)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
dataColumnSidecar := ðpb.DataColumnSidecar{}
|
||||
err = dataColumnSidecar.UnmarshalSSZ(content)
|
||||
if err == nil {
|
||||
return dataColumnSidecar.SignedBlockHeader.Header.Slot, nil
|
||||
}
|
||||
rawSlot := binary.LittleEndian.Uint64(b)
|
||||
return primitives.Slot(rawSlot), nil
|
||||
|
||||
blobSidecar := ðpb.BlobSidecar{}
|
||||
err = blobSidecar.UnmarshalSSZ(content)
|
||||
if err == nil {
|
||||
return blobSidecar.SignedBlockHeader.Header.Slot, nil
|
||||
}
|
||||
|
||||
return 0, errors.New("failed to unmarshal as blob sidecar or data column sidecar")
|
||||
}
|
||||
|
||||
type flatSlotReader struct {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// Blobs
|
||||
blobBuckets = []float64{3, 5, 7, 9, 11, 13}
|
||||
blobSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "blob_storage_save_latency",
|
||||
@@ -33,4 +34,29 @@ var (
|
||||
Name: "blob_disk_bytes",
|
||||
Help: "Approximate number of bytes occupied by blobs in storage",
|
||||
})
|
||||
|
||||
// Data columns
|
||||
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
|
||||
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_save_latency",
|
||||
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
|
||||
Buckets: dataColumnBuckets,
|
||||
})
|
||||
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_get_latency",
|
||||
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
|
||||
Buckets: dataColumnBuckets,
|
||||
})
|
||||
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "data_column_pruned",
|
||||
Help: "Number of DataColumnSidecar pruned.",
|
||||
})
|
||||
dataColumnWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "data_column_written",
|
||||
Help: "Number of DataColumnSidecar written",
|
||||
})
|
||||
dataColumnDiskCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "data_column_disk_count",
|
||||
Help: "Approximate number of data columns in storage",
|
||||
})
|
||||
)
|
||||
|
||||
@@ -1,14 +1,19 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// Blobs
|
||||
// -----
|
||||
|
||||
// NewEphemeralBlobStorage should only be used for tests.
|
||||
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
|
||||
// improving test performance and simplifying cleanup.
|
||||
@@ -75,3 +80,52 @@ func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStor
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Data columns
|
||||
// ------------
|
||||
|
||||
// NewEphemeralDataColumnStorage should only be used for tests.
|
||||
// The instance of DataColumnStorage returned is backed by an in-memory virtual filesystem,
|
||||
// improving test performance and simplifying cleanup.
|
||||
func NewEphemeralDataColumnStorage(t testing.TB, opts ...DataColumnStorageOption) *DataColumnStorage {
|
||||
return NewWarmedEphemeralDataColumnStorageUsingFs(t, afero.NewMemMapFs(), opts...)
|
||||
}
|
||||
|
||||
// NewEphemeralDataColumnStorageAndFs can be used by tests that want access to the virtual filesystem
|
||||
// in order to interact with it outside the parameters of the DataColumnStorage API.
|
||||
func NewEphemeralDataColumnStorageAndFs(t testing.TB, opts ...DataColumnStorageOption) (afero.Fs, *DataColumnStorage) {
|
||||
fs := afero.NewMemMapFs()
|
||||
dcs := NewWarmedEphemeralDataColumnStorageUsingFs(t, fs, opts...)
|
||||
return fs, dcs
|
||||
}
|
||||
|
||||
func NewWarmedEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
|
||||
bs := NewEphemeralDataColumnStorageUsingFs(t, fs, opts...)
|
||||
bs.WarmCache()
|
||||
return bs
|
||||
}
|
||||
|
||||
func NewEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
|
||||
opts = append(opts,
|
||||
WithDataColumnRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
|
||||
WithDataColumnFs(fs),
|
||||
)
|
||||
|
||||
bs, err := NewDataColumnStorage(context.Background(), opts...)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return bs
|
||||
}
|
||||
|
||||
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
|
||||
c := newDataColumnStorageSummaryCache()
|
||||
for root, indices := range set {
|
||||
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -215,7 +215,7 @@ func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod u
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return updates, err
|
||||
return updates, nil
|
||||
}
|
||||
|
||||
func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
|
||||
|
||||
@@ -518,9 +518,9 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
|
||||
switch {
|
||||
case hasFuluKey(enc):
|
||||
protoState := ðpb.BeaconStateFulu{}
|
||||
protoState := ðpb.BeaconStateElectra{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Fulu")
|
||||
}
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
@@ -690,7 +690,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
}
|
||||
return snappy.Encode(nil, append(ElectraKey, rawObj...)), nil
|
||||
case version.Fulu:
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateFulu)
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
|
||||
if !ok {
|
||||
return nil, errors.New("non valid inner state")
|
||||
}
|
||||
|
||||
@@ -25,12 +25,14 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/execution/types:go_default_library",
|
||||
@@ -97,6 +99,7 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
gethRPC "github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
@@ -36,16 +38,23 @@ var (
|
||||
NewPayloadMethod,
|
||||
NewPayloadMethodV2,
|
||||
NewPayloadMethodV3,
|
||||
NewPayloadMethodV4,
|
||||
ForkchoiceUpdatedMethod,
|
||||
ForkchoiceUpdatedMethodV2,
|
||||
ForkchoiceUpdatedMethodV3,
|
||||
GetPayloadMethod,
|
||||
GetPayloadMethodV2,
|
||||
GetPayloadMethodV3,
|
||||
GetPayloadMethodV4,
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
GetBlobsV1,
|
||||
}
|
||||
electraEngineEndpoints = []string{
|
||||
NewPayloadMethodV4,
|
||||
GetPayloadMethodV4,
|
||||
}
|
||||
fuluEngineEndpoints = []string{
|
||||
GetPayloadMethodV5,
|
||||
GetBlobsV2,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -71,6 +80,8 @@ const (
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// GetPayloadMethodV4 is the get payload method added for electra
|
||||
GetPayloadMethodV4 = "engine_getPayloadV4"
|
||||
// GetPayloadMethodV5 is the get payload method added for fulu
|
||||
GetPayloadMethodV5 = "engine_getPayloadV5"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
@@ -83,8 +94,15 @@ const (
|
||||
ExchangeCapabilities = "engine_exchangeCapabilities"
|
||||
// GetBlobsV1 request string for JSON-RPC.
|
||||
GetBlobsV1 = "engine_getBlobsV1"
|
||||
// GetBlobsV2 request string for JSON-RPC.
|
||||
GetBlobsV2 = "engine_getBlobsV2"
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
|
||||
// TODO: Remove this temporarily needed hack. Geth accepts as input blob txs with blob proofs and
// does the heavy lifting of building the cell proofs, while normally this is done by the tx sender.
// This is a handy hack because it lets the CL act as if the tx sender had actually computed the cell proofs.
// The only downside is that `engine_getPayloadV<x>` takes a lot of time.
|
||||
// defaultEngineTimeout = time.Second
|
||||
defaultEngineTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
@@ -106,6 +124,7 @@ type Reconstructor interface {
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error)
|
||||
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
|
||||
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
@@ -255,6 +274,9 @@ func (s *Service) ForkchoiceUpdated(
|
||||
|
||||
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
|
||||
pe := slots.ToEpoch(slot)
|
||||
if pe >= params.BeaconConfig().FuluForkEpoch {
|
||||
return GetPayloadMethodV5, &pb.ExecutionBundleElectra{}
|
||||
}
|
||||
if pe >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
|
||||
}
|
||||
@@ -287,7 +309,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
||||
}
|
||||
res, err := blocks.NewGetPayloadResponse(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "new get payload response")
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
@@ -296,6 +318,13 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
|
||||
defer span.End()
|
||||
|
||||
// Only check for electra related engine methods if it has been activated.
|
||||
if params.ElectraEnabled() {
|
||||
supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
|
||||
}
|
||||
if params.FuluEnabled() {
|
||||
supportedEngineEndpoints = append(supportedEngineEndpoints, fuluEngineEndpoints...)
|
||||
}
|
||||
var result []string
|
||||
err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
|
||||
if err != nil {
|
||||
@@ -489,9 +518,10 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
|
||||
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
|
||||
defer span.End()
|
||||
|
||||
// If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
|
||||
if !s.capabilityCache.has(GetBlobsV1) {
|
||||
return nil, nil
|
||||
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV1))
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProof, len(versionedHashes))
|
||||
@@ -499,6 +529,19 @@ func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) (
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
|
||||
defer span.End()
|
||||
|
||||
if !s.capabilityCache.has(GetBlobsV2) {
|
||||
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
|
||||
// a beacon block with a full execution payload via the engine API.
|
||||
func (s *Service) ReconstructFullBlock(
|
||||
@@ -609,6 +652,84 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
return verifiedBlobs, nil
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
|
||||
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
|
||||
// and constructs the corresponding verified read-only data column sidecars.
|
||||
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
blockBody := block.Block().Body()
|
||||
kzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Collect KZG hashes for all blobs
|
||||
var kzgHashes []common.Hash
|
||||
for _, commitment := range kzgCommitments {
|
||||
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
|
||||
}
|
||||
|
||||
// Fetch all blobsAndCellsProofs from EL
|
||||
blobAndProofV2s, err := s.GetBlobsV2(ctx, kzgHashes)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
|
||||
}
|
||||
|
||||
var cellsAndProofs []kzg.CellsAndProofs
|
||||
for _, blobAndProof := range blobAndProofV2s {
|
||||
if blobAndProof == nil {
|
||||
return nil, wrapWithBlockRoot(errors.New("unable to reconstruct data column sidecars, did not get all blobs from EL"), blockRoot, "")
|
||||
}
|
||||
|
||||
var blob kzg.Blob
|
||||
copy(blob[:], blobAndProof.Blob)
|
||||
cells, err := kzg.ComputeCells(&blob)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "could not compute cells")
|
||||
}
|
||||
|
||||
proofs := make([]kzg.Proof, len(blobAndProof.KzgProofs))
|
||||
for i, proof := range blobAndProof.KzgProofs {
|
||||
proofs[i] = kzg.Proof(proof)
|
||||
}
|
||||
cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{
|
||||
Cells: cells,
|
||||
Proofs: proofs,
|
||||
})
|
||||
}
|
||||
|
||||
header, err := block.Header()
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "could not get header")
|
||||
}
|
||||
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "could not get Merkle proof for KZG commitments")
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecarsForReconstruct(kzgCommitments, header, kzgCommitmentsInclusionProof, cellsAndProofs)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "could not reconstruct data column sidecars")
|
||||
}
|
||||
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, len(dataColumnSidecars))
|
||||
for i, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
|
||||
}
|
||||
|
||||
verifiedRODataColumns[i] = blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"slot": block.Block().Slot(),
|
||||
}).Debug("Data columns successfully reconstructed from EL")
|
||||
|
||||
return verifiedRODataColumns, nil
|
||||
}
|
||||
|
||||
func fullPayloadFromPayloadBody(
|
||||
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
@@ -896,3 +1017,8 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
return hexutil.EncodeBig(number)
|
||||
}
|
||||
|
||||
// wrapWithBlockRoot returns a new error with the given block root.
|
||||
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
|
||||
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
@@ -317,11 +318,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.GetBlobs())
|
||||
})
|
||||
t.Run(GetPayloadMethodV4, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
@@ -372,11 +373,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.GetBlobs())
|
||||
requests := &pb.ExecutionRequests{
|
||||
Deposits: []*pb.DepositRequest{
|
||||
{
|
||||
@@ -2424,7 +2425,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
t.Run("get-blobs end point is not supported", func(t *testing.T) {
|
||||
hi := mockSummary(t, []bool{true, true, true, true, true, false})
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "engine_getBlobsV1 is not supported", err)
|
||||
require.Equal(t, 0, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
@@ -2476,6 +2477,64 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup right fork epoch
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
client := &Service{capabilityCache: &capabilityCache{}}
|
||||
b := util.NewBeaconBlockFulu()
|
||||
b.Block.Slot = 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
kzgCommitments := createRandomKzgCommitments(t, 6)
|
||||
b.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, "get blobs V2 for block", err)
|
||||
})
|
||||
|
||||
t.Run("receiving all blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{true, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("missing some blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{false, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, "unable to reconstruct data column sidecars, did not get all blobs from EL", err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
}
|
||||
|
||||
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
|
||||
kzgCommitments := make([][]byte, num)
|
||||
for i := range kzgCommitments {
|
||||
@@ -2511,6 +2570,42 @@ func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *http
|
||||
}))
|
||||
}
|
||||
|
||||
func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
require.Equal(t, len(blobMasks), numBlobs)
|
||||
|
||||
blobAndCellProofs := make([]*pb.BlobAndProofV2Json, numBlobs)
|
||||
for i := range blobAndCellProofs {
|
||||
if !blobMasks[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
blobAndCellProofs[i] = &pb.BlobAndProofV2Json{
|
||||
Blob: []byte("0xblob"),
|
||||
KzgProofs: []hexutil.Bytes{},
|
||||
}
|
||||
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
|
||||
cellProof := make([]byte, 48)
|
||||
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
|
||||
}
|
||||
}
|
||||
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": blobAndCellProofs,
|
||||
}
|
||||
|
||||
err := json.NewEncoder(w).Encode(respJSON)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
}
|
||||
|
||||
func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, err := rpc.DialHTTP(url)
|
||||
require.NoError(t, err)
|
||||
@@ -2522,6 +2617,12 @@ func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Se
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func setupRpcClientV2(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, client := setupRpcClient(t, url, client)
|
||||
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV2: nil}}
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func testNewBlobVerifier() verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return &verification.MockBlobVerifier{
|
||||
|
||||
@@ -38,6 +38,8 @@ type EngineClient struct {
|
||||
ErrGetPayload error
|
||||
BlobSidecars []blocks.VerifiedROBlob
|
||||
ErrorBlobSidecars error
|
||||
DataColumnSidecars []blocks.VerifiedRODataColumn
|
||||
ErrorDataColumnSidecars error
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -113,6 +115,10 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
}
|
||||
|
||||
func (e *EngineClient) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
|
||||
@@ -166,6 +166,23 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
|
||||
}
|
||||
}
|
||||
|
||||
// processSingleAttestation logs when the beacon node observes a single attestation from a tracked validator.
|
||||
func (s *Service) processSingleAttestation(att ethpb.Att) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
single, ok := att.(*ethpb.SingleAttestation)
|
||||
if !ok {
|
||||
log.Errorf("Wrong attestation type (expected %T, got %T)", ðpb.SingleAttestation{}, att)
|
||||
return
|
||||
}
|
||||
|
||||
if s.canUpdateAttestedValidator(single.AttesterIndex, single.GetData().Slot) {
|
||||
logFields := logMessageTimelyFlagsForIndex(single.AttesterIndex, att.GetData())
|
||||
log.WithFields(logFields).Info("Processed unaggregated attestation")
|
||||
}
|
||||
}
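The type assertion above narrows the generic ethpb.Att interface to the concrete single-attestation type before reading fields that only exist on it. A minimal standalone sketch of that pattern, assuming the ethpb and primitives packages already imported by this file:

func attesterIndexIfSingle(att ethpb.Att) (primitives.ValidatorIndex, bool) {
	// Only *ethpb.SingleAttestation carries AttesterIndex directly; any other
	// attestation type is reported as not being a single attestation.
	single, ok := att.(*ethpb.SingleAttestation)
	if !ok {
		return 0, false
	}
	return single.AttesterIndex, true
}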
|
||||
|
||||
// processUnaggregatedAttestation logs when the beacon node observes an unaggregated attestation from a tracked validator.
|
||||
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att ethpb.Att) {
|
||||
s.RLock()
|
||||
|
||||
@@ -241,7 +241,7 @@ func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.S
|
||||
if !ok {
|
||||
log.Error("Event feed data is not of type *operation.SingleAttReceivedData")
|
||||
} else {
|
||||
s.processUnaggregatedAttestation(s.ctx, data.Attestation)
|
||||
s.processSingleAttestation(data.Attestation)
|
||||
}
|
||||
case operation.ExitReceived:
|
||||
data, ok := e.Data.(*operation.ExitReceivedData)
|
||||
|
||||
@@ -22,6 +22,7 @@ go_library(
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
func configureTracing(cliCtx *cli.Context) error {
|
||||
return tracing.Setup(
|
||||
cliCtx.Context,
|
||||
"beacon-chain", // service name
|
||||
cliCtx.String(cmd.TracingProcessNameFlag.Name),
|
||||
cliCtx.String(cmd.TracingEndpointFlag.Name),
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/kv"
|
||||
@@ -86,42 +87,45 @@ type serviceFlagOpts struct {
|
||||
// full PoS node. It handles the lifecycle of the entire system and registers
|
||||
// services to a service registry.
|
||||
type BeaconNode struct {
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
DataColumnStorageOptions []filesystem.DataColumnStorageOption
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -159,6 +163,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
@@ -186,6 +191,15 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.BlobStorage = blobs
|
||||
}
|
||||
|
||||
if beacon.DataColumnStorage == nil {
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new data column storage")
|
||||
}
|
||||
|
||||
beacon.DataColumnStorage = dataColumnStorage
|
||||
}
|
||||
|
||||
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not start modules")
|
||||
@@ -276,7 +290,9 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
|
||||
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
|
||||
return nil, errors.Wrap(err, "could not start DB")
|
||||
}
|
||||
|
||||
beacon.BlobStorage.WarmCache()
|
||||
beacon.DataColumnStorage.WarmCache()
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx); err != nil {
|
||||
@@ -488,6 +504,10 @@ func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath str
|
||||
return nil, errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
if err := b.DataColumnStorage.Clear(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear data column storage")
|
||||
}
|
||||
|
||||
d, err = kv.NewKVStore(b.ctx, dbPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create new database")
|
||||
@@ -694,6 +714,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -772,9 +793,11 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
blockchain.WithBlobStorage(b.BlobStorage),
|
||||
blockchain.WithDataColumnStorage(b.DataColumnStorage),
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithCustodyInfo(b.custodyInfo),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -857,8 +880,11 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
regularsync.WithDataColumnStorage(b.DataColumnStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
regularsync.WithCustodyInfo(b.custodyInfo),
|
||||
)
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
@@ -882,6 +908,8 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
@@ -976,6 +1004,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -1003,6 +1032,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
})
|
||||
@@ -1142,6 +1172,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
// TODO: Add backfill for data column storage
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
|
||||
node.Close()
|
||||
@@ -75,7 +75,7 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
node.services = &runtime.ServiceRegistry{}
|
||||
go func() {
|
||||
@@ -99,7 +99,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
node.Start()
|
||||
@@ -130,7 +130,7 @@ func TestClearDB(t *testing.T) {
|
||||
context, cancel := newCliContextWithCancel(&app, set)
|
||||
options := []Option{
|
||||
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
|
||||
}
|
||||
_, err = New(context, cancel, options...)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -50,3 +50,20 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
|
||||
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.DataColumnStorage = bs
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorageOptions appends one or more filesystem.DataColumnStorageOption values to the beacon node,
// to be used when initializing data column storage.
|
||||
func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.DataColumnStorageOptions = append(bn.DataColumnStorageOptions, opt...)
|
||||
return nil
|
||||
}
|
||||
}
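A hedged sketch of how the two new options compose when constructing a node, mirroring the node tests further down in this diff; the ctx, cancel and t values are assumed to come from the surrounding test harness:

node, err := New(ctx, cancel,
	WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
	WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
)
if err != nil {
	t.Fatal(err)
}
defer node.Close()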
|
||||
|
||||
@@ -6,8 +6,8 @@ go_library(
|
||||
"doc.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"pool.go",
|
||||
"service.go",
|
||||
"service_new.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings",
|
||||
|
||||
beacon-chain/operations/slashings/pool.go (new file, 324 lines)
@@ -0,0 +1,324 @@
|
||||
package slashings
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
coretime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
)
|
||||
|
||||
// NewPool returns an initialized attester slashing and proposer slashing pool.
|
||||
func NewPool() *Pool {
|
||||
return &Pool{
|
||||
pendingProposerSlashing: make([]*ethpb.ProposerSlashing, 0),
|
||||
pendingAttesterSlashing: make([]*PendingAttesterSlashing, 0),
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// PendingAttesterSlashings returns attester slashings that are eligible for inclusion in a block.
// Unless the `noLimit` parameter is true, the result is capped at the maximum number of attester
// slashings allowed in a single block; with `noLimit` set, every pending slashing is returned.
|
||||
func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []ethpb.AttSlashing {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
// Update prom metric.
|
||||
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
|
||||
|
||||
included := make(map[primitives.ValidatorIndex]bool)
|
||||
|
||||
// Allocate pending slice with a capacity of maxAttesterSlashings or len(p.pendingAttesterSlashing) depending on the request.
|
||||
maxSlashings := params.BeaconConfig().MaxAttesterSlashings
|
||||
|
||||
// EIP-7549: Beginning with Electra, the maximum number of attester slashings per block is reduced to 1.
|
||||
if state.Version() >= version.Electra {
|
||||
maxSlashings = params.BeaconConfig().MaxAttesterSlashingsElectra
|
||||
}
|
||||
if noLimit {
|
||||
maxSlashings = uint64(len(p.pendingAttesterSlashing))
|
||||
}
|
||||
pending := make([]ethpb.AttSlashing, 0, maxSlashings)
|
||||
for i := 0; i < len(p.pendingAttesterSlashing); i++ {
|
||||
if uint64(len(pending)) >= maxSlashings {
|
||||
break
|
||||
}
|
||||
slashing := p.pendingAttesterSlashing[i]
|
||||
valid, err := p.validatorSlashingPreconditionCheck(state, slashing.validatorToSlash)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not validate attester slashing")
|
||||
continue
|
||||
}
|
||||
if included[slashing.validatorToSlash] || !valid {
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
attSlashing := slashing.attesterSlashing
|
||||
slashedVal := slice.IntersectionUint64(
|
||||
attSlashing.FirstAttestation().GetAttestingIndices(),
|
||||
attSlashing.SecondAttestation().GetAttestingIndices(),
|
||||
)
|
||||
for _, idx := range slashedVal {
|
||||
included[primitives.ValidatorIndex(idx)] = true
|
||||
}
|
||||
|
||||
pending = append(pending, attSlashing)
|
||||
}
|
||||
|
||||
return pending
|
||||
}
|
||||
|
||||
// PendingProposerSlashings returns proposer slashings that are eligible for inclusion in a block.
// Unless the `noLimit` parameter is true, the result is capped at the maximum number of proposer
// slashings allowed in a single block; with `noLimit` set, every pending slashing is returned.
|
||||
func (p *Pool) PendingProposerSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.ProposerSlashing {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
// Update prom metric.
|
||||
numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
|
||||
|
||||
// Allocate pending slice with a capacity of len(p.pendingProposerSlashing) or maxProposerSlashings depending on the request.
|
||||
maxSlashings := params.BeaconConfig().MaxProposerSlashings
|
||||
if noLimit {
|
||||
maxSlashings = uint64(len(p.pendingProposerSlashing))
|
||||
}
|
||||
pending := make([]*ethpb.ProposerSlashing, 0, maxSlashings)
|
||||
for i := 0; i < len(p.pendingProposerSlashing); i++ {
|
||||
if uint64(len(pending)) >= maxSlashings {
|
||||
break
|
||||
}
|
||||
slashing := p.pendingProposerSlashing[i]
|
||||
valid, err := p.validatorSlashingPreconditionCheck(state, slashing.Header_1.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not validate proposer slashing")
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
|
||||
pending = append(pending, slashing)
|
||||
}
|
||||
return pending
|
||||
}
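Both Pending* methods above splice invalid entries out of the slice while iterating, stepping the index back so the element that shifts into position i is not skipped. A standalone illustration of that pattern (not part of the diff):

// filterInPlace removes every element that fails keep, preserving order.
func filterInPlace(xs []int, keep func(int) bool) []int {
	for i := 0; i < len(xs); i++ {
		if !keep(xs[i]) {
			xs = append(xs[:i], xs[i+1:]...)
			i-- // re-examine the element that shifted into slot i
		}
	}
	return xs
}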
|
||||
|
||||
// InsertAttesterSlashing into the pool. This method is a no-op if the attester slashing already exists in the pool,
|
||||
// has been included into a block recently, or the validator is already exited.
|
||||
func (p *Pool) InsertAttesterSlashing(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
slashing ethpb.AttSlashing,
|
||||
) error {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
ctx, span := trace.StartSpan(ctx, "operations.InsertAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
|
||||
return errors.Wrap(err, "could not verify attester slashing")
|
||||
}
|
||||
|
||||
slashedVal := slice.IntersectionUint64(slashing.FirstAttestation().GetAttestingIndices(), slashing.SecondAttestation().GetAttestingIndices())
|
||||
cantSlash := make([]uint64, 0, len(slashedVal))
|
||||
slashingReason := ""
|
||||
for _, val := range slashedVal {
|
||||
// Has this validator index been included recently?
|
||||
ok, err := p.validatorSlashingPreconditionCheck(state, primitives.ValidatorIndex(val))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the validator has already exited, has already been slashed, or if its index
|
||||
// has been recently included in the pool of slashings, skip including this index.
|
||||
if !ok {
|
||||
slashingReason = "validator already exited/slashed or already recently included in slashings pool"
|
||||
cantSlash = append(cantSlash, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the validator already exists in the list of slashings.
|
||||
// Use binary search to find the answer.
|
||||
found := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
|
||||
return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
|
||||
})
|
||||
if found != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[found].validatorToSlash) == val {
|
||||
slashingReason = "validator already exist in list of pending slashings, no need to attempt to slash again"
|
||||
cantSlash = append(cantSlash, val)
|
||||
continue
|
||||
}
|
||||
|
||||
pendingSlashing := &PendingAttesterSlashing{
|
||||
attesterSlashing: slashing,
|
||||
validatorToSlash: primitives.ValidatorIndex(val),
|
||||
}
|
||||
// Insert into pending list and sort again.
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing, pendingSlashing)
|
||||
sort.Slice(p.pendingAttesterSlashing, func(i, j int) bool {
|
||||
return p.pendingAttesterSlashing[i].validatorToSlash < p.pendingAttesterSlashing[j].validatorToSlash
|
||||
})
|
||||
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
|
||||
}
|
||||
if len(cantSlash) == len(slashedVal) {
|
||||
return fmt.Errorf(
|
||||
"could not slash any of %d validators in submitted slashing: %s",
|
||||
len(slashedVal),
|
||||
slashingReason,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
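The pool keeps pendingAttesterSlashing sorted by validator index so that sort.Search can serve as a membership check before appending and re-sorting. A self-contained sketch of the underlying sorted-slice idiom; this version inserts in place rather than re-sorting, which is a simplification of what the pool code does:

package main

import (
	"fmt"
	"sort"
)

// insertSorted returns xs with v inserted in order, or xs unchanged if v is
// already present. sort.Search finds the first index whose value is >= v.
func insertSorted(xs []uint64, v uint64) []uint64 {
	i := sort.Search(len(xs), func(i int) bool { return xs[i] >= v })
	if i < len(xs) && xs[i] == v {
		return xs
	}
	xs = append(xs, 0)
	copy(xs[i+1:], xs[i:])
	xs[i] = v
	return xs
}

func main() {
	fmt.Println(insertSorted([]uint64{1, 3, 7}, 5)) // [1 3 5 7]
}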
|
||||
|
||||
// InsertProposerSlashing into the pool. This method is a no-op if the pending slashing already exists,
|
||||
// has been included recently, the validator is already exited, or the validator was already slashed.
|
||||
func (p *Pool) InsertProposerSlashing(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
slashing *ethpb.ProposerSlashing,
|
||||
) error {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
|
||||
return errors.Wrap(err, "could not verify proposer slashing")
|
||||
}
|
||||
|
||||
idx := slashing.Header_1.Header.ProposerIndex
|
||||
ok, err := p.validatorSlashingPreconditionCheck(state, idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the validator has already exited, has already been slashed, or if its index
|
||||
// has been recently included in the pool of slashings, do not process this new
|
||||
// slashing.
|
||||
if !ok {
|
||||
return fmt.Errorf("validator at index %d cannot be slashed", idx)
|
||||
}
|
||||
|
||||
// Check if the validator already exists in the list of slashings.
|
||||
// Use binary search to find the answer.
|
||||
found := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
|
||||
return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= slashing.Header_1.Header.ProposerIndex
|
||||
})
|
||||
if found != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[found].Header_1.Header.ProposerIndex ==
|
||||
slashing.Header_1.Header.ProposerIndex {
|
||||
return errors.New("slashing object already exists in pending proposer slashings")
|
||||
}
|
||||
|
||||
// Insert into pending list and sort again.
|
||||
p.pendingProposerSlashing = append(p.pendingProposerSlashing, slashing)
|
||||
sort.Slice(p.pendingProposerSlashing, func(i, j int) bool {
|
||||
return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex < p.pendingProposerSlashing[j].Header_1.Header.ProposerIndex
|
||||
})
|
||||
numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkIncludedAttesterSlashing is used when an attester slashing has been included in a beacon block.
// Every block seen by this node that contains attester slashings should call this method to mark
// those attester slashings as included.
|
||||
func (p *Pool) MarkIncludedAttesterSlashing(as ethpb.AttSlashing) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
slashedVal := slice.IntersectionUint64(as.FirstAttestation().GetAttestingIndices(), as.SecondAttestation().GetAttestingIndices())
|
||||
for _, val := range slashedVal {
|
||||
i := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
|
||||
return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
|
||||
})
|
||||
if i != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[i].validatorToSlash) == val {
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
|
||||
}
|
||||
p.included[primitives.ValidatorIndex(val)] = true
|
||||
numAttesterSlashingsIncluded.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// MarkIncludedProposerSlashing is used when a proposer slashing has been included in a beacon block.
// Every block seen by this node that contains proposer slashings should call this method to mark
// those proposer slashings as included.
|
||||
func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
i := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
|
||||
return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= ps.Header_1.Header.ProposerIndex
|
||||
})
|
||||
if i != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex == ps.Header_1.Header.ProposerIndex {
|
||||
p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
|
||||
}
|
||||
p.included[ps.Header_1.Header.ProposerIndex] = true
|
||||
numProposerSlashingsIncluded.Inc()
|
||||
}
|
||||
|
||||
// ConvertToElectra converts all Phase0 attester slashings to Electra attester slashings.
|
||||
// This functionality is needed at the time of the Electra fork.
|
||||
func (p *Pool) ConvertToElectra() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
for _, pas := range p.pendingAttesterSlashing {
|
||||
if pas.attesterSlashing.Version() == version.Phase0 {
|
||||
first := pas.attesterSlashing.FirstAttestation()
|
||||
second := pas.attesterSlashing.SecondAttestation()
|
||||
pas.attesterSlashing = &ethpb.AttesterSlashingElectra{
|
||||
Attestation_1: &ethpb.IndexedAttestationElectra{
|
||||
AttestingIndices: first.GetAttestingIndices(),
|
||||
Data: first.GetData(),
|
||||
Signature: first.GetSignature(),
|
||||
},
|
||||
Attestation_2: &ethpb.IndexedAttestationElectra{
|
||||
AttestingIndices: second.GetAttestingIndices(),
|
||||
Data: second.GetData(),
|
||||
Signature: second.GetSignature(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// this function checks a few items about a validator before proceeding with inserting
|
||||
// a proposer/attester slashing into the pool. First, it checks if the validator
|
||||
// has been recently included in the pool, then it checks if the validator is slashable.
|
||||
// Note: this method requires the caller to hold the lock.
|
||||
func (p *Pool) validatorSlashingPreconditionCheck(
|
||||
state state.ReadOnlyBeaconState,
|
||||
valIdx primitives.ValidatorIndex,
|
||||
) (bool, error) {
|
||||
if !mutexasserts.RWMutexLocked(&p.lock) && !mutexasserts.RWMutexRLocked(&p.lock) {
|
||||
return false, errors.New("pool.validatorSlashingPreconditionCheck: caller must hold read/write lock")
|
||||
}
|
||||
|
||||
// Check if the validator index has been included recently.
|
||||
if p.included[valIdx] {
|
||||
return false, nil
|
||||
}
|
||||
validator, err := state.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Checking if the validator is slashable.
|
||||
if !helpers.IsSlashableValidatorUsingTrie(validator, coretime.CurrentEpoch(state)) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
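The guard at the top of validatorSlashingPreconditionCheck uses go-mutexasserts to verify at runtime that the caller actually holds the pool lock. A minimal sketch of the same precondition check on an unrelated type, assuming the usual sync and errors imports alongside mutexasserts:

type counter struct {
	mu sync.RWMutex
	n  int
}

// readLocked must only be called while c.mu is held; it fails loudly instead
// of silently racing when the precondition is violated.
func (c *counter) readLocked() (int, error) {
	if !mutexasserts.RWMutexLocked(&c.mu) && !mutexasserts.RWMutexRLocked(&c.mu) {
		return 0, errors.New("caller must hold c.mu")
	}
	return c.n, nil
}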
|
||||
@@ -2,323 +2,102 @@ package slashings
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
coretime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// NewPool returns an initialized attester slashing and proposer slashing pool.
|
||||
func NewPool() *Pool {
|
||||
return &Pool{
|
||||
pendingProposerSlashing: make([]*ethpb.ProposerSlashing, 0),
|
||||
pendingAttesterSlashing: make([]*PendingAttesterSlashing, 0),
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
// WithElectraTimer configures the pool service to run a timer that converts pending
// Phase0 attester slashings to their Electra representation once the Electra fork activates.
|
||||
func WithElectraTimer(cw startup.ClockWaiter, currentSlotFn func() primitives.Slot) Option {
|
||||
return func(p *PoolService) error {
|
||||
p.runElectraTimer = true
|
||||
p.cw = cw
|
||||
p.currentSlotFn = currentSlotFn
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PendingAttesterSlashings returns attester slashings that are able to be included into a block.
|
||||
// This method will return the amount of pending attester slashings for a block transition unless parameter `noLimit` is true
|
||||
// to indicate the request is for noLimit pending items.
|
||||
func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []ethpb.AttSlashing {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
// Update prom metric.
|
||||
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
|
||||
|
||||
included := make(map[primitives.ValidatorIndex]bool)
|
||||
|
||||
// Allocate pending slice with a capacity of maxAttesterSlashings or len(p.pendingAttesterSlashing)) depending on the request.
|
||||
maxSlashings := params.BeaconConfig().MaxAttesterSlashings
|
||||
|
||||
// EIP-7549: Beginning from Electra, the max attester slashings is reduced to 1.
|
||||
if state.Version() >= version.Electra {
|
||||
maxSlashings = params.BeaconConfig().MaxAttesterSlashingsElectra
|
||||
}
|
||||
if noLimit {
|
||||
maxSlashings = uint64(len(p.pendingAttesterSlashing))
|
||||
}
|
||||
pending := make([]ethpb.AttSlashing, 0, maxSlashings)
|
||||
for i := 0; i < len(p.pendingAttesterSlashing); i++ {
|
||||
if uint64(len(pending)) >= maxSlashings {
|
||||
break
|
||||
}
|
||||
slashing := p.pendingAttesterSlashing[i]
|
||||
valid, err := p.validatorSlashingPreconditionCheck(state, slashing.validatorToSlash)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not validate attester slashing")
|
||||
continue
|
||||
}
|
||||
if included[slashing.validatorToSlash] || !valid {
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
attSlashing := slashing.attesterSlashing
|
||||
slashedVal := slice.IntersectionUint64(
|
||||
attSlashing.FirstAttestation().GetAttestingIndices(),
|
||||
attSlashing.SecondAttestation().GetAttestingIndices(),
|
||||
)
|
||||
for _, idx := range slashedVal {
|
||||
included[primitives.ValidatorIndex(idx)] = true
|
||||
}
|
||||
|
||||
pending = append(pending, attSlashing)
|
||||
// NewPoolService returns a service that manages the Pool.
|
||||
func NewPoolService(ctx context.Context, pool PoolManager, opts ...Option) *PoolService {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
p := &PoolService{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
poolManager: pool,
|
||||
}
|
||||
|
||||
return pending
|
||||
for _, opt := range opts {
|
||||
if err := opt(p); err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// PendingProposerSlashings returns proposer slashings that are able to be included into a block.
|
||||
// This method will return the amount of pending proposer slashings for a block transition unless the `noLimit` parameter
|
||||
// is set to true to indicate the request is for noLimit pending items.
|
||||
func (p *Pool) PendingProposerSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.ProposerSlashing {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
// Update prom metric.
|
||||
numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
|
||||
|
||||
// Allocate pending slice with a capacity of len(p.pendingProposerSlashing) or maxProposerSlashings depending on the request.
|
||||
maxSlashings := params.BeaconConfig().MaxProposerSlashings
|
||||
if noLimit {
|
||||
maxSlashings = uint64(len(p.pendingProposerSlashing))
|
||||
}
|
||||
pending := make([]*ethpb.ProposerSlashing, 0, maxSlashings)
|
||||
for i := 0; i < len(p.pendingProposerSlashing); i++ {
|
||||
if uint64(len(pending)) >= maxSlashings {
|
||||
break
|
||||
}
|
||||
slashing := p.pendingProposerSlashing[i]
|
||||
valid, err := p.validatorSlashingPreconditionCheck(state, slashing.Header_1.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not validate proposer slashing")
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
|
||||
pending = append(pending, slashing)
|
||||
}
|
||||
return pending
|
||||
// Start the slashing pool service.
|
||||
func (p *PoolService) Start() {
|
||||
go p.run()
|
||||
}
|
||||
|
||||
// InsertAttesterSlashing into the pool. This method is a no-op if the attester slashing already exists in the pool,
|
||||
// has been included into a block recently, or the validator is already exited.
|
||||
func (p *Pool) InsertAttesterSlashing(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
slashing ethpb.AttSlashing,
|
||||
) error {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
ctx, span := trace.StartSpan(ctx, "operations.InsertAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
|
||||
return errors.Wrap(err, "could not verify attester slashing")
|
||||
func (p *PoolService) run() {
|
||||
if !p.runElectraTimer {
|
||||
return
|
||||
}
|
||||
|
||||
slashedVal := slice.IntersectionUint64(slashing.FirstAttestation().GetAttestingIndices(), slashing.SecondAttestation().GetAttestingIndices())
|
||||
cantSlash := make([]uint64, 0, len(slashedVal))
|
||||
slashingReason := ""
|
||||
for _, val := range slashedVal {
|
||||
// Has this validator index been included recently?
|
||||
ok, err := p.validatorSlashingPreconditionCheck(state, primitives.ValidatorIndex(val))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the validator has already exited, has already been slashed, or if its index
|
||||
// has been recently included in the pool of slashings, skip including this indice.
|
||||
if !ok {
|
||||
slashingReason = "validator already exited/slashed or already recently included in slashings pool"
|
||||
cantSlash = append(cantSlash, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the validator already exists in the list of slashings.
|
||||
// Use binary search to find the answer.
|
||||
found := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
|
||||
return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
|
||||
})
|
||||
if found != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[found].validatorToSlash) == val {
|
||||
slashingReason = "validator already exist in list of pending slashings, no need to attempt to slash again"
|
||||
cantSlash = append(cantSlash, val)
|
||||
continue
|
||||
}
|
||||
|
||||
pendingSlashing := &PendingAttesterSlashing{
|
||||
attesterSlashing: slashing,
|
||||
validatorToSlash: primitives.ValidatorIndex(val),
|
||||
}
|
||||
// Insert into pending list and sort again.
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing, pendingSlashing)
|
||||
sort.Slice(p.pendingAttesterSlashing, func(i, j int) bool {
|
||||
return p.pendingAttesterSlashing[i].validatorToSlash < p.pendingAttesterSlashing[j].validatorToSlash
|
||||
})
|
||||
numPendingAttesterSlashings.Set(float64(len(p.pendingAttesterSlashing)))
|
||||
electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(cantSlash) == len(slashedVal) {
|
||||
return fmt.Errorf(
|
||||
"could not slash any of %d validators in submitted slashing: %s",
|
||||
len(slashedVal),
|
||||
slashingReason,
|
||||
)
|
||||
|
||||
// If run() is executed after the transition to Electra has already happened,
|
||||
// there is nothing to convert because the slashing pool is empty at startup.
|
||||
if p.currentSlotFn() >= electraSlot {
|
||||
return
|
||||
}
|
||||
|
||||
p.waitForChainInitialization()
|
||||
|
||||
electraTime, err := slots.ToTime(uint64(p.clock.GenesisTime().Unix()), electraSlot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t := time.NewTimer(electraTime.Sub(p.clock.Now()))
|
||||
defer t.Stop()
|
||||
|
||||
select {
|
||||
case <-t.C:
|
||||
log.Info("Converting Phase0 slashings to Electra slashings")
|
||||
p.poolManager.ConvertToElectra()
|
||||
return
|
||||
case <-p.ctx.Done():
|
||||
log.Warn("Context cancelled, ConvertToElectra timer will not execute")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PoolService) waitForChainInitialization() {
|
||||
clock, err := p.cw.WaitForClock(p.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not receive chain start notification")
|
||||
}
|
||||
p.clock = clock
|
||||
log.WithField("genesisTime", clock.GenesisTime()).Info(
|
||||
"Slashing pool service received chain initialization event",
|
||||
)
|
||||
}
|
||||
|
||||
// Stop the slashing pool service.
|
||||
func (p *PoolService) Stop() error {
|
||||
p.cancel()
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertProposerSlashing into the pool. This method is a no-op if the pending slashing already exists,
|
||||
// has been included recently, the validator is already exited, or the validator was already slashed.
|
||||
func (p *Pool) InsertProposerSlashing(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
slashing *ethpb.ProposerSlashing,
|
||||
) error {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
_, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
|
||||
return errors.Wrap(err, "could not verify proposer slashing")
|
||||
}
|
||||
|
||||
idx := slashing.Header_1.Header.ProposerIndex
|
||||
ok, err := p.validatorSlashingPreconditionCheck(state, idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the validator has already exited, has already been slashed, or if its index
|
||||
// has been recently included in the pool of slashings, do not process this new
|
||||
// slashing.
|
||||
if !ok {
|
||||
return fmt.Errorf("validator at index %d cannot be slashed", idx)
|
||||
}
|
||||
|
||||
// Check if the validator already exists in the list of slashings.
|
||||
// Use binary search to find the answer.
|
||||
found := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
|
||||
return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= slashing.Header_1.Header.ProposerIndex
|
||||
})
|
||||
if found != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[found].Header_1.Header.ProposerIndex ==
|
||||
slashing.Header_1.Header.ProposerIndex {
|
||||
return errors.New("slashing object already exists in pending proposer slashings")
|
||||
}
|
||||
|
||||
// Insert into pending list and sort again.
|
||||
p.pendingProposerSlashing = append(p.pendingProposerSlashing, slashing)
|
||||
sort.Slice(p.pendingProposerSlashing, func(i, j int) bool {
|
||||
return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex < p.pendingProposerSlashing[j].Header_1.Header.ProposerIndex
|
||||
})
|
||||
numPendingProposerSlashings.Set(float64(len(p.pendingProposerSlashing)))
|
||||
|
||||
// Status of the slashing pool service.
|
||||
func (p *PoolService) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkIncludedAttesterSlashing is used when an attester slashing has been included in a beacon block.
|
||||
// Every block seen by this node that contains proposer slashings should call this method to include
|
||||
// the proposer slashings.
|
||||
func (p *Pool) MarkIncludedAttesterSlashing(as ethpb.AttSlashing) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
slashedVal := slice.IntersectionUint64(as.FirstAttestation().GetAttestingIndices(), as.SecondAttestation().GetAttestingIndices())
|
||||
for _, val := range slashedVal {
|
||||
i := sort.Search(len(p.pendingAttesterSlashing), func(i int) bool {
|
||||
return uint64(p.pendingAttesterSlashing[i].validatorToSlash) >= val
|
||||
})
|
||||
if i != len(p.pendingAttesterSlashing) && uint64(p.pendingAttesterSlashing[i].validatorToSlash) == val {
|
||||
p.pendingAttesterSlashing = append(p.pendingAttesterSlashing[:i], p.pendingAttesterSlashing[i+1:]...)
|
||||
}
|
||||
p.included[primitives.ValidatorIndex(val)] = true
|
||||
numAttesterSlashingsIncluded.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// MarkIncludedProposerSlashing is used when an proposer slashing has been included in a beacon block.
|
||||
// Every block seen by this node that contains proposer slashings should call this method to include
|
||||
// the proposer slashings.
|
||||
func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
i := sort.Search(len(p.pendingProposerSlashing), func(i int) bool {
|
||||
return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= ps.Header_1.Header.ProposerIndex
|
||||
})
|
||||
if i != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex == ps.Header_1.Header.ProposerIndex {
|
||||
p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...)
|
||||
}
|
||||
p.included[ps.Header_1.Header.ProposerIndex] = true
|
||||
numProposerSlashingsIncluded.Inc()
|
||||
}
|
||||
|
||||
// ConvertToElectra converts all Phase0 attester slashings to Electra attester slashings.
|
||||
// This functionality is needed at the time of the Electra fork.
|
||||
func (p *Pool) ConvertToElectra() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
for _, pas := range p.pendingAttesterSlashing {
|
||||
if pas.attesterSlashing.Version() == version.Phase0 {
|
||||
first := pas.attesterSlashing.FirstAttestation()
|
||||
second := pas.attesterSlashing.SecondAttestation()
|
||||
pas.attesterSlashing = &ethpb.AttesterSlashingElectra{
|
||||
Attestation_1: &ethpb.IndexedAttestationElectra{
|
||||
AttestingIndices: first.GetAttestingIndices(),
|
||||
Data: first.GetData(),
|
||||
Signature: first.GetSignature(),
|
||||
},
|
||||
Attestation_2: &ethpb.IndexedAttestationElectra{
|
||||
AttestingIndices: second.GetAttestingIndices(),
|
||||
Data: second.GetData(),
|
||||
Signature: second.GetSignature(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// this function checks a few items about a validator before proceeding with inserting
|
||||
// a proposer/attester slashing into the pool. First, it checks if the validator
|
||||
// has been recently included in the pool, then it checks if the validator is slashable.
|
||||
// Note: this method requires caller to hold the lock.
|
||||
func (p *Pool) validatorSlashingPreconditionCheck(
|
||||
state state.ReadOnlyBeaconState,
|
||||
valIdx primitives.ValidatorIndex,
|
||||
) (bool, error) {
|
||||
if !mutexasserts.RWMutexLocked(&p.lock) && !mutexasserts.RWMutexRLocked(&p.lock) {
|
||||
return false, errors.New("pool.validatorSlashingPreconditionCheck: caller must hold read/write lock")
|
||||
}
|
||||
|
||||
// Check if the validator index has been included recently.
|
||||
if p.included[valIdx] {
|
||||
return false, nil
|
||||
}
|
||||
validator, err := state.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Checking if the validator is slashable.
|
||||
if !helpers.IsSlashableValidatorUsingTrie(validator, coretime.CurrentEpoch(state)) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -1,103 +0,0 @@
|
||||
package slashings
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// WithElectraTimer includes functional options for the blockchain service related to CLI flags.
|
||||
func WithElectraTimer(cw startup.ClockWaiter, currentSlotFn func() primitives.Slot) Option {
|
||||
return func(p *PoolService) error {
|
||||
p.runElectraTimer = true
|
||||
p.cw = cw
|
||||
p.currentSlotFn = currentSlotFn
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewPoolService returns a service that manages the Pool.
|
||||
func NewPoolService(ctx context.Context, pool PoolManager, opts ...Option) *PoolService {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
p := &PoolService{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
poolManager: pool,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(p); err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// Start the slashing pool service.
|
||||
func (p *PoolService) Start() {
|
||||
go p.run()
|
||||
}
|
||||
|
||||
func (p *PoolService) run() {
|
||||
if !p.runElectraTimer {
|
||||
return
|
||||
}
|
||||
|
||||
electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// If run() is executed after the transition to Electra has already happened,
|
||||
// there is nothing to convert because the slashing pool is empty at startup.
|
||||
if p.currentSlotFn() >= electraSlot {
|
||||
return
|
||||
}
|
||||
|
||||
p.waitForChainInitialization()
|
||||
|
||||
electraTime, err := slots.ToTime(uint64(p.clock.GenesisTime().Unix()), electraSlot)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t := time.NewTimer(electraTime.Sub(p.clock.Now()))
|
||||
defer t.Stop()
|
||||
|
||||
select {
|
||||
case <-t.C:
|
||||
log.Info("Converting Phase0 slashings to Electra slashings")
|
||||
p.poolManager.ConvertToElectra()
|
||||
return
|
||||
case <-p.ctx.Done():
|
||||
log.Warn("Context cancelled, ConvertToElectra timer will not execute")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PoolService) waitForChainInitialization() {
|
||||
clock, err := p.cw.WaitForClock(p.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not receive chain start notification")
|
||||
}
|
||||
p.clock = clock
|
||||
log.WithField("genesisTime", clock.GenesisTime()).Info(
|
||||
"Slashing pool service received chain initialization event",
|
||||
)
|
||||
}
|
||||
|
||||
// Stop the slashing pool service.
|
||||
func (p *PoolService) Stop() error {
|
||||
p.cancel()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status of the slashing pool service.
|
||||
func (p *PoolService) Status() error {
|
||||
return nil
|
||||
}
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"broadcaster.go",
|
||||
"config.go",
|
||||
"connection_gater.go",
|
||||
"custody.go",
|
||||
"dial_relay_node.go",
|
||||
"discovery.go",
|
||||
"doc.go",
|
||||
@@ -45,6 +46,7 @@ go_library(
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
@@ -55,6 +57,7 @@ go_library(
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -116,6 +119,7 @@ go_test(
|
||||
"addr_factory_test.go",
|
||||
"broadcaster_test.go",
|
||||
"connection_gater_test.go",
|
||||
"custody_test.go",
|
||||
"dial_relay_node_test.go",
|
||||
"discovery_test.go",
|
||||
"fork_test.go",
|
||||
@@ -137,9 +141,11 @@ go_test(
|
||||
flaky = True,
|
||||
tags = ["requires-network"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
@@ -152,6 +158,7 @@ go_test(
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
@@ -162,6 +169,7 @@ go_test(
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//proto/testing:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
@@ -96,7 +97,12 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [4]byte) {
|
||||
func (s *Service) internalBroadcastAttestation(
|
||||
ctx context.Context,
|
||||
subnet uint64,
|
||||
att ethpb.Att,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -152,7 +158,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -228,7 +234,12 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
|
||||
func (s *Service) internalBroadcastBlob(
|
||||
ctx context.Context,
|
||||
subnet uint64,
|
||||
blobSidecar *ethpb.BlobSidecar,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -243,7 +254,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
if !hasPeer {
|
||||
blobSidecarCommitteeBroadcastAttempts.Inc()
|
||||
blobSidecarBroadcastAttempts.Inc()
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
@@ -252,7 +263,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
blobSidecarCommitteeBroadcasts.Inc()
|
||||
blobSidecarBroadcasts.Inc()
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
@@ -268,6 +279,120 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
}
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column sidecar to the p2p network. The message is broadcast
// on the current fork digest and on the given column subnet.
// TODO: Add tests
|
||||
func (s *Service) BroadcastDataColumn(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
columnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
|
||||
defer span.End()
|
||||
|
||||
// Ensure the data column sidecar is not nil.
|
||||
if dataColumnSidecar == nil {
|
||||
return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", columnSubnet)
|
||||
}
|
||||
|
||||
// Retrieve the current fork digest.
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "current fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumn(ctx, root, columnSubnet, dataColumnSidecar, forkDigest)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastDataColumn(
	ctx context.Context,
	root [fieldparams.RootLength]byte,
	columnSubnet uint64,
	dataColumnSidecar *ethpb.DataColumnSidecar,
	forkDigest [fieldparams.VersionLength]byte,
) {
	// Add tracing to the function.
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
	defer span.End()

	// Increase the number of broadcast attempts.
	dataColumnSidecarBroadcastAttempts.Inc()

	// Clear parent context / deadline.
	ctx = trace.NewContext(context.Background(), span)

	// Define a one-slot length context timeout.
	oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	ctx, cancel := context.WithTimeout(ctx, oneSlot)
	defer cancel()

	// Build the topic corresponding to this column subnet and this fork digest.
	topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)

	// Compute the wrapped subnet index.
	wrappedSubIdx := columnSubnet + dataColumnSubnetVal

	// Check if we have peers with this subnet.
	hasPeer := func() bool {
		s.subnetLocker(wrappedSubIdx).RLock()
		defer s.subnetLocker(wrappedSubIdx).RUnlock()

		return s.hasPeerWithSubnet(topic)
	}()

	// If no peers are found, attempt to find peers with this subnet.
	if !hasPeer {
		if err := func() error {
			s.subnetLocker(wrappedSubIdx).Lock()
			defer s.subnetLocker(wrappedSubIdx).Unlock()

			ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
			if err != nil {
				return errors.Wrap(err, "find peers for subnet")
			}

			if ok {
				return nil
			}
			return errors.New("failed to find peers for subnet")
		}(); err != nil {
			log.WithError(err).Error("Failed to find peers")
			tracing.AnnotateError(span, err)
		}
	}

	// Broadcast the data column sidecar to the network.
	if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
		log.WithError(err).Error("Failed to broadcast data column sidecar")
		tracing.AnnotateError(span, err)
	}

	header := dataColumnSidecar.SignedBlockHeader.GetHeader()
	slot := header.GetSlot()

	slotStartTime, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
	if err != nil {
		log.WithError(err).Error("Failed to convert slot to time")
	}

	log.WithFields(logrus.Fields{
		"slot":               slot,
		"timeSinceSlotStart": time.Since(slotStartTime),
		"root":               fmt.Sprintf("%#x", root),
		"columnSubnet":       columnSubnet,
	}).Debug("Broadcasted data column sidecar")

	// Increase the number of successful broadcasts.
	dataColumnSidecarBroadcasts.Inc()
}
// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
	ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -297,14 +422,18 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
	return nil
}

-func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
+func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

-func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
+func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

-func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
+func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}
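Taken together, the caller-facing piece of this change is BroadcastDataColumn: compute the column subnet for each sidecar and hand it off, as the doc comment above describes. A minimal caller-side sketch, using hypothetical names (svc for the broadcaster, blockRoot, sidecars, and subnetCount mirroring the spec's DATA_COLUMN_SIDECAR_SUBNET_COUNT) and assuming the sidecar exposes its index as ColumnIndex; none of these identifiers are defined by this diff:

// Sketch only: fan each data column sidecar out to its subnet.
for _, sc := range sidecars {
	// Per the spec, the subnet is the column index modulo the subnet count.
	subnet := sc.ColumnIndex % subnetCount
	if err := svc.BroadcastDataColumn(ctx, blockRoot, subnet, sc); err != nil {
		log.WithError(err).Error("Could not broadcast data column sidecar")
	}
}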
@@ -12,11 +12,16 @@ import (
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/prysmaticlabs/go-bitfield"
	"google.golang.org/protobuf/proto"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
	p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -24,7 +29,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
-	"google.golang.org/protobuf/proto"
)
func TestService_Broadcast(t *testing.T) {
@@ -516,3 +520,88 @@ func TestService_BroadcastBlob(t *testing.T) {
	require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

func TestService_BroadcastDataColumn(t *testing.T) {
	require.NoError(t, kzg.Start())
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")

	p := &Service{
		host:                  p1.BHost,
		pubsub:                p1.PubSub(),
		joinedTopics:          map[string]*pubsub.Topic{},
		cfg:                   &Config{},
		genesisTime:           time.Now(),
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
		subnetsLock:           make(map[uint64]*sync.RWMutex),
		subnetsLockLock:       sync.Mutex{},
		peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
			ScorerParams: &scorers.Config{},
		}),
	}

	var (
		comts [][]byte
		blobs []kzg.Blob
	)
	for i := int64(0); i < 6; i++ {
		blob := kzg.Blob(util.GetRandBlob(i))
		commitment, err := kzg.BlobToKZGCommitment(&blob)
		require.NoError(t, err)
		comts = append(comts, commitment[:])
		blobs = append(blobs, blob)
	}

	b := util.NewBeaconBlockFulu()
	b.Block.Body.BlobKzgCommitments = comts
	sb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	for i := range blobs {
		blobs[i] = kzg.Blob(util.GetRandBlob(int64(i)))
	}
	cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
	sidecars, err := peerdas.DataColumnSidecars(sb, cellsAndProofs)
	require.NoError(t, err)

	sidecar := sidecars[0]
	subnet := uint64(0)
	topic := DataColumnSubnetTopicFormat
	GossipTypeMapping[reflect.TypeOf(sidecar)] = topic
	digest, err := p.currentForkDigest()
	require.NoError(t, err)
	topic = fmt.Sprintf(topic, digest, subnet)

	// External peer subscribes to the topic.
	topic += p.Encoding().ProtocolSuffix()
	sub, err := p2.SubscribeToTopic(topic)
	require.NoError(t, err)

	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...

	// Async listen for the pubsub, must be before the broadcast.
	var wg sync.WaitGroup
	wg.Add(1)
	go func(tt *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		defer cancel()

		msg, err := sub.Next(ctx)
		require.NoError(t, err)

		result := &ethpb.DataColumnSidecar{}
		require.NoError(t, p.Encoding().DecodeGossip(msg.Data, result))
		require.DeepEqual(t, result, sidecar)
	}(t)

	// Attempt to broadcast nil object should fail.
	ctx := context.Background()
	var root [fieldparams.RootLength]byte
	require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastDataColumn(ctx, root, subnet, nil))

	// Broadcast to peers and wait.
	require.NoError(t, p.BroadcastDataColumn(ctx, root, subnet, sidecar))
	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}
@@ -4,6 +4,7 @@ import (
	"time"

	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
)
@@ -38,6 +39,7 @@ type Config struct {
	StateNotifier statefeed.Notifier
	DB            db.ReadOnlyDatabase
	ClockWaiter   startup.ClockWaiter
+	CustodyInfo   *peerdas.CustodyInfo
}

// validateConfig validates whether the values provided are accurate and will set
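The new CustodyInfo field is mandatory wiring rather than an optional extra: the test updates later in this diff construct every p2p Config with it, and createLocalNode dereferences it to build the cgc ENR entry. A minimal sketch of that wiring (the UDPPort value is purely illustrative):

// Sketch: a p2p config that carries custody information.
cfg := &Config{
	UDPPort:     2000,
	CustodyInfo: &peerdas.CustodyInfo{}, // consulted via ActualGroupCount() for the ENR and metadata
}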
beacon-chain/p2p/custody.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package p2p

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/sirupsen/logrus"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

var _ DataColumnsHandler = (*Service)(nil)

// custodyGroupCountFromPeerENR retrieves the custody group count from the peer's ENR.
// If the ENR is not available, it defaults to the minimum number of custody groups
// an honest node custodies and serves samples from.
func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
	// By default, we assume the peer custodies the minimum number of groups.
	custodyRequirement := params.BeaconConfig().CustodyRequirement

	// Retrieve the ENR of the peer.
	record, err := s.peers.ENR(pid)
	if err != nil {
		log.WithError(err).WithFields(logrus.Fields{
			"peerID":       pid,
			"defaultValue": custodyRequirement,
		}).Debug("Failed to retrieve ENR for peer, defaulting to the default value")

		return custodyRequirement
	}

	// Retrieve the custody group count from the ENR.
	custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
	if err != nil {
		log.WithError(err).WithFields(logrus.Fields{
			"peerID":       pid,
			"defaultValue": custodyRequirement,
		}).Debug("Failed to retrieve custody group count from ENR for peer, defaulting to the default value")

		return custodyRequirement
	}

	return custodyGroupCount
}

// CustodyGroupCountFromPeer retrieves the custody group count from a peer.
// It first tries to get the custody group count from the peer's metadata,
// then falls back to the ENR value if the metadata is not available, and
// finally falls back to the minimum number of custody groups an honest node
// should custody and serve samples from if the ENR is not available either.
func (s *Service) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
	// Try to get the custody group count from the peer's metadata.
	metadata, err := s.peers.Metadata(pid)
	if err != nil {
		// On error, default to the ENR value.
		log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
		return s.custodyGroupCountFromPeerENR(pid)
	}

	// If the metadata is nil, default to the ENR value.
	if metadata == nil {
		log.WithField("peerID", pid).Debug("Metadata is nil, defaulting to the ENR value")
		return s.custodyGroupCountFromPeerENR(pid)
	}

	// Get the custody group count from the metadata.
	custodyCount := metadata.CustodyGroupCount()

	// If the custody count is zero, default to the ENR value.
	if custodyCount == 0 {
		log.WithField("peerID", pid).Debug("The custody count extracted from the metadata equals to 0, defaulting to the ENR value")
		return s.custodyGroupCountFromPeerENR(pid)
	}

	return custodyCount
}
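The ENR fallback above reads the same cgc entry that discovery writes with peerdas.Cgc, and that custody_test.go below builds by hand. A compact round-trip sketch using only helpers that already appear in this diff:

// Sketch: write the custody group count into an ENR record and read it back.
record := &enr.Record{}
record.Set(peerdas.Cgc(8)) // advertise custody of 8 groups

count, err := peerdas.CustodyGroupCountFromRecord(record)
if err != nil {
	log.WithError(err).Error("Could not read the cgc entry from the record")
}
_ = count // 8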
beacon-chain/p2p/custody_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package p2p

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
	pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestCustodyGroupCountFromPeer(t *testing.T) {
	const (
		expectedENR      uint64 = 7
		expectedMetadata uint64 = 8
		pid                     = "test-id"
	)

	cgc := peerdas.Cgc(expectedENR)

	// Define a nil record.
	var nilRecord *enr.Record = nil

	// Define an empty record (a record without a `cgc` entry).
	emptyRecord := &enr.Record{}

	// Define a nominal record.
	nominalRecord := &enr.Record{}
	nominalRecord.Set(cgc)

	// Define a metadata with zero custody.
	zeroMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
		CustodyGroupCount: 0,
	})

	// Define a nominal metadata.
	nominalMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
		CustodyGroupCount: expectedMetadata,
	})

	testCases := []struct {
		name     string
		record   *enr.Record
		metadata metadata.Metadata
		expected uint64
	}{
		{
			name:     "No metadata - No ENR",
			record:   nilRecord,
			expected: params.BeaconConfig().CustodyRequirement,
		},
		{
			name:     "No metadata - Empty ENR",
			record:   emptyRecord,
			expected: params.BeaconConfig().CustodyRequirement,
		},
		{
			name:     "No Metadata - ENR",
			record:   nominalRecord,
			expected: expectedENR,
		},
		{
			name:     "Metadata with 0 value - ENR",
			record:   nominalRecord,
			metadata: zeroMetadata,
			expected: expectedENR,
		},
		{
			name:     "Metadata - ENR",
			record:   nominalRecord,
			metadata: nominalMetadata,
			expected: expectedMetadata,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create peers status.
			peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
				ScorerParams: &scorers.Config{},
			})

			// Set the metadata.
			if tc.metadata != nil {
				peers.SetMetadata(pid, tc.metadata)
			}

			// Add a new peer with the record.
			peers.Add(tc.record, pid, nil, network.DirOutbound)

			// Create a new service.
			service := &Service{
				peers:    peers,
				metaData: tc.metadata,
			}

			// Retrieve the custody count from the remote peer.
			actual := service.CustodyGroupCountFromPeer(pid)

			// Verify the result.
			require.Equal(t, tc.expected, actual)
		})
	}
}
@@ -16,6 +16,7 @@ import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/features"
	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -186,7 +187,8 @@ func (s *Service) RefreshPersistentSubnets() {
	// Compare current epoch with Altair fork epoch
	altairForkEpoch := params.BeaconConfig().AltairForkEpoch

-	if currentEpoch < altairForkEpoch {
+	// We add `1` to the current epoch because we want to prepare one epoch before the Altair fork.
+	if currentEpoch+1 < altairForkEpoch {
		// Phase 0 behaviour.
		if isBitVUpToDate {
			// Return early if bitfield hasn't changed.
@@ -219,18 +221,52 @@ func (s *Service) RefreshPersistentSubnets() {
	// Get the sync subnet bitfield in our metadata.
	currentBitSInMetadata := s.Metadata().SyncnetsBitfield()

	// Is our sync bitvector record up to date?
	isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)

-	if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
+	// Compare current epoch with the Fulu fork epoch.
+	fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
+
+	// We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
+	if currentEpoch+1 < fuluForkEpoch {
+		// Altair behaviour.
+		if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
			// Nothing to do, return early.
			return
		}

		// Some data have changed, update our record and metadata.
		s.updateSubnetRecordWithMetadataV2(bitV, bitS)

		// Ping all peers to inform them of new metadata
		s.pingPeersAndLogEnr()

		return
	}

	// Get the current custody group count.
	custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()

	// Get the custody group count we store in our record.
	inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
	if err != nil {
		log.WithError(err).Error("Could not retrieve custody subnet count")
		return
	}

	// Get the custody group count in our metadata.
	inMetadataCustodyGroupCount := s.Metadata().CustodyGroupCount()

	isCustodyGroupCountUpToDate := (custodyGroupCount == inRecordCustodyGroupCount && custodyGroupCount == inMetadataCustodyGroupCount)

	if isBitVUpToDate && isBitSUpToDate && isCustodyGroupCountUpToDate {
		// Nothing to do, return early.
		return
	}

-	// Some data have changed, update our record and metadata.
-	s.updateSubnetRecordWithMetadataV2(bitV, bitS)
+	// Some data changed. Update the record and the metadata.
+	s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodyGroupCount)

-	// Ping all peers to inform them of new metadata
+	// Ping all peers.
	s.pingPeersAndLogEnr()
}
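The two currentEpoch+1 comparisons are the subtle part of this function: the node switches to the next metadata version one epoch before the corresponding fork, so its ENR and metadata are already in place when the fork activates. A worked check of the Fulu condition, with illustrative epoch values:

// With fuluForkEpoch = 10:
//   currentEpoch = 8: 8+1 < 10, so keep the Altair path (MetaDataV2, no cgc entry).
//   currentEpoch = 9: 9+1 < 10 is false, so publish MetaDataV3 and the cgc entry one epoch early.
//   currentEpoch >= 10: Fulu path as well.
if currentEpoch+1 < fuluForkEpoch {
	// Altair behaviour.
} else {
	// Fulu behaviour: refresh the record and MetaDataV3 with the custody group count.
}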
@@ -457,6 +493,11 @@ func (s *Service) createLocalNode(
		localNode.Set(quicEntry)
	}

+	if params.FuluEnabled() {
+		custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
+		localNode.Set(peerdas.Cgc(custodyGroupCount))
+	}
+
	localNode.SetFallbackIP(ipAddr)
	localNode.SetFallbackUDP(udpPort)
@@ -25,6 +25,7 @@ import (
	"github.com/prysmaticlabs/go-bitfield"
	mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -140,6 +141,15 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {

func TestCreateLocalNode(t *testing.T) {
	params.SetupTestConfigCleanup(t)

+	// Set the fulu fork epoch to something other than the far future epoch.
+	initFuluForkEpoch := params.BeaconConfig().FuluForkEpoch
+	params.BeaconConfig().FuluForkEpoch = 42
+
+	defer func() {
+		params.BeaconConfig().FuluForkEpoch = initFuluForkEpoch
+	}()
+
	testCases := []struct {
		name          string
		cfg           *Config
@@ -147,30 +157,31 @@ func TestCreateLocalNode(t *testing.T) {
	}{
		{
			name:          "valid config",
-			cfg:           nil,
+			cfg:           &Config{CustodyInfo: &peerdas.CustodyInfo{}},
			expectedError: false,
		},
		{
			name:          "invalid host address",
-			cfg:           &Config{HostAddress: "invalid"},
+			cfg:           &Config{HostAddress: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
			expectedError: true,
		},
		{
			name:          "valid host address",
-			cfg:           &Config{HostAddress: "192.168.0.1"},
+			cfg:           &Config{HostAddress: "192.168.0.1", CustodyInfo: &peerdas.CustodyInfo{}},
			expectedError: false,
		},
		{
			name:          "invalid host DNS",
-			cfg:           &Config{HostDNS: "invalid"},
+			cfg:           &Config{HostDNS: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
			expectedError: true,
		},
		{
			name:          "valid host DNS",
-			cfg:           &Config{HostDNS: "www.google.com"},
+			cfg:           &Config{HostDNS: "www.google.com", CustodyInfo: &peerdas.CustodyInfo{}},
			expectedError: false,
		},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			// Define ports.
@@ -199,7 +210,7 @@ func TestCreateLocalNode(t *testing.T) {
			require.NoError(t, err)

			expectedAddress := address
-			if tt.cfg != nil && tt.cfg.HostAddress != "" {
+			if tt.cfg.HostAddress != "" {
				expectedAddress = net.ParseIP(tt.cfg.HostAddress)
			}

@@ -236,6 +247,11 @@ func TestCreateLocalNode(t *testing.T) {
			syncSubnets := new([]byte)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
			require.DeepSSZEqual(t, []byte{0}, *syncSubnets)

+			// Check cgc config.
+			custodyGroupCount := new(uint64)
+			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, custodyGroupCount)))
+			require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount)
		})
	}
}
@@ -535,7 +551,7 @@ type check struct {
	metadataSequenceNumber uint64
	attestationSubnets     []uint64
	syncSubnets            []uint64
-	custodySubnetCount     *uint64
+	custodyGroupCount      *uint64
}

func checkPingCountCacheMetadataRecord(
@@ -601,6 +617,18 @@ func checkPingCountCacheMetadataRecord(
		actualBitSMetadata := service.metaData.SyncnetsBitfield()
		require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata)
	}

+	if expected.custodyGroupCount != nil {
+		// Check custody subnet count in ENR.
+		var actualCustodyGroupCount uint64
+		err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &actualCustodyGroupCount))
+		require.NoError(t, err)
+		require.Equal(t, *expected.custodyGroupCount, actualCustodyGroupCount)
+
+		// Check custody subnet count in metadata.
+		actualGroupCountMetadata := service.metaData.CustodyGroupCount()
+		require.Equal(t, *expected.custodyGroupCount, actualGroupCountMetadata)
+	}
}
func TestRefreshPersistentSubnets(t *testing.T) {
@@ -610,12 +638,18 @@ func TestRefreshPersistentSubnets(t *testing.T) {
	defer cache.SubnetIDs.EmptyAllCaches()
	defer cache.SyncSubnetIDs.EmptyAllCaches()

-	const altairForkEpoch = 5
+	const (
+		altairForkEpoch = 5
+		fuluForkEpoch   = 10
+	)
+
+	custodyGroupCount := params.BeaconConfig().CustodyRequirement

	// Set up epochs.
	defaultCfg := params.BeaconConfig()
	cfg := defaultCfg.Copy()
	cfg.AltairForkEpoch = altairForkEpoch
+	cfg.FuluForkEpoch = fuluForkEpoch
	params.OverrideBeaconConfig(cfg)

	// Compute the number of seconds per epoch.
@@ -684,6 +718,39 @@ func TestRefreshPersistentSubnets(t *testing.T) {
				},
			},
		},
+		{
+			name:              "Fulu",
+			epochSinceGenesis: fuluForkEpoch,
+			checks: []check{
+				{
+					pingCount:              0,
+					metadataSequenceNumber: 0,
+					attestationSubnets:     []uint64{},
+					syncSubnets:            nil,
+				},
+				{
+					pingCount:              1,
+					metadataSequenceNumber: 1,
+					attestationSubnets:     []uint64{40, 41},
+					syncSubnets:            nil,
+					custodyGroupCount:      &custodyGroupCount,
+				},
+				{
+					pingCount:              2,
+					metadataSequenceNumber: 2,
+					attestationSubnets:     []uint64{40, 41},
+					syncSubnets:            []uint64{1, 2},
+					custodyGroupCount:      &custodyGroupCount,
+				},
+				{
+					pingCount:              2,
+					metadataSequenceNumber: 2,
+					attestationSubnets:     []uint64{40, 41},
+					syncSubnets:            []uint64{1, 2},
+					custodyGroupCount:      &custodyGroupCount,
+				},
+			},
+		},
	}

	for _, tc := range testCases {
@@ -717,7 +784,7 @@ func TestRefreshPersistentSubnets(t *testing.T) {
				actualPingCount++
				return nil
			},
-			cfg:                   &Config{UDPPort: 2000},
+			cfg:                   &Config{UDPPort: 2000, CustodyInfo: &peerdas.CustodyInfo{}},
			peers:                 p2p.Peers(),
			genesisTime:           time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
			genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
@@ -121,7 +121,7 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
		return defaultAttesterSlashingTopicParams(), nil
	case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
		return defaultBlsToExecutionChangeTopicParams(), nil
-	case strings.Contains(topic, GossipBlobSidecarMessage):
+	case strings.Contains(topic, GossipBlobSidecarMessage), strings.Contains(topic, GossipDataColumnSidecarMessage):
		// TODO(Deneb): Using the default block scoring. But this should be updated.
		return defaultBlockTopicParams(), nil
	default:
@@ -22,6 +22,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
	SyncCommitteeSubnetTopicFormat:        func() proto.Message { return &ethpb.SyncCommitteeMessage{} },
	BlsToExecutionChangeSubnetTopicFormat: func() proto.Message { return &ethpb.SignedBLSToExecutionChange{} },
	BlobSubnetTopicFormat:                 func() proto.Message { return &ethpb.BlobSidecar{} },
+	DataColumnSubnetTopicFormat:           func() proto.Message { return &ethpb.DataColumnSidecar{} },
}

// GossipTopicMappings is a function to return the assigned data type
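For orientation, the key added here is the same format string that dataColumnSubnetToTopic fills in the broadcaster, and TestService_BroadcastDataColumn exercises the full construction end to end. A compressed sketch of how the complete topic name is assembled (the subnet value is illustrative):

// Sketch: build the full gossip topic for data column subnet 3.
digest, err := s.currentForkDigest()
if err != nil {
	log.WithError(err).Error("Could not get fork digest")
}
topic := fmt.Sprintf(DataColumnSubnetTopicFormat, digest, uint64(3))
topic += s.Encoding().ProtocolSuffix() // encoder suffix, as in the test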
Some files were not shown because too many files have changed in this diff.