Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)
Compare commits — 181 commits (block-by-r...jimmy)
Commits in this range (SHA1 only; author, date, and message were not captured in this view):

2c56c650e6, 6c0bba7197, 35a2a32106, 2a72703d3e, 3b5a6b5e2f, 36958b552d, ae1a6be8a3, 4f146f9a30,
f07036ab3c, da9d4cf5b9, 56208aa84d, b866a2c744, a77234e637, a62cca15dd, e0e7354708, 0f86a16915,
972c22b02f, 93c27340e4, c3edb32558, 3baaa732df, 8ceb7e76ea, 4d5dddd302, 55efccb07f, 961d8e1481,
d396a9931e, e3f8f121f4, 80f29e9eda, 8995d8133a, 31044206b8, ac04246a2a, 0923145bd7, 3a1702e56f,
501ec74a48, c248fe0bb3, 215fbcb2e4, e39f44b529, a216cb4105, 9eff6ae476, 3eec5a5cb6, 66878deb2c,
0b6e1711e4, 15025837bb, 0229a2055e, eb9af15c7a, 0584746815, 8c4ea850ba, 4b43f13e65, 26d35474e9,
9fbe3564df, bed5547890, 47922fe7d8, dcd25d1d97, 81a2a17c5f, 6b3f1de19d, 7c17af2a41, ecf5a368d7,
557c5be433, 49405c3afd, 3439122629, f6e5da6723, 842f241cb9, 41daac1b04, 2a7fc84044, 44ff0b1a14,
91cdd318a8, 3dc00816fb, e331d5b371, 8d5090ce54, 25244d906d, aa445713ac, 177769a1ce, 967e9255a2,
01705d1f3d, 14f93b4e9d, ad11036c36, 632a06076b, 242c2b0268, 19662da905, 7faee5af35, 805ee1bf31,
bea46fdfa1, f6b1fb1c88, 6fb349ea76, e5a425f5c7, f157d37e4c, 5f08559bef, a082d2aecd, bcfaff8504,
d8e09c346f, 876519731b, de05b83aca, 56c73e7193, 859ac008a8, f882bd27c8, 361e5759c1, 34ef0da896,
726e8b962f, 453ea01deb, 6537f8011e, 5f17317c1c, 3432ffa4a3, 9dac67635b, 9be69fbd07, e21261e893,
da53a8fc48, a14634e656, 43761a8066, 01dbc337c0, 92f9b55fcb, f65f12f58b, f2b61a3dcf, 77a6d29a2e,
31d16da3a0, 19221b77bd, 83df293647, c20c09ce36, 2191faaa3f, 2de1e6f3e4, db44df3964, f92eb44c89,
a26980b64d, f58cf7e626, 68da7dabe2, d1e43a2c02, 3652bec2f8, 81b7a1725f, 0c917079c4, a732fe7021,
d75a7aae6a, e788a46e82, 199543125a, ca63efa770, 345e6edd9c, 6403064126, 0517d76631, 000d480f77,
b40a8ed37e, d21c2bd63e, 7a256e93f7, 07fe76c2da, 54affa897f, ac4c5fae3c, 2845d87077, dc2c90b8ed,
b469157e1f, 2697794e58, 48cf24edb4, 78f90db90b, d0a3b9bc1d, bfdb6dab86, 7dd2fd52af, b6bad9331b,
6e2122085d, 7a847292aa, 81f4db0afa, a7dc2e6c8b, 0a010b5088, 1e335e2cf2, 42f4c0f14e, d3c12abe25,
b0ba05b4f4, e206506489, 013cb28663, 496914cb39, c032e78888, 5e4deff6fd, 6daa91c465, 32ce6423eb,
b0ea450df5, 8bd10df423, dcbb543be2, be0580e1a9, 1355178115, b78c3485b9, f503efc6ed, 1bfbd3980e,
3e722ea1bc, d844026433, 9ffc19d5ef, 3e23f6e879, c688c84393
.bazelrc
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true
CHANGELOG.md
@@ -4,6 +4,141 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v5.3.0](https://github.com/prysmaticlabs/prysm/compare/v5.2.0...v5.3.0) - 2025-02-12

This release includes support for Pectra activation in the [Holesky](https://github.com/eth-clients/holesky) and [Sepolia](https://github.com/eth-clients/sepolia) testnets! The release contains many fixes for Electra that have been found in rigorous testing through devnets in the last few months.

For mainnet, we have a few nice features for you to try:

- [PR #14023](https://github.com/prysmaticlabs/prysm/pull/14023) introduces a new file layout structure for storing blobs. Rather than storing all blob root directories in one parent directory, blob root directories are organized in subdirectories by epoch. This should vastly decrease the blob cache warm-up time when Prysm is starting. Try this feature with `--blob-storage-layout=by-epoch`.

Updating to this release is **required** for Holesky and Sepolia operators, and it is **recommended** for mainnet users as there are a few bug fixes that apply to Deneb logic.

### Added

- Added an error field to the log `Finished building block`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14696)
- Implemented a new `EmptyExecutionPayloadHeader` function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Added a proper gas limit check for the header from the builder. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14707)
- `Finished building block`: display the error only if it is not nil. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14722)
- Added light client feature flag check to RPC handlers. [PR](https://github.com/prysmaticlabs/prysm/pull/14736). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Added support to update target and max blob count to different values per hard fork config. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14678)
- Log before blob filesystem cache warm-up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- New design for the attestation pool. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14324)
- Add field param placeholder for Electra blob target and max to pass spec tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14733)
- Light client: Add better error handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14749). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Add EIP-7691: Blob throughput increase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14750)
- Trace IDONTWANT messages in pubsub. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14778)
- Add Fulu fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14771)
- DB optimization for saving light client bootstraps (save unique sync committees only). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Separate type for unaggregated network attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14659)
- Remote signer Electra fork support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14477)
- Add Electra test case to the rewards API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14816)
- Update `proto_test.go` to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14817)
- Update slasher service to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14812)
- Builder API endpoint to support Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14344)
- Added protoc toolchains with a version of v25.3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Add test cases for the eth_lightclient_bootstrap API SSZ support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14824)
- Handle `AttesterSlashingElectra` everywhere in the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14823)
- Add Beacon DB pruning service to prune historical data older than MIN_EPOCHS_FOR_BLOCK_REQUESTS (roughly equivalent to the weak subjectivity period). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14687)
- Nil consolidation request check for core processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14851)
- Updated blob sidecar API endpoint for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Slashing pool service to convert slashings from Phase0 to Electra at the fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14844)
- Check to stop eth1 voting after Electra and eth1 deposits stop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14835)
- WARN log message on node startup advising of the upcoming deprecation of the `--enable-historical-state-representation` feature flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14856)
- Beacon API event support for `SingleAttestation` and `SignedAggregateAttestationAndProofElectra`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14855)
- Added Electra tests for `TestLightClient_NewLightClientOptimisticUpdateFromBeaconState` and `TestLightClient_NewLightClientFinalityUpdateFromBeaconState`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14783)
- New option to select an alternate blob storage layout. Rather than a flat directory with a subdir for each block root, a multi-level scheme is used to organize blobs by epoch/slot/root, enabling leaner syscalls, indexing and pruning. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14023)
- Send the pending attestation queue's attestations through the notification feed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14862)
- Prune all pending deposits and proofs post-Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14829)
- Add Pectra testnet dates (Sepolia and Holesky). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14884)

### Changed

- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Refactor subnets subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14711)
- Refactor RPC handlers subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14732)
- Go deps upgrade, from `ioutil` to `io`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14737)
- Move the successfully registered validator(s) on builder log to debug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- Update some test files to use `crypto/rand` instead of `math/rand`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14747)
- Re-organize the content of the `*.proto` files (no functional change). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14755)
- SSZ files generation: Remove the `// Hash: ...` header. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14760)
- Updated Electra spec definition for `process_epoch`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14768)
- Update our `go-libp2p-pubsub` dependency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14770)
- Re-organize the content of files to ease the creation of a new fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14761)
- Updated spec definition for Electra `process_registry_updates`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14767)
- Fixed Metadata errors for peers connected via QUIC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14776)
- Updated spec definitions for `process_slashings` in godocs. Simplified the `ProcessSlashings` API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14766)
- Update spec tests to v1.5.0-beta.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14788)
- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14718)
- Update blobs-by-RPC topics from V2 to V1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14785)
- Updated geth to 1.14~. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- E2e tests start from Bellatrix. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Version pinning unclog after making some UX improvements. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14802)
- Remove helpers to check for execution/compounding withdrawal credentials and expose them as methods. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14808)
- Refactor `2006-01-02 15:04:05` to `time.DateTime`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14792)
- Updated Prysm to Go v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated Bazel version to v7.4.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated rules_go to v0.46.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated golang.org/x/tools to be compatible with v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- CI now requires proto files to be properly formatted with clang-format. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14831)
- Improved test coverage of beacon-chain/core/electra/churn.go. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14837)
- Update Electra spec tests to beta1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14841)
- Move deposit request nil check to apply all. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14849)
- Do not mark blocks as invalid on context deadlines during state transition. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14838)
- Update Electra core processing to not mark a block bad on an execution request error. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14826)
- Dependency: Updated go-ethereum to v1.14.13. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14872)
- Improved readability of the proposer settings loader. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14868)
- Removes the existing validator.processSlot span and adds a validator.processSlot span to slotCtx. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14874)
- DownloadFinalizedData has moved from the api/client package to beacon-chain/sync/checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14871)
- Updated Blob-Batch-Limit to increase to 192 for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated Blob-Batch-Limit-Burst-Factor to increase to 3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Changed the derived batch limit when serving blobs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated go-libp2p-pubsub to v0.13.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14890)
- Rename light client flag from `enable-lightclient` to `enable-light-client`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14887)
- Update Electra spec tests to beta2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14901)

### Removed

- Cleanup of the ProcessSlashings method to remove an unnecessary argument. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14762)
- Remove the `/proto/eth/v2` directory. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14765)
- Remove the `/memsize/` pprof endpoint as it will no longer be supported in Go 1.23. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Clean `TestCanUpgrade*` tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14791)
- Remove `Copy()` from the `ReadOnlyBeaconBlock` interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14811)
- Removed a tracing span on signature requests. These requests usually took less than 5 nanoseconds and are generally not worth tracing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14864)

### Fixed

- Added a check to prevent a nil pointer dereference or out-of-bounds array access when validating the BLSToExecutionChange on an impossibly nil validator. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14705)
- EIP-7691: Ensure new blob subnets are subscribed one epoch in advance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14759)
- Fix kzg commitment inclusion proof depth minimal value. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14787)
- Replace exampleIP with `96.7.129.13`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14795)
- Fixed a p2p test to reliably return a static IP through DNS resolution. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14800)
- `ToBlinded`: Use the Fulu struct for Fulu (instead of Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14797)
- Fix panic with type cast on pbgenericblock(). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14801)
- Prysmctl generate genesis state: fix truncation of ExtraData to 32 bytes to satisfy SSZ marshaling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14803)
- Added conditional evaluators to fix scenario e2e tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14798)
- Use `SingleAttestation` for Fulu in the p2p attestation map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14809)
- `UpgradeToFulu`: Respect the specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14821)
- `nodeFilter`: Implement `filterPeerForBlobSubnet` to avoid error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14822)
- Fixed deposit packing for post-Electra: early return if EIP-6110 is applied. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14697)
- Fix batch processing of new pending deposits by getting validators from state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14827)
- Fix handling of an unfound block at a slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Fixed an incorrect attester slashing length check. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14833)
- Fix the monitor service for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14853)
- Add more nil checks on ToConsensus functions for added safety. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14867)
- Fix the Electra state to safely share references on pending fields when appending. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14895)
- Add missing config values from the spec. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14903)
- We remove the unused `rebuildTrie` assignments for fields which do not use them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14906)
- Fix the block API endpoint to handle blocks with the same structure but on different forks (i.e. Fulu and Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14897)
- We change how we track blob indexes during their reconstruction from the EL to prevent. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14909)
- We now use the correct maximum value when serving blobs for Electra blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14910)

### Security

- Go version upgrade to 1.22.10 for CVE-2024-34156. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14729)
- Update golang.org/x/crypto to v0.31.0 to address CVE-2024-45337. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14777)
- Update golang.org/x/net to v0.33.0 to address CVE-2024-45338. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14780)

## [v5.2.0](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...v5.2.0)

Updating to this release is highly recommended, especially for users running v5.1.1 or v5.1.2.

@@ -2987,4 +3122,4 @@ There are no security updates in this release.

# Older than v2.0.0

For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
@@ -55,7 +55,7 @@ bazel build //beacon-chain --config=release

## Adding / updating dependencies

1. Add your dependency as you would with go modules. I.e. `go get ...`
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod` to update the bazel managed dependencies.
1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true` to update the bazel managed dependencies.

Example:
WORKSPACE
@@ -255,7 +255,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.5.0-beta.1"
consensus_spec_version = "v1.5.0-alpha.10"

bls_test_version = "v0.1.1"

@@ -271,7 +271,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-R6r60geCfEjMaB1Ag3svaMFXFIgaJvkTJhfKsf76rFE=",
    integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -287,7 +287,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-2Pem2gMHxW/6bBhZ2BaqkQruQSd/dTS3WMaMQO8rZ/o=",
    integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -303,7 +303,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-5yP05JTV1MhcUZ2kSh+T+kXjG+uW3A5877veC5c1mD4=",
    integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -318,7 +318,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-O6Rg6h19T0RsJs0sBDZ9O1k4LnCJ/gu2ilHijFBVfME=",
    integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "checkpoint.go",
        "client.go",
        "doc.go",
        "health.go",
@@ -16,28 +15,19 @@ go_library(
        "//api/client/beacon/iface:go_default_library",
        "//api/server:go_default_library",
        "//api/server/structs:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//io/file:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_x_mod//semver:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "checkpoint_test.go",
        "client_test.go",
        "health_test.go",
    ],
@@ -45,19 +35,7 @@ go_test(
    deps = [
        "//api/client:go_default_library",
        "//api/client/beacon/testing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/blocks/testing:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],
)
@@ -1,276 +0,0 @@
package beacon

import (
    "context"
    "fmt"
    "path"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    base "github.com/prysmaticlabs/prysm/v5/api/client"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
    "github.com/prysmaticlabs/prysm/v5/io/file"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
    "golang.org/x/mod/semver"
)

var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")

// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
    sb []byte
    bb []byte
    st state.BeaconState
    b  interfaces.ReadOnlySignedBeaconBlock
    vu *detect.VersionedUnmarshaler
    br [32]byte
    sr [32]byte
}

// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveBlock(dir string) (string, error) {
    blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
    return blockPath, file.WriteFile(blockPath, o.BlockBytes())
}

// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveState(dir string) (string, error) {
    statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
    return statePath, file.WriteFile(statePath, o.StateBytes())
}

// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (o *OriginData) StateBytes() []byte {
    return o.sb
}

// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
    return o.bb
}

func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
    return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}

// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
// This pair can be used to initialize a new beacon node via checkpoint sync.
func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
    sb, err := client.GetState(ctx, IdFinalized)
    if err != nil {
        return nil, err
    }
    vu, err := detect.FromState(sb)
    if err != nil {
        return nil, errors.Wrap(err, "error detecting chain config for finalized state")
    }

    log.WithFields(logrus.Fields{
        "name": vu.Config.ConfigName,
        "fork": version.String(vu.Fork),
    }).Info("Detected supported config in remote finalized state")

    s, err := vu.UnmarshalBeaconState(sb)
    if err != nil {
        return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
    }

    slot := s.LatestBlockHeader().Slot
    bb, err := client.GetBlock(ctx, IdFromSlot(slot))
    if err != nil {
        return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
    }
    b, err := vu.UnmarshalBeaconBlock(bb)
    if err != nil {
        return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
    }
    br, err := b.Block().HashTreeRoot()
    if err != nil {
        return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
    }
    bodyRoot, err := b.Block().Body().HashTreeRoot()
    if err != nil {
        return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
    }

    sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
    if sbr != bodyRoot {
        return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
    }
    sr, err := s.HashTreeRoot(ctx)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
    }

    log.
        WithField("blockSlot", b.Block().Slot()).
        WithField("stateSlot", s.Slot()).
        WithField("stateRoot", hexutil.Encode(sr[:])).
        WithField("blockRoot", hexutil.Encode(br[:])).
        Info("Downloaded checkpoint sync state and block.")
    return &OriginData{
        st: s,
        b:  b,
        sb: sb,
        bb: bb,
        vu: vu,
        br: br,
        sr: sr,
    }, nil
}
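For orientation, here is a minimal usage sketch of the API above; it is not part of the diff. It assumes a reachable beacon node at http://localhost:3500 and the pre-move import path api/client/beacon. NewClient, DownloadFinalizedData, SaveState, and SaveBlock are taken from this file; everything else is illustrative.

package main

import (
    "context"
    "log"

    beacon "github.com/prysmaticlabs/prysm/v5/api/client/beacon"
)

func main() {
    ctx := context.Background()
    // Assumed endpoint; any synced node exposing the standard beacon API should work.
    client, err := beacon.NewClient("http://localhost:3500")
    if err != nil {
        log.Fatal(err)
    }
    // Fetch the most recently finalized state and the block it embeds.
    od, err := beacon.DownloadFinalizedData(ctx, client)
    if err != nil {
        log.Fatal(err)
    }
    // Write both as ssz files into ./checkpoint for later use as a checkpoint-sync origin.
    if _, err := od.SaveState("./checkpoint"); err != nil {
        log.Fatal(err)
    }
    if _, err := od.SaveBlock("./checkpoint"); err != nil {
        log.Fatal(err)
    }
}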
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
    BlockRoot [32]byte
    StateRoot [32]byte
    Epoch     primitives.Epoch
}

// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
    return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}

// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
// to obtain the current weak_subjectivity checkpoint.
// For non-prysm nodes, the same computation will be performed with extra steps,
// using the head state downloaded from the beacon node api.
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
    ws, err := client.GetWeakSubjectivity(ctx)
    if err != nil {
        // a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
        if !errors.Is(err, base.ErrNotOK) {
            return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
        }
        // fall back to vanilla Beacon Node API method
        return computeBackwardsCompatible(ctx, client)
    }
    log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
    return ws, nil
}

const (
    prysmMinimumVersion     = "v2.0.7"
    prysmImplementationName = "Prysm"
)

// errUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")

// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
    log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
    nv, err := client.GetNodeVersion(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
    }
    if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
        return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
    }
    epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
    if err != nil {
        return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
    }

    // use first slot of the epoch for the state slot
    slot, err := slots.EpochStart(epoch)
    if err != nil {
        return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
    }

    log.Printf("requesting checkpoint state at slot %d", slot)
    // get the state at the first slot of the epoch
    sb, err := client.GetState(ctx, IdFromSlot(slot))
    if err != nil {
        return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
    }

    // ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
    vu, err := detect.FromState(sb)
    if err != nil {
        return nil, errors.Wrap(err, "error detecting chain config for beacon state")
    }
    log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))

    s, err := vu.UnmarshalBeaconState(sb)
    if err != nil {
        return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
    }

    // compute state and block roots
    sr, err := s.HashTreeRoot(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "error computing hash_tree_root of state")
    }

    h := s.LatestBlockHeader()
    h.StateRoot = sr[:]
    br, err := h.HashTreeRoot()
    if err != nil {
        return nil, errors.Wrap(err, "error while computing block root using state data")
    }

    bb, err := client.GetBlock(ctx, IdFromRoot(br))
    if err != nil {
        return nil, errors.Wrapf(err, "error requesting block by root = %d", br)
    }
    b, err := vu.UnmarshalBeaconBlock(bb)
    if err != nil {
        return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
    }
    br, err = b.Block().HashTreeRoot()
    if err != nil {
        return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
    }

    return &WeakSubjectivityData{
        Epoch:     epoch,
        BlockRoot: br,
        StateRoot: sr,
    }, nil
}

// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
    headBytes, err := client.GetState(ctx, IdHead)
    if err != nil {
        return 0, err
    }
    vu, err := detect.FromState(headBytes)
    if err != nil {
        return 0, errors.Wrap(err, "error detecting chain config for beacon state")
    }
    log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
    headState, err := vu.UnmarshalBeaconState(headBytes)
    if err != nil {
        return 0, errors.Wrap(err, "error unmarshaling state to correct version")
    }

    epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
    if err != nil {
        return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
    }

    log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
    return epoch, nil
}
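A small illustrative sketch (not part of the diff) of how the derived checkpoint is typically rendered for operator use; only ComputeWeakSubjectivityCheckpoint and CheckpointString come from the file above, the helper name is hypothetical, and the imports are the same as in the earlier sketch plus "fmt".

// printWSCheckpoint is a hypothetical helper: derive the weak subjectivity
// checkpoint and print it in the "block_root:epoch" form CheckpointString produces.
func printWSCheckpoint(ctx context.Context, client *beacon.Client) error {
    wsd, err := beacon.ComputeWeakSubjectivityCheckpoint(ctx, client)
    if err != nil {
        return err
    }
    // Example output shape:
    // 0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888
    fmt.Println(wsd.CheckpointString())
    return nil
}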
@@ -29,12 +29,13 @@ const (
    getSignedBlockPath       = "/eth/v2/beacon/blocks"
    getBlockRootPath         = "/eth/v1/beacon/blocks/{{.Id}}/root"
    getForkForStatePath      = "/eth/v1/beacon/states/{{.Id}}/fork"
    getWeakSubjectivityPath  = "/prysm/v1/beacon/weak_subjectivity"
    getForkSchedulePath      = "/eth/v1/config/fork_schedule"
    getConfigSpecPath        = "/eth/v1/config/spec"
    getStatePath             = "/eth/v2/debug/beacon/states"
    getNodeVersionPath       = "/eth/v1/node/version"
    changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"

    GetNodeVersionPath      = "/eth/v1/node/version"
    GetWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
)

// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.

@@ -80,7 +81,8 @@ func idTemplate(ts string) func(StateOrBlockId) string {
    return f
}

func renderGetBlockPath(id StateOrBlockId) string {
// RenderGetBlockPath formats a block id into a path for the GetBlock API endpoint.
func RenderGetBlockPath(id StateOrBlockId) string {
    return path.Join(getSignedBlockPath, string(id))
}
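The exported helper is a thin wrapper over path.Join with the constants above; the id values below are illustrative (RenderGetStatePath is the state-side helper renamed in a later hunk below):

// Illustrative values, not from the diff:
//   RenderGetBlockPath("finalized") == "/eth/v2/beacon/blocks/finalized"
//   RenderGetBlockPath("head")      == "/eth/v2/beacon/blocks/head"
//   RenderGetStatePath("head")      == "/eth/v2/debug/beacon/states/head"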
@@ -104,7 +106,7 @@ func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
// for the named identifiers.
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
    blockPath := renderGetBlockPath(blockId)
    blockPath := RenderGetBlockPath(blockId)
    b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
    if err != nil {
        return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)

@@ -195,6 +197,10 @@ type NodeVersion struct {
    systemInfo string
}

func (nv *NodeVersion) SetImplementation(impl string) {
    nv.implementation = impl
}

var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)

func parseNodeVersion(v string) (*NodeVersion, error) {
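For reference, a small standalone sketch (not from the diff) of what the version regular expression above matches, using the example string from the GetNodeVersion comment below; it applies the same pattern directly rather than through parseNodeVersion:

package main

import (
    "fmt"
    "regexp"
)

// Same pattern as versionRE in the client: implementation / semver / optional system info.
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)

func main() {
    m := versionRE.FindStringSubmatch("Lighthouse/v0.1.5 (Linux x86_64)")
    if m != nil {
        fmt.Println("implementation:", m[1]) // Lighthouse
        fmt.Println("semver:", m[2])         // v0.1.5
        fmt.Println("systemInfo:", m[3])     // (Linux x86_64)
    }
}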
@@ -212,7 +218,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to a HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
    b, err := c.Get(ctx, getNodeVersionPath)
    b, err := c.Get(ctx, GetNodeVersionPath)
    if err != nil {
        return nil, errors.Wrap(err, "error requesting node version")
    }

@@ -228,7 +234,8 @@ func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
    return parseNodeVersion(d.Data.Version)
}

func renderGetStatePath(id StateOrBlockId) string {
// RenderGetStatePath formats a state id into a path for the GetState API endpoint.
func RenderGetStatePath(id StateOrBlockId) string {
    return path.Join(getStatePath, string(id))
}

@@ -246,13 +253,29 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
    return b, nil
}

// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
    BlockRoot [32]byte
    StateRoot [32]byte
    Epoch     primitives.Epoch
}

// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
    return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}

// GetWeakSubjectivity calls a proposed API endpoint that is unique to prysm
// This api method does the following:
// - computes weak subjectivity epoch
// - finds the highest non-skipped block preceding the epoch
// - returns the htr of the found block and returns this + the value of state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
    body, err := c.Get(ctx, getWeakSubjectivityPath)
    body, err := c.Get(ctx, GetWeakSubjectivityPath)
    if err != nil {
        return nil, err
    }

@@ -97,31 +97,31 @@ func TestValidHostname(t *testing.T) {
        {
            name:    "hostname with port",
            hostArg: "mydomain.org:3500",
            path:    getNodeVersionPath,
            path:    GetNodeVersionPath,
            joined:  "http://mydomain.org:3500/eth/v1/node/version",
        },
        {
            name:    "https scheme, hostname with port",
            hostArg: "https://mydomain.org:3500",
            path:    getNodeVersionPath,
            path:    GetNodeVersionPath,
            joined:  "https://mydomain.org:3500/eth/v1/node/version",
        },
        {
            name:    "http scheme, hostname without port",
            hostArg: "http://mydomain.org",
            path:    getNodeVersionPath,
            path:    GetNodeVersionPath,
            joined:  "http://mydomain.org/eth/v1/node/version",
        },
        {
            name:    "http scheme, trailing slash, hostname without port",
            hostArg: "http://mydomain.org/",
            path:    getNodeVersionPath,
            path:    GetNodeVersionPath,
            joined:  "http://mydomain.org/eth/v1/node/version",
        },
        {
            name:    "http scheme, hostname with basic auth creds and no port",
            hostArg: "http://username:pass@mydomain.org/",
            path:    getNodeVersionPath,
            path:    GetNodeVersionPath,
            joined:  "http://username:pass@mydomain.org/eth/v1/node/version",
        },
    }
@@ -46,6 +46,7 @@ go_test(
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
        "//api:go_default_library",
        "//api/server/structs:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -154,6 +154,10 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
    if err != nil {
        return
    }
    if method == http.MethodPost {
        req.Header.Set("Content-Type", api.JsonMediaType)
    }
    req.Header.Set("Accept", api.JsonMediaType)
    req.Header.Add("User-Agent", version.BuildData())
    for _, o := range opts {
        o(req)
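A standalone sketch of the request-header pattern added here (illustrative only; it assumes the standard library imports context, io, and net/http, and uses the literal "application/json" in place of the project's api.JsonMediaType constant):

// newJSONRequest is a hypothetical helper mirroring the header handling added to do():
// JSON Content-Type only for POST, JSON Accept and a User-Agent on every request.
func newJSONRequest(ctx context.Context, method, url string, body io.Reader) (*http.Request, error) {
    req, err := http.NewRequestWithContext(ctx, method, url, body)
    if err != nil {
        return nil, err
    }
    if method == http.MethodPost {
        req.Header.Set("Content-Type", "application/json") // api.JsonMediaType in the real client
    }
    req.Header.Set("Accept", "application/json")
    req.Header.Add("User-Agent", "prysm-example/0.0.0") // version.BuildData() in the real client
    return req, nil
}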
@@ -12,6 +12,7 @@ import (
    "testing"

    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/api"
    "github.com/prysmaticlabs/prysm/v5/api/server/structs"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"

@@ -89,6 +90,8 @@ func TestClient_RegisterValidator(t *testing.T) {
    expectedPath := "/eth/v1/builder/validators"
    hc := &http.Client{
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            body, err := io.ReadAll(r.Body)
            defer func() {
                require.NoError(t, r.Body.Close())

@@ -364,8 +367,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
            require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
            require.Equal(t, "application/json", r.Header.Get("Content-Type"))
            require.Equal(t, "application/json", r.Header.Get("Accept"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            return &http.Response{
                StatusCode: http.StatusOK,
                Body:       io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),

@@ -392,8 +395,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
            require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
            require.Equal(t, "application/json", r.Header.Get("Content-Type"))
            require.Equal(t, "application/json", r.Header.Get("Accept"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            return &http.Response{
                StatusCode: http.StatusOK,
                Body:       io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),

@@ -423,8 +426,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
            require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
            require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
            require.Equal(t, "application/json", r.Header.Get("Content-Type"))
            require.Equal(t, "application/json", r.Header.Get("Accept"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
            require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
            var req structs.SignedBlindedBeaconBlockDeneb
            err := json.NewDecoder(r.Body).Decode(&req)
            require.NoError(t, err)
@@ -579,14 +579,14 @@ type SignedBeaconBlockContentsFulu struct {
}

type BeaconBlockContentsFulu struct {
    Block *BeaconBlockFulu `json:"block"`
    KzgProofs []string `json:"kzg_proofs"`
    Blobs []string `json:"blobs"`
    Block *BeaconBlockElectra `json:"block"`
    KzgProofs []string `json:"kzg_proofs"`
    Blobs []string `json:"blobs"`
}

type SignedBeaconBlockFulu struct {
    Message *BeaconBlockFulu `json:"message"`
    Signature string `json:"signature"`
    Message *BeaconBlockElectra `json:"message"`
    Signature string `json:"signature"`
}

var _ SignedMessageJsoner = &SignedBeaconBlockFulu{}

@@ -599,36 +599,12 @@ func (s *SignedBeaconBlockFulu) SigString() string {
    return s.Signature
}

type BeaconBlockFulu struct {
    Slot string `json:"slot"`
    ProposerIndex string `json:"proposer_index"`
    ParentRoot string `json:"parent_root"`
    StateRoot string `json:"state_root"`
    Body *BeaconBlockBodyFulu `json:"body"`
}

type BeaconBlockBodyFulu struct {
    RandaoReveal string `json:"randao_reveal"`
    Eth1Data *Eth1Data `json:"eth1_data"`
    Graffiti string `json:"graffiti"`
    ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
    AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
    Attestations []*AttestationElectra `json:"attestations"`
    Deposits []*Deposit `json:"deposits"`
    VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
    SyncAggregate *SyncAggregate `json:"sync_aggregate"`
    ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
    BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
    BlobKzgCommitments []string `json:"blob_kzg_commitments"`
    ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}

type BlindedBeaconBlockFulu struct {
    Slot string `json:"slot"`
    ProposerIndex string `json:"proposer_index"`
    ParentRoot string `json:"parent_root"`
    StateRoot string `json:"state_root"`
    Body *BlindedBeaconBlockBodyFulu `json:"body"`
    Slot string `json:"slot"`
    ProposerIndex string `json:"proposer_index"`
    ParentRoot string `json:"parent_root"`
    StateRoot string `json:"state_root"`
    Body *BlindedBeaconBlockBodyElectra `json:"body"`
}

type SignedBlindedBeaconBlockFulu struct {

@@ -645,19 +621,3 @@ func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
    return s.Signature
}

type BlindedBeaconBlockBodyFulu struct {
    RandaoReveal string `json:"randao_reveal"`
    Eth1Data *Eth1Data `json:"eth1_data"`
    Graffiti string `json:"graffiti"`
    ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
    AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
    Attestations []*AttestationElectra `json:"attestations"`
    Deposits []*Deposit `json:"deposits"`
    VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
    SyncAggregate *SyncAggregate `json:"sync_aggregate"`
    ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
    BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
    BlobKzgCommitments []string `json:"blob_kzg_commitments"`
    ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
@@ -52,6 +52,9 @@ func HistoricalSummaryFromConsensus(s *eth.HistoricalSummary) *HistoricalSummary
}

func (s *SignedBLSToExecutionChange) ToConsensus() (*eth.SignedBLSToExecutionChange, error) {
    if s.Message == nil {
        return nil, server.NewDecodeError(errNilValue, "Message")
    }
    change, err := s.Message.ToConsensus()
    if err != nil {
        return nil, server.NewDecodeError(err, "Message")

@@ -103,14 +106,17 @@ func SignedBLSChangeFromConsensus(ch *eth.SignedBLSToExecutionChange) *SignedBLS

func SignedBLSChangesToConsensus(src []*SignedBLSToExecutionChange) ([]*eth.SignedBLSToExecutionChange, error) {
    if src == nil {
        return nil, errNilValue
        return nil, server.NewDecodeError(errNilValue, "SignedBLSToExecutionChanges")
    }
    err := slice.VerifyMaxLength(src, 16)
    if err != nil {
        return nil, err
        return nil, server.NewDecodeError(err, "SignedBLSToExecutionChanges")
    }
    changes := make([]*eth.SignedBLSToExecutionChange, len(src))
    for i, ch := range src {
        if ch == nil {
            return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
        }
        changes[i], err = ch.ToConsensus()
        if err != nil {
            return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
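The same defensive pattern repeats throughout the hunks below: check the nested pointer for nil before calling its ToConsensus, and wrap any failure in a decode error that names the offending field. A self-contained toy sketch of the shape (server.NewDecodeError and errNilValue are the real names used in the diff; the types and helper below are illustrative stand-ins):

package main

import (
    "errors"
    "fmt"
)

// Toy stand-ins for server.NewDecodeError / errNilValue used in the diff.
var errNilValue = errors.New("nil value")

func newDecodeError(err error, field string) error {
    return fmt.Errorf("could not decode %q: %w", field, err)
}

type inner struct{ V string }

func (i *inner) ToConsensus() (string, error) { return i.V, nil }

type outer struct{ Message *inner }

// ToConsensus mirrors the pattern added in this diff: nil-check the nested
// pointer before dereferencing it, and name the field in the returned error.
func (o *outer) ToConsensus() (string, error) {
    if o.Message == nil {
        return "", newDecodeError(errNilValue, "Message")
    }
    return o.Message.ToConsensus()
}

func main() {
    _, err := (&outer{}).ToConsensus()
    fmt.Println(err) // could not decode "Message": nil value
}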
@@ -156,6 +162,9 @@ func ForkFromConsensus(f *eth.Fork) *Fork {
|
||||
}
|
||||
|
||||
func (s *SignedValidatorRegistration) ToConsensus() (*eth.SignedValidatorRegistrationV1, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -212,6 +221,9 @@ func SignedValidatorRegistrationFromConsensus(vr *eth.SignedValidatorRegistratio
|
||||
}
|
||||
|
||||
func (s *SignedContributionAndProof) ToConsensus() (*eth.SignedContributionAndProof, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -236,6 +248,9 @@ func SignedContributionAndProofFromConsensus(c *eth.SignedContributionAndProof)
|
||||
}
|
||||
|
||||
func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error) {
|
||||
if c.Contribution == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Contribution")
|
||||
}
|
||||
contribution, err := c.Contribution.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Contribution")
|
||||
@@ -307,6 +322,9 @@ func SyncCommitteeContributionFromConsensus(c *eth.SyncCommitteeContribution) *S
|
||||
}
|
||||
|
||||
func (s *SignedAggregateAttestationAndProof) ToConsensus() (*eth.SignedAggregateAttestationAndProof, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -327,6 +345,9 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregatorIndex")
|
||||
}
|
||||
if a.Aggregate == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Aggregate")
|
||||
}
|
||||
agg, err := a.Aggregate.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
@@ -343,6 +364,9 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
|
||||
}
|
||||
|
||||
func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -363,6 +387,9 @@ func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttes
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregatorIndex")
|
||||
}
|
||||
if a.Aggregate == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Aggregate")
|
||||
}
|
||||
agg, err := a.Aggregate.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
@@ -383,6 +410,9 @@ func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregationBits")
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -412,6 +442,9 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregationBits")
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -433,6 +466,15 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SingleAttFromConsensus(a *eth.SingleAttestation) *SingleAttestation {
|
||||
return &SingleAttestation{
|
||||
CommitteeIndex: fmt.Sprintf("%d", a.CommitteeId),
|
||||
AttesterIndex: fmt.Sprintf("%d", a.AttesterIndex),
|
||||
Data: AttDataFromConsensus(a.Data),
|
||||
Signature: hexutil.Encode(a.Signature),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
|
||||
ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
|
||||
if err != nil {
|
||||
@@ -442,6 +484,9 @@ func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AttesterIndex")
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -481,10 +526,16 @@ func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BeaconBlockRoot")
|
||||
}
|
||||
if a.Source == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Source")
|
||||
}
|
||||
source, err := a.Source.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Source")
|
||||
}
|
||||
if a.Target == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Target")
|
||||
}
|
||||
target, err := a.Target.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Target")
|
||||
@@ -584,15 +635,17 @@ func (b *BeaconCommitteeSubscription) ToConsensus() (*validator.BeaconCommitteeS
|
||||
}
|
||||
|
||||
func (e *SignedVoluntaryExit) ToConsensus() (*eth.SignedVoluntaryExit, error) {
|
||||
sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
if e.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
exit, err := e.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
}
|
||||
|
||||
sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
return &eth.SignedVoluntaryExit{
|
||||
Exit: exit,
|
||||
Signature: sig,
|
||||
@@ -695,10 +748,16 @@ func Eth1DataFromConsensus(e1d *eth.Eth1Data) *Eth1Data {
|
||||
}
|
||||
|
||||
func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
|
||||
if s.SignedHeader1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "SignedHeader1")
|
||||
}
|
||||
h1, err := s.SignedHeader1.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SignedHeader1")
|
||||
}
|
||||
if s.SignedHeader2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "SignedHeader2")
|
||||
}
|
||||
h2, err := s.SignedHeader2.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SignedHeader2")
|
||||
@@ -711,10 +770,16 @@ func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
|
||||
}
|
||||
|
||||
func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation1")
|
||||
}
|
||||
att1, err := s.Attestation1.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation1")
|
||||
}
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation2")
|
||||
}
|
||||
att2, err := s.Attestation2.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation2")
|
||||
@@ -723,10 +788,16 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
|
||||
}
|
||||
|
||||
func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation1")
|
||||
}
|
||||
att1, err := s.Attestation1.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation1")
|
||||
}
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation2")
|
||||
}
|
||||
att2, err := s.Attestation2.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation2")
|
||||
@@ -747,6 +818,9 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
|
||||
}
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -779,6 +853,9 @@ func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectr
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
|
||||
}
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -934,11 +1011,11 @@ func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
|
||||
|
||||
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "ProposerSlashings")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "ProposerSlashings")
|
||||
}
|
||||
proposerSlashings := make([]*eth.ProposerSlashing, len(src))
|
||||
for i, s := range src {
|
||||
@@ -1067,11 +1144,11 @@ func ProposerSlashingFromConsensus(src *eth.ProposerSlashing) *ProposerSlashing
|
||||
|
||||
func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlashing, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "AttesterSlashings")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "AttesterSlashings")
|
||||
}
|
||||
|
||||
attesterSlashings := make([]*eth.AttesterSlashing, len(src))
|
||||
@@ -1082,10 +1159,19 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
|
||||
}
|
||||
|
||||
if s.Attestation1.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
|
||||
}
|
||||
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
|
||||
}
|
||||
|
||||
if s.Attestation2.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
|
||||
}
|
||||
|
||||
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
|
||||
@@ -1102,6 +1188,7 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
|
||||
}
|
||||
a1AttestingIndices[j] = attestingIndex
|
||||
}
|
||||
|
||||
a1Data, err := s.Attestation1.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
|
||||
@@ -1199,11 +1286,11 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
|
||||
|
||||
func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "AttesterSlashingsElectra")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, fieldparams.MaxAttesterSlashingsElectra)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "AttesterSlashingsElectra")
|
||||
}
|
||||
|
||||
attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
|
||||
@@ -1211,13 +1298,23 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if s == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
|
||||
}
|
||||
|
||||
if s.Attestation1.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
|
||||
}
|
||||
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
|
||||
}
|
||||
|
||||
if s.Attestation2.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
|
||||
}
|
||||
|
||||
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
|
||||
@@ -1331,15 +1428,18 @@ func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *Att
|
||||
|
||||
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestations")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 128)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "Attestations")
|
||||
}
|
||||
|
||||
atts := make([]*eth.Attestation, len(src))
|
||||
for i, a := range src {
|
||||
if a == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
atts[i], err = a.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
|
||||
@@ -1358,15 +1458,18 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
|
||||
|
||||
func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "AttestationsElectra")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 8)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "AttestationsElectra")
|
||||
}
|
||||
|
||||
atts := make([]*eth.AttestationElectra, len(src))
|
||||
for i, a := range src {
|
||||
if a == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
atts[i], err = a.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
|
||||
@@ -1385,11 +1488,11 @@ func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElect
|
||||
|
||||
func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "Deposits")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "Deposits")
|
||||
}
|
||||
|
||||
deposits := make([]*eth.Deposit, len(src))
|
||||
@@ -1461,15 +1564,18 @@ func DepositsFromConsensus(src []*eth.Deposit) []*Deposit {
|
||||
|
||||
func SignedExitsToConsensus(src []*SignedVoluntaryExit) ([]*eth.SignedVoluntaryExit, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "SignedVoluntaryExits")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "SignedVoluntaryExits")
|
||||
}
|
||||
|
||||
exits := make([]*eth.SignedVoluntaryExit, len(src))
|
||||
for i, e := range src {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
exits[i], err = e.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
|
||||
|
||||
@@ -3365,285 +3365,6 @@ func (b *BeaconBlockContentsFulu) ToConsensus() (*eth.BeaconBlockContentsFulu, e
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *BeaconBlockFulu) ToConsensus() (*eth.BeaconBlockFulu, error) {
|
||||
if b == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
if b.Body == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Body")
|
||||
}
|
||||
if b.Body.Eth1Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Body.Eth1Data")
|
||||
}
|
||||
if b.Body.SyncAggregate == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Body.SyncAggregate")
|
||||
}
|
||||
if b.Body.ExecutionPayload == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Body.ExecutionPayload")
|
||||
}
|
||||
|
||||
slot, err := strconv.ParseUint(b.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Slot")
|
||||
}
|
||||
proposerIndex, err := strconv.ParseUint(b.ProposerIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ProposerIndex")
|
||||
}
|
||||
parentRoot, err := bytesutil.DecodeHexWithLength(b.ParentRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ParentRoot")
|
||||
}
|
||||
stateRoot, err := bytesutil.DecodeHexWithLength(b.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "StateRoot")
|
||||
}
|
||||
randaoReveal, err := bytesutil.DecodeHexWithLength(b.Body.RandaoReveal, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.RandaoReveal")
|
||||
}
|
||||
depositRoot, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.DepositRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositRoot")
|
||||
}
|
||||
depositCount, err := strconv.ParseUint(b.Body.Eth1Data.DepositCount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositCount")
|
||||
}
|
||||
blockHash, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Eth1Data.BlockHash")
|
||||
}
|
||||
graffiti, err := bytesutil.DecodeHexWithLength(b.Body.Graffiti, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Graffiti")
|
||||
}
|
||||
proposerSlashings, err := ProposerSlashingsToConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ProposerSlashings")
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsElectraToConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.AttesterSlashings")
|
||||
}
|
||||
atts, err := AttsElectraToConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Attestations")
|
||||
}
|
||||
deposits, err := DepositsToConsensus(b.Body.Deposits)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Deposits")
|
||||
}
|
||||
exits, err := SignedExitsToConsensus(b.Body.VoluntaryExits)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.VoluntaryExits")
|
||||
}
|
||||
syncCommitteeBits, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeBits, fieldparams.SyncAggregateSyncCommitteeBytesLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeBits")
|
||||
}
|
||||
syncCommitteeSig, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeSignature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeSignature")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(b.Body.ExecutionPayload.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(b.Body.ExecutionPayload.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(b.Body.ExecutionPayload.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(b.Body.ExecutionPayload.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(b.Body.ExecutionPayload.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(b.Body.ExecutionPayload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayload.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlockHash")
|
||||
}
|
||||
err = slice.VerifyMaxLength(b.Body.ExecutionPayload.Transactions, fieldparams.MaxTxsPerPayloadLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.Transactions")
|
||||
}
|
||||
txs := make([][]byte, len(b.Body.ExecutionPayload.Transactions))
|
||||
for i, tx := range b.Body.ExecutionPayload.Transactions {
|
||||
txs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Transactions[%d]", i))
|
||||
}
|
||||
}
|
||||
err = slice.VerifyMaxLength(b.Body.ExecutionPayload.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.Withdrawals")
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, len(b.Body.ExecutionPayload.Withdrawals))
|
||||
for i, w := range b.Body.ExecutionPayload.Withdrawals {
|
||||
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
|
||||
}
|
||||
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
|
||||
}
|
||||
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.Withdrawals[%d].Amount", i))
|
||||
}
|
||||
withdrawals[i] = &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
|
||||
Address: address,
|
||||
Amount: amount,
|
||||
}
|
||||
}
|
||||
|
||||
payloadBlobGasUsed, err := strconv.ParseUint(b.Body.ExecutionPayload.BlobGasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.BlobGasUsed")
|
||||
}
|
||||
payloadExcessBlobGas, err := strconv.ParseUint(b.Body.ExecutionPayload.ExcessBlobGas, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExcessBlobGas")
|
||||
}
|
||||
|
||||
if b.Body.ExecutionRequests == nil {
|
||||
return nil, server.NewDecodeError(errors.New("nil execution requests"), "Body.ExecutionRequests")
|
||||
}
|
||||
|
||||
depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionRequests.Deposits))
|
||||
for i, d := range b.Body.ExecutionRequests.Deposits {
|
||||
depositRequests[i], err = d.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Deposits[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionRequests.Withdrawals))
|
||||
for i, w := range b.Body.ExecutionRequests.Withdrawals {
|
||||
withdrawalRequests[i], err = w.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Withdrawals[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionRequests.Consolidations))
|
||||
for i, c := range b.Body.ExecutionRequests.Consolidations {
|
||||
consolidationRequests[i], err = c.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Consolidations[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
blsChanges, err := SignedBLSChangesToConsensus(b.Body.BLSToExecutionChanges)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.BLSToExecutionChanges")
|
||||
}
|
||||
err = slice.VerifyMaxLength(b.Body.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.BlobKzgCommitments")
|
||||
}
|
||||
blobKzgCommitments := make([][]byte, len(b.Body.BlobKzgCommitments))
|
||||
for i, b := range b.Body.BlobKzgCommitments {
|
||||
kzg, err := bytesutil.DecodeHexWithLength(b, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.BlobKzgCommitments[%d]", i))
|
||||
}
|
||||
blobKzgCommitments[i] = kzg
|
||||
}
|
||||
return &eth.BeaconBlockFulu{
|
||||
Slot: primitives.Slot(slot),
|
||||
ProposerIndex: primitives.ValidatorIndex(proposerIndex),
|
||||
ParentRoot: parentRoot,
|
||||
StateRoot: stateRoot,
|
||||
Body: &eth.BeaconBlockBodyFulu{
|
||||
RandaoReveal: randaoReveal,
|
||||
Eth1Data: &eth.Eth1Data{
|
||||
DepositRoot: depositRoot,
|
||||
DepositCount: depositCount,
|
||||
BlockHash: blockHash,
|
||||
},
|
||||
Graffiti: graffiti,
|
||||
ProposerSlashings: proposerSlashings,
|
||||
AttesterSlashings: attesterSlashings,
|
||||
Attestations: atts,
|
||||
Deposits: deposits,
|
||||
VoluntaryExits: exits,
|
||||
SyncAggregate: &eth.SyncAggregate{
|
||||
SyncCommitteeBits: syncCommitteeBits,
|
||||
SyncCommitteeSignature: syncCommitteeSig,
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
Transactions: txs,
|
||||
Withdrawals: withdrawals,
|
||||
BlobGasUsed: payloadBlobGasUsed,
|
||||
ExcessBlobGas: payloadExcessBlobGas,
|
||||
},
|
||||
BlsToExecutionChanges: blsChanges,
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{
|
||||
Deposits: depositRequests,
|
||||
Withdrawals: withdrawalRequests,
|
||||
Consolidations: consolidationRequests,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *SignedBeaconBlockFulu) ToConsensus() (*eth.SignedBeaconBlockFulu, error) {
|
||||
if b == nil {
|
||||
return nil, errNilValue
|
||||
@@ -3898,7 +3619,7 @@ func (b *BlindedBeaconBlockFulu) ToConsensus() (*eth.BlindedBeaconBlockFulu, err
|
||||
ProposerIndex: primitives.ValidatorIndex(proposerIndex),
|
||||
ParentRoot: parentRoot,
|
||||
StateRoot: stateRoot,
|
||||
Body: &eth.BlindedBeaconBlockBodyFulu{
|
||||
Body: &eth.BlindedBeaconBlockBodyElectra{
|
||||
RandaoReveal: randaoReveal,
|
||||
Eth1Data: &eth.Eth1Data{
|
||||
DepositRoot: depositRoot,
|
||||
@@ -4015,7 +3736,7 @@ func BlindedBeaconBlockFuluFromConsensus(b *eth.BlindedBeaconBlockFulu) (*Blinde
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: hexutil.Encode(b.ParentRoot),
|
||||
StateRoot: hexutil.Encode(b.StateRoot),
|
||||
Body: &BlindedBeaconBlockBodyFulu{
|
||||
Body: &BlindedBeaconBlockBodyElectra{
|
||||
RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
|
||||
Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
|
||||
Graffiti: hexutil.Encode(b.Body.Graffiti),
|
||||
@@ -4047,42 +3768,6 @@ func SignedBlindedBeaconBlockFuluFromConsensus(b *eth.SignedBlindedBeaconBlockFu
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BeaconBlockFuluFromConsensus(b *eth.BeaconBlockFulu) (*BeaconBlockFulu, error) {
|
||||
payload, err := ExecutionPayloadFuluFromConsensus(b.Body.ExecutionPayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blobKzgCommitments := make([]string, len(b.Body.BlobKzgCommitments))
|
||||
for i := range b.Body.BlobKzgCommitments {
|
||||
blobKzgCommitments[i] = hexutil.Encode(b.Body.BlobKzgCommitments[i])
|
||||
}
|
||||
|
||||
return &BeaconBlockFulu{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: hexutil.Encode(b.ParentRoot),
|
||||
StateRoot: hexutil.Encode(b.StateRoot),
|
||||
Body: &BeaconBlockBodyFulu{
|
||||
RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
|
||||
Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
|
||||
Graffiti: hexutil.Encode(b.Body.Graffiti),
|
||||
ProposerSlashings: ProposerSlashingsFromConsensus(b.Body.ProposerSlashings),
|
||||
AttesterSlashings: AttesterSlashingsElectraFromConsensus(b.Body.AttesterSlashings),
|
||||
Attestations: AttsElectraFromConsensus(b.Body.Attestations),
|
||||
Deposits: DepositsFromConsensus(b.Body.Deposits),
|
||||
VoluntaryExits: SignedExitsFromConsensus(b.Body.VoluntaryExits),
|
||||
SyncAggregate: &SyncAggregate{
|
||||
SyncCommitteeBits: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeBits),
|
||||
SyncCommitteeSignature: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeSignature),
|
||||
},
|
||||
ExecutionPayload: payload,
|
||||
BLSToExecutionChanges: SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
ExecutionRequests: ExecutionRequestsFromConsensus(b.Body.ExecutionRequests),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBeaconBlockFulu, error) {
|
||||
block, err := BeaconBlockFuluFromConsensus(b.Block)
|
||||
if err != nil {
|
||||
@@ -4097,4 +3782,5 @@ func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBe
|
||||
var (
|
||||
ExecutionPayloadFuluFromConsensus = ExecutionPayloadDenebFromConsensus
|
||||
ExecutionPayloadHeaderFuluFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
|
||||
BeaconBlockFuluFromConsensus = BeaconBlockElectraFromConsensus
|
||||
)
|
||||
|
||||
@@ -24,3 +24,96 @@ func TestDepositSnapshotFromConsensus(t *testing.T) {
|
||||
require.Equal(t, "0x1234", res.ExecutionBlockHash)
|
||||
require.Equal(t, "67890", res.ExecutionBlockHeight)
|
||||
}
|
||||
|
||||
func TestSignedBLSToExecutionChange_ToConsensus(t *testing.T) {
|
||||
s := &SignedBLSToExecutionChange{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedValidatorRegistration_ToConsensus(t *testing.T) {
|
||||
s := &SignedValidatorRegistration{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedContributionAndProof_ToConsensus(t *testing.T) {
|
||||
s := &SignedContributionAndProof{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestContributionAndProof_ToConsensus(t *testing.T) {
|
||||
c := &ContributionAndProof{
|
||||
Contribution: nil,
|
||||
AggregatorIndex: "invalid",
|
||||
SelectionProof: "",
|
||||
}
|
||||
_, err := c.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedAggregateAttestationAndProof_ToConsensus(t *testing.T) {
|
||||
s := &SignedAggregateAttestationAndProof{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestAggregateAttestationAndProof_ToConsensus(t *testing.T) {
|
||||
a := &AggregateAttestationAndProof{
|
||||
AggregatorIndex: "1",
|
||||
Aggregate: nil,
|
||||
SelectionProof: "",
|
||||
}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestAttestation_ToConsensus(t *testing.T) {
|
||||
a := &Attestation{
|
||||
AggregationBits: "0x10",
|
||||
Data: nil,
|
||||
Signature: "",
|
||||
}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSingleAttestation_ToConsensus(t *testing.T) {
|
||||
s := &SingleAttestation{
|
||||
CommitteeIndex: "1",
|
||||
AttesterIndex: "1",
|
||||
Data: nil,
|
||||
Signature: "",
|
||||
}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedVoluntaryExit_ToConsensus(t *testing.T) {
|
||||
s := &SignedVoluntaryExit{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestProposerSlashing_ToConsensus(t *testing.T) {
|
||||
p := &ProposerSlashing{SignedHeader1: nil, SignedHeader2: nil}
|
||||
_, err := p.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestAttesterSlashing_ToConsensus(t *testing.T) {
|
||||
a := &AttesterSlashing{Attestation1: nil, Attestation2: nil}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestIndexedAttestation_ToConsensus(t *testing.T) {
|
||||
a := &IndexedAttestation{
|
||||
AttestingIndices: []string{"1"},
|
||||
Data: nil,
|
||||
Signature: "invalid",
|
||||
}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
@@ -250,3 +250,17 @@ type ChainHead struct {
|
||||
PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
|
||||
OptimisticStatus bool `json:"optimistic_status"`
|
||||
}
|
||||
|
||||
type GetPendingDepositsResponse struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data []*PendingDeposit `json:"data"`
|
||||
}
|
||||
|
||||
type GetPendingPartialWithdrawalsResponse struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data []*PendingPartialWithdrawal `json:"data"`
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
"receive_attestation.go",
|
||||
"receive_blob.go",
|
||||
"receive_block.go",
|
||||
"receive_data_column.go",
|
||||
"service.go",
|
||||
"tracked_proposer.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
@@ -49,6 +50,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -158,6 +160,7 @@ go_test(
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
|
||||
@@ -33,6 +33,7 @@ var (
|
||||
)
|
||||
|
||||
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
|
||||
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
|
||||
|
||||
// An invalid block is the block that fails state transition based on the core protocol rules.
|
||||
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
|
||||
|
||||
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"kzg.go",
|
||||
"trusted_setup.go",
|
||||
"validation.go",
|
||||
],
|
||||
@@ -12,6 +13,9 @@ go_library(
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/blockchain/kzg/kzg.go (new file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
package kzg
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
)
|
||||
|
||||
// BytesPerBlob is the number of bytes in a single blob.
|
||||
const BytesPerBlob = ckzg4844.BytesPerBlob
|
||||
|
||||
// Blob represents a serialized chunk of data.
|
||||
type Blob [BytesPerBlob]byte
|
||||
|
||||
// BytesPerCell is the number of bytes in a single cell.
|
||||
const BytesPerCell = ckzg4844.BytesPerCell
|
||||
|
||||
// Cell represents a chunk of an encoded Blob.
|
||||
type Cell [BytesPerCell]byte
|
||||
|
||||
// Commitment represent a KZG commitment to a Blob.
|
||||
type Commitment [48]byte
|
||||
|
||||
// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
|
||||
type Proof [48]byte
|
||||
|
||||
// Bytes48 is a 48-byte array.
|
||||
type Bytes48 = ckzg4844.Bytes48
|
||||
|
||||
// Bytes32 is a 32-byte array.
|
||||
type Bytes32 = ckzg4844.Bytes32
|
||||
|
||||
// CellsAndProofs represents the Cells and Proofs corresponding to
|
||||
// a single blob.
|
||||
type CellsAndProofs struct {
|
||||
Cells []Cell
|
||||
Proofs []Proof
|
||||
}
|
||||
|
||||
func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
|
||||
kzgBlob := kzg4844.Blob(*blob)
|
||||
comm, err := kzg4844.BlobToCommitment(&kzgBlob)
|
||||
if err != nil {
|
||||
return Commitment{}, err
|
||||
}
|
||||
return Commitment(comm), nil
|
||||
}
|
||||
|
||||
func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
|
||||
kzgBlob := kzg4844.Blob(*blob)
|
||||
proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment))
|
||||
if err != nil {
|
||||
return [48]byte{}, err
|
||||
}
|
||||
return Proof(proof), nil
|
||||
}
|
||||
|
||||
func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
|
||||
ckzgBlob := (*ckzg4844.Blob)(blob)
|
||||
ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
|
||||
if err != nil {
|
||||
return CellsAndProofs{}, err
|
||||
}
|
||||
|
||||
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
|
||||
}
|
||||
|
||||
func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
|
||||
// Convert `Cell` type to `ckzg4844.Cell`
|
||||
ckzgCells := make([]ckzg4844.Cell, len(cells))
|
||||
for i := range cells {
|
||||
ckzgCells[i] = ckzg4844.Cell(cells[i])
|
||||
}
|
||||
|
||||
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
|
||||
}
|
||||
|
||||
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
|
||||
// Convert `Cell` type to `ckzg4844.Cell`
|
||||
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
|
||||
for i := range partialCells {
|
||||
ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
|
||||
}
|
||||
|
||||
ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
|
||||
if err != nil {
|
||||
return CellsAndProofs{}, err
|
||||
}
|
||||
|
||||
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
|
||||
}
|
||||
|
||||
// Convert cells/proofs to the CellsAndProofs type defined in this package.
|
||||
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
|
||||
if len(ckzgCells) != len(ckzgProofs) {
|
||||
return CellsAndProofs{}, errors.New("different number of cells/proofs")
|
||||
}
|
||||
|
||||
var cells []Cell
|
||||
var proofs []Proof
|
||||
for i := range ckzgCells {
|
||||
cells = append(cells, Cell(ckzgCells[i]))
|
||||
proofs = append(proofs, Proof(ckzgProofs[i]))
|
||||
}
|
||||
|
||||
return CellsAndProofs{
|
||||
Cells: cells,
|
||||
Proofs: proofs,
|
||||
}, nil
|
||||
}
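The helpers above compose end to end: commit to a blob, extend it into cells with one KZG proof per cell, then batch-verify a subset of those cells against the blob's commitment. The following is an illustrative sketch, not part of this diff; it assumes the trusted setup has already been loaded via Start() (see the trusted_setup.go changes further down) and that the package import path follows the file location shown above.

package main

import (
    "fmt"

    // Assumed import path, derived from beacon-chain/blockchain/kzg/kzg.go.
    kzg "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
)

func main() {
    // Load the embedded trusted setup before using the c-kzg backed helpers.
    if err := kzg.Start(); err != nil {
        panic(err)
    }

    // A zero blob is canonical, so it works for illustration.
    var blob kzg.Blob

    commitment, err := kzg.BlobToKZGCommitment(&blob)
    if err != nil {
        panic(err)
    }

    // Extend the blob into cells, one proof per cell.
    cp, err := kzg.ComputeCellsAndKZGProofs(&blob)
    if err != nil {
        panic(err)
    }

    // Batch-verify the first four cells against the blob's commitment.
    const n = 4
    commitments := make([]kzg.Bytes48, n)
    proofs := make([]kzg.Bytes48, n)
    indices := make([]uint64, n)
    cells := make([]kzg.Cell, n)
    for i := 0; i < n; i++ {
        commitments[i] = kzg.Bytes48(commitment)
        proofs[i] = kzg.Bytes48(cp.Proofs[i])
        indices[i] = uint64(i)
        cells[i] = cp.Cells[i]
    }
    ok, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
    fmt.Println(ok, err) // expected: true <nil>
}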
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"encoding/json"
|
||||
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
CKZG "github.com/ethereum/c-kzg-4844/v2/bindings/go"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -12,17 +14,53 @@ var (
|
||||
//go:embed trusted_setup.json
|
||||
embeddedTrustedSetup []byte // 1.2Mb
|
||||
kzgContext *GoKZG.Context
|
||||
kzgLoaded bool
|
||||
)
|
||||
|
||||
type TrustedSetup struct {
|
||||
G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
|
||||
G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
|
||||
G2Monomial [65]GoKZG.G2CompressedHexStr `json:"g2_monomial"`
|
||||
}
|
||||
|
||||
func Start() error {
|
||||
parsedSetup := GoKZG.JSONTrustedSetup{}
|
||||
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
|
||||
trustedSetup := &TrustedSetup{}
|
||||
err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse trusted setup JSON")
|
||||
}
|
||||
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
|
||||
kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
|
||||
SetupG2: trustedSetup.G2Monomial[:],
|
||||
SetupG1Lagrange: trustedSetup.G1Lagrange})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not initialize go-kzg context")
|
||||
}
|
||||
|
||||
// Total byte length of the G1 monomial points, each converted from hex to binary.
|
||||
g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
|
||||
for i, g1 := range &trustedSetup.G1Monomial {
|
||||
copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
|
||||
}
|
||||
// Total byte length of the G1 Lagrange points, each converted from hex to binary.
|
||||
g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
|
||||
for i, g1 := range &trustedSetup.G1Lagrange {
|
||||
copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
|
||||
}
|
||||
// Total byte length of the G2 monomial points, each converted from hex to binary.
|
||||
g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
|
||||
for i, g2 := range &trustedSetup.G2Monomial {
|
||||
copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
|
||||
}
|
||||
if !kzgLoaded {
|
||||
// TODO: Provide a configuration option for this.
|
||||
var precompute uint = 8
|
||||
|
||||
// Free the current trusted setup before running this method. CKZG
|
||||
// panics if the same setup is loaded multiple times.
|
||||
if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
kzgLoaded = true
|
||||
return nil
|
||||
}
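As a sanity check on the buffer-size arithmetic above: each compressed G1 point is a 0x-prefixed string of 96 hex characters, so (len-2)/2 yields 48 bytes per point, and the G1 buffers hold one such 48-byte chunk per setup point. A tiny standalone sketch of the same conversion for a single point (illustrative only; the placeholder string is not a real curve point):

package main

import (
    "fmt"
    "strings"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
    // A compressed G1 point: "0x" followed by 96 hex characters, i.e. 48 bytes.
    g1 := "0x" + strings.Repeat("ab", 48) // placeholder value, not a real curve point
    raw := hexutil.MustDecode(g1)
    fmt.Println(len(g1), (len(g1)-2)/2, len(raw)) // 98 48 48
}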
|
||||
|
||||
File diff suppressed because it is too large
@@ -69,6 +69,22 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Electra {
|
||||
eReqs, err := b.Body().ExecutionRequests()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get execution requests")
|
||||
} else {
|
||||
if len(eReqs.Deposits) > 0 {
|
||||
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
|
||||
}
|
||||
if len(eReqs.Consolidations) > 0 {
|
||||
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
|
||||
}
|
||||
if len(eReqs.Withdrawals) > 0 {
|
||||
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -126,9 +126,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
|
||||
func WithP2PBroadcaster(p p2p.Acceser) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.P2p = p
|
||||
s.cfg.P2P = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,11 +3,13 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
@@ -234,7 +236,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
|
||||
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
|
||||
}
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
@@ -512,28 +516,49 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
if len(expected) > maxBlobsPerBlock {
|
||||
return nil, errMaxBlobsExceeded
|
||||
}
|
||||
indices, err := bs.Indices(root, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices := bs.Summary(root)
|
||||
missing := make(map[uint64]struct{}, len(expected))
|
||||
for i := range expected {
|
||||
ui := uint64(i)
|
||||
if len(expected[i]) > 0 {
|
||||
if !indices[i] {
|
||||
missing[ui] = struct{}{}
|
||||
}
|
||||
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
|
||||
missing[uint64(i)] = struct{}{}
|
||||
}
|
||||
}
|
||||
return missing, nil
|
||||
}
|
||||
|
||||
func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
|
||||
return nil, errMaxDataColumnsExceeded
|
||||
}
|
||||
|
||||
// Get a summary of the data columns stored in the database.
|
||||
summary := bs.Summary(root)
|
||||
|
||||
// Check all expected data columns against the summary.
|
||||
missing := make(map[uint64]bool)
|
||||
for column := range expected {
|
||||
if !summary.HasDataColumnIndex(column) {
|
||||
missing[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return missing, nil
|
||||
}
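At its core, missingDataColumns is a set difference between the custodied (expected) column indices and the indices already present in the on-disk summary. A dependency-free sketch of that check, with plain maps standing in for the BlobStorage summary (illustrative only, not part of this diff):

package main

import "fmt"

// missingColumns mirrors the loop above: every expected (custodied) column
// index that is absent from the stored set is reported as missing.
func missingColumns(expected, stored map[uint64]bool) map[uint64]bool {
    missing := make(map[uint64]bool)
    for column := range expected {
        if !stored[column] {
            missing[column] = true
        }
    }
    return missing
}

func main() {
    expected := map[uint64]bool{0: true, 1: true, 2: true}
    stored := map[uint64]bool{0: true, 2: true}
    fmt.Println(missingColumns(expected, stored)) // map[1:true]
}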
|
||||
|
||||
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if coreTime.PeerDASIsActive(signed.Block().Slot()) {
|
||||
return s.areDataColumnsAvailable(ctx, root, signed)
|
||||
}
|
||||
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
@@ -563,7 +588,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
// get a map of BlobSidecar indices that are not currently available.
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "missing indices")
|
||||
}
|
||||
// If there are no missing indices, all BlobSidecars are available.
|
||||
if len(missing) == 0 {
|
||||
@@ -582,8 +607,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
|
||||
Error("Still waiting for DA check at slot end.")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": len(missing),
|
||||
}).Error("Still waiting for blobs DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
@@ -605,12 +635,178 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
}
|
||||
}
|
||||
|
||||
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": missing,
|
||||
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
|
||||
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
|
||||
output := make([]uint64, 0, len(input))
|
||||
for idx := range input {
|
||||
output = append(output, idx)
|
||||
}
|
||||
slices.Sort[[]uint64](output)
|
||||
return output
|
||||
}
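Because Go randomizes map iteration order, sorting the keys keeps log fields deterministic across runs. A small standalone example of the helper's behavior (illustrative only):

package main

import (
    "fmt"
    "slices"
)

// Same shape as the helper above: flatten the keys of a set-like map into a
// sorted slice so logged index lists are stable.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
    output := make([]uint64, 0, len(input))
    for idx := range input {
        output = append(output, idx)
    }
    slices.Sort(output)
    return output
}

func main() {
    missing := map[uint64]bool{42: true, 3: true, 17: true}
    fmt.Println(uint64MapToSortedSlice(missing)) // [3 17 42]
}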
|
||||
|
||||
func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signedBlock.Version() < version.Fulu {
|
||||
return nil
|
||||
}
|
||||
|
||||
block := signedBlock.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
|
||||
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// If the block has no commitments, there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// All columns to sample need to be available for the block to be considered available.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
// Prevent custody group count to change during the rest of the function.
|
||||
peerdas.CustodyGroupCountMut.RLock()
|
||||
defer peerdas.CustodyGroupCountMut.RUnlock()
|
||||
|
||||
// Get the custody group sampling size for the node.
|
||||
custodyGroupSamplingSize := peerdas.CustodyGroupSamplingSize(peerdas.Actual)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Exit early if the node is not expected to custody any data columns.
|
||||
if len(peerInfo.CustodyColumns) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Subscribe to data columns newly stored in the database.
|
||||
rootIndexChan := make(chan filesystem.RootIndexPair)
|
||||
subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Get the count of data columns we already have in the store.
|
||||
summary := s.blobStorage.Summary(root)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
retrievedDataColumnsCount := uint64(0)
|
||||
for column := range numberOfColumns {
|
||||
if summary.HasDataColumnIndex(column) {
|
||||
retrievedDataColumnsCount++
|
||||
}
|
||||
}
|
||||
|
||||
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a map of data column indices that are not currently available.
|
||||
missingMap, err := missingDataColumns(s.blobStorage, root, peerInfo.CustodyColumns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If there are no missing indices, all data column sidecars are available.
|
||||
// This is the happy path.
|
||||
if len(missingMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signedBlock.Block().Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
missingMapCount := uint64(len(missingMap))
|
||||
|
||||
if missingMapCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
expected interface{} = "all"
|
||||
missing interface{} = "all"
|
||||
)
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
colMapCount := uint64(len(peerInfo.CustodyColumns))
|
||||
|
||||
if colMapCount < numberOfColumns {
|
||||
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
|
||||
}
|
||||
|
||||
if missingMapCount < numberOfColumns {
|
||||
missing = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signedBlock.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": expected,
|
||||
"columnsWaiting": missing,
|
||||
}).Error("Some data columns are still unavailable at slot end")
|
||||
})
|
||||
|
||||
defer nst.Stop()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case rootIndex := <-rootIndexChan:
|
||||
if rootIndex.Root != root {
|
||||
// This is not the root we are looking for.
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a data column we are expecting.
|
||||
if _, ok := missingMap[rootIndex.Index]; ok {
|
||||
retrievedDataColumnsCount++
|
||||
}
|
||||
|
||||
// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
|
||||
// We don't need to wait for the rest of the data columns to declare the block as available.
|
||||
if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the index from the missing map.
|
||||
delete(missingMap, rootIndex.Index)
|
||||
|
||||
// Exit if there is no more missing data columns.
|
||||
if len(missingMap) == 0 {
|
||||
return nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
var missingIndices interface{} = "all"
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
missingIndicesCount := uint64(len(missingMap))
|
||||
|
||||
if missingIndicesCount < numberOfColumns {
|
||||
missingIndices = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
|
||||
}
|
||||
}
|
||||
}
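The early exits above rely on peerdas.CanSelfReconstruct, whose body is not part of this diff. A standalone sketch of the threshold those comments imply, assuming a simple at-least-half rule (the exact boundary used by the real predicate is an assumption here):

package main

import "fmt"

// canSelfReconstruct sketches the reconstruction threshold: once at least half
// of the columns are held, the rest can be recovered via erasure-coding
// reconstruction. The real peerdas predicate may use a different boundary.
func canSelfReconstruct(retrieved, numberOfColumns uint64) bool {
    return 2*retrieved >= numberOfColumns
}

func main() {
    const numberOfColumns = 128 // illustrative value of NUMBER_OF_COLUMNS
    fmt.Println(canSelfReconstruct(63, numberOfColumns)) // false
    fmt.Println(canSelfReconstruct(64, numberOfColumns)) // true
}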
|
||||
|
||||
@@ -697,7 +893,7 @@ func (s *Service) waitForSync() error {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
|
||||
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -552,7 +553,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
|
||||
|
||||
// inserts finalized deposits into our finalized deposit trie, needs to be
|
||||
// called in the background
|
||||
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
|
||||
// Post-Electra: prunes all proofs and pending deposits in the cache
|
||||
func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
|
||||
defer span.End()
|
||||
startTime := time.Now()
|
||||
@@ -563,6 +565,16 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
|
||||
log.WithError(err).Error("could not fetch finalized state")
|
||||
return
|
||||
}
|
||||
|
||||
// Check if we should prune all pending deposits.
|
||||
// In post-Electra (after the legacy deposit mechanism is deprecated),
|
||||
// we can prune all pending deposits in the deposit cache.
|
||||
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
|
||||
if helpers.DepositRequestsStarted(finalizedState) {
|
||||
s.pruneAllPendingDepositsAndProofs(ctx)
|
||||
return
|
||||
}
|
||||
|
||||
// We update the cache up to the last deposit index in the finalized block's state.
|
||||
// We can be confident that these deposits will be included in some block
|
||||
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
|
||||
@@ -591,6 +603,12 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
|
||||
log.WithField("duration", time.Since(startTime).String()).Debugf("Finalized deposit insertion completed at index %d", finalizedEth1DepIdx)
|
||||
}
|
||||
|
||||
// pruneAllPendingDepositsAndProofs prunes all proofs and pending deposits in the cache.
|
||||
func (s *Service) pruneAllPendingDepositsAndProofs(ctx context.Context) {
|
||||
s.cfg.DepositCache.PruneAllPendingDeposits(ctx)
|
||||
s.cfg.DepositCache.PruneAllProofs(ctx)
|
||||
}
|
||||
|
||||
// This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling
|
||||
// fork choice justification routine.
|
||||
func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {
|
||||
|
||||
@@ -723,7 +723,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
Signature: zeroSig[:],
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
|
||||
}
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -759,7 +759,7 @@ func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
|
||||
Signature: zeroSig[:],
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
|
||||
}
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -799,7 +799,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
// Insert 3 deposits before hand.
|
||||
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2, [32]byte{}, 0))
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -810,7 +810,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
|
||||
// Insert New Finalized State with higher deposit count.
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
|
||||
fDeposits, err = depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -2297,7 +2297,7 @@ func TestMissingIndices(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
|
||||
missing, err := missingIndices(bs, c.root, c.expected, 0)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
|
||||
@@ -52,6 +52,12 @@ type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// DataColumnReceiver interface defines the methods of chain service for receiving new
|
||||
// data columns
|
||||
type DataColumnReceiver interface {
|
||||
ReceiveDataColumn(blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
@@ -70,6 +76,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
|
||||
return nil
|
||||
}
|
||||
|
||||
receivedTime := time.Now()
|
||||
s.blockBeingSynced.set(blockRoot)
|
||||
defer s.blockBeingSynced.unset(blockRoot)
|
||||
@@ -78,6 +85,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get block's prestate")
|
||||
@@ -93,10 +101,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Defragment the state before continuing block processing.
|
||||
s.defragmentState(postState)
|
||||
|
||||
@@ -232,12 +242,14 @@ func (s *Service) handleDA(
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
|
||||
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), rob); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
|
||||
}
|
||||
} else {
|
||||
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return 0, errors.Wrap(err, "could not validate blob data availability")
|
||||
return 0, errors.Wrap(err, "is data available")
|
||||
}
|
||||
}
|
||||
daWaitedTime := time.Since(daStartTime)
|
||||
@@ -279,9 +291,10 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
|
||||
go func() {
|
||||
s.sendNewFinalizedEvent(ctx, finalizedState)
|
||||
}()
|
||||
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
go func() {
|
||||
s.insertFinalizedDeposits(depCtx, finalized.Root)
|
||||
s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
|
||||
cancel()
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -455,41 +455,81 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
Root: headRoot[:],
|
||||
}))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
t.Run("pre deposit request", func(t *testing.T) {
|
||||
require.NoError(t, headState.SetEth1DepositIndex(1))
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
// check deposit
|
||||
require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
|
||||
})
|
||||
t.Run("deposit requests started", func(t *testing.T) {
|
||||
require.NoError(t, headState.SetEth1DepositIndex(1))
|
||||
require.NoError(t, headState.SetDepositRequestsStartIndex(1))
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
})
|
||||
|
||||
// check deposit
|
||||
require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
|
||||
}
|
||||
|
||||
beacon-chain/blockchain/receive_data_column.go (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
package blockchain

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ds blocks.VerifiedRODataColumn) error {
	if err := s.blobStorage.SaveDataColumn(ds); err != nil {
		return errors.Wrap(err, "save data column")
	}

	return nil
}
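A hypothetical caller sketch (the function name is made up for illustration): once a gossiped sidecar has been verified, the sync layer can hand it to the chain service through the ReceiveDataColumn method added above.

// persistVerifiedColumn stores a verified data column sidecar via any receiver
// exposing ReceiveDataColumn, such as the blockchain Service above.
func persistVerifiedColumn(receiver interface {
	ReceiveDataColumn(blocks.VerifiedRODataColumn) error
}, vc blocks.VerifiedRODataColumn) error {
	return receiver.ReceiveDataColumn(vc)
}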
|
||||
@@ -33,6 +33,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -80,7 +81,7 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
P2p p2p.Broadcaster
|
||||
P2P p2p.Acceser
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
@@ -105,22 +106,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][]bool
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex map[[32]byte][]bool
|
||||
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if idx >= uint64(maxBlobsPerBlock) {
|
||||
return
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// if idx >= uint64(maxBlobsPerBlock) {
|
||||
// return
|
||||
// }
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
if seen == nil {
|
||||
seen = make([]bool, maxBlobsPerBlock)
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// if seen == nil {
|
||||
// seen = make([]bool, maxBlobsPerBlock)
|
||||
// }
|
||||
if seen[idx] {
|
||||
bn.Unlock()
|
||||
return
|
||||
@@ -131,7 +136,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
@@ -141,12 +148,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
}
|
||||
|
||||
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
bn.Lock()
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
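One consequence of switching seenIndex to a fixed-size array value: reading an entry out of the map yields a copy, so the updated copy has to be written back. A minimal illustration, with 128 standing in for fieldparams.NumberOfColumns:

// markSeen flips one index in the per-root bitmap. Because the map value is an
// array (a value type), the modified copy must be stored back into the map.
func markSeen(seen map[[32]byte][128]bool, root [32]byte, idx uint64) {
	bits := seen[root] // copy of the array value (zero value if the root is absent)
	bits[idx] = true
	seen[root] = bits // write the copy back
}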
|
||||
@@ -172,7 +182,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
|
||||
@@ -97,13 +97,14 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithSlashingPool(slashings.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(fc),
|
||||
WithAttestationService(attService),
|
||||
WithStateGen(stateGen),
|
||||
WithPayloadIDCache(cache.NewPayloadIDCache()),
|
||||
WithClockSynchronizer(startup.NewClockSynchronizer()),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
}
|
||||
|
||||
chainService, err := NewService(ctx, opts...)
|
||||
@@ -587,7 +588,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
|
||||
@@ -19,8 +19,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -45,6 +47,11 @@ type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
type mockAccesser struct {
|
||||
mockBroadcaster
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
@@ -65,6 +72,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
@@ -122,6 +134,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithSyncChecker(mock.MockChecker{}),
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -702,6 +702,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn implements the same method in chain service
|
||||
func (*ChainService) ReceiveDataColumn(_ blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
|
||||
beacon-chain/cache/BUILD.bazel (vendored, 1 line changed)
@@ -84,6 +84,7 @@ go_test(
|
||||
"sync_committee_head_state_test.go",
|
||||
"sync_committee_test.go",
|
||||
"sync_subnet_ids_test.go",
|
||||
"tracked_validators_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"deposit_fetcher.go",
|
||||
"deposit_inserter.go",
|
||||
"deposit_pruner.go",
|
||||
"deposit_tree.go",
|
||||
"deposit_tree_snapshot.go",
|
||||
"merkle_tree.go",
|
||||
@@ -35,6 +36,7 @@ go_test(
|
||||
srcs = [
|
||||
"deposit_cache_test.go",
|
||||
"deposit_fetcher_test.go",
|
||||
"deposit_pruner_test.go",
|
||||
"deposit_tree_snapshot_test.go",
|
||||
"merkle_tree_test.go",
|
||||
"spec_test.go",
|
||||
|
||||
@@ -903,189 +903,6 @@ func TestMin(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 1))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 2))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 99))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 4))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestDepositMap_WorksCorrectly(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -178,52 +178,6 @@ func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneProofs")
|
||||
defer span.End()
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex >= int64(len(c.deposits)) {
|
||||
untilDepositIndex = int64(len(c.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if c.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
c.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
log.Debug("Ignoring 0 deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
|
||||
for _, dp := range c.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
c.pendingDeposits = cleanDeposits
|
||||
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
|
||||
}
|
||||
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
|
||||
@@ -44,67 +44,3 @@ func TestPendingDeposits_OK(t *testing.T) {
|
||||
all := dc.PendingDeposits(context.Background(), nil)
|
||||
assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
expected = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
beacon-chain/cache/depositsnapshot/deposit_pruner.go (vendored, new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneProofs")
|
||||
defer span.End()
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex >= int64(len(c.deposits)) {
|
||||
untilDepositIndex = int64(len(c.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if c.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
c.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PruneAllProofs removes proofs from all deposits.
|
||||
// Once EIP-6110 applies and the legacy deposit mechanism is deprecated,
|
||||
// proofs in deposit snapshot are no longer needed.
|
||||
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
|
||||
func (c *Cache) PruneAllProofs(ctx context.Context) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneAllProofs")
|
||||
defer span.End()
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
for i := len(c.deposits) - 1; i >= 0; i-- {
|
||||
if c.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
c.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
}
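The early break in the backward scan above relies on an invariant: proofs are only ever cleared from index 0 upward (PruneProofs prunes a prefix), so nil proofs always form a prefix of c.deposits. A small illustrative check of that invariant:

// nilProofsFormPrefix reports whether every nil proof precedes every non-nil
// proof, which is what makes the early break in PruneAllProofs safe.
func nilProofsFormPrefix(proofs [][][]byte) bool {
	seenNonNil := false
	for _, p := range proofs {
		if p != nil {
			seenNonNil = true
			continue
		}
		if seenNonNil {
			return false // a nil proof after a non-nil one breaks the invariant
		}
	}
	return true
}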
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
log.Debug("Ignoring 0 deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
|
||||
for _, dp := range c.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
c.pendingDeposits = cleanDeposits
|
||||
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
|
||||
}
|
||||
|
||||
// PruneAllPendingDeposits removes all pending deposits from the cache.
|
||||
// Once EIP-6110 applies and the legacy deposit mechanism is deprecated,
|
||||
// pending deposits in deposit snapshot are no longer needed.
|
||||
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
|
||||
func (c *Cache) PruneAllPendingDeposits(ctx context.Context) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneAllPendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
c.pendingDeposits = make([]*ethpb.DepositContainer, 0)
|
||||
pendingDepositsCount.Set(float64(0))
|
||||
}
|
||||
beacon-chain/cache/depositsnapshot/deposit_pruner_test.go (vendored, new file, 323 lines)
@@ -0,0 +1,323 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
expected = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPruneAllPendingDeposits(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PruneAllPendingDeposits(context.Background())
|
||||
expected := []*ethpb.DepositContainer{}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 1))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 2))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 99))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 4))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneAllProofs(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
dc.PruneAllProofs(context.Background())
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
beacon-chain/cache/interfaces.go (vendored, 11 lines changed)
@@ -12,6 +12,7 @@ import (
|
||||
type DepositCache interface {
|
||||
DepositFetcher
|
||||
DepositInserter
|
||||
DepositPruner
|
||||
}
|
||||
|
||||
// DepositFetcher defines a struct which can retrieve deposit information from a store.
|
||||
@@ -23,8 +24,6 @@ type DepositFetcher interface {
|
||||
InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte)
|
||||
PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
|
||||
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
|
||||
PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
|
||||
PruneProofs(ctx context.Context, untilDepositIndex int64) error
|
||||
FinalizedFetcher
|
||||
}
|
||||
|
||||
@@ -42,6 +41,14 @@ type FinalizedFetcher interface {
|
||||
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
|
||||
}
|
||||
|
||||
// DepositPruner is an interface for pruning deposits and proofs.
|
||||
type DepositPruner interface {
|
||||
PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
|
||||
PruneAllPendingDeposits(ctx context.Context)
|
||||
PruneProofs(ctx context.Context, untilDepositIndex int64) error
|
||||
PruneAllProofs(ctx context.Context)
|
||||
}
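With pruning split into its own interface, a consumer that only prunes (such as the post-Electra path in the blockchain service) can depend on DepositPruner alone. A sketch in the same package:

// pruneEverything drops all pending deposits and proofs once the legacy
// eth1-data poll is deprecated (EIP-6110); it needs nothing beyond DepositPruner.
func pruneEverything(ctx context.Context, p DepositPruner) {
	p.PruneAllPendingDeposits(ctx)
	p.PruneAllProofs(ctx)
}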
|
||||
|
||||
// FinalizedDeposits defines a method to access a merkle tree containing deposits and their indexes.
|
||||
type FinalizedDeposits interface {
|
||||
Deposits() MerkleTree
|
||||
|
||||
beacon-chain/cache/tracked_validators.go (vendored, 138 lines changed)
@@ -1,49 +1,139 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type TrackedValidator struct {
|
||||
Active bool
|
||||
FeeRecipient primitives.ExecutionAddress
|
||||
Index primitives.ValidatorIndex
|
||||
}
|
||||
const (
|
||||
defaultExpiration = 1 * time.Hour
|
||||
cleanupInterval = 15 * time.Minute
|
||||
)
|
||||
|
||||
type TrackedValidatorsCache struct {
|
||||
sync.Mutex
|
||||
trackedValidators map[primitives.ValidatorIndex]TrackedValidator
|
||||
}
|
||||
type (
|
||||
TrackedValidator struct {
|
||||
Active bool
|
||||
FeeRecipient primitives.ExecutionAddress
|
||||
Index primitives.ValidatorIndex
|
||||
}
|
||||
|
||||
TrackedValidatorsCache struct {
|
||||
trackedValidators cache.Cache
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
// Metrics.
|
||||
trackedValidatorsCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "tracked_validators_cache_miss",
|
||||
Help: "The number of tracked validators requests that are not present in the cache.",
|
||||
})
|
||||
|
||||
trackedValidatorsCacheTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "tracked_validators_cache_total",
|
||||
Help: "The total number of tracked validators requests in the cache.",
|
||||
})
|
||||
|
||||
trackedValidatorsCacheCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "tracked_validators_cache_count",
|
||||
Help: "The number of tracked validators in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// NewTrackedValidatorsCache creates a new cache for tracking validators.
|
||||
func NewTrackedValidatorsCache() *TrackedValidatorsCache {
|
||||
return &TrackedValidatorsCache{
|
||||
trackedValidators: make(map[primitives.ValidatorIndex]TrackedValidator),
|
||||
trackedValidators: *cache.New(defaultExpiration, cleanupInterval),
|
||||
}
|
||||
}
|
||||
|
||||
// Validator retrieves a tracked validator from the cache (if present).
|
||||
func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (TrackedValidator, bool) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
val, ok := t.trackedValidators[index]
|
||||
return val, ok
|
||||
trackedValidatorsCacheTotal.Inc()
|
||||
|
||||
key := toCacheKey(index)
|
||||
item, ok := t.trackedValidators.Get(key)
|
||||
if !ok {
|
||||
trackedValidatorsCacheMiss.Inc()
|
||||
return TrackedValidator{}, false
|
||||
}
|
||||
|
||||
val, ok := item.(TrackedValidator)
|
||||
if !ok {
|
||||
logrus.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
|
||||
return TrackedValidator{}, false
|
||||
}
|
||||
|
||||
return val, true
|
||||
}
|
||||
|
||||
// Set adds a tracked validator to the cache.
|
||||
func (t *TrackedValidatorsCache) Set(val TrackedValidator) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.trackedValidators[val.Index] = val
|
||||
key := toCacheKey(val.Index)
|
||||
t.trackedValidators.Set(key, val, cache.DefaultExpiration)
|
||||
}
|
||||
|
||||
// Prune removes all tracked validators from the cache.
|
||||
func (t *TrackedValidatorsCache) Prune() {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
|
||||
t.trackedValidators.Flush()
|
||||
trackedValidatorsCacheCount.Set(0)
|
||||
}
|
||||
|
||||
// Validating returns true if there is at least one tracked validator in the cache.
|
||||
func (t *TrackedValidatorsCache) Validating() bool {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
return len(t.trackedValidators) > 0
|
||||
count := t.trackedValidators.ItemCount()
|
||||
trackedValidatorsCacheCount.Set(float64(count))
|
||||
|
||||
return count > 0
|
||||
}
|
||||
|
||||
// ItemCount returns the number of tracked validators in the cache.
|
||||
func (t *TrackedValidatorsCache) ItemCount() int {
|
||||
count := t.trackedValidators.ItemCount()
|
||||
trackedValidatorsCacheCount.Set(float64(count))
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// Indices returns a map of validator indices that are being tracked.
|
||||
func (t *TrackedValidatorsCache) Indices() map[primitives.ValidatorIndex]bool {
|
||||
items := t.trackedValidators.Items()
|
||||
count := len(items)
|
||||
trackedValidatorsCacheCount.Set(float64(count))
|
||||
|
||||
indices := make(map[primitives.ValidatorIndex]bool, count)
|
||||
|
||||
for cacheKey := range items {
|
||||
index, err := fromCacheKey(cacheKey)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to get validator index from cache key")
|
||||
continue
|
||||
}
|
||||
|
||||
indices[index] = true
|
||||
}
|
||||
|
||||
return indices
|
||||
}
|
||||
|
||||
// toCacheKey creates a cache key from the validator index.
|
||||
func toCacheKey(validatorIndex primitives.ValidatorIndex) string {
|
||||
return strconv.FormatUint(uint64(validatorIndex), 10)
|
||||
}
|
||||
|
||||
// fromCacheKey gets the validator index from the cache key.
|
||||
func fromCacheKey(key string) (primitives.ValidatorIndex, error) {
|
||||
validatorIndex, err := strconv.ParseUint(key, 10, 64)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "parse Uint: %s", key)
|
||||
}
|
||||
|
||||
return primitives.ValidatorIndex(validatorIndex), nil
|
||||
}
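A small round-trip check for the two key helpers above (illustrative only, same package):

// keyRoundTripOK verifies that a validator index survives conversion to a
// cache key and back.
func keyRoundTripOK(idx primitives.ValidatorIndex) bool {
	got, err := fromCacheKey(toCacheKey(idx))
	return err == nil && got == idx
}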
|
||||
|
||||
beacon-chain/cache/tracked_validators_test.go (vendored, new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func mapEqual(a, b map[primitives.ValidatorIndex]bool) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, v := range a {
|
||||
if b[k] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func TestTrackedValidatorsCache(t *testing.T) {
|
||||
vc := NewTrackedValidatorsCache()
|
||||
|
||||
// No validators in cache.
|
||||
require.Equal(t, 0, vc.ItemCount())
|
||||
require.Equal(t, false, vc.Validating())
|
||||
require.Equal(t, 0, len(vc.Indices()))
|
||||
|
||||
_, ok := vc.Validator(41)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// Add some validators (one twice).
|
||||
v42Expected := TrackedValidator{Active: true, FeeRecipient: [20]byte{1}, Index: 42}
|
||||
v43Expected := TrackedValidator{Active: false, FeeRecipient: [20]byte{2}, Index: 43}
|
||||
|
||||
vc.Set(v42Expected)
|
||||
vc.Set(v43Expected)
|
||||
vc.Set(v42Expected)
|
||||
|
||||
// Check if they are in the cache.
|
||||
v42Actual, ok := vc.Validator(42)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, v42Expected, v42Actual)
|
||||
|
||||
v43Actual, ok := vc.Validator(43)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, v43Expected, v43Actual)
|
||||
|
||||
expected := map[primitives.ValidatorIndex]bool{42: true, 43: true}
|
||||
actual := vc.Indices()
|
||||
require.Equal(t, true, mapEqual(expected, actual))
|
||||
|
||||
// Check the item count and if the cache is validating.
|
||||
require.Equal(t, 2, vc.ItemCount())
|
||||
require.Equal(t, true, vc.Validating())
|
||||
|
||||
// Check if a non-existing validator is in the cache.
|
||||
_, ok = vc.Validator(41)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// Prune the cache and test it.
|
||||
vc.Prune()
|
||||
|
||||
_, ok = vc.Validator(41)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
_, ok = vc.Validator(42)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
_, ok = vc.Validator(43)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
require.Equal(t, 0, vc.ItemCount())
|
||||
require.Equal(t, false, vc.Validating())
|
||||
require.Equal(t, 0, len(vc.Indices()))
|
||||
}
|
||||
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
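// VerifyBlockHeaderSignatureUsingCurrentFork verifies the proposer signature of a signed beacon block header,
// retrieving the fork via the epoch of the header's slot rather than from the state's fork data.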
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
|
||||
currentEpoch := slots.ToEpoch(header.Header.Slot)
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
|
||||
// from the above method by not using fork data from the state and instead retrieving it
|
||||
// via the respective epoch.
|
||||
|
||||
@@ -177,9 +177,9 @@ func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000)+1,
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000) + 1,
|
||||
expectedEpoch: 18, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000)-1,
|
||||
expectedConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000) - 1,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,12 @@ const (
|
||||
|
||||
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
|
||||
AttesterSlashingReceived = 8
|
||||
|
||||
// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
|
||||
SingleAttReceived = 9
|
||||
|
||||
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
|
||||
DataColumnSidecarReceived = 10
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -43,7 +49,7 @@ type UnAggregatedAttReceivedData struct {
|
||||
// AggregatedAttReceivedData is the data sent with AggregatedAttReceived events.
|
||||
type AggregatedAttReceivedData struct {
|
||||
// Attestation is the aggregated attestation object.
|
||||
Attestation *ethpb.AggregateAttestationAndProof
|
||||
Attestation ethpb.AggregateAttAndProof
|
||||
}
|
||||
|
||||
// ExitReceivedData is the data sent with ExitReceived events.
|
||||
@@ -77,3 +83,11 @@ type ProposerSlashingReceivedData struct {
|
||||
type AttesterSlashingReceivedData struct {
|
||||
AttesterSlashing ethpb.AttSlashing
|
||||
}
|
||||
|
||||
// SingleAttReceivedData is the data sent with SingleAttReceived events.
|
||||
type SingleAttReceivedData struct {
|
||||
Attestation ethpb.Att
|
||||
}
|
||||
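// DataColumnSidecarReceivedData is the data sent with DataColumnSidecarReceived events.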
type DataColumnSidecarReceivedData struct {
|
||||
DataColumn *blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &ethpb.BeaconStateFulu{
|
||||
s := &ethpb.BeaconStateElectra{
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
|
||||
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := &ethpb.SyncCommittee{
|
||||
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -22,6 +22,15 @@ import (
|
||||
)
|
||||
|
||||
func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.ElectraForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
|
||||
@@ -59,9 +68,31 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
|
||||
l.CheckSyncAggregate(update.SyncAggregate())
|
||||
l.CheckAttestedHeader(update.AttestedHeader())
|
||||
})
|
||||
|
||||
t.Run("Electra", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestElectra(false)
|
||||
|
||||
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update, "update is nil")
|
||||
|
||||
require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")
|
||||
|
||||
l.CheckSyncAggregate(update.SyncAggregate())
|
||||
l.CheckAttestedHeader(update.AttestedHeader())
|
||||
})
|
||||
}
|
||||
|
||||
func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.ElectraForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
|
||||
@@ -356,6 +387,157 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
|
||||
require.DeepSSZEqual(t, execution, updateExecution.Proto(), "Finalized Block Execution is not equal")
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Electra", func(t *testing.T) {
|
||||
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestElectra(false)
|
||||
|
||||
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update, "update is nil")
|
||||
|
||||
require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")
|
||||
|
||||
l.CheckSyncAggregate(update.SyncAggregate())
|
||||
l.CheckAttestedHeader(update.AttestedHeader())
|
||||
|
||||
//zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
finalizedBlockHeader, err := l.FinalizedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update.FinalizedHeader(), "Finalized header is nil")
|
||||
updateFinalizedHeaderBeacon := update.FinalizedHeader().Beacon()
|
||||
require.Equal(t, finalizedBlockHeader.Header.Slot, updateFinalizedHeaderBeacon.Slot, "Finalized header slot is not equal")
|
||||
require.Equal(t, finalizedBlockHeader.Header.ProposerIndex, updateFinalizedHeaderBeacon.ProposerIndex, "Finalized header proposer index is not equal")
|
||||
require.DeepSSZEqual(t, finalizedBlockHeader.Header.ParentRoot, updateFinalizedHeaderBeacon.ParentRoot, "Finalized header parent root is not equal")
|
||||
require.DeepSSZEqual(t, finalizedBlockHeader.Header.StateRoot, updateFinalizedHeaderBeacon.StateRoot, "Finalized header state root is not equal")
|
||||
require.DeepSSZEqual(t, finalizedBlockHeader.Header.BodyRoot, updateFinalizedHeaderBeacon.BodyRoot, "Finalized header body root is not equal")
|
||||
fb, err := update.FinalityBranchElectra()
|
||||
require.NoError(t, err)
|
||||
proof, err := l.AttestedState.FinalizedRootProof(l.Ctx)
|
||||
require.NoError(t, err)
|
||||
for i, leaf := range fb {
|
||||
require.DeepSSZEqual(t, proof[i], leaf[:], "Leaf is not equal")
|
||||
}
|
||||
|
||||
// Check Execution BlockHash
|
||||
payloadInterface, err := l.FinalizedBlock.Block().Body().Execution()
|
||||
require.NoError(t, err)
|
||||
transactionsRoot, err := payloadInterface.TransactionsRoot()
|
||||
if errors.Is(err, consensustypes.ErrUnsupportedField) {
|
||||
transactions, err := payloadInterface.Transactions()
|
||||
require.NoError(t, err)
|
||||
transactionsRootArray, err := ssz.TransactionsRoot(transactions)
|
||||
require.NoError(t, err)
|
||||
transactionsRoot = transactionsRootArray[:]
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
withdrawalsRoot, err := payloadInterface.WithdrawalsRoot()
|
||||
if errors.Is(err, consensustypes.ErrUnsupportedField) {
|
||||
withdrawals, err := payloadInterface.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRootArray, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot = withdrawalsRootArray[:]
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
execution := &v11.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadInterface.ParentHash(),
|
||||
FeeRecipient: payloadInterface.FeeRecipient(),
|
||||
StateRoot: payloadInterface.StateRoot(),
|
||||
ReceiptsRoot: payloadInterface.ReceiptsRoot(),
|
||||
LogsBloom: payloadInterface.LogsBloom(),
|
||||
PrevRandao: payloadInterface.PrevRandao(),
|
||||
BlockNumber: payloadInterface.BlockNumber(),
|
||||
GasLimit: payloadInterface.GasLimit(),
|
||||
GasUsed: payloadInterface.GasUsed(),
|
||||
Timestamp: payloadInterface.Timestamp(),
|
||||
ExtraData: payloadInterface.ExtraData(),
|
||||
BaseFeePerGas: payloadInterface.BaseFeePerGas(),
|
||||
BlockHash: payloadInterface.BlockHash(),
|
||||
TransactionsRoot: transactionsRoot,
|
||||
WithdrawalsRoot: withdrawalsRoot,
|
||||
}
|
||||
updateExecution, err := update.FinalizedHeader().Execution()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, execution, updateExecution.Proto(), "Finalized Block Execution is not equal")
|
||||
})
|
||||
|
||||
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestElectraFinalizedBlockDeneb(false)
|
||||
|
||||
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update, "update is nil")
|
||||
|
||||
require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")
|
||||
|
||||
l.CheckSyncAggregate(update.SyncAggregate())
|
||||
l.CheckAttestedHeader(update.AttestedHeader())
|
||||
|
||||
finalizedBlockHeader, err := l.FinalizedBlock.Header()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, update.FinalizedHeader(), "Finalized header is nil")
|
||||
updateFinalizedHeaderBeacon := update.FinalizedHeader().Beacon()
|
||||
require.Equal(t, finalizedBlockHeader.Header.Slot, updateFinalizedHeaderBeacon.Slot, "Finalized header slot is not equal")
|
||||
require.Equal(t, finalizedBlockHeader.Header.ProposerIndex, updateFinalizedHeaderBeacon.ProposerIndex, "Finalized header proposer index is not equal")
|
||||
require.DeepSSZEqual(t, finalizedBlockHeader.Header.ParentRoot, updateFinalizedHeaderBeacon.ParentRoot, "Finalized header parent root is not equal")
|
||||
require.DeepSSZEqual(t, finalizedBlockHeader.Header.StateRoot, updateFinalizedHeaderBeacon.StateRoot, "Finalized header state root is not equal")
|
||||
require.DeepSSZEqual(t, finalizedBlockHeader.Header.BodyRoot, updateFinalizedHeaderBeacon.BodyRoot, "Finalized header body root is not equal")
|
||||
fb, err := update.FinalityBranchElectra()
|
||||
require.NoError(t, err)
|
||||
proof, err := l.AttestedState.FinalizedRootProof(l.Ctx)
|
||||
require.NoError(t, err)
|
||||
for i, leaf := range fb {
|
||||
require.DeepSSZEqual(t, proof[i], leaf[:], "Leaf is not equal")
|
||||
}
|
||||
|
||||
// Check Execution BlockHash
|
||||
payloadInterface, err := l.FinalizedBlock.Block().Body().Execution()
|
||||
require.NoError(t, err)
|
||||
transactionsRoot, err := payloadInterface.TransactionsRoot()
|
||||
if errors.Is(err, consensustypes.ErrUnsupportedField) {
|
||||
transactions, err := payloadInterface.Transactions()
|
||||
require.NoError(t, err)
|
||||
transactionsRootArray, err := ssz.TransactionsRoot(transactions)
|
||||
require.NoError(t, err)
|
||||
transactionsRoot = transactionsRootArray[:]
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
withdrawalsRoot, err := payloadInterface.WithdrawalsRoot()
|
||||
if errors.Is(err, consensustypes.ErrUnsupportedField) {
|
||||
withdrawals, err := payloadInterface.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRootArray, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot = withdrawalsRootArray[:]
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
execution := &v11.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadInterface.ParentHash(),
|
||||
FeeRecipient: payloadInterface.FeeRecipient(),
|
||||
StateRoot: payloadInterface.StateRoot(),
|
||||
ReceiptsRoot: payloadInterface.ReceiptsRoot(),
|
||||
LogsBloom: payloadInterface.LogsBloom(),
|
||||
PrevRandao: payloadInterface.PrevRandao(),
|
||||
BlockNumber: payloadInterface.BlockNumber(),
|
||||
GasLimit: payloadInterface.GasLimit(),
|
||||
GasUsed: payloadInterface.GasUsed(),
|
||||
Timestamp: payloadInterface.Timestamp(),
|
||||
ExtraData: payloadInterface.ExtraData(),
|
||||
BaseFeePerGas: payloadInterface.BaseFeePerGas(),
|
||||
BlockHash: payloadInterface.BlockHash(),
|
||||
TransactionsRoot: transactionsRoot,
|
||||
WithdrawalsRoot: withdrawalsRoot,
|
||||
}
|
||||
updateExecution, err := update.FinalizedHeader().Execution()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, execution, updateExecution.Proto(), "Finalized Block Execution is not equal")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestLightClient_BlockToLightClientHeader(t *testing.T) {
|
||||
|
||||
67 beacon-chain/core/peerdas/BUILD.bazel (new file)
@@ -0,0 +1,67 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"das_core.go",
|
||||
"info.go",
|
||||
"metrics.go",
|
||||
"p2p_interface.go",
|
||||
"peer_sampling.go",
|
||||
"reconstruction.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"das_core_test.go",
|
||||
"info_test.go",
|
||||
"p2p_interface_test.go",
|
||||
"peer_sampling_test.go",
|
||||
"reconstruction_test.go",
|
||||
"utils_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
399 beacon-chain/core/peerdas/das_core.go (new file)
@@ -0,0 +1,399 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
beaconState "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errCustodyGroupCountTooLarge = errors.New("custody group count too large")
|
||||
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
|
||||
|
||||
// maxUint256 is the maximum value of a uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
type CustodyType int
|
||||
|
||||
const (
|
||||
Target CustodyType = iota
|
||||
Actual
|
||||
)
|
||||
|
||||
// CustodyGroups computes the custody groups the node should participate in for custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_custody_groups
|
||||
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) (map[uint64]bool, error) {
|
||||
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
// Check if the custody group count is larger than the number of custody groups.
|
||||
if custodyGroupCount > numberOfCustodyGroup {
|
||||
return nil, errCustodyGroupCountTooLarge
|
||||
}
|
||||
|
||||
custodyGroups := make(map[uint64]bool, custodyGroupCount)
|
||||
one := uint256.NewInt(1)
|
||||
|
||||
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(custodyGroups)) < custodyGroupCount; currentId.Add(currentId, one) {
|
||||
// Convert to big endian bytes.
|
||||
currentIdBytesBigEndian := currentId.Bytes32()
|
||||
|
||||
// Convert to little endian.
|
||||
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
|
||||
|
||||
// Hash the result.
|
||||
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
|
||||
|
||||
// Get the custody group ID.
|
||||
custodyGroupId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup
|
||||
|
||||
// Add the custody group to the map.
|
||||
custodyGroups[custodyGroupId] = true
|
||||
|
||||
// Overflow prevention.
|
||||
if currentId.Cmp(maxUint256) == 0 {
|
||||
currentId = uint256.NewInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Final check.
|
||||
if uint64(len(custodyGroups)) != custodyGroupCount {
|
||||
return nil, errWrongComputedCustodyGroupCount
|
||||
}
|
||||
|
||||
return custodyGroups, nil
|
||||
}
|
||||
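A short usage sketch: starting from a node ID, the loop above keeps hashing successive IDs until it has collected `custodyGroupCount` distinct group IDs, so the result is deterministic for a given node. The caller below is hypothetical, not part of the package API:
// exampleCustodyGroups is a hypothetical helper, illustrative only.
func exampleCustodyGroups(nodeID enode.ID) (map[uint64]bool, error) {
	// With custodyGroupCount = 4, the returned map holds exactly 4 distinct group IDs.
	return CustodyGroups(nodeID, 4)
}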
|
||||
// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#compute_columns_for_custody_group
|
||||
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
|
||||
|
||||
if custodyGroup > numberOfCustodyGroup {
|
||||
return nil, errCustodyGroupCountTooLarge
|
||||
}
|
||||
|
||||
numberOfColumns := beaconConfig.NumberOfColumns
|
||||
|
||||
columnsPerGroup := numberOfColumns / numberOfCustodyGroup
|
||||
|
||||
columns := make([]uint64, 0, columnsPerGroup)
|
||||
for i := range columnsPerGroup {
|
||||
column := numberOfCustodyGroup*i + custodyGroup
|
||||
columns = append(columns, column)
|
||||
}
|
||||
|
||||
return columns, nil
|
||||
}
|
||||
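As a worked example, assuming mainnet-style values of NumberOfColumns = 128 and NumberOfCustodyGroups = 128 (so columnsPerGroup = 1), custody group g owns exactly column g; with 64 groups instead, group g would own columns {g, 64 + g}. A hypothetical check:
// exampleColumnsForGroup is illustrative only; the concrete values depend on the active BeaconConfig.
func exampleColumnsForGroup() error {
	columns, err := ComputeColumnsForCustodyGroup(3)
	if err != nil {
		return err
	}
	// With 128 columns and 128 custody groups, columns == []uint64{3}.
	_ = columns
	return nil
}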
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#get_data_column_sidecars
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
startTime := time.Now()
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobsCount)
|
||||
|
||||
eg, _ := errgroup.WithContext(context.Background())
|
||||
for i := range blobs {
|
||||
blobIndex := i
|
||||
eg.Go(func() error {
|
||||
blob := &blobs[blobIndex]
|
||||
blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cellsAndProofs[blobIndex] = blobCellsAndProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := &ethpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
dataColumnComputationTime.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
|
||||
func CustodyGroupSamplingSize(ct CustodyType) uint64 {
|
||||
custodyGroupCount := TargetCustodyGroupCount.Get()
|
||||
|
||||
if ct == Actual {
|
||||
custodyGroupCount = ActualCustodyGroupCount()
|
||||
}
|
||||
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
return max(samplesPerSlot, custodyGroupCount)
|
||||
}
|
||||
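In other words, the sampling size never drops below SamplesPerSlot, whichever custody type is requested. A hedged illustration, assuming SamplesPerSlot = 8:
// exampleSamplingSizes is illustrative only.
func exampleSamplingSizes() (uint64, uint64) {
	// With SamplesPerSlot = 8 and a target custody group count of 2, the target sampling
	// size is still 8; with a target of 100, it would be 100.
	return CustodyGroupSamplingSize(Target), CustodyGroupSamplingSize(Actual)
}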
|
||||
// CustodyColumns computes the custody columns from the custody groups.
|
||||
func CustodyColumns(custodyGroups map[uint64]bool) (map[uint64]bool, error) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
custodyGroupCount := len(custodyGroups)
|
||||
|
||||
// Compute the columns for each custody group.
|
||||
columns := make(map[uint64]bool, custodyGroupCount)
|
||||
for group := range custodyGroups {
|
||||
if group >= numberOfCustodyGroups {
|
||||
return nil, errCustodyGroupCountTooLarge
|
||||
}
|
||||
|
||||
groupColumns, err := ComputeColumnsForCustodyGroup(group)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute columns for custody group")
|
||||
}
|
||||
|
||||
for _, column := range groupColumns {
|
||||
columns[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns, nil
|
||||
}
|
||||
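Putting the two previous helpers together, a node's custody columns are the union of the columns of every custody group it is assigned. A hypothetical end-to-end sketch:
// exampleNodeCustodyColumns is a hypothetical helper chaining CustodyGroups and CustodyColumns.
func exampleNodeCustodyColumns(nodeID enode.ID, groupCount uint64) (map[uint64]bool, error) {
	groups, err := CustodyGroups(nodeID, groupCount)
	if err != nil {
		return nil, errors.Wrap(err, "custody groups")
	}
	return CustodyColumns(groups)
}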
|
||||
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#validator-custody
|
||||
func ValidatorsCustodyRequirement(state beaconState.BeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
|
||||
totalNodeBalance := uint64(0)
|
||||
for index := range validatorsIndex {
|
||||
balance, err := state.BalanceAtIndex(index)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "balance at index for validator index %v", index)
|
||||
}
|
||||
|
||||
totalNodeBalance += balance
|
||||
}
|
||||
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
|
||||
validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
|
||||
balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
|
||||
|
||||
count := totalNodeBalance / balancePerAdditionalCustodyGroup
|
||||
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
|
||||
}
|
||||
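As a worked example, using the values the tests below rely on (ValidatorCustodyRequirement = 8, BalancePerAdditionalCustodyGroup = 32 ETH in Gwei, NumberOfCustodyGroups = 128): 100 validators at 32 ETH each give a total balance of 3200 ETH, so count = 3200 / 32 = 100 and the result is min(max(100, 8), 128) = 100; a single validator gives a raw count of 1, which is floored to 8, and 1000 validators give 1000, which is capped at 128.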
|
||||
// Blobs extracts blobs from `dataColumnsSidecar`.
|
||||
// This can be seen as the reciprocal function of DataColumnSidecars.
|
||||
// `dataColumnsSidecar` needs to contain the data columns corresponding to the non-extended matrix,
|
||||
// else an error will be returned.
|
||||
// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.)
|
||||
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
|
||||
columnCount := fieldparams.NumberOfColumns
|
||||
|
||||
neededColumnCount := columnCount / 2
|
||||
|
||||
// Check if all needed columns are present.
|
||||
sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
|
||||
for i := range dataColumnsSidecar {
|
||||
dataColumnSideCar := dataColumnsSidecar[i]
|
||||
columnIndex := dataColumnSideCar.ColumnIndex
|
||||
|
||||
if columnIndex < uint64(neededColumnCount) {
|
||||
sliceIndexFromColumnIndex[columnIndex] = i
|
||||
}
|
||||
}
|
||||
|
||||
actualColumnCount := len(sliceIndexFromColumnIndex)
|
||||
|
||||
// Get missing columns.
|
||||
if actualColumnCount < neededColumnCount {
|
||||
missingColumns := make(map[int]bool, neededColumnCount-actualColumnCount)
|
||||
for i := range neededColumnCount {
|
||||
if _, ok := sliceIndexFromColumnIndex[uint64(i)]; !ok {
|
||||
missingColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
missingColumnsSlice := make([]int, 0, len(missingColumns))
|
||||
for i := range missingColumns {
|
||||
missingColumnsSlice = append(missingColumnsSlice, i)
|
||||
}
|
||||
|
||||
slices.Sort[[]int](missingColumnsSlice)
|
||||
return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
|
||||
firstDataColumnSidecar := dataColumnsSidecar[0]
|
||||
|
||||
blobCount := uint64(len(firstDataColumnSidecar.DataColumn))
|
||||
|
||||
// Check that all columns have the same length.
|
||||
for i := range dataColumnsSidecar {
|
||||
if uint64(len(dataColumnsSidecar[i].DataColumn)) != blobCount {
|
||||
return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].DataColumn))
|
||||
}
|
||||
}
|
||||
|
||||
// Reconstruct verified RO blobs from columns.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Populate and filter indices.
|
||||
indicesSlice := populateAndFilterIndices(indices, blobCount)
|
||||
|
||||
for _, blobIndex := range indicesSlice {
|
||||
var blob kzg.Blob
|
||||
|
||||
// Compute the content of the blob.
|
||||
for columnIndex := range neededColumnCount {
|
||||
sliceIndex, ok := sliceIndexFromColumnIndex[uint64(columnIndex)]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
|
||||
}
|
||||
|
||||
dataColumnSideCar := dataColumnsSidecar[sliceIndex]
|
||||
cell := dataColumnSideCar.DataColumn[blobIndex]
|
||||
|
||||
for i := 0; i < len(cell); i++ {
|
||||
blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the blob KZG commitment.
|
||||
blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])
|
||||
|
||||
// Compute the blob KZG proof.
|
||||
blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute blob KZG proof")
|
||||
}
|
||||
|
||||
blobSidecar := &ethpb.BlobSidecar{
|
||||
Index: blobIndex,
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: firstDataColumnSidecar.SignedBlockHeader,
|
||||
CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new RO blob")
|
||||
}
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
return verifiedROBlobs, nil
|
||||
}
|
||||
|
||||
// populateAndFilterIndices returns a sorted slice of indices, setting all indices if none are provided,
|
||||
// and filtering out indices higher than the blob count.
|
||||
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
|
||||
// If no indices are provided, provide all blobs.
|
||||
if len(indices) == 0 {
|
||||
for i := range blobCount {
|
||||
indices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Filter out indices higher than the blob count.
|
||||
filteredIndices := make(map[uint64]bool, len(indices))
|
||||
for i := range indices {
|
||||
if i < blobCount {
|
||||
filteredIndices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Transform set to slice.
|
||||
indicesSlice := make([]uint64, 0, len(filteredIndices))
|
||||
for i := range filteredIndices {
|
||||
indicesSlice = append(indicesSlice, i)
|
||||
}
|
||||
|
||||
// Sort the indices.
|
||||
slices.Sort[[]uint64](indicesSlice)
|
||||
|
||||
return indicesSlice
|
||||
}
|
||||
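A small sketch of the helper's behavior (note that when the caller passes an empty map, the function fills that same map in place before filtering):
// examplePopulateAndFilterIndices is illustrative only.
func examplePopulateAndFilterIndices() []uint64 {
	// With blobCount = 3 and an explicit request for blobs 1 and 5,
	// index 5 is filtered out and the result is []uint64{1}.
	return populateAndFilterIndices(map[uint64]bool{1: true, 5: true}, 3)
}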
244 beacon-chain/core/peerdas/das_core_test.go (new file)
@@ -0,0 +1,244 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.Blob{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestBlobs(t *testing.T) {
|
||||
blobsIndice := map[uint64]bool{}
|
||||
|
||||
almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
|
||||
for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
|
||||
almostAllColumns = append(almostAllColumns, &ethpb.DataColumnSidecar{
|
||||
ColumnIndex: uint64(i),
|
||||
})
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
input []*ethpb.DataColumnSidecar
|
||||
expected []*blocks.VerifiedROBlob
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "empty input",
|
||||
input: []*ethpb.DataColumnSidecar{},
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
|
||||
},
|
||||
{
|
||||
name: "missing columns",
|
||||
input: almostAllColumns,
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := peerdas.Blobs(blobsIndice, tc.input)
|
||||
if tc.err != nil {
|
||||
require.Equal(t, tc.err.Error(), err.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.DeepSSZEqual(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
|
||||
const blobCount = 5
|
||||
blobsIndex := map[uint64]bool{}
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Generate random blobs and their corresponding commitments and proofs.
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
|
||||
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
// Create a random blob.
|
||||
blob := getRandBlob(int64(blobIndex))
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
// Generate a blobKZGCommitment for the blob.
|
||||
blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
|
||||
blobKzgProofs = append(blobKzgProofs, proof)
|
||||
}
|
||||
|
||||
// Set the commitments into the block.
|
||||
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
|
||||
for _, blobKZGCommitment := range blobKzgCommitments {
|
||||
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
|
||||
}
|
||||
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
|
||||
|
||||
// Generate verified RO blobs.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
|
||||
require.NoError(t, err)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
blob := blobs[blobIndex]
|
||||
blobKZGCommitment := blobKzgCommitments[blobIndex]
|
||||
blobKzgProof := blobKzgProofs[blobIndex]
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
blobSidecar := &ethpb.BlobSidecar{
|
||||
Index: uint64(blobIndex),
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: signedBeaconBlockHeader,
|
||||
CommitmentInclusionProof: commitmentInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
// Compute data columns sidecars from the signed beacon block and from the blobs.
|
||||
dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute the blobs from the data columns sidecar.
|
||||
roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the blobs are the same.
|
||||
require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
|
||||
}
|
||||
|
||||
func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
count uint64
|
||||
expected uint64
|
||||
}{
|
||||
{name: "0 validators", count: 0, expected: 8},
|
||||
{name: "1 validator", count: 1, expected: 8},
|
||||
{name: "8 validators", count: 8, expected: 8},
|
||||
{name: "9 validators", count: 9, expected: 9},
|
||||
{name: "100 validators", count: 100, expected: 100},
|
||||
{name: "128 validators", count: 128, expected: 128},
|
||||
{name: "129 validators", count: 129, expected: 128},
|
||||
{name: "1000 validators", count: 1000, expected: 128},
|
||||
}
|
||||
|
||||
const balance = uint64(32_000_000_000)
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
balances := make([]uint64, 0, tc.count)
|
||||
for range tc.count {
|
||||
balances = append(balances, balance)
|
||||
}
|
||||
|
||||
validatorsIndex := make(map[primitives.ValidatorIndex]bool)
|
||||
for i := range tc.count {
|
||||
validatorsIndex[primitives.ValidatorIndex(i)] = true
|
||||
}
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateElectra{Balances: balances})
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyGroupSamplingSize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
custodyType peerdas.CustodyType
|
||||
validatorsCustodyRequirement uint64
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "target, lower than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 2,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "target, higher than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "actual, lower than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 3,
|
||||
toAdvertiseCustodyGroupCount: 4,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "actual, higher than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 100,
|
||||
toAdvertiseCustodyGroupCount: 101,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set the validators custody requirement for target custody group count.
|
||||
peerdas.TargetCustodyGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
peerdas.ToAdvertiseCustodyGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Compute the custody group sampling size.
|
||||
actual := peerdas.CustodyGroupSamplingSize(tc.custodyType)
|
||||
|
||||
// Check the result.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
184 beacon-chain/core/peerdas/info.go (new file)
@@ -0,0 +1,184 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
// info contains all useful peerDAS related information regarding a peer.
|
||||
type (
|
||||
info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
targetCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
validatorsCustodyRequirement uint64
|
||||
}
|
||||
|
||||
toAdverstiseCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
value uint64
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
nodeInfoCacheSize = 200
|
||||
nodeInfoCachKeySize = 32 + 8
|
||||
)
|
||||
|
||||
var (
|
||||
// CustodyGroupCountMut is a mutex callers can hold to ensure that neither
|
||||
// TargetCustodyGroupCount nor ToAdvertiseCustodyGroupCount is modified concurrently.
|
||||
// (Holding this mutex is not required for data protection.)
|
||||
CustodyGroupCountMut sync.RWMutex
|
||||
|
||||
// TargetCustodyGroupCount represents the target number of custody groups we should custody
|
||||
// regarding the validators we are tracking.
|
||||
TargetCustodyGroupCount targetCustodyGroupCount
|
||||
|
||||
// ToAdvertiseCustodyGroupCount represents the number of custody groups to advertise to the network.
|
||||
ToAdvertiseCustodyGroupCount toAdverstiseCustodyGroupCount
|
||||
|
||||
nodeInfoCacheMut sync.Mutex
|
||||
nodeInfoCache *lru.Cache
|
||||
)
|
||||
|
||||
// Info returns the peerDAS information for a given nodeID and custodyGroupCount.
|
||||
// It returns a boolean indicating if the peer info was already in the cache and an error if any.
|
||||
func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
|
||||
// Create a new cache if it doesn't exist.
|
||||
if err := createInfoCacheIfNeeded(); err != nil {
|
||||
return nil, false, errors.Wrap(err, "create cache if needed")
|
||||
}
|
||||
|
||||
// Compute the key.
|
||||
key := computeInfoCacheKey(nodeID, custodyGroupCount)
|
||||
|
||||
// If the value is already in the cache, return it.
|
||||
if value, ok := nodeInfoCache.Get(key); ok {
|
||||
peerInfo, ok := value.(*info)
|
||||
if !ok {
|
||||
return nil, false, errors.New("failed to cast peer info (should never happen)")
|
||||
}
|
||||
|
||||
return peerInfo, true, nil
|
||||
}
|
||||
|
||||
// The peer info is not in the cache, compute it.
|
||||
// Compute custody groups.
|
||||
custodyGroups, err := CustodyGroups(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "custody groups")
|
||||
}
|
||||
|
||||
// Compute custody columns.
|
||||
custodyColumns, err := CustodyColumns(custodyGroups)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "custody columns")
|
||||
}
|
||||
|
||||
// Compute data columns subnets.
|
||||
dataColumnsSubnets := DataColumnSubnets(custodyColumns)
|
||||
|
||||
result := &info{
|
||||
CustodyGroups: custodyGroups,
|
||||
CustodyColumns: custodyColumns,
|
||||
DataColumnsSubnets: dataColumnsSubnets,
|
||||
}
|
||||
|
||||
// Add the result to the cache.
|
||||
nodeInfoCache.Add(key, result)
|
||||
|
||||
return result, false, nil
|
||||
}
|
||||
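A hedged usage sketch: the first call for a given (nodeID, custodyGroupCount) pair computes groups, columns, and subnets and stores them in the LRU cache; later calls return the cached struct with the boolean set to true (this mirrors the behavior exercised by TestInfo below):
// exampleInfo is illustrative only.
func exampleInfo(nodeID enode.ID, custodyGroupCount uint64) error {
	peerInfo, cached, err := Info(nodeID, custodyGroupCount)
	if err != nil {
		return errors.Wrap(err, "info")
	}
	_ = cached // false on the first call, true afterwards
	_ = peerInfo.DataColumnsSubnets
	return nil
}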
|
||||
// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
|
||||
func createInfoCacheIfNeeded() error {
|
||||
nodeInfoCacheMut.Lock()
|
||||
defer nodeInfoCacheMut.Unlock()
|
||||
|
||||
if nodeInfoCache == nil {
|
||||
c, err := lru.New(nodeInfoCacheSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "lru new")
|
||||
}
|
||||
|
||||
nodeInfoCache = c
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// computeInfoCacheKey returns a unique key for a node and its custodyGroupCount.
|
||||
func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCachKeySize]byte {
|
||||
var key [nodeInfoCachKeySize]byte
|
||||
|
||||
copy(key[:32], nodeID[:])
|
||||
binary.BigEndian.PutUint64(key[32:], custodyGroupCount)
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// SetValidatorsCustodyRequirement sets the validators custody requirement.
|
||||
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
|
||||
tcgc.mut.Lock()
|
||||
defer tcgc.mut.Unlock()
|
||||
|
||||
tcgc.validatorsCustodyRequirement = value
|
||||
}
|
||||
|
||||
// Get returns the target number of custody groups we should participate in for custody.
|
||||
func (tcgc *targetCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
tcgc.mut.RLock()
|
||||
defer tcgc.mut.RUnlock()
|
||||
|
||||
// If no validators are tracked, return the default custody requirement.
|
||||
if tcgc.validatorsCustodyRequirement == 0 {
|
||||
return params.BeaconConfig().CustodyRequirement
|
||||
}
|
||||
|
||||
// Return the validators custody requirement.
|
||||
return tcgc.validatorsCustodyRequirement
|
||||
}
|
||||
|
||||
// Set sets the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
|
||||
tacgc.mut.Lock()
|
||||
defer tacgc.mut.Unlock()
|
||||
|
||||
tacgc.value = value
|
||||
}
|
||||
|
||||
// Get returns the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
tacgc.mut.RLock()
|
||||
defer tacgc.mut.RUnlock()
|
||||
|
||||
return max(tacgc.value, custodyRequirement)
|
||||
}
|
||||
|
||||
// ActualCustodyGroupCount returns the actual custody group count.
|
||||
func ActualCustodyGroupCount() uint64 {
|
||||
return min(TargetCustodyGroupCount.Get(), ToAdvertiseCustodyGroupCount.Get())
|
||||
}
|
||||
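In short, the advertised count acts as a ceiling on the target count. A hedged example, assuming the node is not subscribed to all subnets and CustodyRequirement = 4:
// exampleActualCustodyGroupCount is illustrative only.
func exampleActualCustodyGroupCount() uint64 {
	TargetCustodyGroupCount.SetValidatorsCustodyRequirement(100)
	ToAdvertiseCustodyGroupCount.Set(64)
	// Target is 100, advertised is max(64, 4) = 64, so the actual count is min(100, 64) = 64.
	return ActualCustodyGroupCount()
}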
128 beacon-chain/core/peerdas/info_test.go (new file)
@@ -0,0 +1,128 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestInfo(t *testing.T) {
|
||||
nodeID := enode.ID{}
|
||||
custodyGroupCount := uint64(7)
|
||||
|
||||
expectedCustodyGroup := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
|
||||
expectedCustodyColumns := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
|
||||
expectedDataColumnsSubnets := map[uint64]bool{1: true, 17: true, 19: true, 42: true, 75: true, 87: true, 102: true}
|
||||
|
||||
for _, cached := range []bool{false, true} {
|
||||
actual, ok, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cached, ok)
|
||||
require.DeepEqual(t, expectedCustodyGroup, actual.CustodyGroups)
|
||||
require.DeepEqual(t, expectedCustodyColumns, actual.CustodyColumns)
|
||||
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllSubnets bool
|
||||
validatorsCustodyRequirement uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all subnets",
|
||||
subscribeToAllSubnets: true,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "no validators attached",
|
||||
subscribeToAllSubnets: false,
|
||||
validatorsCustodyRequirement: 0,
|
||||
expected: 4,
|
||||
},
|
||||
{
|
||||
name: "some validators attached",
|
||||
subscribeToAllSubnets: false,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllSubnets {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
// Set the validators custody requirement.
|
||||
peerdas.TargetCustodyGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Get the target custody group count.
|
||||
actual := peerdas.TargetCustodyGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToAdvertiseCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllSubnets bool
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all subnets",
|
||||
subscribeToAllSubnets: true,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "higher than custody requirement",
|
||||
subscribeToAllSubnets: false,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "lower than custody requirement",
|
||||
subscribeToAllSubnets: false,
|
||||
toAdvertiseCustodyGroupCount: 1,
|
||||
expected: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllSubnets {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
peerdas.ToAdvertiseCustodyGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Get the to advertise custody group count.
|
||||
actual := peerdas.ToAdvertiseCustodyGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
14 beacon-chain/core/peerdas/metrics.go (new file)
@@ -0,0 +1,14 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var dataColumnComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_data_column_sidecar_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute data column sidecars from blobs.",
|
||||
Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
|
||||
},
|
||||
)
|
||||
126 beacon-chain/core/peerdas/p2p_interface.go (new file)
@@ -0,0 +1,126 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodyGroupCountEnrKey = "cgc"
|
||||
)
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errRecordNil = errors.New("record is nil")
|
||||
errCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
|
||||
errIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#the-discovery-domain-discv5
|
||||
type Cgc uint64
|
||||
|
||||
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
|
||||
|
||||
// VerifyDataColumnsSidecarKZGProofs verifies the provided KZG Proofs of data columns.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, error) {
|
||||
// Retrieve the number of columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Compute the total count.
|
||||
count := 0
|
||||
for _, sidecar := range sidecars {
|
||||
count += len(sidecar.DataColumn)
|
||||
}
|
||||
|
||||
commitments := make([]kzg.Bytes48, 0, count)
|
||||
indices := make([]uint64, 0, count)
|
||||
cells := make([]kzg.Cell, 0, count)
|
||||
proofs := make([]kzg.Bytes48, 0, count)
|
||||
|
||||
for _, sidecar := range sidecars {
|
||||
// Check that the column index is not too large.
|
||||
if sidecar.ColumnIndex >= numberOfColumns {
|
||||
return false, errIndexTooLarge
|
||||
}
|
||||
|
||||
// Check if the KZG commitments size and data column size match.
|
||||
if len(sidecar.DataColumn) != len(sidecar.KzgCommitments) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
// Check if the KZG proofs size and data column size match.
|
||||
if len(sidecar.DataColumn) != len(sidecar.KzgProof) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
for i := range sidecar.DataColumn {
|
||||
commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
|
||||
indices = append(indices, sidecar.ColumnIndex)
|
||||
cells = append(cells, kzg.Cell(sidecar.DataColumn[i]))
|
||||
proofs = append(proofs, kzg.Bytes48(sidecar.KzgProof[i]))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the whole batch at once.
|
||||
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify cell KZG proof batch")
|
||||
}
|
||||
|
||||
return verified, nil
|
||||
}
|
||||
|
||||
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
return columnIndex % dataColumnSidecarSubnetCount
|
||||
}
|
||||
|
||||
// DataColumnSubnets computes the subnets for the data columns.
|
||||
func DataColumnSubnets(dataColumns map[uint64]bool) map[uint64]bool {
|
||||
subnets := make(map[uint64]bool, len(dataColumns))
|
||||
|
||||
for column := range dataColumns {
|
||||
subnet := ComputeSubnetForDataColumnSidecar(column)
|
||||
subnets[subnet] = true
|
||||
}
|
||||
|
||||
return subnets
|
||||
}
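A quick illustration of the two helpers above. The subnet count of 64 used here is purely an assumption for the example, not a value taken from this change:

// Illustration only (assuming DataColumnSidecarSubnetCount == 64):
//   ComputeSubnetForDataColumnSidecar(3)  == 3
//   ComputeSubnetForDataColumnSidecar(67) == 67 % 64 == 3
//   DataColumnSubnets(map[uint64]bool{3: true, 67: true}) == map[uint64]bool{3: true}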
|
||||
|
||||
// ComputeCustodyGroupForColumn computes the custody group for a given column.
|
||||
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
|
||||
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfColumns := beaconConfig.NumberOfColumns
|
||||
|
||||
if columnIndex >= numberOfColumns {
|
||||
return 0, errIndexTooLarge
|
||||
}
|
||||
|
||||
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
|
||||
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
|
||||
|
||||
return columnIndex / columnsPerGroup, nil
|
||||
}
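Worked example for the function above, assuming a configuration where NumberOfColumns and NumberOfCustodyGroups are both 128 (an assumption about the configuration, not something introduced by this change). In that case columnsPerGroup is 1, so the custody group index is simply the column index:

// With 128 columns and 128 custody groups, columnsPerGroup == 1:
//   ComputeCustodyGroupForColumn(42)  == (42, nil)
//   ComputeCustodyGroupForColumn(200) == (0, errIndexTooLarge), since 200 >= NumberOfColumns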
|
||||
|
||||
// CustodyGroupCountFromRecord extracts the custody group count from an ENR record.
|
||||
func CustodyGroupCountFromRecord(record *enr.Record) (uint64, error) {
|
||||
if record == nil {
|
||||
return 0, errRecordNil
|
||||
}
|
||||
|
||||
// Load the `cgc`
|
||||
var cgc Cgc
|
||||
if err := record.Load(&cgc); err != nil {
|
||||
return 0, errCannotLoadCustodyGroupCount
|
||||
}
|
||||
|
||||
return uint64(cgc), nil
|
||||
}
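A minimal usage sketch tying the Cgc ENR entry and CustodyGroupCountFromRecord together (it mirrors TestCustodyGroupCountFromRecord further down in this diff; the count of 8 and the helper name are arbitrary choices for illustration):

// Hypothetical helper, not part of this change.
func advertiseAndReadCgc() (uint64, error) {
	record := &enr.Record{}
	record.Set(peerdas.Cgc(8)) // advertise that this node custodies 8 groups
	return peerdas.CustodyGroupCountFromRecord(record) // returns 8 on success
}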
|
||||
56 beacon-chain/core/peerdas/p2p_interface_test.go (new file)
@@ -0,0 +1,56 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
var (
|
||||
comms [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := int64(0); i < 6; i++ {
|
||||
blob := getRandBlob(i)
|
||||
commitment, _, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
comms = append(comms, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = comms
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, sidecar := range sCars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromRecord(t *testing.T) {
|
||||
const expected uint64 = 7
|
||||
|
||||
// Create an Ethereum record.
|
||||
record := &enr.Record{}
|
||||
record.Set(peerdas.Cgc(expected))
|
||||
|
||||
actual, err := peerdas.CustodyGroupCountFromRecord(record)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
56 beacon-chain/core/peerdas/peer_sampling.go (new file)
@@ -0,0 +1,56 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/peer-sampling.md#get_extended_sample_count
|
||||
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
|
||||
// Retrieve the columns count
|
||||
columnsCount := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If half of the columns are missing, we are able to reconstruct the data.
|
||||
// If half of the columns + 1 are missing, we are not able to reconstruct the data.
|
||||
// This is the smallest worst case.
|
||||
worstCaseMissing := columnsCount/2 + 1
|
||||
|
||||
// Compute the false positive threshold.
|
||||
falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)
|
||||
|
||||
var sampleCount uint64
|
||||
|
||||
// Finally, compute the extended sample count.
|
||||
for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
|
||||
if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return sampleCount
|
||||
}
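Example values for the function above, taken from the test table further down in this diff (which runs with the default 128-column configuration):

//   ExtendedSampleCount(16, 0) == 16
//   ExtendedSampleCount(16, 2) == 24
//   ExtendedSampleCount(16, 8) == 40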
|
||||
|
||||
// HypergeomCDF computes the hypergeometric cumulative distribution function.
|
||||
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
|
||||
func HypergeomCDF(k, M, n, N uint64) float64 {
|
||||
denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
|
||||
denominator := new(big.Float).SetInt(denominatorInt)
|
||||
|
||||
rBig := big.NewFloat(0)
|
||||
|
||||
for i := uint64(0); i < k+1; i++ {
|
||||
a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
|
||||
b := new(big.Int).Binomial(int64(M-n), int64(N-i))
|
||||
numeratorInt := new(big.Int).Mul(a, b)
|
||||
numerator := new(big.Float).SetInt(numeratorInt)
|
||||
item := new(big.Float).Quo(numerator, denominator)
|
||||
rBig.Add(rBig, item)
|
||||
}
|
||||
|
||||
r, _ := rBig.Float64()
|
||||
|
||||
return r
|
||||
}
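For reference, HypergeomCDF above evaluates the standard hypergeometric cumulative distribution function. With M the population size, n the number of successes in the population, N the sample size, and k the cutoff, the sum it computes is:

\Pr(X \le k) \;=\; \sum_{i=0}^{k} \frac{\binom{n}{i}\,\binom{M-n}{N-i}}{\binom{M}{N}}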
|
||||
60 beacon-chain/core/peerdas/peer_sampling_test.go (new file)
@@ -0,0 +1,60 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestExtendedSampleCount(t *testing.T) {
|
||||
const samplesPerSlot = 16
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
allowedMissings uint64
|
||||
extendedSampleCount uint64
|
||||
}{
|
||||
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
|
||||
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
|
||||
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
|
||||
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
|
||||
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
|
||||
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
|
||||
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
|
||||
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
|
||||
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
|
||||
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
|
||||
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
|
||||
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
|
||||
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
|
||||
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
|
||||
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
|
||||
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
|
||||
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
|
||||
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
|
||||
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
|
||||
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
|
||||
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
|
||||
require.Equal(t, tc.extendedSampleCount, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHypergeomCDF(t *testing.T) {
|
||||
// Hypergeometric distribution: https://en.wikipedia.org/wiki/Hypergeometric_distribution
// Parameters used below: population size M=128, successes in population n=65, sample size N=16, successes in sample k=5.
// Expected result: ~0.0797
|
||||
const (
|
||||
expected = 0.0796665913283742
|
||||
margin = 0.000001
|
||||
)
|
||||
|
||||
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
|
||||
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
|
||||
}
|
||||
139 beacon-chain/core/peerdas/reconstruction.go (new file)
@@ -0,0 +1,139 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// CanSelfReconstruct returns true if the node can self-reconstruct all the data columns from its custody group count.
|
||||
func CanSelfReconstruct(custodyGroupCount uint64) bool {
|
||||
total := params.BeaconConfig().NumberOfCustodyGroups
|
||||
// If total is odd, then we need total/2 + 1 custody groups to reconstruct.
// If total is even, then we need total/2 custody groups to reconstruct.
custodyGroupsNeeded := total/2 + total%2
|
||||
return custodyGroupCount >= custodyGroupsNeeded
|
||||
}
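A quick arithmetic check of the threshold above, matching the TestCanSelfReconstruct cases later in this diff:

// total = 64 -> custodyGroupsNeeded = 64/2 + 64%2 = 32 -> CanSelfReconstruct(32) == true, CanSelfReconstruct(31) == false
// total = 65 -> custodyGroupsNeeded = 65/2 + 65%2 = 33 -> CanSelfReconstruct(33) == true, CanSelfReconstruct(32) == false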
|
||||
|
||||
// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
|
||||
func RecoverCellsAndProofs(
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
) ([]kzg.CellsAndProofs, error) {
|
||||
var wg errgroup.Group
|
||||
|
||||
dataColumnSideCarsCount := len(dataColumnSideCars)
|
||||
|
||||
if dataColumnSideCarsCount == 0 {
|
||||
return nil, errors.New("no data column sidecars")
|
||||
}
|
||||
|
||||
// Check if all columns have the same length.
|
||||
blobCount := len(dataColumnSideCars[0].DataColumn)
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
length := len(sidecar.DataColumn)
|
||||
|
||||
if length != blobCount {
|
||||
return nil, errors.New("columns do not have the same length")
|
||||
}
|
||||
}
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
|
||||
for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
|
||||
bIndex := blobIndex
|
||||
wg.Go(func() error {
|
||||
cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
|
||||
cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)
|
||||
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
// Build the cell indices.
|
||||
cellsIndices = append(cellsIndices, sidecar.ColumnIndex)
|
||||
|
||||
// Get the cell.
|
||||
column := sidecar.DataColumn
|
||||
cell := column[bIndex]
|
||||
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
}
|
||||
|
||||
// Recover the cells and proofs for the corresponding blob
|
||||
cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
|
||||
}
|
||||
|
||||
recoveredCellsAndProofs[bIndex] = cellsAndProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return recoveredCellsAndProofs, nil
|
||||
}
|
||||
|
||||
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
|
||||
// It is scheduled for deletion.
|
||||
func DataColumnSidecarsForReconstruct(
|
||||
blobKzgCommitments [][]byte,
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
cellsAndProofs []kzg.CellsAndProofs,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Each CellsAndProofs corresponds to a Blob
|
||||
// So we can get the BlobCount by checking the length of CellsAndProofs
|
||||
blobsCount := len(cellsAndProofs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
208 beacon-chain/core/peerdas/reconstruction_test.go (new file)
@@ -0,0 +1,208 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestCanSelfReconstruct(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
totalNumberOfCustodyGroups uint64
|
||||
custodyNumberOfGroups uint64
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31",
|
||||
totalNumberOfCustodyGroups: 64,
|
||||
custodyNumberOfGroups: 31,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32",
|
||||
totalNumberOfCustodyGroups: 64,
|
||||
custodyNumberOfGroups: 32,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32",
|
||||
totalNumberOfCustodyGroups: 65,
|
||||
custodyNumberOfGroups: 32,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=33",
|
||||
totalNumberOfCustodyGroups: 65,
|
||||
custodyNumberOfGroups: 33,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set the total number of columns.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Check if reconstruction is possible.
|
||||
actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconstructionRoundTrip(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
const blobCount = 5
|
||||
|
||||
var blockRoot [fieldparams.RootLength]byte
|
||||
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Generate random blobs and their corresponding commitments.
|
||||
var (
|
||||
blobsKzgCommitments [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := range blobCount {
|
||||
blob := getRandBlob(int64(i))
|
||||
commitment, _, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobsKzgCommitments = append(blobsKzgCommitments, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
// Generate a signed beacon block.
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobsKzgCommitments
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert data columns sidecars from signed block and blobs.
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create verified RO data columns.
|
||||
verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, blobCount)
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
|
||||
}
|
||||
|
||||
verifiedRoDataColumn := verifiedRoDataColumns[0]
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
var noDataColumns []*ethpb.DataColumnSidecar
|
||||
dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
|
||||
{DataColumn: [][]byte{{}, {}}},
|
||||
{DataColumn: [][]byte{{}}},
|
||||
}
|
||||
notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
|
||||
originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
|
||||
extendedDataColumns := dataColumnSidecars[numberOfColumns/2:]
|
||||
evenDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
oddDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
allDataColumns := dataColumnSidecars
|
||||
|
||||
for i, dataColumn := range dataColumnSidecars {
|
||||
if i%2 == 0 {
|
||||
evenDataColumns = append(evenDataColumns, dataColumn)
|
||||
} else {
|
||||
oddDataColumns = append(oddDataColumns, dataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
dataColumnsSidecar []*ethpb.DataColumnSidecar
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "No data columns sidecars",
|
||||
dataColumnsSidecar: noDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "Data columns sidecar with different lengths",
|
||||
dataColumnsSidecar: dataColumnsWithDifferentLengths,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "All columns are present (no actual need to reconstruct)",
|
||||
dataColumnsSidecar: allDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only original columns are present",
|
||||
dataColumnsSidecar: originalDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only extended columns are present",
|
||||
dataColumnsSidecar: extendedDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only even columns are present",
|
||||
dataColumnsSidecar: evenDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only odd columns are present",
|
||||
dataColumnsSidecar: oddDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Not enough columns to reconstruct",
|
||||
dataColumnsSidecar: notEnoughDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Recover cells and proofs from available data columns sidecars.
|
||||
cellsAndProofs, err := peerdas.RecoverCellsAndProofs(tc.dataColumnsSidecar, blockRoot)
|
||||
isError := (err != nil)
|
||||
require.Equal(t, tc.isError, isError)
|
||||
|
||||
if isError {
|
||||
return
|
||||
}
|
||||
|
||||
// Recover all data columns sidecars from cells and proofs.
|
||||
reconstructedDataColumnsSideCars, err := peerdas.DataColumnSidecarsForReconstruct(
|
||||
blobsKzgCommitments,
|
||||
signedBeaconBlockHeader,
|
||||
verifiedRoDataColumn.KzgCommitmentsInclusionProof,
|
||||
cellsAndProofs,
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := dataColumnSidecars
|
||||
actual := reconstructedDataColumnsSideCars
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
57 beacon-chain/core/peerdas/utils_test.go (new file)
@@ -0,0 +1,57 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func getRandBlob(seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := getRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func getRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
|
||||
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
|
||||
}
|
||||
|
||||
// PeerDASIsActive checks whether peerDAS is active at the provided slot.
|
||||
func PeerDASIsActive(slot primitives.Slot) bool {
|
||||
return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
|
||||
}
|
||||
|
||||
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"availability_columns.go",
|
||||
"cache.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
@@ -11,14 +12,17 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -27,6 +31,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_columns_test.go",
|
||||
"availability_test.go",
|
||||
"cache_test.go",
|
||||
],
|
||||
@@ -34,6 +39,7 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -41,6 +47,7 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
@@ -80,7 +81,7 @@ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.RO
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// BlobSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments, err := commitmentsToCheck(b, current)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
|
||||
@@ -94,14 +95,7 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
|
||||
entry := s.cache.ensure(key)
|
||||
defer s.cache.delete(key)
|
||||
root := b.Root()
|
||||
sumz, err := s.store.WaitForSummarizer(ctx)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
|
||||
WithError(err).
|
||||
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
|
||||
} else {
|
||||
entry.setDiskSummary(sumz.Summary(root))
|
||||
}
|
||||
entry.setDiskSummary(s.store.Summary(root))
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
|
||||
171 beacon-chain/das/availability_columns.go (new file)
@@ -0,0 +1,171 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.BlobStorage
|
||||
cache *cache
|
||||
}
|
||||
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
cache: newCache(),
|
||||
}
|
||||
}
|
||||
|
||||
// Persist does nothing at the moment.
// TODO: Very ugly; change the interface to allow for both columns and blobs.
|
||||
func (*LazilyPersistentStoreColumn) Persist(_ primitives.Slot, _ ...blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all data columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
|
||||
if len(sc) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sc) > 1 {
|
||||
first := sc[0].BlockRoot()
|
||||
for i := 1; i < len(sc); i++ {
|
||||
if first != sc[i].BlockRoot() {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromColumn(sc[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for i := range sc {
|
||||
if err := entry.stashColumns(&sc[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(
|
||||
ctx context.Context,
|
||||
nodeID enode.ID,
|
||||
currentSlot primitives.Slot,
|
||||
block blocks.ROBlock,
|
||||
) error {
|
||||
blockCommitments, err := fullCommitmentsToCheck(nodeID, block, currentSlot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
|
||||
}
|
||||
|
||||
// Return early for blocks that do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the cache key for the block.
|
||||
key := keyFromBlock(block)
|
||||
|
||||
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
// Delete the cache entry for the block at the end.
|
||||
defer s.cache.delete(key)
|
||||
|
||||
// Get the root of the block.
|
||||
blockRoot := block.Root()
|
||||
|
||||
// Set the disk summary for the block in the cache entry.
|
||||
entry.setDiskSummary(s.store.Summary(blockRoot))
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
roDataColumns, err := entry.filterColumns(blockRoot, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "incomplete DataColumnSidecar batch")
|
||||
}
|
||||
|
||||
// Create verified RO data columns from RO data columns.
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumns))
|
||||
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
// Ensure that each column sidecar is written to disk.
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
if err := s.store.SaveDataColumn(verifiedRODataColumn); err != nil {
|
||||
return errors.Wrapf(err, "save data columns for index `%d` for block `%#x`", verifiedRODataColumn.ColumnIndex, blockRoot)
|
||||
}
|
||||
}
|
||||
|
||||
// All ColumnSidecars are persisted - data availability check succeeds.
|
||||
return nil
|
||||
}
|
||||
|
||||
// fullCommitmentsToCheck returns the commitments to check for a given block.
|
||||
func fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
|
||||
// Return early for blocks that are pre-Fulu.
|
||||
if block.Version() < version.Fulu {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Compute the block epoch.
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
// Compute the current epoch.
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the KZG commitments for the block.
|
||||
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Return early if there are no commitments in the block.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the groups count.
|
||||
custodyGroupCount := peerdas.ActualCustodyGroupCount()
|
||||
|
||||
// Retrieve peer info.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
// Create a safe commitments array for the custody columns.
|
||||
commitmentsArray := &safeCommitmentsArray{}
|
||||
for column := range peerInfo.CustodyColumns {
|
||||
commitmentsArray[column] = kzgCommitments
|
||||
}
|
||||
|
||||
return commitmentsArray, nil
|
||||
}
|
||||
94 beacon-chain/das/availability_columns_test.go (new file)
@@ -0,0 +1,94 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "pre fulu",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
bb := util.NewBeaconBlockElectra()
|
||||
sb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "commitments within da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockFulu()
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
d.Block.Slot = 100
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
commits: commits,
|
||||
slot: 100,
|
||||
},
|
||||
{
|
||||
name: "commitments outside da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockElectra()
|
||||
// block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
b := c.block(t)
|
||||
co, err := fullCommitmentsToCheck(enode.ID{}, b, c.slot)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
for i := 0; i < len(co); i++ {
|
||||
require.DeepEqual(t, c.commits, co[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
@@ -123,18 +124,18 @@ func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[2]))
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All but one persisted, return missing idx
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
err = as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All persisted, return nil
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
require.NoError(t, as.IsDataAvailable(ctx, enode.ID{}, 1, blk))
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
@@ -149,7 +150,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
require.ErrorIs(t, err, errCommitmentMismatch)
|
||||
}
|
||||
|
||||
@@ -2,9 +2,11 @@ package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -38,6 +40,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
func keyFromColumn(sc blocks.RODataColumn) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
|
||||
func keyFromBlock(b blocks.ROBlock) cacheKey {
|
||||
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
|
||||
@@ -61,6 +67,7 @@ func (c *cache) delete(key cacheKey) {
|
||||
// cacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type cacheEntry struct {
|
||||
scs []*blocks.ROBlob
|
||||
colScs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.BlobStorageSummary
|
||||
}
|
||||
|
||||
@@ -86,6 +93,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
|
||||
if sc.ColumnIndex >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
|
||||
}
|
||||
if e.colScs[sc.ColumnIndex] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
|
||||
}
|
||||
e.colScs[sc.ColumnIndex] = sc
|
||||
return nil
|
||||
}
|
||||
|
||||
// filter evicts sidecars that are not committed to by the block and returns custom
|
||||
// errors if the cache is missing any of the commitments, or if the commitments in
|
||||
// the cache do not match those found in the block. If err is nil, then all expected
|
||||
@@ -121,3 +139,66 @@ func (e *cacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([
|
||||
|
||||
return scs, nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) filterColumns(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
nonEmptyIndices := commitmentsArray.nonEmptyIndices()
|
||||
if e.diskSummary.AllDataColumnsAvailable(nonEmptyIndices) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
commitmentsCount := commitmentsArray.count()
|
||||
sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
|
||||
|
||||
for i := range uint64(fieldparams.NumberOfColumns) {
|
||||
// Skip if we already store this data column.
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
|
||||
if commitmentsArray[i] == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.colScs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(commitmentsArray[i], e.colScs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, commitmentsArray[i])
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, *e.colScs[i])
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// safeCommitmentsArray is a fixed size array of commitments.
|
||||
// This is helpful for avoiding gratuitous bounds checks.
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
// count returns the number of commitments in the array.
|
||||
func (s *safeCommitmentsArray) count() int {
|
||||
count := 0
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// nonEmptyIndices returns a map of indices that are non-nil in the array.
|
||||
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
|
||||
columns := make(map[uint64]bool)
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
columns[uint64(i)] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
|
||||
entry := &cacheEntry{}
|
||||
if len(onDisk) > 0 {
|
||||
od := map[[32]byte][]int{blk.Root(): onDisk}
|
||||
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
|
||||
sumz := filesystem.NewMockBlobStorageSummarizer(t, od, 0)
|
||||
sum := sumz.Summary(blk.Root())
|
||||
entry.setDiskSummary(sum)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package das
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
@@ -14,6 +15,6 @@ import (
|
||||
// IsDataAvailable guarantees that all blobs committed to in the block have been
|
||||
// durably persisted before returning a non-error value.
|
||||
type AvailabilityStore interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
IsDataAvailable(ctx context.Context, nodeID enode.ID, current primitives.Slot, b blocks.ROBlock) error
|
||||
Persist(current primitives.Slot, sc ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package das
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
@@ -16,7 +17,7 @@ type MockAvailabilityStore struct {
|
||||
var _ AvailabilityStore = &MockAvailabilityStore{}
|
||||
|
||||
// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
|
||||
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
|
||||
if m.VerifyAvailabilityCallback != nil {
|
||||
return m.VerifyAvailabilityCallback(ctx, current, b)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,10 @@ go_library(
|
||||
srcs = [
|
||||
"blob.go",
|
||||
"cache.go",
|
||||
"iteration.go",
|
||||
"layout.go",
|
||||
"layout_by_epoch.go",
|
||||
"layout_flat.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"mock.go",
|
||||
@@ -13,6 +17,8 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -20,7 +26,6 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/logging:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
@@ -37,17 +42,23 @@ go_test(
|
||||
srcs = [
|
||||
"blob_test.go",
|
||||
"cache_test.go",
|
||||
"iteration_test.go",
|
||||
"layout_test.go",
|
||||
"migration_test.go",
|
||||
"pruner_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -1,47 +1,45 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/io/file"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func directoryPermissions() os.FileMode {
|
||||
return params.BeaconIoConfig().ReadWriteExecutePermissions
|
||||
}
|
||||
|
||||
var (
|
||||
errIndexOutOfBounds = errors.New("blob index in file name >= DeprecatedMaxBlobsPerBlock")
|
||||
errEmptyBlobWritten = errors.New("zero bytes written to disk when saving blob sidecar")
|
||||
errIndexOutOfBounds = errors.New("blob index in file name >= MAX_BLOBS_PER_BLOCK")
|
||||
errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
|
||||
errNoBasePath = errors.New("BlobStorage base path not specified in init")
|
||||
errInvalidRootString = errors.New("Could not parse hex string as a [32]byte")
|
||||
)
|
||||
|
||||
const (
|
||||
sszExt = "ssz"
|
||||
partExt = "part"
|
||||
type (
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
BlobStorageOption func(*BlobStorage) error
|
||||
|
||||
directoryPermissions = 0700
|
||||
RootIndexPair struct {
|
||||
Root [fieldparams.RootLength]byte
|
||||
Index uint64
|
||||
}
|
||||
)
|
||||
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
type BlobStorageOption func(*BlobStorage) error
|
||||
|
||||
// WithBasePath is a required option that sets the base path of blob storage.
|
||||
func WithBasePath(base string) BlobStorageOption {
|
||||
return func(b *BlobStorage) error {
|
||||
@@ -66,29 +64,57 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithFs allows the afero.Fs implementation to be customized. Used by tests
|
||||
// to substitute an in-memory filesystem.
|
||||
func WithFs(fs afero.Fs) BlobStorageOption {
|
||||
return func(b *BlobStorage) error {
|
||||
b.fs = fs
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLayout enables the user to specify which layout scheme to use, dictating how blob files are stored on disk.
|
||||
func WithLayout(name string) BlobStorageOption {
|
||||
return func(b *BlobStorage) error {
|
||||
b.layoutName = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
|
||||
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
b := &BlobStorage{}
|
||||
b := &BlobStorage{
|
||||
DataColumnFeed: new(event.Feed),
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
if err := o(b); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create blob storage")
|
||||
}
|
||||
}
|
||||
if b.base == "" {
|
||||
return nil, errNoBasePath
|
||||
// Allow tests to set up a different fs using WithFs.
|
||||
if b.fs == nil {
|
||||
if b.base == "" {
|
||||
return nil, errNoBasePath
|
||||
}
|
||||
b.base = path.Clean(b.base)
|
||||
if err := file.MkdirAll(b.base); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
|
||||
}
|
||||
b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
|
||||
}
|
||||
b.base = path.Clean(b.base)
|
||||
if err := file.MkdirAll(b.base); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
|
||||
b.cache = newBlobStorageCache()
|
||||
pruner := newBlobPruner(b.retentionEpochs)
|
||||
if b.layoutName == "" {
|
||||
b.layoutName = LayoutNameFlat
|
||||
}
|
||||
b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
|
||||
pruner, err := newBlobPruner(b.fs, b.retentionEpochs)
|
||||
layout, err := newLayout(b.layoutName, b.fs, b.cache, pruner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.pruner = pruner
|
||||
b.layout = layout
|
||||
return b, nil
|
||||
}
|
||||
|
||||
@@ -96,47 +122,149 @@ func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
type BlobStorage struct {
|
||||
base string
|
||||
retentionEpochs primitives.Epoch
|
||||
layoutName string
|
||||
fsync bool
|
||||
fs afero.Fs
|
||||
pruner *blobPruner
|
||||
layout fsLayout
|
||||
cache *blobStorageSummaryCache
|
||||
DataColumnFeed *event.Feed
|
||||
}
|
||||
|
||||
// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
|
||||
// will be populated at node startup, avoiding a costly cold prune (~4s in syscalls) during syncing.
|
||||
func (bs *BlobStorage) WarmCache() {
|
||||
if bs.pruner == nil {
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
start := time.Now()
|
||||
start := time.Now()
|
||||
if bs.layoutName == LayoutNameFlat {
|
||||
log.Info("Blob filesystem cache warm-up started. This may take a few minutes.")
|
||||
if err := bs.pruner.warmCache(); err != nil {
|
||||
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete")
|
||||
}()
|
||||
} else {
|
||||
log.Info("Blob filesystem cache warm-up started.")
|
||||
}
|
||||
|
||||
if err := warmCache(bs.layout, bs.cache); err != nil {
|
||||
log.WithError(err).Error("Error encountered while warming up blob filesystem cache.")
|
||||
}
|
||||
if err := bs.migrateLayouts(); err != nil {
|
||||
log.WithError(err).Error("Error encountered while migrating blob storage.")
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
|
||||
}
|
||||
|
||||
// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
|
||||
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
|
||||
// fallback when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
|
||||
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")
|
||||
|
||||
// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
|
||||
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
|
||||
// determine which blobs are available.
|
||||
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
|
||||
if bs == nil || bs.pruner == nil {
|
||||
return nil, ErrBlobStorageSummarizerUnavailable
|
||||
// If any blob storage directories are found for layouts besides the configured layout, migrate them.
|
||||
func (bs *BlobStorage) migrateLayouts() error {
|
||||
for _, name := range LayoutNames {
|
||||
if name == bs.layoutName {
|
||||
continue
|
||||
}
|
||||
from, err := newLayout(name, bs.fs, bs.cache, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := migrateLayout(bs.fs, from, bs.layout, bs.cache); err != nil {
|
||||
if errors.Is(err, errLayoutNotDetected) {
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(err, "failed to migrate layout from %s to %s", name, bs.layoutName)
|
||||
}
|
||||
}
|
||||
return bs.pruner.waitForCache(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bs *BlobStorage) writePart(sidecar blocks.VerifiedROBlob) (ppath string, err error) {
|
||||
ident := identForSidecar(sidecar)
|
||||
sidecarData, err := sidecar.MarshalSSZ()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to serialize sidecar data")
|
||||
}
|
||||
if len(sidecarData) == 0 {
|
||||
return "", errSidecarEmptySSZData
|
||||
}
|
||||
|
||||
if err := bs.fs.MkdirAll(bs.layout.dir(ident), directoryPermissions()); err != nil {
|
||||
return "", err
|
||||
}
|
||||
ppath = bs.layout.partPath(ident, fmt.Sprintf("%p", sidecarData))
|
||||
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(ppath)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to create partial file")
|
||||
}
|
||||
defer func() {
|
||||
cerr := partialFile.Close()
|
||||
// The close error is probably less important than any existing error, so only overwrite nil err.
|
||||
if cerr != nil && err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
n, err := partialFile.Write(sidecarData)
|
||||
if err != nil {
|
||||
return ppath, errors.Wrap(err, "failed to write to partial file")
|
||||
}
|
||||
if bs.fsync {
|
||||
if err := partialFile.Sync(); err != nil {
|
||||
return ppath, err
|
||||
}
|
||||
}
|
||||
|
||||
if n != len(sidecarData) {
|
||||
return ppath, fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
|
||||
}
|
||||
|
||||
return ppath, nil
|
||||
}
|
||||
|
||||
func (bs *BlobStorage) writeDataColumnPart(sidecar blocks.VerifiedRODataColumn) (ppath string, err error) {
|
||||
ident := identForDataColumnSidecar(sidecar)
|
||||
sidecarData, err := sidecar.MarshalSSZ()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to serialize sidecar data")
|
||||
}
|
||||
if len(sidecarData) == 0 {
|
||||
return "", errSidecarEmptySSZData
|
||||
}
|
||||
|
||||
if err := bs.fs.MkdirAll(bs.layout.dir(ident), directoryPermissions()); err != nil {
|
||||
return "", err
|
||||
}
|
||||
ppath = bs.layout.partPath(ident, fmt.Sprintf("%p", sidecarData))
|
||||
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(ppath)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to create partial file")
|
||||
}
|
||||
defer func() {
|
||||
cerr := partialFile.Close()
|
||||
// The close error is probably less important than any existing error, so only overwrite nil err.
|
||||
if cerr != nil && err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
n, err := partialFile.Write(sidecarData)
|
||||
if err != nil {
|
||||
return ppath, errors.Wrap(err, "failed to write to partial file")
|
||||
}
|
||||
if bs.fsync {
|
||||
if err := partialFile.Sync(); err != nil {
|
||||
return ppath, err
|
||||
}
|
||||
}
|
||||
|
||||
if n != len(sidecarData) {
|
||||
return ppath, fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
|
||||
}
|
||||
|
||||
return ppath, nil
|
||||
}
|
||||
|
||||
// Save saves a single verified blob sidecar to disk.
|
||||
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
startTime := time.Now()
|
||||
fname := namerForSidecar(sidecar)
|
||||
sszPath := fname.path()
|
||||
|
||||
ident := identForSidecar(sidecar)
|
||||
sszPath := bs.layout.sszPath(ident)
|
||||
exists, err := afero.Exists(bs.fs, sszPath)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -145,78 +273,95 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("Ignoring a duplicate blob sidecar save attempt")
|
||||
return nil
|
||||
}
|
||||
if bs.pruner != nil {
|
||||
if err := bs.pruner.notify(sidecar.BlockRoot(), sidecar.Slot(), sidecar.Index); err != nil {
|
||||
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", sidecar.BlockRoot())
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize the ethpb.BlobSidecar to binary data using SSZ.
|
||||
sidecarData, err := sidecar.MarshalSSZ()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to serialize sidecar data")
|
||||
} else if len(sidecarData) == 0 {
|
||||
return errSidecarEmptySSZData
|
||||
}
|
||||
|
||||
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
|
||||
return err
|
||||
}
|
||||
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
|
||||
|
||||
partialMoved := false
|
||||
partPath, err := bs.writePart(sidecar)
|
||||
// Ensure the partial file is deleted.
|
||||
defer func() {
|
||||
if partialMoved {
|
||||
if partialMoved || partPath == "" {
|
||||
return
|
||||
}
|
||||
// The Remove is expected to fail when the save succeeded, since the partial file has already been renamed into place.
|
||||
err = bs.fs.Remove(partPath)
|
||||
err := bs.fs.Remove(partPath)
|
||||
if err == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"partPath": partPath,
|
||||
}).Debugf("Removed partial file")
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(partPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create partial file")
|
||||
}
|
||||
|
||||
n, err := partialFile.Write(sidecarData)
|
||||
if err != nil {
|
||||
closeErr := partialFile.Close()
|
||||
if closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
return errors.Wrap(err, "failed to write to partial file")
|
||||
}
|
||||
if bs.fsync {
|
||||
if err := partialFile.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := partialFile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n != len(sidecarData) {
|
||||
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return errEmptyBlobWritten
|
||||
}
|
||||
|
||||
// Atomically rename the partial file to its final name.
|
||||
err = bs.fs.Rename(partPath, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to rename partial file to final name")
|
||||
}
|
||||
partialMoved = true
|
||||
|
||||
if err := bs.layout.notify(ident); err != nil {
|
||||
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", sidecar.BlockRoot())
|
||||
}
|
||||
blobsWrittenCounter.Inc()
|
||||
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
|
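The save path above follows a write-to-part-file, optional fsync, then rename sequence so readers never observe a half-written sidecar. A minimal standalone sketch of that pattern over afero (the helper and package names are illustrative, not from the diff):
package blobwrite // illustrative only

import "github.com/spf13/afero"

// writeAtomic writes data to a temporary part file and renames it into place.
func writeAtomic(fs afero.Fs, partPath, finalPath string, data []byte, fsync bool) error {
	f, err := fs.Create(partPath)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		_ = f.Close()
		return err
	}
	if fsync {
		if err := f.Sync(); err != nil {
			_ = f.Close()
			return err
		}
	}
	if err := f.Close(); err != nil {
		return err
	}
	// Rename is atomic on POSIX filesystems when both paths are on the same volume.
	return fs.Rename(partPath, finalPath)
}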
||||
|
||||
// SaveDataColumn saves a single verified data column sidecar to disk.
|
||||
func (bs *BlobStorage) SaveDataColumn(verifiedRODataColumns blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
|
||||
ident := identForDataColumnSidecar(verifiedRODataColumns)
|
||||
sszPath := bs.layout.sszPath(ident)
|
||||
exists, err := afero.Exists(bs.fs, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "afero exists")
|
||||
}
|
||||
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
partialMoved := false
|
||||
partPath, err := bs.writeDataColumnPart(verifiedRODataColumns)
|
||||
|
||||
// Ensure the partial file is deleted.
|
||||
defer func() {
|
||||
if partialMoved || partPath == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// The Remove is expected to fail when the save succeeded, since the partial file has already been renamed into place.
|
||||
if err := bs.fs.Remove(partPath); err == nil {
|
||||
log.WithField("partPath", partPath).Debug("Removed partial file")
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Atomically rename the partial file to its final name.
|
||||
err = bs.fs.Rename(partPath, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rename")
|
||||
}
|
||||
partialMoved = true
|
||||
|
||||
if err := bs.layout.notify(ident); err != nil {
|
||||
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", verifiedRODataColumns.BlockRoot())
|
||||
}
|
||||
|
||||
// Notify the data column notifier that a new data column has been saved.
|
||||
if bs.DataColumnFeed != nil {
|
||||
bs.DataColumnFeed.Send(RootIndexPair{
|
||||
Root: verifiedRODataColumns.BlockRoot(),
|
||||
Index: verifiedRODataColumns.ColumnIndex,
|
||||
})
|
||||
}
|
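A hedged sketch of a consumer of DataColumnFeed, assuming the Feed exposes the go-ethereum-style Subscribe API that Prysm's event package mirrors; the function name and channel size are hypothetical.
// Hypothetical consumer of the RootIndexPair events sent above.
func watchDataColumns(bs *BlobStorage, done <-chan struct{}) {
	ch := make(chan RootIndexPair, 64)
	sub := bs.DataColumnFeed.Subscribe(ch)
	defer sub.Unsubscribe()
	for {
		select {
		case pair := <-ch:
			log.WithFields(logrus.Fields{"root": fmt.Sprintf("%#x", pair.Root), "index": pair.Index}).
				Debug("Data column persisted")
		case <-done:
			return
		}
	}
}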
||||
|
||||
blobsWrittenCounter.Inc()
|
||||
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
@@ -228,70 +373,48 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
// value is always a VerifiedROBlob.
|
||||
func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, error) {
|
||||
startTime := time.Now()
|
||||
expected := blobNamer{root: root, index: idx}
|
||||
encoded, err := afero.ReadFile(bs.fs, expected.path())
|
||||
var v blocks.VerifiedROBlob
|
||||
ident, err := bs.layout.ident(root, idx)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
s := ðpb.BlobSidecar{}
|
||||
if err := s.UnmarshalSSZ(encoded); err != nil {
|
||||
return v, err
|
||||
}
|
||||
ro, err := blocks.NewROBlobWithRoot(s, root)
|
||||
if err != nil {
|
||||
return blocks.VerifiedROBlob{}, err
|
||||
return verification.VerifiedROBlobError(err)
|
||||
}
|
||||
defer func() {
|
||||
blobFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}()
|
||||
return verification.BlobSidecarNoop(ro)
|
||||
return verification.VerifiedROBlobFromDisk(bs.fs, root, bs.layout.sszPath(ident))
|
||||
}
|
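A small hypothetical caller showing how Get's not-found result is usually distinguished from real failures; db.IsNotFound is the same check used by the test further down.
func blobIfPresent(bs *BlobStorage, root [32]byte, idx uint64) (*blocks.VerifiedROBlob, error) {
	sc, err := bs.Get(root, idx)
	if err != nil {
		if db.IsNotFound(err) {
			return nil, nil // not on disk; the caller should request it from the network
		}
		return nil, err
	}
	return &sc, nil
}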
||||
|
||||
// GetColumn retrieves a single DataColumnSidecar by its root and index.
|
||||
// Since BlobStorage only writes blobs that have undergone full verification, the return
|
||||
// value is always a VerifiedRODataColumn.
|
||||
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (blocks.VerifiedRODataColumn, error) {
|
||||
startTime := time.Now()
|
||||
|
||||
ident, err := bs.layout.ident(root, idx)
|
||||
if err != nil {
|
||||
return verification.VerifiedRODataColumnError(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
blobFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}()
|
||||
|
||||
return verification.VerifiedRODataColumnFromDisk(bs.fs, root, bs.layout.sszPath(ident))
|
||||
}
|
||||
|
||||
// Remove removes all blobs for a given root.
|
||||
func (bs *BlobStorage) Remove(root [32]byte) error {
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
return bs.fs.RemoveAll(rootDir)
|
||||
dirIdent, err := bs.layout.dirIdent(root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = bs.layout.remove(dirIdent)
|
||||
return err
|
||||
}
|
||||
|
||||
// Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
|
||||
// This value can be compared to the commitments observed in a block to determine which indices need to be found
|
||||
// on the network to confirm data availability.
|
||||
func (bs *BlobStorage) Indices(root [32]byte, s primitives.Slot) ([]bool, error) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s)
|
||||
mask := make([]bool, maxBlobsPerBlock)
|
||||
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
entries, err := afero.ReadDir(bs.fs, rootDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return mask, nil
|
||||
}
|
||||
return mask, err
|
||||
}
|
||||
|
||||
for i := range entries {
|
||||
if entries[i].IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entries[i].Name()
|
||||
if !strings.HasSuffix(name, sszExt) {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(name, ".")
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
u, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
|
||||
}
|
||||
if u >= uint64(maxBlobsPerBlock) {
|
||||
return mask, errIndexOutOfBounds
|
||||
}
|
||||
mask[u] = true
|
||||
}
|
||||
return mask, nil
|
||||
// Summary returns the BlobStorageSummary from the layout.
|
||||
// Internally, this is a cached representation of the directory listing for the given root.
|
||||
func (bs *BlobStorage) Summary(root [32]byte) BlobStorageSummary {
|
||||
return bs.layout.summary(root)
|
||||
}
|
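A hypothetical helper showing how the cached summary is typically used to decide which indices still need to be fetched; expectedCount would come from the block's blob KZG commitments.
func missingIndices(bs *BlobStorage, root [32]byte, expectedCount int) []uint64 {
	sum := bs.Summary(root)
	if sum.AllAvailable(expectedCount) {
		return nil
	}
	missing := make([]uint64, 0, expectedCount)
	for i := uint64(0); i < uint64(expectedCount); i++ {
		if !sum.HasIndex(i) {
			missing = append(missing, i)
		}
	}
	return missing
}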
||||
|
||||
// Clear deletes all files on the filesystem.
|
||||
@@ -316,36 +439,3 @@ func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch
|
||||
}
|
||||
return requested+bs.retentionEpochs >= current
|
||||
}
|
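Worked example of the retention check above: with retentionEpochs set to 4096 (the spec's MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, assuming no extended retention is configured), a request for epoch 10000 satisfies 10000+4096 >= current up to and including current epoch 14096, so it is within the retention period; from epoch 14097 onward it is not.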
||||
|
||||
type blobNamer struct {
|
||||
root [32]byte
|
||||
index uint64
|
||||
}
|
||||
|
||||
func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
|
||||
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
|
||||
}
|
||||
|
||||
func (p blobNamer) dir() string {
|
||||
return rootString(p.root)
|
||||
}
|
||||
|
||||
func (p blobNamer) partPath(entropy string) string {
|
||||
return path.Join(p.dir(), fmt.Sprintf("%s-%d.%s", entropy, p.index, partExt))
|
||||
}
|
||||
|
||||
func (p blobNamer) path() string {
|
||||
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, sszExt))
|
||||
}
|
||||
|
||||
func rootString(root [32]byte) string {
|
||||
return fmt.Sprintf("%#x", root)
|
||||
}
|
||||
|
||||
func stringToRoot(str string) ([32]byte, error) {
|
||||
slice, err := hexutil.Decode(str)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrapf(errInvalidRootString, "input=%s", str)
|
||||
}
|
||||
return bytesutil.ToBytes32(slice), nil
|
||||
}
|
||||
|
||||
@@ -9,26 +9,26 @@ import (
|
||||
"testing"
|
||||
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
testSidecars := verification.FakeVerifySliceForTest(t, sidecars)
|
||||
|
||||
t.Run("no error for duplicate", func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
fs, bs := NewEphemeralBlobStorageAndFs(t)
|
||||
existingSidecar := testSidecars[0]
|
||||
|
||||
blobPath := namerForSidecar(existingSidecar).path()
|
||||
blobPath := bs.layout.sszPath(identForSidecar(existingSidecar))
|
||||
// Serialize the existing BlobSidecar to binary data.
|
||||
existingSidecarData, err := ssz.MarshalSSZ(existingSidecar)
|
||||
require.NoError(t, err)
|
||||
@@ -56,8 +56,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
require.NoError(t, bs.Save(sc))
|
||||
actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
|
||||
require.NoError(t, err)
|
||||
expectedIdx := []bool{false, false, true, false, false, false}
|
||||
actualIdx, err := bs.Indices(actualSc.BlockRoot(), 100)
|
||||
expectedIdx := blobIndexMask{false, false, true, false, false, false}
|
||||
actualIdx := bs.Summary(actualSc.BlockRoot()).mask
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedIdx, actualIdx)
|
||||
})
|
||||
@@ -85,7 +85,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
|
||||
require.NoError(t, bs.Remove(expected.BlockRoot()))
|
||||
_, err = bs.Get(expected.BlockRoot(), expected.Index)
|
||||
require.ErrorContains(t, "file does not exist", err)
|
||||
require.Equal(t, true, db.IsNotFound(err))
|
||||
})
|
||||
|
||||
t.Run("clear", func(t *testing.T) {
|
||||
@@ -126,16 +126,14 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// pollUntil polls a condition function until it returns true or a timeout is reached.
|
||||
|
||||
func TestBlobIndicesBounds(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
fs := afero.NewMemMapFs()
|
||||
root := [32]byte{}
|
||||
|
||||
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
|
||||
writeFakeSSZ(t, fs, root, okIdx)
|
||||
indices, err := bs.Indices(root, 100)
|
||||
require.NoError(t, err)
|
||||
writeFakeSSZ(t, fs, root, 0, okIdx)
|
||||
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
|
||||
indices := bs.Summary(root).mask
|
||||
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
expected[okIdx] = true
|
||||
for i := range expected {
|
||||
@@ -143,102 +141,23 @@ func TestBlobIndicesBounds(t *testing.T) {
|
||||
}
|
||||
|
||||
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
writeFakeSSZ(t, fs, root, oobIdx)
|
||||
_, err = bs.Indices(root, 100)
|
||||
require.ErrorIs(t, err, errIndexOutOfBounds)
|
||||
writeFakeSSZ(t, fs, root, 0, oobIdx)
|
||||
// This now fails at cache warmup time.
|
||||
require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
|
||||
}
|
||||
|
||||
func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
|
||||
namer := blobNamer{root: root, index: idx}
|
||||
require.NoError(t, fs.MkdirAll(namer.dir(), 0700))
|
||||
fh, err := fs.Create(namer.path())
|
||||
func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, slot primitives.Slot, idx uint64) {
|
||||
epoch := slots.ToEpoch(slot)
|
||||
namer := newBlobIdent(root, epoch, idx)
|
||||
layout := periodicEpochLayout{}
|
||||
require.NoError(t, fs.MkdirAll(layout.dir(namer), 0700))
|
||||
fh, err := fs.Create(layout.sszPath(namer))
|
||||
require.NoError(t, err)
|
||||
_, err = fh.Write([]byte("derp"))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fh.Close())
|
||||
}
|
||||
|
||||
func TestBlobStoragePrune(t *testing.T) {
|
||||
currentSlot := primitives.Slot(200000)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
|
||||
t.Run("PruneOne", func(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, sidecar := range testSidecars {
|
||||
require.NoError(t, bs.Save(sidecar))
|
||||
}
|
||||
|
||||
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
|
||||
|
||||
remainingFolders, err := afero.ReadDir(fs, ".")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remainingFolders))
|
||||
})
|
||||
t.Run("Prune dangling blob", func(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 299, params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, sidecar := range testSidecars[4:] {
|
||||
require.NoError(t, bs.Save(sidecar))
|
||||
}
|
||||
|
||||
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
|
||||
|
||||
remainingFolders, err := afero.ReadDir(fs, ".")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remainingFolders))
|
||||
})
|
||||
t.Run("PruneMany", func(t *testing.T) {
|
||||
blockQty := 10
|
||||
slot := primitives.Slot(1)
|
||||
|
||||
for j := 0; j <= blockQty; j++ {
|
||||
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bs.Save(testSidecars[0]))
|
||||
|
||||
slot += 10000
|
||||
}
|
||||
|
||||
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
|
||||
|
||||
remainingFolders, err := afero.ReadDir(fs, ".")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(remainingFolders))
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkPruning(b *testing.B) {
|
||||
var t *testing.T
|
||||
_, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
|
||||
blockQty := 10000
|
||||
currentSlot := primitives.Slot(150000)
|
||||
slot := primitives.Slot(0)
|
||||
|
||||
for j := 0; j <= blockQty; j++ {
|
||||
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bs.Save(testSidecars[0]))
|
||||
|
||||
slot += 100
|
||||
}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := bs.pruner.prune(currentSlot)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewBlobStorage(t *testing.T) {
|
||||
_, err := NewBlobStorage()
|
||||
require.ErrorIs(t, err, errNoBasePath)
|
||||
@@ -292,3 +211,13 @@ func TestConfig_WithinRetentionPeriod(t *testing.T) {
|
||||
require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
|
||||
})
|
||||
}
|
||||
|
||||
func TestLayoutNames(t *testing.T) {
|
||||
badLayoutName := "bad"
|
||||
for _, name := range LayoutNames {
|
||||
_, err := newLayout(name, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
_, err := newLayout(badLayoutName, nil, nil, nil)
|
||||
require.ErrorIs(t, err, errInvalidLayoutName)
|
||||
}
|
||||
|
||||
@@ -1,41 +1,52 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
|
||||
// TODO: Separate blobs from data columns
|
||||
type blobIndexMask []bool
|
||||
|
||||
// type blobIndexMask [fieldparams.NumberOfColumns]bool
|
||||
|
||||
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
|
||||
type BlobStorageSummary struct {
|
||||
slot primitives.Slot
|
||||
mask blobIndexMask
|
||||
epoch primitives.Epoch
|
||||
mask blobIndexMask
|
||||
}
|
||||
|
||||
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
|
||||
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
|
||||
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s.slot)
|
||||
if idx >= uint64(maxBlobsPerBlock) {
|
||||
return false
|
||||
}
|
||||
if idx >= uint64(len(s.mask)) {
|
||||
return false
|
||||
}
|
||||
return s.mask[idx]
|
||||
}
|
||||
|
||||
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
|
||||
func (s BlobStorageSummary) AllAvailable(count int) bool {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s.slot)
|
||||
if count > maxBlobsPerBlock {
|
||||
// HasDataColumnIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
|
||||
func (s BlobStorageSummary) HasDataColumnIndex(idx uint64) bool {
|
||||
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
if idx >= numberOfColumns {
|
||||
return false
|
||||
}
|
||||
|
||||
if idx >= uint64(len(s.mask)) {
|
||||
return false
|
||||
}
|
||||
|
||||
return s.mask[idx]
|
||||
}
|
||||
|
||||
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
|
||||
func (s BlobStorageSummary) AllAvailable(count int) bool {
|
||||
if count > len(s.mask) {
|
||||
return false
|
||||
}
|
||||
@@ -47,83 +58,143 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// AllDataColumnsAvailable returns true if we have all data columns for corresponding indices.
|
||||
func (s BlobStorageSummary) AllDataColumnsAvailable(indices map[uint64]bool) bool {
|
||||
if len(indices) > len(s.mask) {
|
||||
return false
|
||||
}
|
||||
|
||||
for indice := range indices {
|
||||
if !s.mask[indice] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (s BlobStorageSummary) MaxBlobsForEpoch() uint64 {
|
||||
return uint64(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(s.epoch))
|
||||
}
|
||||
|
||||
// NewBlobStorageSummary creates a new BlobStorageSummary for a given epoch and mask.
|
||||
func NewBlobStorageSummary(epoch primitives.Epoch, mask []bool) (BlobStorageSummary, error) {
|
||||
c := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(epoch)
|
||||
if len(mask) != c {
|
||||
return BlobStorageSummary{}, fmt.Errorf("mask length %d does not match expected %d for epoch %d", len(mask), c, epoch)
|
||||
}
|
||||
return BlobStorageSummary{
|
||||
epoch: epoch,
|
||||
mask: mask,
|
||||
}, nil
|
||||
}
|
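Illustrative construction (hypothetical values) of the length check enforced above; params and primitives are already imported in this package.
func exampleSummary() (BlobStorageSummary, error) {
	epoch := primitives.Epoch(10)
	// The mask must be exactly MaxBlobsPerBlockAtEpoch(epoch) long; any other length is rejected.
	mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlockAtEpoch(epoch))
	mask[0] = true
	return NewBlobStorageSummary(epoch, mask)
}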
||||
|
||||
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
|
||||
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
|
||||
type BlobStorageSummarizer interface {
|
||||
Summary(root [32]byte) BlobStorageSummary
|
||||
}
|
||||
|
||||
type blobStorageCache struct {
|
||||
type blobStorageSummaryCache struct {
|
||||
mu sync.RWMutex
|
||||
nBlobs float64
|
||||
cache map[[32]byte]BlobStorageSummary
|
||||
}
|
||||
|
||||
var _ BlobStorageSummarizer = &blobStorageCache{}
|
||||
var _ BlobStorageSummarizer = &blobStorageSummaryCache{}
|
||||
|
||||
func newBlobStorageCache() *blobStorageCache {
|
||||
return &blobStorageCache{
|
||||
cache: make(map[[32]byte]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
|
||||
func newBlobStorageCache() *blobStorageSummaryCache {
|
||||
return &blobStorageSummaryCache{
|
||||
cache: make(map[[32]byte]BlobStorageSummary),
|
||||
}
|
||||
}
|
||||
|
||||
// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
|
||||
// BlobSidecars based on Index.
|
||||
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
|
||||
func (s *blobStorageSummaryCache) Summary(root [32]byte) BlobStorageSummary {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.cache[root]
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if idx >= uint64(maxBlobsPerBlock) {
|
||||
func (s *blobStorageSummaryCache) ensure(ident blobIdent) error {
|
||||
maskSize := uint64(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(ident.epoch))
|
||||
|
||||
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
|
||||
if ident.epoch >= fuluForkEpoch {
|
||||
maskSize = params.BeaconConfig().NumberOfColumns
|
||||
}
|
||||
|
||||
if ident.index >= maskSize {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
v := s.cache[key]
|
||||
v.slot = slot
|
||||
v := s.cache[ident.root]
|
||||
v.epoch = ident.epoch
|
||||
if v.mask == nil {
|
||||
v.mask = make(blobIndexMask, maxBlobsPerBlock)
|
||||
v.mask = make(blobIndexMask, maskSize)
|
||||
}
|
||||
if !v.mask[idx] {
|
||||
if !v.mask[ident.index] {
|
||||
s.updateMetrics(1)
|
||||
}
|
||||
v.mask[idx] = true
|
||||
s.cache[key] = v
|
||||
v.mask[ident.index] = true
|
||||
s.cache[ident.root] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) slot(key [32]byte) (primitives.Slot, bool) {
|
||||
func (s *blobStorageSummaryCache) get(key [32]byte) (BlobStorageSummary, bool) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
v, ok := s.cache[key]
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
return v.slot, ok
|
||||
return v, ok
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) evict(key [32]byte) {
|
||||
var deleted float64
|
||||
func (s *blobStorageSummaryCache) identForIdx(key [32]byte, idx uint64) (blobIdent, error) {
|
||||
v, ok := s.get(key)
|
||||
if !ok || !v.HasIndex(idx) {
|
||||
return blobIdent{}, db.ErrNotFound
|
||||
}
|
||||
return blobIdent{
|
||||
root: key,
|
||||
index: idx,
|
||||
epoch: v.epoch,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *blobStorageSummaryCache) identForRoot(key [32]byte) (blobIdent, error) {
|
||||
v, ok := s.get(key)
|
||||
if !ok {
|
||||
return blobIdent{}, db.ErrNotFound
|
||||
}
|
||||
return blobIdent{
|
||||
root: key,
|
||||
epoch: v.epoch,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *blobStorageSummaryCache) evict(key [32]byte) int {
|
||||
deleted := 0
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
v, ok := s.cache[key]
|
||||
if ok {
|
||||
for i := range v.mask {
|
||||
if v.mask[i] {
|
||||
deleted += 1
|
||||
}
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
for i := range v.mask {
|
||||
if v.mask[i] {
|
||||
deleted += 1
|
||||
}
|
||||
}
|
||||
delete(s.cache, key)
|
||||
s.mu.Unlock()
|
||||
if deleted > 0 {
|
||||
s.updateMetrics(-deleted)
|
||||
s.updateMetrics(-float64(deleted))
|
||||
}
|
||||
return deleted
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) updateMetrics(delta float64) {
|
||||
func (s *blobStorageSummaryCache) updateMetrics(delta float64) {
|
||||
s.nBlobs += delta
|
||||
blobDiskCount.Set(s.nBlobs)
|
||||
blobDiskSize.Set(s.nBlobs * fieldparams.BlobSidecarSize)
|
||||
|
||||
@@ -3,6 +3,7 @@ package filesystem
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -53,7 +54,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
if c.expected != nil {
|
||||
key := bytesutil.ToBytes32([]byte(c.name))
|
||||
sc.cache[key] = BlobStorageSummary{slot: 0, mask: c.expected}
|
||||
sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
|
||||
}
|
||||
}
|
||||
for _, c := range cases {
|
||||
@@ -152,3 +153,124 @@ func TestAllAvailable(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasDataColumnIndex(t *testing.T) {
|
||||
storedIndices := map[uint64]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
5: true,
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
idx uint64
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "index is too high",
|
||||
idx: fieldparams.NumberOfColumns,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "non existing index",
|
||||
idx: 2,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "existing index",
|
||||
idx: 3,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
// Get the maximum index that is stored.
|
||||
maxIndex := uint64(0)
|
||||
for index := range storedIndices {
|
||||
if index > maxIndex {
|
||||
maxIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
mask := make(blobIndexMask, maxIndex+1)
|
||||
|
||||
for idx := range storedIndices {
|
||||
mask[idx] = true
|
||||
}
|
||||
|
||||
sum := BlobStorageSummary{mask: mask}
|
||||
require.Equal(t, c.expected, sum.HasDataColumnIndex(c.idx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllDataColumnAvailable(t *testing.T) {
|
||||
tooManyColumns := make(map[uint64]bool, fieldparams.NumberOfColumns+1)
|
||||
for i := uint64(0); i < fieldparams.NumberOfColumns+1; i++ {
|
||||
tooManyColumns[i] = true
|
||||
}
|
||||
|
||||
columns346 := map[uint64]bool{
|
||||
3: true,
|
||||
4: true,
|
||||
6: true,
|
||||
}
|
||||
|
||||
columns36 := map[uint64]bool{
|
||||
3: true,
|
||||
6: true,
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
storedIndices map[uint64]bool
|
||||
testedIndices map[uint64]bool
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "no tested indices",
|
||||
storedIndices: columns346,
|
||||
testedIndices: map[uint64]bool{},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "too many tested indices",
|
||||
storedIndices: columns346,
|
||||
testedIndices: tooManyColumns,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "not all tested indices are stored",
|
||||
storedIndices: columns36,
|
||||
testedIndices: columns346,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "all tested indices are stored",
|
||||
storedIndices: columns346,
|
||||
testedIndices: columns36,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
// Get the maximum index that is stored.
|
||||
maxIndex := uint64(0)
|
||||
for index := range c.storedIndices {
|
||||
if index > maxIndex {
|
||||
maxIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
mask := make(blobIndexMask, maxIndex+1)
|
||||
|
||||
for idx := range c.storedIndices {
|
||||
mask[idx] = true
|
||||
}
|
||||
|
||||
sum := BlobStorageSummary{mask: mask}
|
||||
require.Equal(t, c.expected, sum.AllDataColumnsAvailable(c.testedIndices))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
237
beacon-chain/db/filesystem/iteration.go
Normal file
@@ -0,0 +1,237 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
var errIdentFailure = errors.New("failed to determine blob metadata, ignoring all sub-paths.")
|
||||
|
||||
type identificationError struct {
|
||||
err error
|
||||
path string
|
||||
ident blobIdent
|
||||
}
|
||||
|
||||
func (ide *identificationError) Error() string {
|
||||
return fmt.Sprintf("%s path=%s, err=%s", errIdentFailure.Error(), ide.path, ide.err.Error())
|
||||
}
|
||||
|
||||
func (ide *identificationError) Unwrap() error {
|
||||
return ide.err
|
||||
}
|
||||
|
||||
func (*identificationError) Is(err error) bool {
|
||||
return err == errIdentFailure
|
||||
}
|
||||
|
||||
func (ide *identificationError) LogFields() logrus.Fields {
|
||||
fields := ide.ident.logFields()
|
||||
fields["path"] = ide.path
|
||||
return fields
|
||||
}
|
||||
|
||||
func newIdentificationError(path string, ident blobIdent, err error) *identificationError {
|
||||
return &identificationError{path: path, ident: ident, err: err}
|
||||
}
|
||||
|
||||
func listDir(fs afero.Fs, dir string) ([]string, error) {
|
||||
top, err := fs.Open(dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open directory descriptor")
|
||||
}
|
||||
defer func() {
|
||||
if err := top.Close(); err != nil {
|
||||
log.WithError(err).Errorf("Could not close file %s", dir)
|
||||
}
|
||||
}()
|
||||
// re the -1 param: "If n <= 0, Readdirnames returns all the names from the directory in a single slice"
|
||||
dirs, err := top.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read directory listing")
|
||||
}
|
||||
return dirs, nil
|
||||
}
|
||||
|
||||
// identPopulator is a function that sets values in the blobIdent for a given layer of the filesystem layout.
|
||||
type identPopulator func(blobIdent, string) (blobIdent, error)
|
||||
|
||||
// layoutLayer represents a layer of the nested directory scheme. Each layer is defined by a filter function that
|
||||
// ensures any entries at that layer of the scheme are named in a valid way, and a populateIdent function that
|
||||
// parses the directory name into a blobIdent object, used for iterating across the layout in a layout-independent way.
|
||||
type layoutLayer struct {
|
||||
populateIdent identPopulator
|
||||
filter func(string) bool
|
||||
}
|
||||
|
||||
// identIterator moves through the filesystem in order to yield blobIdents.
|
||||
// layoutLayers (in the 'layers' field) allows a filesystem layout to control how the
|
||||
// layout is traversed. A layoutLayer can filter out entries from the directory listing
|
||||
// via the filter function, and populate fields in the blobIdent via the populateIdent function.
|
||||
// The blobIdent is populated from an empty value at the root, accumulating values for its fields at each layer.
|
||||
// The fully populated blobIdent is returned when the iterator reaches the leaf layer.
|
||||
type identIterator struct {
|
||||
fs afero.Fs
|
||||
path string
|
||||
child *identIterator
|
||||
ident blobIdent
|
||||
// layoutLayers are the heart of how the layout defines the nesting of the components of the path.
|
||||
// Each layer of the layout represents a different layer of the directory layout hierarchy,
|
||||
// from the relative root at the zero index to the blob files at the end.
|
||||
layers []layoutLayer
|
||||
entries []string
|
||||
offset int
|
||||
eof bool
|
||||
}
|
||||
|
||||
// atEOF can be used to peek at the iterator to see if it's already finished. This is useful for the migration code to check
|
||||
// if there are any entries in the directory indicated by the migration.
|
||||
func (iter *identIterator) atEOF() bool {
|
||||
return iter.eof
|
||||
}
|
||||
|
||||
// next is the only method that a user of the identIterator needs to call.
|
||||
// identIterator will yield blobIdents in a breadth-first fashion,
|
||||
// returning an empty blobIdent and io.EOF once all branches have been traversed.
|
||||
func (iter *identIterator) next() (blobIdent, error) {
|
||||
if iter.eof {
|
||||
return blobIdent{}, io.EOF
|
||||
}
|
||||
if iter.child != nil {
|
||||
next, err := iter.child.next()
|
||||
if err == nil {
|
||||
return next, nil
|
||||
}
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return blobIdent{}, err
|
||||
}
|
||||
}
|
||||
return iter.advanceChild()
|
||||
}
|
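A minimal consumption sketch (hypothetical helper) mirroring how warmCache and migrateLayout in layout.go drive the iterator; the 0 argument matches those call sites.
func countBlobFiles(layout fsLayout) (int, error) {
	iter, err := layout.iterateIdents(0)
	if err != nil {
		return 0, err
	}
	n := 0
	for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
		if err != nil {
			// Production callers skip identification errors; this sketch simply stops.
			return n, err
		}
		_ = ident // ident.root, ident.epoch and ident.index identify one sidecar file
		n++
	}
	return n, nil
}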
||||
|
||||
// advanceChild is used to move to the next directory at each layer of the tree, either when
|
||||
// the nodes are first being initialized at a layer, or when a sub-branch has been exhausted.
|
||||
func (iter *identIterator) advanceChild() (blobIdent, error) {
|
||||
defer func() {
|
||||
iter.offset += 1
|
||||
}()
|
||||
for i := iter.offset; i < len(iter.entries); i++ {
|
||||
iter.offset = i
|
||||
nextPath := filepath.Join(iter.path, iter.entries[iter.offset])
|
||||
nextLayer := iter.layers[0]
|
||||
if !nextLayer.filter(nextPath) {
|
||||
continue
|
||||
}
|
||||
ident, err := nextLayer.populateIdent(iter.ident, nextPath)
|
||||
if err != nil {
|
||||
return ident, newIdentificationError(nextPath, ident, err)
|
||||
}
|
||||
// If we're at the leaf layer, we can return the updated ident.
|
||||
if len(iter.layers) == 1 {
|
||||
return ident, nil
|
||||
}
|
||||
|
||||
entries, err := listDir(iter.fs, nextPath)
|
||||
if err != nil {
|
||||
return blobIdent{}, err
|
||||
}
|
||||
if len(entries) == 0 {
|
||||
continue
|
||||
}
|
||||
iter.child = &identIterator{
|
||||
fs: iter.fs,
|
||||
path: nextPath,
|
||||
ident: ident,
|
||||
layers: iter.layers[1:],
|
||||
entries: entries,
|
||||
}
|
||||
return iter.child.next()
|
||||
}
|
||||
|
||||
return blobIdent{}, io.EOF
|
||||
}
|
||||
|
||||
func populateNoop(namer blobIdent, _ string) (blobIdent, error) {
|
||||
return namer, nil
|
||||
}
|
||||
|
||||
func populateRoot(namer blobIdent, dir string) (blobIdent, error) {
|
||||
root, err := rootFromPath(dir)
|
||||
if err != nil {
|
||||
return namer, err
|
||||
}
|
||||
namer.root = root
|
||||
return namer, nil
|
||||
}
|
||||
|
||||
func populateIndex(namer blobIdent, fname string) (blobIdent, error) {
|
||||
idx, err := idxFromPath(fname)
|
||||
if err != nil {
|
||||
return namer, err
|
||||
}
|
||||
namer.index = idx
|
||||
return namer, nil
|
||||
}
|
||||
|
||||
func rootFromPath(p string) ([32]byte, error) {
|
||||
subdir := filepath.Base(p)
|
||||
root, err := stringToRoot(subdir)
|
||||
if err != nil {
|
||||
return root, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", p)
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func idxFromPath(p string) (uint64, error) {
|
||||
p = filepath.Base(p)
|
||||
|
||||
if !isSszFile(p) {
|
||||
return 0, errors.Wrap(errNotBlobSSZ, "does not have .ssz extension")
|
||||
}
|
||||
parts := strings.Split(p, ".")
|
||||
if len(parts) != 2 {
|
||||
return 0, errors.Wrap(errNotBlobSSZ, "unexpected filename structure (want <index>.ssz)")
|
||||
}
|
||||
idx, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return idx, nil
|
||||
}
|
||||
|
||||
func filterNoop(_ string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func isRootDir(p string) bool {
|
||||
dir := filepath.Base(p)
|
||||
return len(dir) == rootStringLen && strings.HasPrefix(dir, "0x")
|
||||
}
|
||||
|
||||
func isSszFile(s string) bool {
|
||||
return filepath.Ext(s) == "."+sszExt
|
||||
}
|
||||
|
||||
func rootToString(root [32]byte) string {
|
||||
return fmt.Sprintf("%#x", root)
|
||||
}
|
||||
|
||||
func stringToRoot(str string) ([32]byte, error) {
|
||||
if len(str) != rootStringLen {
|
||||
return [32]byte{}, errors.Wrapf(errInvalidRootString, "incorrect len for input=%s", str)
|
||||
}
|
||||
slice, err := hexutil.Decode(str)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrapf(errInvalidRootString, "input=%s", str)
|
||||
}
|
||||
return bytesutil.ToBytes32(slice), nil
|
||||
}
|
||||
304
beacon-chain/db/filesystem/iteration_test.go
Normal file
@@ -0,0 +1,304 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func TestRootFromDir(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
dir string
|
||||
err error
|
||||
root [32]byte
|
||||
}{
|
||||
{
|
||||
name: "happy path",
|
||||
dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
|
||||
root: [32]byte{255, 255, 135, 94, 29, 152, 92, 92, 203, 33, 72, 148, 152, 63, 36, 40,
|
||||
237, 178, 113, 240, 248, 123, 104, 186, 112, 16, 228, 169, 157, 243, 181, 203},
|
||||
},
|
||||
{
|
||||
name: "too short",
|
||||
dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5c",
|
||||
err: errInvalidRootString,
|
||||
},
|
||||
{
|
||||
name: "too long",
|
||||
dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cbb",
|
||||
err: errInvalidRootString,
|
||||
},
|
||||
{
|
||||
name: "missing prefix",
|
||||
dir: "ffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
|
||||
err: errInvalidRootString,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
root, err := stringToRoot(c.dir)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.root, root)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlotFromFile(t *testing.T) {
|
||||
cases := []struct {
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{slot: 0},
|
||||
{slot: 2},
|
||||
{slot: 1123581321},
|
||||
{slot: math.MaxUint64},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageAndFs(t)
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
|
||||
sc := verification.FakeVerifyForTest(t, sidecars[0])
|
||||
require.NoError(t, bs.Save(sc))
|
||||
namer := identForSidecar(sc)
|
||||
sszPath := bs.layout.sszPath(namer)
|
||||
slot, err := slotFromFile(sszPath, fs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.slot, slot)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type dirFiles struct {
|
||||
name string
|
||||
isDir bool
|
||||
children []dirFiles
|
||||
}
|
||||
|
||||
func (df dirFiles) reify(t *testing.T, fs afero.Fs, base string) {
|
||||
fullPath := path.Join(base, df.name)
|
||||
if df.isDir {
|
||||
if df.name != "" {
|
||||
require.NoError(t, fs.Mkdir(fullPath, directoryPermissions()))
|
||||
}
|
||||
for _, c := range df.children {
|
||||
c.reify(t, fs, fullPath)
|
||||
}
|
||||
} else {
|
||||
fp, err := fs.Create(fullPath)
|
||||
require.NoError(t, err)
|
||||
_, err = fp.WriteString("derp")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (df dirFiles) childNames() []string {
|
||||
cn := make([]string, len(df.children))
|
||||
for i := range df.children {
|
||||
cn[i] = df.children[i].name
|
||||
}
|
||||
return cn
|
||||
}
|
||||
|
||||
func TestListDir(t *testing.T) {
|
||||
fs := afero.NewMemMapFs()
|
||||
rootStrs := []string{
|
||||
"0x0023dc5d063c7c1b37016bb54963c6ff4bfe5dfdf6dac29e7ceeb2b8fa81ed7a",
|
||||
"0xff30526cd634a5af3a09cc9bff67f33a621fc5b975750bb4432f74df077554b4",
|
||||
"0x23f5f795aaeb78c01fadaf3d06da2e99bd4b3622ae4dfea61b05b7d9adb119c2",
|
||||
}
|
||||
|
||||
// parent directory
|
||||
tree := dirFiles{isDir: true}
|
||||
// break out each subdir for easier assertions
|
||||
notABlob := dirFiles{name: "notABlob", isDir: true}
|
||||
childlessBlob := dirFiles{name: rootStrs[0], isDir: true}
|
||||
blobWithSsz := dirFiles{name: rootStrs[1], isDir: true,
|
||||
children: []dirFiles{{name: "1.ssz"}, {name: "2.ssz"}},
|
||||
}
|
||||
blobWithSszAndTmp := dirFiles{name: rootStrs[2], isDir: true,
|
||||
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
|
||||
tree.children = append(tree.children,
|
||||
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
|
||||
|
||||
topChildren := make([]string, len(tree.children))
|
||||
for i := range tree.children {
|
||||
topChildren[i] = tree.children[i].name
|
||||
}
|
||||
|
||||
var filter = func(entries []string, filt func(string) bool) []string {
|
||||
filtered := make([]string, 0, len(entries))
|
||||
for i := range entries {
|
||||
if filt(entries[i]) {
|
||||
filtered = append(filtered, entries[i])
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
tree.reify(t, fs, "")
|
||||
cases := []struct {
|
||||
name string
|
||||
dirPath string
|
||||
expected []string
|
||||
filter func(string) bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "non-existent",
|
||||
dirPath: "derp",
|
||||
expected: []string{},
|
||||
err: os.ErrNotExist,
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
dirPath: childlessBlob.name,
|
||||
expected: []string{},
|
||||
},
|
||||
{
|
||||
name: "top",
|
||||
dirPath: ".",
|
||||
expected: topChildren,
|
||||
},
|
||||
{
|
||||
name: "custom filter: only notABlob",
|
||||
dirPath: ".",
|
||||
expected: []string{notABlob.name},
|
||||
filter: func(s string) bool {
|
||||
return s == notABlob.name
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "root filter",
|
||||
dirPath: ".",
|
||||
expected: []string{childlessBlob.name, blobWithSsz.name, blobWithSszAndTmp.name},
|
||||
filter: isRootDir,
|
||||
},
|
||||
{
|
||||
name: "ssz filter",
|
||||
dirPath: blobWithSsz.name,
|
||||
expected: blobWithSsz.childNames(),
|
||||
filter: isSszFile,
|
||||
},
|
||||
{
|
||||
name: "ssz mixed filter",
|
||||
dirPath: blobWithSszAndTmp.name,
|
||||
expected: []string{"5.ssz"},
|
||||
filter: isSszFile,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
result, err := listDir(fs, c.dirPath)
|
||||
if c.filter != nil {
|
||||
result = filter(result, c.filter)
|
||||
}
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
require.Equal(t, 0, len(result))
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
sort.Strings(c.expected)
|
||||
sort.Strings(result)
|
||||
require.DeepEqual(t, c.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlotFromBlob(t *testing.T) {
|
||||
cases := []struct {
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{slot: 0},
|
||||
{slot: 2},
|
||||
{slot: 1123581321},
|
||||
{slot: math.MaxUint64},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
|
||||
sc := sidecars[0]
|
||||
enc, err := sc.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
slot, err := slotFromBlob(bytes.NewReader(enc))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.slot, slot)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIterationComplete(t *testing.T) {
|
||||
targets := []migrationTestTarget{
|
||||
{
|
||||
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
|
||||
path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
|
||||
slotOffset: 31,
|
||||
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
|
||||
slotOffset: 31,
|
||||
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
|
||||
slotOffset: 16,
|
||||
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
|
||||
slotOffset: 16,
|
||||
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
|
||||
slotOffset: 16,
|
||||
path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
|
||||
path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
|
||||
},
|
||||
}
|
||||
fs := afero.NewMemMapFs()
|
||||
cache := newBlobStorageCache()
|
||||
byEpoch, err := newLayout(LayoutNameByEpoch, fs, cache, nil)
|
||||
require.NoError(t, err)
|
||||
for _, tar := range targets {
|
||||
setupTestBlobFile(t, tar.ident, tar.slotOffset, fs, byEpoch)
|
||||
}
|
||||
iter, err := byEpoch.iterateIdents(0)
|
||||
require.NoError(t, err)
|
||||
nIdents := 0
|
||||
for ident, err := iter.next(); err != io.EOF; ident, err = iter.next() {
|
||||
require.NoError(t, err)
|
||||
nIdents++
|
||||
require.NoError(t, cache.ensure(ident))
|
||||
}
|
||||
require.Equal(t, len(targets), nIdents)
|
||||
for _, tar := range targets {
|
||||
entry, ok := cache.get(tar.ident.root)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, tar.ident.epoch, entry.epoch)
|
||||
require.Equal(t, true, entry.HasIndex(tar.ident.index))
|
||||
require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
|
||||
}
|
||||
}
|
||||
295
beacon-chain/db/filesystem/layout.go
Normal file
@@ -0,0 +1,295 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
const (
|
||||
// Full root in directory will be 66 chars, eg:
|
||||
// >>> len('0x0002fb4db510b8618b04dc82d023793739c26346a8b02eb73482e24b0fec0555') == 66
|
||||
rootStringLen = 66
|
||||
sszExt = "ssz"
|
||||
partExt = "part"
|
||||
periodicEpochBaseDir = "by-epoch"
|
||||
)
|
||||
|
||||
const (
|
||||
LayoutNameFlat = "flat"
|
||||
LayoutNameByEpoch = "by-epoch"
|
||||
)
|
||||
|
||||
var LayoutNames = []string{LayoutNameFlat, LayoutNameByEpoch}
|
||||
|
||||
var (
|
||||
errMigrationFailure = errors.New("unable to migrate blob directory between old and new layout")
|
||||
errCacheWarmFailed = errors.New("failed to warm blob filesystem cache")
|
||||
errPruneFailed = errors.New("failed to prune root")
|
||||
errInvalidRootString = errors.New("Could not parse hex string as a [32]byte")
|
||||
errInvalidDirectoryLayout = errors.New("Could not parse blob directory path")
|
||||
errInvalidLayoutName = errors.New("unknown layout name")
|
||||
errLayoutNotDetected = errors.New("given layout not observed in the blob filesystem tree")
|
||||
)
|
||||
|
||||
type blobIdent struct {
|
||||
root [32]byte
|
||||
epoch primitives.Epoch
|
||||
index uint64
|
||||
}
|
||||
|
||||
func newBlobIdent(root [32]byte, epoch primitives.Epoch, index uint64) blobIdent {
|
||||
return blobIdent{root: root, epoch: epoch, index: index}
|
||||
}
|
||||
|
||||
func identForSidecar(sc blocks.VerifiedROBlob) blobIdent {
|
||||
return newBlobIdent(sc.BlockRoot(), slots.ToEpoch(sc.Slot()), sc.Index)
|
||||
}
|
||||
|
||||
func identForDataColumnSidecar(sc blocks.VerifiedRODataColumn) blobIdent {
|
||||
return newBlobIdent(sc.BlockRoot(), slots.ToEpoch(sc.Slot()), sc.ColumnIndex)
|
||||
}
|
||||
|
||||
func (n blobIdent) sszFname() string {
|
||||
return fmt.Sprintf("%d.%s", n.index, sszExt)
|
||||
}
|
||||
|
||||
func (n blobIdent) partFname(entropy string) string {
|
||||
return fmt.Sprintf("%s-%d.%s", entropy, n.index, partExt)
|
||||
}
|
||||
|
||||
func (n blobIdent) logFields() logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", n.root),
|
||||
"epoch": n.epoch,
|
||||
"index": n.index,
|
||||
}
|
||||
}
|
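From the TestIterationComplete expectations in iteration_test.go, the by-epoch layout nests paths as by-epoch/<epoch/4096>/<epoch>/<root>/<index>.ssz. A hedged sketch of that mapping follows; the 4096-epoch grouping is inferred from the test data, not stated in this hunk, and the helper name is hypothetical.
// Hypothetical reconstruction of the by-epoch path, e.g. epoch 5330, index 1
// maps to "by-epoch/1/5330/0x.../1.ssz" because 5330/4096 == 1.
func byEpochPathSketch(ident blobIdent) string {
	group := uint64(ident.epoch) / 4096 // grouping period inferred from TestIterationComplete
	return filepath.Join(
		periodicEpochBaseDir,
		fmt.Sprintf("%d", group),
		fmt.Sprintf("%d", ident.epoch),
		rootToString(ident.root),
		ident.sszFname(),
	)
}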
||||
|
||||
type fsLayout interface {
|
||||
name() string
|
||||
dir(n blobIdent) string
|
||||
sszPath(n blobIdent) string
|
||||
partPath(n blobIdent, entropy string) string
|
||||
iterateIdents(before primitives.Epoch) (*identIterator, error)
|
||||
ident(root [32]byte, idx uint64) (blobIdent, error)
|
||||
dirIdent(root [32]byte) (blobIdent, error)
|
||||
summary(root [32]byte) BlobStorageSummary
|
||||
notify(ident blobIdent) error
|
||||
pruneBefore(before primitives.Epoch) (*pruneSummary, error)
|
||||
remove(ident blobIdent) (int, error)
|
||||
blockParentDirs(ident blobIdent) []string
|
||||
}
|
||||
|
||||
func newLayout(name string, fs afero.Fs, cache *blobStorageSummaryCache, pruner *blobPruner) (fsLayout, error) {
|
||||
switch name {
|
||||
case LayoutNameFlat:
|
||||
return newFlatLayout(fs, cache, pruner), nil
|
||||
case LayoutNameByEpoch:
|
||||
return newPeriodicEpochLayout(fs, cache, pruner), nil
|
||||
default:
|
||||
return nil, errors.Wrapf(errInvalidLayoutName, "name=%s", name)
|
||||
}
|
||||
}
|
||||
|
||||
func warmCache(l fsLayout, cache *blobStorageSummaryCache) error {
|
||||
iter, err := l.iterateIdents(0)
|
||||
if err != nil {
|
||||
return errors.Wrap(errCacheWarmFailed, err.Error())
|
||||
}
|
||||
for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
|
||||
if errors.Is(err, errIdentFailure) {
|
||||
idf := &identificationError{}
|
||||
if errors.As(err, &idf) {
|
||||
log.WithFields(idf.LogFields()).WithError(err).Error("Failed to cache blob data for path")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w: failed to populate blob data cache: %w", errCacheWarmFailed, err)
|
||||
}
|
||||
if err := cache.ensure(ident); err != nil {
|
||||
return fmt.Errorf("%w: failed to write cache entry for %s: %w", errCacheWarmFailed, l.sszPath(ident), err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateLayout(fs afero.Fs, from, to fsLayout, cache *blobStorageSummaryCache) error {
|
||||
start := time.Now()
|
||||
iter, err := from.iterateIdents(0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(errMigrationFailure, "failed to iterate legacy structure while migrating blobs, err=%s", err.Error())
|
||||
}
|
||||
if iter.atEOF() {
|
||||
return errLayoutNotDetected
|
||||
}
|
||||
log.WithField("fromLayout", from.name()).WithField("toLayout", to.name()).Info("Migrating blob filesystem layout. This one-time operation can take extra time (up to a few minutes for systems with extended blob storage and a cold disk cache).")
|
||||
lastMoved := ""
|
||||
parentDirs := make(map[string]bool) // this map should have < 65k keys by design
|
||||
moved := 0
|
||||
dc := newDirCleaner()
|
||||
for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
|
||||
if err != nil {
|
||||
if errors.Is(err, errIdentFailure) {
|
||||
idf := &identificationError{}
|
||||
if errors.As(err, &idf) {
|
||||
log.WithFields(idf.LogFields()).WithError(err).Error("Failed to migrate blob path")
|
||||
}
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(errMigrationFailure, "failed to iterate previous layout structure while migrating blobs, err=%s", err.Error())
|
||||
}
|
||||
src := from.dir(ident)
|
||||
target := to.dir(ident)
|
||||
if src != lastMoved {
|
||||
targetParent := filepath.Dir(target)
|
||||
if targetParent != "" && targetParent != "." && !parentDirs[targetParent] {
|
||||
if err := fs.MkdirAll(targetParent, directoryPermissions()); err != nil {
|
||||
return errors.Wrapf(errMigrationFailure, "failed to make enclosing path before moving %s to %s", src, target)
|
||||
}
|
||||
parentDirs[targetParent] = true
|
||||
}
|
||||
if err := fs.Rename(src, target); err != nil {
|
||||
return errors.Wrapf(errMigrationFailure, "could not rename %s to %s", src, target)
|
||||
}
|
||||
moved += 1
|
||||
lastMoved = src
|
||||
for _, dir := range from.blockParentDirs(ident) {
|
||||
dc.add(dir)
|
||||
}
|
||||
}
|
||||
if err := cache.ensure(ident); err != nil {
|
||||
return errors.Wrapf(errMigrationFailure, "could not cache path %s, err=%s", to.sszPath(ident), err.Error())
|
||||
}
|
||||
}
|
||||
dc.clean(fs)
|
||||
if moved > 0 {
|
||||
log.WithField("dirsMoved", moved).WithField("elapsed", time.Since(start)).
|
||||
Info("Blob filesystem migration complete.")
|
||||
}
|
||||
return nil
|
||||
}
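// A sketch of how a caller might drive the one-time migration between layouts; the flag/option plumbing
// that picks the direction is not part of this excerpt, so the hard-coded layout names are illustrative.
func exampleMigrate(fs afero.Fs, cache *blobStorageSummaryCache, pruner *blobPruner) error {
    from, err := newLayout(LayoutNameFlat, fs, cache, pruner)
    if err != nil {
        return err
    }
    to, err := newLayout(LayoutNameByEpoch, fs, cache, pruner)
    if err != nil {
        return err
    }
    // errLayoutNotDetected just means nothing is stored in the old layout, so it is not fatal here.
    if err := migrateLayout(fs, from, to, cache); err != nil && !errors.Is(err, errLayoutNotDetected) {
        return err
    }
    return nil
}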
|
||||
|
||||
type dirCleaner struct {
|
||||
maxDepth int
|
||||
layers map[int]map[string]struct{}
|
||||
}
|
||||
|
||||
func newDirCleaner() *dirCleaner {
|
||||
return &dirCleaner{layers: make(map[int]map[string]struct{})}
|
||||
}
|
||||
|
||||
func (d *dirCleaner) add(dir string) {
|
||||
nLayers := len(strings.Split(dir, string(filepath.Separator)))
|
||||
_, ok := d.layers[nLayers]
|
||||
if !ok {
|
||||
d.layers[nLayers] = make(map[string]struct{})
|
||||
}
|
||||
d.layers[nLayers][dir] = struct{}{}
|
||||
if nLayers > d.maxDepth {
|
||||
d.maxDepth = nLayers
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dirCleaner) clean(fs afero.Fs) {
|
||||
for i := d.maxDepth; i >= 0; i-- {
|
||||
d.cleanLayer(fs, i)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dirCleaner) cleanLayer(fs afero.Fs, layer int) {
|
||||
dirs, ok := d.layers[layer]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for dir := range dirs {
|
||||
// Use Remove rather than RemoveAll to make sure we're only removing empty directories
|
||||
if err := fs.Remove(dir); err != nil {
|
||||
log.WithField("dir", dir).WithError(err).Error("Failed to remove blob directory, please remove it manually if desired.")
|
||||
contents, err := listDir(fs, dir)
|
||||
if err != nil {
|
||||
log.WithField("dir", dir).WithError(err).Error("Could not list blob directory contents to find reason for removal failure.")
|
||||
continue
|
||||
}
|
||||
for _, c := range contents {
|
||||
log.WithField("file", c).WithField("dir", dir).Debug("Unexpected file blocking migrated blob directory cleanup.")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
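// A small sketch of the intended use: directories are registered as blobs are moved out of them and then
// removed deepest-first, so parents are already empty (and removable with plain Remove) by the time their
// layer is reached. The path segments below are made up; the real base directory name is defined elsewhere.
func exampleDirCleaner(fs afero.Fs) {
    dc := newDirCleaner()
    dc.add("by-epoch/5/20480") // depth 3, cleaned first
    dc.add("by-epoch/5")       // depth 2
    dc.add("by-epoch")         // depth 1, cleaned last
    dc.clean(fs)
}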
|
||||
|
||||
type pruneSummary struct {
|
||||
blobsPruned int
|
||||
failedRemovals []string
|
||||
}
|
||||
|
||||
func (s pruneSummary) LogFields() logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"blobsPruned": s.blobsPruned,
|
||||
"failedRemovals": len(s.failedRemovals),
|
||||
}
|
||||
}
|
||||
|
||||
func pruneBefore(before primitives.Epoch, l fsLayout) (map[primitives.Epoch]*pruneSummary, error) {
|
||||
sums := make(map[primitives.Epoch]*pruneSummary)
|
||||
iter, err := l.iterateIdents(before)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to iterate blob paths for pruning")
|
||||
}
|
||||
|
||||
// We will get an ident for each index, but want to prune all indexes for the given root together.
|
||||
var lastIdent blobIdent
|
||||
for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
|
||||
if err != nil {
|
||||
if errors.Is(err, errIdentFailure) {
|
||||
idf := &identificationError{}
|
||||
if errors.As(err, &idf) {
|
||||
log.WithFields(idf.LogFields()).WithError(err).Error("Failed to prune blob path due to identification errors")
|
||||
}
|
||||
continue
|
||||
}
|
||||
log.WithError(err).Error("encountered unhandled error during pruning")
|
||||
return nil, errors.Wrap(errPruneFailed, err.Error())
|
||||
}
|
||||
if ident.epoch >= before {
|
||||
continue
|
||||
}
|
||||
if lastIdent.root != ident.root {
|
||||
pruneOne(lastIdent, l, sums)
|
||||
lastIdent = ident
|
||||
}
|
||||
}
|
||||
// handle the final ident
|
||||
pruneOne(lastIdent, l, sums)
|
||||
|
||||
return sums, nil
|
||||
}
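// Worked trace of the grouping above (roots are hypothetical; idents for a root arrive contiguously):
//   (rootA, idx 0) -> lastIdent is the zero value, pruneOne skips it, lastIdent becomes the rootA ident
//   (rootA, idx 1) -> same root, nothing removed yet
//   (rootB, idx 0) -> root changed, pruneOne removes rootA's whole directory, lastIdent becomes rootB
//   io.EOF         -> the trailing pruneOne call removes rootB's directory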
|
||||
|
||||
func pruneOne(ident blobIdent, l fsLayout, sums map[primitives.Epoch]*pruneSummary) {
|
||||
// Skip the zero-value ident that is passed in before the first real ident has been observed (lastIdent starts as the zero value).
|
||||
if ident.root == params.BeaconConfig().ZeroHash {
|
||||
return
|
||||
}
|
||||
_, ok := sums[ident.epoch]
|
||||
if !ok {
|
||||
sums[ident.epoch] = &pruneSummary{}
|
||||
}
|
||||
s := sums[ident.epoch]
|
||||
removed, err := l.remove(ident)
|
||||
if err != nil {
|
||||
s.failedRemovals = append(s.failedRemovals, l.dir(ident))
|
||||
log.WithField("root", fmt.Sprintf("%#x", ident.root)).Error("Failed to delete blob directory for root")
|
||||
}
|
||||
s.blobsPruned += removed
|
||||
}
|
||||
beacon-chain/db/filesystem/layout_by_epoch.go (new file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
const epochsPerDirectory = 4096
|
||||
|
||||
type periodicEpochLayout struct {
|
||||
fs afero.Fs
|
||||
cache *blobStorageSummaryCache
|
||||
pruner *blobPruner
|
||||
}
|
||||
|
||||
var _ fsLayout = &periodicEpochLayout{}
|
||||
|
||||
func newPeriodicEpochLayout(fs afero.Fs, cache *blobStorageSummaryCache, pruner *blobPruner) fsLayout {
|
||||
l := &periodicEpochLayout{fs: fs, cache: cache, pruner: pruner}
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) name() string {
|
||||
return LayoutNameByEpoch
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) blockParentDirs(ident blobIdent) []string {
|
||||
return []string{
|
||||
periodicEpochBaseDir,
|
||||
l.periodDir(ident.epoch),
|
||||
l.epochDir(ident.epoch),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) notify(ident blobIdent) error {
|
||||
if err := l.cache.ensure(ident); err != nil {
|
||||
return err
|
||||
}
|
||||
l.pruner.notify(ident.epoch, l)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If before == 0, it won't be used as a filter and all idents will be returned.
|
||||
func (l *periodicEpochLayout) iterateIdents(before primitives.Epoch) (*identIterator, error) {
|
||||
_, err := l.fs.Stat(periodicEpochBaseDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &identIterator{eof: true}, nil // The directory is non-existent, which is fine; stop iteration.
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error reading path %s", periodicEpochBaseDir)
|
||||
}
|
||||
// iterate root, which should have directories named by "period"
|
||||
entries, err := listDir(l.fs, periodicEpochBaseDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list %s", periodicEpochBaseDir)
|
||||
}
|
||||
|
||||
return &identIterator{
|
||||
fs: l.fs,
|
||||
path: periodicEpochBaseDir,
|
||||
// Please see comments on the `layers` field in `identIterator` if the role of the layers is unclear.
|
||||
layers: []layoutLayer{
|
||||
{populateIdent: populateNoop, filter: isBeforePeriod(before)},
|
||||
{populateIdent: populateEpoch, filter: isBeforeEpoch(before)},
|
||||
{populateIdent: populateRoot, filter: isRootDir}, // extract root from path
|
||||
{populateIdent: populateIndex, filter: isSszFile}, // extract index from filename
|
||||
},
|
||||
entries: entries,
|
||||
}, nil
|
||||
}
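// The four layers above mirror the directory depth of this layout, <base>/<period>/<epoch>/<root>/<index>.ssz:
// each layer pairs a filter deciding which entries to descend into with a populate function that fills in
// the matching blobIdent field as the walk goes one level deeper.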
|
||||
|
||||
func (l *periodicEpochLayout) ident(root [32]byte, idx uint64) (blobIdent, error) {
|
||||
return l.cache.identForIdx(root, idx)
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) dirIdent(root [32]byte) (blobIdent, error) {
|
||||
return l.cache.identForRoot(root)
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) summary(root [32]byte) BlobStorageSummary {
|
||||
return l.cache.Summary(root)
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) dir(n blobIdent) string {
|
||||
return filepath.Join(l.epochDir(n.epoch), rootToString(n.root))
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) epochDir(epoch primitives.Epoch) string {
|
||||
return filepath.Join(l.periodDir(epoch), fmt.Sprintf("%d", epoch))
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) periodDir(epoch primitives.Epoch) string {
|
||||
return filepath.Join(periodicEpochBaseDir, fmt.Sprintf("%d", periodForEpoch(epoch)))
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) sszPath(n blobIdent) string {
|
||||
return filepath.Join(l.dir(n), n.sszFname())
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) partPath(n blobIdent, entropy string) string {
|
||||
return path.Join(l.dir(n), n.partFname(entropy))
|
||||
}
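// A minimal path sketch, assuming the mainnet period length of 4096 epochs; the literal value of
// periodicEpochBaseDir and the root below are placeholders for illustration.
func examplePeriodicEpochPath() string {
    l := &periodicEpochLayout{}
    var root [32]byte
    root[0] = 0x01
    // epoch 5330 / 4096 == period 1, so this returns "<periodicEpochBaseDir>/1/5330/0x0100...00/0.ssz".
    return l.sszPath(newBlobIdent(root, primitives.Epoch(5330), 0))
}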
|
||||
|
||||
func (l *periodicEpochLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error) {
|
||||
sums, err := pruneBefore(before, l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Roll up summaries and clean up per-epoch directories.
|
||||
rollup := &pruneSummary{}
|
||||
for epoch, sum := range sums {
|
||||
rollup.blobsPruned += sum.blobsPruned
|
||||
rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
|
||||
rmdir := l.epochDir(epoch)
|
||||
if len(sum.failedRemovals) == 0 {
|
||||
if err := l.fs.Remove(rmdir); err != nil {
|
||||
log.WithField("dir", rmdir).WithError(err).Error("Failed to remove epoch directory while pruning")
|
||||
}
|
||||
} else {
|
||||
log.WithField("dir", rmdir).WithField("numFailed", len(sum.failedRemovals)).WithError(err).Error("Unable to remove epoch directory due to pruning failures")
|
||||
}
|
||||
}
|
||||
|
||||
return rollup, nil
|
||||
}
|
||||
|
||||
func (l *periodicEpochLayout) remove(ident blobIdent) (int, error) {
|
||||
removed := l.cache.evict(ident.root)
|
||||
// Skip the syscall if there are no blobs to remove.
|
||||
if removed == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if err := l.fs.RemoveAll(l.dir(ident)); err != nil {
|
||||
return removed, err
|
||||
}
|
||||
return removed, nil
|
||||
}
|
||||
|
||||
func periodForEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
return epoch / params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
}
|
||||
|
||||
// Funcs below this line are iteration support methods that are specific to the epoch layout.
|
||||
|
||||
func isBeforePeriod(before primitives.Epoch) func(string) bool {
|
||||
if before == 0 {
|
||||
return filterNoop
|
||||
}
|
||||
beforePeriod := periodForEpoch(before)
|
||||
if before%params.BeaconConfig().MinEpochsForBlobsSidecarsRequest != 0 {
|
||||
// Add one so that the period containing 'before' is still visited; if 'before' is the first epoch of its
// period, only strictly earlier periods need to be checked.
|
||||
beforePeriod += 1
|
||||
}
|
||||
return func(p string) bool {
|
||||
period, err := periodFromPath(p)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return primitives.Epoch(period) < beforePeriod
|
||||
}
|
||||
}
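// Worked example of the rounding above, assuming a 4096-epoch period: before = 5000 maps to period 1, and
// because 5000 is not the first epoch of its period the iterator must still descend into period 1, so
// beforePeriod becomes 2 and period directories 0 and 1 pass the filter. With before = 4096, exactly on the
// boundary, beforePeriod stays 1 and only period directory 0 passes.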
|
||||
|
||||
func isBeforeEpoch(before primitives.Epoch) func(string) bool {
|
||||
if before == 0 {
|
||||
return filterNoop
|
||||
}
|
||||
return func(p string) bool {
|
||||
epoch, err := epochFromPath(p)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return epoch < before
|
||||
}
|
||||
}
|
||||
|
||||
func epochFromPath(p string) (primitives.Epoch, error) {
|
||||
subdir := filepath.Base(p)
|
||||
epoch, err := strconv.ParseUint(subdir, 10, 64)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(errInvalidDirectoryLayout,
|
||||
"failed to decode epoch as uint, err=%s, dir=%s", err.Error(), p)
|
||||
}
|
||||
return primitives.Epoch(epoch), nil
|
||||
}
|
||||
|
||||
func periodFromPath(p string) (uint64, error) {
|
||||
subdir := filepath.Base(p)
|
||||
period, err := strconv.ParseUint(subdir, 10, 64)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(errInvalidDirectoryLayout,
|
||||
"failed to decode period from path as uint, err=%s, dir=%s", err.Error(), p)
|
||||
}
|
||||
return period, nil
|
||||
}
|
||||
|
||||
func populateEpoch(namer blobIdent, dir string) (blobIdent, error) {
|
||||
epoch, err := epochFromPath(dir)
|
||||
if err != nil {
|
||||
return namer, err
|
||||
}
|
||||
namer.epoch = epoch
|
||||
return namer, nil
|
||||
}
|
||||
beacon-chain/db/filesystem/layout_flat.go (new file, 219 lines)
@@ -0,0 +1,219 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
type flatLayout struct {
|
||||
fs afero.Fs
|
||||
cache *blobStorageSummaryCache
|
||||
pruner *blobPruner
|
||||
}
|
||||
|
||||
var _ fsLayout = &flatLayout{}
|
||||
|
||||
func newFlatLayout(fs afero.Fs, cache *blobStorageSummaryCache, pruner *blobPruner) fsLayout {
|
||||
l := &flatLayout{fs: fs, cache: cache, pruner: pruner}
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *flatLayout) iterateIdents(before primitives.Epoch) (*identIterator, error) {
|
||||
if _, err := l.fs.Stat("."); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &identIterator{eof: true}, nil // The directory is non-existent, which is fine; stop iteration.
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error reading path %s", periodicEpochBaseDir)
|
||||
}
|
||||
entries, err := listDir(l.fs, ".")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not list root directory")
|
||||
}
|
||||
slotAndIndex := &flatSlotReader{fs: l.fs, cache: l.cache, before: before}
|
||||
return &identIterator{
|
||||
fs: l.fs,
|
||||
// Please see comments on the `layers` field in `identIterator` if the role of the layers is unclear.
|
||||
layers: []layoutLayer{
|
||||
{populateIdent: populateRoot, filter: isFlatCachedAndBefore(l.cache, before)},
|
||||
{populateIdent: slotAndIndex.populateEpoch, filter: slotAndIndex.isSSZAndBefore}},
|
||||
entries: entries,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (*flatLayout) name() string {
|
||||
return LayoutNameFlat
|
||||
}
|
||||
|
||||
func (l *flatLayout) blockParentDirs(ident blobIdent) []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (*flatLayout) dir(n blobIdent) string {
|
||||
return rootToString(n.root)
|
||||
}
|
||||
|
||||
func (l *flatLayout) sszPath(n blobIdent) string {
|
||||
return path.Join(l.dir(n), n.sszFname())
|
||||
}
|
||||
|
||||
func (l *flatLayout) partPath(n blobIdent, entropy string) string {
|
||||
return path.Join(l.dir(n), n.partFname(entropy))
|
||||
}
|
||||
|
||||
func (l *flatLayout) ident(root [32]byte, idx uint64) (blobIdent, error) {
|
||||
return l.cache.identForIdx(root, idx)
|
||||
}
|
||||
|
||||
func (l *flatLayout) dirIdent(root [32]byte) (blobIdent, error) {
|
||||
return l.cache.identForRoot(root)
|
||||
}
|
||||
|
||||
func (l *flatLayout) summary(root [32]byte) BlobStorageSummary {
|
||||
return l.cache.Summary(root)
|
||||
}
|
||||
|
||||
func (l *flatLayout) remove(ident blobIdent) (int, error) {
|
||||
removed := l.cache.evict(ident.root)
|
||||
if err := l.fs.RemoveAll(l.dir(ident)); err != nil {
|
||||
return removed, err
|
||||
}
|
||||
return removed, nil
|
||||
}
|
||||
|
||||
func (l *flatLayout) notify(ident blobIdent) error {
|
||||
if err := l.cache.ensure(ident); err != nil {
|
||||
return err
|
||||
}
|
||||
l.pruner.notify(ident.epoch, l)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *flatLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error) {
|
||||
sums, err := pruneBefore(before, l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Roll up summaries and clean up per-epoch directories.
|
||||
rollup := &pruneSummary{}
|
||||
for _, sum := range sums {
|
||||
rollup.blobsPruned += sum.blobsPruned
|
||||
rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
|
||||
}
|
||||
|
||||
return rollup, nil
|
||||
}
|
||||
|
||||
// Below this line are iteration support funcs and types that are specific to the flat layout.
|
||||
|
||||
// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
|
||||
func slotFromFile(name string, fs afero.Fs) (primitives.Slot, error) {
|
||||
f, err := fs.Open(name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log.WithError(err).Error("Could not close blob file")
|
||||
}
|
||||
}()
|
||||
return slotFromBlob(f)
|
||||
}
|
||||
|
||||
// slotFromBlob reads the slot directly from marshaled BlobSidecar data. Every field before the signed block
// header is fixed-size: index (8) + blob (131072) + kzg_commitment (48) + kzg_proof (48) = 131176 bytes,
// so the slot of the SignedBeaconBlockHeader always begins at that offset.
|
||||
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
|
||||
b := make([]byte, 8)
|
||||
_, err := at.ReadAt(b, 131176)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rawSlot := binary.LittleEndian.Uint64(b)
|
||||
return primitives.Slot(rawSlot), nil
|
||||
}
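// Sketch of how the 131176-byte offset used above is derived from the fixed-size fields that precede the
// signed block header in a BlobSidecar; the constant names here are for illustration only.
const (
    exampleIndexSize      = 8      // uint64 blob index
    exampleBlobSize       = 131072 // 4096 field elements * 32 bytes
    exampleCommitmentSize = 48     // KZG commitment
    exampleProofSize      = 48     // KZG proof
    exampleSlotOffset     = exampleIndexSize + exampleBlobSize + exampleCommitmentSize + exampleProofSize // = 131176
)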
|
||||
|
||||
type flatSlotReader struct {
|
||||
before primitives.Epoch
|
||||
fs afero.Fs
|
||||
cache *blobStorageSummaryCache
|
||||
}
|
||||
|
||||
func (l *flatSlotReader) populateEpoch(ident blobIdent, fname string) (blobIdent, error) {
|
||||
ident, err := populateIndex(ident, fname)
|
||||
if err != nil {
|
||||
return ident, err
|
||||
}
|
||||
sum, ok := l.cache.get(ident.root)
|
||||
if ok {
|
||||
ident.epoch = sum.epoch
|
||||
// Return early if the index is already known to the cache.
|
||||
if sum.HasIndex(ident.index) {
|
||||
return ident, nil
|
||||
}
|
||||
} else {
|
||||
// If the root is not in the cache, we need to read the slot from the file.
|
||||
slot, err := slotFromFile(fname, l.fs)
|
||||
if err != nil {
|
||||
return ident, err
|
||||
}
|
||||
ident.epoch = slots.ToEpoch(slot)
|
||||
}
|
||||
return ident, l.cache.ensure(ident)
|
||||
}
|
||||
|
||||
func (l *flatSlotReader) isSSZAndBefore(fname string) bool {
|
||||
if !isSszFile(fname) {
|
||||
return false
|
||||
}
|
||||
// When 'before' != 0 and isSSZAndBefore is used as a filter on the same layer as populateEpoch, the filter
// typically calls populateEpoch before the iteration code does. The cache still ends up populated either way:
// a malformed path (where the root can't be determined) is filtered out before populateEpoch would run, and
// for a valid path the cache is populated here before the epoch comparison below.
|
||||
if l.before == 0 {
|
||||
return true
|
||||
}
|
||||
ident, err := populateRoot(blobIdent{}, path.Dir(fname))
|
||||
// Filter out the path if we can't determine its root.
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
ident, err = l.populateEpoch(ident, fname)
|
||||
// Filter out the path if we can't determine its epoch or properly cache it.
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return ident.epoch < l.before
|
||||
}
|
||||
|
||||
// isFlatCachedAndBefore returns a filter callback function to exclude roots that are known to be after the given epoch
|
||||
// based on the cache. It's an opportunistic filter; if the cache is not populated, it will not attempt to populate it.
|
||||
// isSSZAndBefore, on the other hand, is a strict filter that only returns true if the file is an SSZ file and
|
||||
// the epoch can be determined.
|
||||
func isFlatCachedAndBefore(cache *blobStorageSummaryCache, before primitives.Epoch) func(string) bool {
|
||||
if before == 0 {
|
||||
return isRootDir
|
||||
}
|
||||
return func(p string) bool {
|
||||
if !isRootDir(p) {
|
||||
return false
|
||||
}
|
||||
root, err := rootFromPath(p)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
sum, ok := cache.get(root)
|
||||
// If we don't know the epoch by looking at the root, don't try to filter it.
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
return sum.epoch < before
|
||||
}
|
||||
}
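// In short: isFlatCachedAndBefore is the cheap, opportunistic check at the directory layer (roots unknown to
// the cache are allowed through), while isSSZAndBefore is the authoritative check at the file layer, reading
// the sidecar from disk when the cache cannot answer, so only blobs from epochs earlier than 'before' survive
// both filters and are yielded by the iterator.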
|
||||
beacon-chain/db/filesystem/layout_test.go (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
type mockLayout struct {
|
||||
pruneBeforeFunc func(primitives.Epoch) (*pruneSummary, error)
|
||||
}
|
||||
|
||||
var _ fsLayout = &mockLayout{}
|
||||
|
||||
func (m *mockLayout) name() string {
|
||||
return "mock"
|
||||
}
|
||||
|
||||
func (*mockLayout) dir(_ blobIdent) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (*mockLayout) blockParentDirs(id blobIdent) []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (*mockLayout) sszPath(_ blobIdent) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (*mockLayout) partPath(_ blobIdent, _ string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (*mockLayout) iterateIdents(_ primitives.Epoch) (*identIterator, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockLayout) ident(_ [32]byte, _ uint64) (blobIdent, error) {
|
||||
return blobIdent{}, nil
|
||||
}
|
||||
|
||||
func (*mockLayout) dirIdent(_ [32]byte) (blobIdent, error) {
|
||||
return blobIdent{}, nil
|
||||
}
|
||||
|
||||
func (*mockLayout) summary(_ [32]byte) BlobStorageSummary {
|
||||
return BlobStorageSummary{}
|
||||
}
|
||||
|
||||
func (*mockLayout) notify(blobIdent) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error) {
|
||||
return m.pruneBeforeFunc(before)
|
||||
}
|
||||
|
||||
func (*mockLayout) remove(ident blobIdent) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var _ fsLayout = &mockLayout{}
|
||||
|
||||
func TestCleaner(t *testing.T) {
|
||||
l := &periodicEpochLayout{}
|
||||
p := l.periodDir(11235813)
|
||||
e := l.epochDir(11235813)
|
||||
dc := newDirCleaner()
|
||||
dc.add(p)
|
||||
require.Equal(t, 2, dc.maxDepth)
|
||||
dc.add(e)
|
||||
require.Equal(t, 3, dc.maxDepth)
|
||||
}
|
||||
beacon-chain/db/filesystem/migration_test.go (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64) blobIdent {
|
||||
r, err := stringToRoot(rootStr)
|
||||
require.NoError(t, err)
|
||||
return blobIdent{root: r, epoch: epoch, index: index}
|
||||
}
|
||||
|
||||
func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
|
||||
slot, err := slots.EpochStart(ident.epoch)
|
||||
require.NoError(t, err)
|
||||
slot += offset
|
||||
_, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
|
||||
scb, err := sc[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
dir := l.dir(ident)
|
||||
require.NoError(t, fs.MkdirAll(dir, directoryPermissions()))
|
||||
p := l.sszPath(ident)
|
||||
require.NoError(t, afero.WriteFile(fs, p, scb, 0666))
|
||||
_, err = fs.Stat(p)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
type migrationTestTarget struct {
|
||||
ident blobIdent
|
||||
slotOffset primitives.Slot
|
||||
migrated bool
|
||||
path string
|
||||
}
|
||||
|
||||
func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, after fsLayout) {
|
||||
// Assert the pre-migration path is gone.
|
||||
_, err := fs.Stat(before.sszPath(ident))
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
dir := before.dir(ident)
|
||||
_, err = listDir(fs, dir)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
|
||||
// Assert the post-migration path is present.
|
||||
_, err = fs.Stat(after.sszPath(ident))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestMigrations(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
forwardLayout string
|
||||
backwardLayout string
|
||||
targets []migrationTestTarget
|
||||
}{
|
||||
{
|
||||
name: "all need migration",
|
||||
backwardLayout: LayoutNameFlat,
|
||||
forwardLayout: LayoutNameByEpoch,
|
||||
targets: []migrationTestTarget{
|
||||
{
|
||||
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
|
||||
slotOffset: 16,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mix old and new",
|
||||
backwardLayout: LayoutNameFlat,
|
||||
forwardLayout: LayoutNameByEpoch,
|
||||
targets: []migrationTestTarget{
|
||||
{
|
||||
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
|
||||
slotOffset: 16,
|
||||
migrated: true,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
|
||||
slotOffset: 16,
|
||||
migrated: true,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
|
||||
slotOffset: 16,
|
||||
migrated: true,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
|
||||
migrated: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
t.Run("forward", func(t *testing.T) {
|
||||
testMigration(t, c.forwardLayout, c.backwardLayout, c.targets)
|
||||
})
|
||||
// Run the same test in reverse to cover both directions while keeping the test table small.
t.Run("backward", func(t *testing.T) {
testMigration(t, c.backwardLayout, c.forwardLayout, c.targets)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testMigration(t *testing.T, forwardName, backwardName string, targets []migrationTestTarget) {
|
||||
fs := afero.NewMemMapFs()
|
||||
cache := newBlobStorageCache()
|
||||
forward, err := newLayout(forwardName, fs, cache, nil)
|
||||
require.NoError(t, err)
|
||||
backward, err := newLayout(backwardName, fs, cache, nil)
|
||||
require.NoError(t, err)
|
||||
for _, tar := range targets {
|
||||
if tar.migrated {
|
||||
setupTestBlobFile(t, tar.ident, tar.slotOffset, fs, forward)
|
||||
} else {
|
||||
setupTestBlobFile(t, tar.ident, tar.slotOffset, fs, backward)
|
||||
}
|
||||
}
|
||||
require.NoError(t, migrateLayout(fs, backward, forward, cache))
|
||||
for _, tar := range targets {
|
||||
// Make sure the file wound up in the right spot, according to the forward layout
|
||||
// and that the old file is gone, according to the backward layout.
|
||||
testAssertFsMigrated(t, fs, tar.ident, backward, forward)
|
||||
entry, ok := cache.get(tar.ident.root)
|
||||
// We only expect the cache to be populated here by files that needed to be moved.
|
||||
if !tar.migrated {
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, entry.HasIndex(tar.ident.index))
|
||||
require.Equal(t, tar.ident.epoch, entry.epoch)
|
||||
}
|
||||
}
|
||||
|
||||
// Run migration in reverse - testing "undo"
|
||||
cache = newBlobStorageCache()
|
||||
forward, err = newLayout(forwardName, fs, cache, nil)
|
||||
require.NoError(t, err)
|
||||
backward, err = newLayout(backwardName, fs, cache, nil)
|
||||
require.NoError(t, err)
|
||||
// forward and backward are flipped compared to the above
|
||||
require.NoError(t, migrateLayout(fs, forward, backward, cache))
|
||||
for _, tar := range targets {
|
||||
// just like the above, but forward and backward are flipped
|
||||
testAssertFsMigrated(t, fs, tar.ident, forward, backward)
|
||||
entry, ok := cache.get(tar.ident.root)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, entry.HasIndex(tar.ident.index))
|
||||
require.Equal(t, tar.ident.epoch, entry.epoch)
|
||||
}
|
||||
}
|
||||
@@ -4,30 +4,41 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// NewEphemeralBlobStorage should only be used for tests.
|
||||
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
|
||||
// improving test performance and simplifying cleanup.
|
||||
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
|
||||
fs := afero.NewMemMapFs()
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
|
||||
if err != nil {
|
||||
t.Fatal("test setup issue", err)
|
||||
}
|
||||
return &BlobStorage{fs: fs, pruner: pruner}
|
||||
func NewEphemeralBlobStorage(t testing.TB, opts ...BlobStorageOption) *BlobStorage {
|
||||
return NewWarmedEphemeralBlobStorageUsingFs(t, afero.NewMemMapFs(), opts...)
|
||||
}
|
||||
|
||||
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
|
||||
// NewEphemeralBlobStorageAndFs can be used by tests that want access to the virtual filesystem
|
||||
// in order to interact with it outside the parameters of the BlobStorage api.
|
||||
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
|
||||
func NewEphemeralBlobStorageAndFs(t testing.TB, opts ...BlobStorageOption) (afero.Fs, *BlobStorage) {
|
||||
fs := afero.NewMemMapFs()
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
|
||||
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, opts...)
|
||||
return fs, bs
|
||||
}
|
||||
|
||||
func NewEphemeralBlobStorageUsingFs(t testing.TB, fs afero.Fs, opts ...BlobStorageOption) *BlobStorage {
|
||||
opts = append(opts,
|
||||
WithBlobRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
|
||||
WithFs(fs))
|
||||
bs, err := NewBlobStorage(opts...)
|
||||
if err != nil {
|
||||
t.Fatal("test setup issue", err)
|
||||
t.Fatalf("error initializing test BlobStorage, err=%s", err.Error())
|
||||
}
|
||||
return fs, &BlobStorage{fs: fs, pruner: pruner}
|
||||
return bs
|
||||
}
|
||||
|
||||
func NewWarmedEphemeralBlobStorageUsingFs(t testing.TB, fs afero.Fs, opts ...BlobStorageOption) *BlobStorage {
|
||||
bs := NewEphemeralBlobStorageUsingFs(t, fs, opts...)
|
||||
bs.WarmCache()
|
||||
return bs
|
||||
}
|
||||
|
||||
type BlobMocker struct {
|
||||
@@ -37,17 +48,9 @@ type BlobMocker struct {
|
||||
|
||||
// CreateFakeIndices creates empty blob sidecar files at the expected path for the given
|
||||
// root and indices to influence the result of Indices().
|
||||
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices ...uint64) error {
|
||||
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, slot primitives.Slot, indices ...uint64) error {
|
||||
for i := range indices {
|
||||
n := blobNamer{root: root, index: indices[i]}
|
||||
if err := bm.fs.MkdirAll(n.dir(), directoryPermissions); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := bm.fs.Create(n.path())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
if err := bm.bs.layout.notify(newBlobIdent(root, slots.ToEpoch(slot), indices[i])); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -56,17 +59,16 @@ func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices ...uint64) error
|
||||
|
||||
// NewEphemeralBlobStorageWithMocker returns a *BlobMocker value in addition to the BlobStorage value.
|
||||
// BlobMocker encapsulates blob path construction to avoid leaking implementation details.
|
||||
func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage) {
|
||||
fs := afero.NewMemMapFs()
|
||||
bs := &BlobStorage{fs: fs}
|
||||
func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage) {
|
||||
fs, bs := NewEphemeralBlobStorageAndFs(t)
|
||||
return &BlobMocker{fs: fs, bs: bs}, bs
|
||||
}
|
||||
|
||||
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
|
||||
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int, epoch primitives.Epoch) BlobStorageSummarizer {
|
||||
c := newBlobStorageCache()
|
||||
for k, v := range set {
|
||||
for i := range v {
|
||||
if err := c.ensure(k, 0, uint64(v[i])); err != nil {
|
||||
if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,319 +1,67 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
const retentionBuffer primitives.Epoch = 2
|
||||
|
||||
var (
|
||||
errPruningFailures = errors.New("blobs could not be pruned for some roots")
|
||||
errNotBlobSSZ = errors.New("not a blob ssz file")
|
||||
)
|
||||
var errNotBlobSSZ = errors.New("not a blob ssz file")
|
||||
|
||||
// blobPruner keeps track of the tail end of the retention period, based only on the blobs it has seen via the notify method.
|
||||
// If the retention period advances in response to notify being called,
|
||||
// the pruner will invoke the pruneBefore method of the given layout in a new goroutine.
|
||||
// The details of pruning are left entirely to the layout, with the pruner's only responsibility being to
|
||||
// schedule just one pruning operation at a time, for each forward movement of the minimum retention epoch.
|
||||
type blobPruner struct {
|
||||
sync.Mutex
|
||||
prunedBefore atomic.Uint64
|
||||
windowSize primitives.Slot
|
||||
cache *blobStorageCache
|
||||
cacheReady chan struct{}
|
||||
warmed bool
|
||||
fs afero.Fs
|
||||
mu sync.Mutex
|
||||
prunedBefore atomic.Uint64
|
||||
retentionPeriod primitives.Epoch
|
||||
}
|
||||
|
||||
type prunerOpt func(*blobPruner) error
|
||||
|
||||
func withWarmedCache() prunerOpt {
|
||||
return func(p *blobPruner) error {
|
||||
return p.warmCache()
|
||||
}
|
||||
func newBlobPruner(retain primitives.Epoch) *blobPruner {
|
||||
p := &blobPruner{retentionPeriod: retain + retentionBuffer}
|
||||
return p
|
||||
}
|
||||
|
||||
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
|
||||
r, err := slots.EpochStart(retain + retentionBuffer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set retentionSlots")
|
||||
}
|
||||
cw := make(chan struct{})
|
||||
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
|
||||
for _, o := range opts {
|
||||
if err := o(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
|
||||
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
|
||||
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
|
||||
if err := p.cache.ensure(root, latest, idx); err != nil {
|
||||
return err
|
||||
}
|
||||
pruned := uint64(windowMin(latest, p.windowSize))
|
||||
if p.prunedBefore.Swap(pruned) == pruned {
|
||||
return nil
|
||||
// notify returns a channel that is closed when the pruning operation is complete.
|
||||
// This is useful for tests, but at runtime fsLayouts or BlobStorage should not wait for completion.
|
||||
func (p *blobPruner) notify(latest primitives.Epoch, layout fsLayout) chan struct{} {
|
||||
done := make(chan struct{})
|
||||
floor := periodFloor(latest, p.retentionPeriod)
|
||||
if primitives.Epoch(p.prunedBefore.Swap(uint64(floor))) >= floor {
|
||||
// Only trigger pruning if the atomic swap changed the previous value of prunedBefore.
|
||||
close(done)
|
||||
return done
|
||||
}
|
||||
go func() {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if err := p.prune(primitives.Slot(pruned)); err != nil {
|
||||
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
|
||||
p.mu.Lock()
|
||||
start := time.Now()
|
||||
defer p.mu.Unlock()
|
||||
sum, err := layout.pruneBefore(floor)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(sum.LogFields()).Warn("Encountered errors during blob pruning.")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"upToEpoch": floor,
|
||||
"duration": time.Since(start).String(),
|
||||
"filesRemoved": sum.blobsPruned,
|
||||
}).Debug("Pruned old blobs")
|
||||
blobsPrunedCounter.Add(float64(sum.blobsPruned))
|
||||
close(done)
|
||||
}()
|
||||
return nil
|
||||
return done
|
||||
}
|
||||
|
||||
func windowMin(latest, offset primitives.Slot) primitives.Slot {
|
||||
// Safely compute the first slot in the epoch for the latest slot
|
||||
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
|
||||
if latest < offset {
|
||||
func periodFloor(latest, period primitives.Epoch) primitives.Epoch {
|
||||
if latest < period {
|
||||
return 0
|
||||
}
|
||||
return latest - offset
|
||||
}
|
||||
|
||||
func (p *blobPruner) warmCache() error {
|
||||
p.Lock()
|
||||
defer func() {
|
||||
if !p.warmed {
|
||||
p.warmed = true
|
||||
close(p.cacheReady)
|
||||
}
|
||||
p.Unlock()
|
||||
}()
|
||||
if err := p.prune(0); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
|
||||
select {
|
||||
case <-p.cacheReady:
|
||||
return p.cache, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Prune prunes blobs in the base directory based on the retention epoch.
|
||||
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
|
||||
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
|
||||
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
|
||||
start := time.Now()
|
||||
totalPruned, totalErr := 0, 0
|
||||
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
|
||||
// We'll never see a prune request for slot 0, unless this is the initial call to warm up the cache.
|
||||
if pruneBefore == 0 {
|
||||
defer func() {
|
||||
log.WithField("duration", time.Since(start).String()).Debug("Warmed up pruner cache")
|
||||
}()
|
||||
} else {
|
||||
defer func() {
|
||||
log.WithFields(logrus.Fields{
|
||||
"upToEpoch": slots.ToEpoch(pruneBefore),
|
||||
"duration": time.Since(start).String(),
|
||||
"filesRemoved": totalPruned,
|
||||
}).Debug("Pruned old blobs")
|
||||
blobsPrunedCounter.Add(float64(totalPruned))
|
||||
}()
|
||||
}
|
||||
|
||||
entries, err := listDir(p.fs, ".")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to list root blobs directory")
|
||||
}
|
||||
dirs := filter(entries, filterRoot)
|
||||
for _, dir := range dirs {
|
||||
pruned, err := p.tryPruneDir(dir, pruneBefore)
|
||||
if err != nil {
|
||||
totalErr += 1
|
||||
log.WithError(err).WithField("directory", dir).Error("Unable to prune directory")
|
||||
}
|
||||
totalPruned += pruned
|
||||
}
|
||||
|
||||
if totalErr > 0 {
|
||||
return errors.Wrapf(errPruningFailures, "pruning failed for %d root directories", totalErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func shouldRetain(slot, pruneBefore primitives.Slot) bool {
|
||||
return slot >= pruneBefore
|
||||
}
|
||||
|
||||
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
|
||||
root, err := rootFromDir(dir)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
|
||||
}
|
||||
slot, slotCached := p.cache.slot(root)
|
||||
// Return early if the slot is cached and doesn't need pruning.
|
||||
if slotCached && shouldRetain(slot, pruneBefore) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// entries will include things that aren't ssz files, like dangling .part files. We need these to
|
||||
// completely clean up the directory.
|
||||
entries, err := listDir(p.fs, dir)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "failed to list blobs in directory %s", dir)
|
||||
}
|
||||
// scFiles filters the dir listing down to the ssz encoded BlobSidecar files. This allows us to peek
|
||||
// at the first one in the list to figure out the slot.
|
||||
scFiles := filter(entries, filterSsz)
|
||||
if len(scFiles) == 0 {
|
||||
log.WithField("dir", dir).Warn("Pruner ignoring directory with no blob files")
|
||||
return 0, nil
|
||||
}
|
||||
if !slotCached {
|
||||
slot, err = slotFromFile(path.Join(dir, scFiles[0]), p.fs)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "slot could not be read from blob file %s", scFiles[0])
|
||||
}
|
||||
for i := range scFiles {
|
||||
idx, err := idxFromPath(scFiles[i])
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
|
||||
}
|
||||
if err := p.cache.ensure(root, slot, idx); err != nil {
|
||||
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
|
||||
}
|
||||
}
|
||||
if shouldRetain(slot, pruneBefore) {
|
||||
return 0, nil
|
||||
}
|
||||
}
|
||||
|
||||
removed := 0
|
||||
for _, fname := range entries {
|
||||
fullName := path.Join(dir, fname)
|
||||
if err := p.fs.Remove(fullName); err != nil {
|
||||
return removed, errors.Wrapf(err, "unable to remove %s", fullName)
|
||||
}
|
||||
// Don't count other files that happen to be in the dir, like dangling .part files.
|
||||
if filterSsz(fname) {
|
||||
removed += 1
|
||||
}
|
||||
// Log a warning whenever we clean up a .part file
|
||||
if filterPart(fullName) {
|
||||
log.WithField("file", fullName).Warn("Deleting abandoned blob .part file")
|
||||
}
|
||||
}
|
||||
if err := p.fs.Remove(dir); err != nil {
|
||||
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
|
||||
}
|
||||
|
||||
p.cache.evict(root)
|
||||
return len(scFiles), nil
|
||||
}
|
||||
|
||||
func idxFromPath(fname string) (uint64, error) {
|
||||
fname = path.Base(fname)
|
||||
|
||||
if filepath.Ext(fname) != dotSszExt {
|
||||
return 0, errors.Wrap(errNotBlobSSZ, "does not have .ssz extension")
|
||||
}
|
||||
parts := strings.Split(fname, ".")
|
||||
if len(parts) != 2 {
|
||||
return 0, errors.Wrap(errNotBlobSSZ, "unexpected filename structure (want <index>.ssz)")
|
||||
}
|
||||
return strconv.ParseUint(parts[0], 10, 64)
|
||||
}
|
||||
|
||||
func rootFromDir(dir string) ([32]byte, error) {
|
||||
subdir := filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
|
||||
root, err := stringToRoot(subdir)
|
||||
if err != nil {
|
||||
return root, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
|
||||
func slotFromFile(file string, fs afero.Fs) (primitives.Slot, error) {
|
||||
f, err := fs.Open(file)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log.WithError(err).Errorf("Could not close blob file")
|
||||
}
|
||||
}()
|
||||
return slotFromBlob(f)
|
||||
}
|
||||
|
||||
// slotFromBlob reads the ssz data of a file at the specified offset (8 + 131072 + 48 + 48 = 131176 bytes),
|
||||
// which is calculated based on the size of the BlobSidecar struct and is based on the size of the fields
|
||||
// preceding the slot information within SignedBeaconBlockHeader.
|
||||
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
|
||||
b := make([]byte, 8)
|
||||
_, err := at.ReadAt(b, 131176)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rawSlot := binary.LittleEndian.Uint64(b)
|
||||
return primitives.Slot(rawSlot), nil
|
||||
}
|
||||
|
||||
func listDir(fs afero.Fs, dir string) ([]string, error) {
|
||||
top, err := fs.Open(dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open directory descriptor")
|
||||
}
|
||||
defer func() {
|
||||
if err := top.Close(); err != nil {
|
||||
log.WithError(err).Errorf("Could not close file %s", dir)
|
||||
}
|
||||
}()
|
||||
// re the -1 param: "If n <= 0, Readdirnames returns all the names from the directory in a single slice"
|
||||
dirs, err := top.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read directory listing")
|
||||
}
|
||||
return dirs, nil
|
||||
}
|
||||
|
||||
func filter(entries []string, filt func(string) bool) []string {
|
||||
filtered := make([]string, 0, len(entries))
|
||||
for i := range entries {
|
||||
if filt(entries[i]) {
|
||||
filtered = append(filtered, entries[i])
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
func filterRoot(s string) bool {
|
||||
return strings.HasPrefix(s, "0x")
|
||||
}
|
||||
|
||||
var dotSszExt = "." + sszExt
|
||||
var dotPartExt = "." + partExt
|
||||
|
||||
func filterSsz(s string) bool {
|
||||
return filepath.Ext(s) == dotSszExt
|
||||
}
|
||||
|
||||
func filterPart(s string) bool {
|
||||
return filepath.Ext(s) == dotPartExt
|
||||
return latest - period
|
||||
}
|
||||
|
||||
@@ -1,394 +1,197 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"encoding/binary"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func TestTryPruneDir_CachedNotExpired(t *testing.T) {
|
||||
fs := afero.NewMemMapFs()
|
||||
pr, err := newBlobPruner(fs, 0)
|
||||
require.NoError(t, err)
|
||||
slot := pr.windowSize
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, params.BeaconConfig().MaxBlobsPerBlock(slot))
|
||||
sc, err := verification.BlobSidecarNoop(sidecars[0])
|
||||
require.NoError(t, err)
|
||||
rootStr := rootString(sc.BlockRoot())
|
||||
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
|
||||
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
|
||||
require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
|
||||
pruned, err := pr.tryPruneDir(rootStr, pr.windowSize)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, pruned)
|
||||
type prunerScenario struct {
|
||||
name string
|
||||
prunedBefore primitives.Epoch
|
||||
retentionPeriod primitives.Epoch
|
||||
latest primitives.Epoch
|
||||
expected pruneExpectation
|
||||
}
|
||||
|
||||
func TestCacheWarmFail(t *testing.T) {
|
||||
fs := afero.NewMemMapFs()
|
||||
n := blobNamer{root: bytesutil.ToBytes32([]byte("derp")), index: 0}
|
||||
bp := n.path()
|
||||
mkdir := path.Dir(bp)
|
||||
require.NoError(t, fs.MkdirAll(mkdir, directoryPermissions))
|
||||
|
||||
// Create an empty blob index in the fs by touching the file at a seemingly valid path.
|
||||
fi, err := fs.Create(bp)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fi.Close())
|
||||
|
||||
// Cache warm should fail due to the unexpected EOF.
|
||||
pr, err := newBlobPruner(fs, 0)
|
||||
require.NoError(t, err)
|
||||
require.ErrorIs(t, pr.warmCache(), errPruningFailures)
|
||||
|
||||
// The cache warm has finished, so calling waitForCache with a super short deadline
|
||||
// should not block or hit the context deadline.
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(1*time.Millisecond))
|
||||
defer cancel()
|
||||
c, err := pr.waitForCache(ctx)
|
||||
// We will get an error and a nil value for the cache if we hit the deadline.
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, c)
|
||||
type pruneExpectation struct {
|
||||
called bool
|
||||
arg primitives.Epoch
|
||||
summary *pruneSummary
|
||||
err error
|
||||
}
|
||||
|
||||
func TestTryPruneDir_CachedExpired(t *testing.T) {
|
||||
t.Run("empty directory", func(t *testing.T) {
|
||||
fs := afero.NewMemMapFs()
|
||||
pr, err := newBlobPruner(fs, 0)
|
||||
require.NoError(t, err)
|
||||
var slot primitives.Slot = 0
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
|
||||
sc, err := verification.BlobSidecarNoop(sidecars[0])
|
||||
require.NoError(t, err)
|
||||
rootStr := rootString(sc.BlockRoot())
|
||||
require.NoError(t, fs.Mkdir(rootStr, directoryPermissions)) // make empty directory
|
||||
require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
|
||||
pruned, err := pr.tryPruneDir(rootStr, slot+1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, pruned)
|
||||
})
|
||||
t.Run("blobs to delete", func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
var slot primitives.Slot = 0
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
|
||||
scs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, bs.Save(scs[0]))
|
||||
require.NoError(t, bs.Save(scs[1]))
|
||||
|
||||
// check that the root->slot is cached
|
||||
root := scs[0].BlockRoot()
|
||||
rootStr := rootString(root)
|
||||
cs, cok := bs.pruner.cache.slot(scs[0].BlockRoot())
|
||||
require.Equal(t, true, cok)
|
||||
require.Equal(t, slot, cs)
|
||||
|
||||
// ensure that we see the saved files in the filesystem
|
||||
files, err := listDir(fs, rootStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(files))
|
||||
|
||||
pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, pruned)
|
||||
files, err = listDir(fs, rootStr)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
require.Equal(t, 0, len(files))
|
||||
})
|
||||
func (e *pruneExpectation) record(before primitives.Epoch) (*pruneSummary, error) {
|
||||
e.called = true
|
||||
e.arg = before
|
||||
if e.summary == nil {
|
||||
e.summary = &pruneSummary{}
|
||||
}
|
||||
return e.summary, e.err
|
||||
}
|
||||
|
||||
func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
t.Run("expired blobs deleted", func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
var slot primitives.Slot = 0
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
|
||||
scs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, bs.Save(scs[0]))
|
||||
require.NoError(t, bs.Save(scs[1]))
|
||||
|
||||
// check that the root->slot is cached
|
||||
root := scs[0].BlockRoot()
|
||||
rootStr := rootString(root)
|
||||
cs, ok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, slot, cs)
|
||||
// evict it from the cache so that we trigger the file read path
|
||||
bs.pruner.cache.evict(root)
|
||||
_, ok = bs.pruner.cache.slot(root)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// ensure that we see the saved files in the filesystem
|
||||
files, err := listDir(fs, rootStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(files))
|
||||
|
||||
pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, pruned)
|
||||
files, err = listDir(fs, rootStr)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
require.Equal(t, 0, len(files))
|
||||
})
|
||||
t.Run("not expired, intact", func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
// Set slot equal to the window size, so it should be retained.
|
||||
slot := bs.pruner.windowSize
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
|
||||
scs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, bs.Save(scs[0]))
|
||||
require.NoError(t, bs.Save(scs[1]))
|
||||
|
||||
// Evict slot mapping from the cache so that we trigger the file read path.
|
||||
root := scs[0].BlockRoot()
|
||||
rootStr := rootString(root)
|
||||
bs.pruner.cache.evict(root)
|
||||
_, ok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// Ensure that we see the saved files in the filesystem.
|
||||
files, err := listDir(fs, rootStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(files))
|
||||
|
||||
// This should use the slotFromFile code (simulating restart).
|
||||
// Setting pruneBefore == slot, so that the slot will be outside the window (at the boundary).
|
||||
pruned, err := bs.pruner.tryPruneDir(rootStr, slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, pruned)
|
||||
|
||||
// Ensure files are still present.
|
||||
files, err = listDir(fs, rootStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(files))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSlotFromBlob(t *testing.T) {
	cases := []struct {
		slot primitives.Slot
	}{
		{slot: 0},
		{slot: 2},
		{slot: 1123581321},
		{slot: math.MaxUint64},
	}
	for _, c := range cases {
		t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
			_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
			sc := sidecars[0]
			enc, err := sc.MarshalSSZ()
			require.NoError(t, err)
			slot, err := slotFromBlob(bytes.NewReader(enc))
			require.NoError(t, err)
			require.Equal(t, c.slot, slot)
		})
	}
}

func TestPrunerNotify(t *testing.T) {
	defaultRetention := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
	cases := []prunerScenario{
		{
			name:            "last epoch of period",
			retentionPeriod: defaultRetention,
			prunedBefore:    11235,
			latest:          defaultRetention + 11235,
			expected:        pruneExpectation{called: false},
		},
		{
			name:            "within period",
			retentionPeriod: defaultRetention,
			prunedBefore:    11235,
			latest:          11235 + defaultRetention - 1,
			expected:        pruneExpectation{called: false},
		},
		{
			name:            "triggers",
			retentionPeriod: defaultRetention,
			prunedBefore:    11235,
			latest:          11235 + 1 + defaultRetention,
			expected:        pruneExpectation{called: true, arg: 11235 + 1},
		},
		{
			name:            "from zero - before first period",
			retentionPeriod: defaultRetention,
			prunedBefore:    0,
			latest:          defaultRetention - 1,
			expected:        pruneExpectation{called: false},
		},
		{
			name:            "from zero - at boundary",
			retentionPeriod: defaultRetention,
			prunedBefore:    0,
			latest:          defaultRetention,
			expected:        pruneExpectation{called: false},
		},
		{
			name:            "from zero - triggers",
			retentionPeriod: defaultRetention,
			prunedBefore:    0,
			latest:          defaultRetention + 1,
			expected:        pruneExpectation{called: true, arg: 1},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			actual := &pruneExpectation{}
			l := &mockLayout{pruneBeforeFunc: actual.record}
			pruner := &blobPruner{retentionPeriod: c.retentionPeriod}
			pruner.prunedBefore.Store(uint64(c.prunedBefore))
			done := pruner.notify(c.latest, l)
			<-done
			require.Equal(t, c.expected.called, actual.called)
			require.Equal(t, c.expected.arg, actual.arg)
		})
	}
}
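
Read together, the scenarios above pin down the trigger arithmetic: the pruner only fires once the latest epoch is more than a full retention period past the last pruned-before mark, and the new prune boundary is latest minus the retention period. The sketch below is a minimal illustration of that rule under those assumptions; shouldPrune and the retention constant are hypothetical names for this example, not part of the package.

// Minimal sketch of the trigger rule implied by the table above. Illustrative only.
package main

import "fmt"

// shouldPrune reports whether a prune should run and, if so, which epoch
// boundary to prune before (everything strictly before the returned epoch).
func shouldPrune(prunedBefore, latest, retention uint64) (bool, uint64) {
	if latest < retention {
		return false, 0 // not enough history accumulated yet
	}
	target := latest - retention
	if target <= prunedBefore {
		return false, 0 // still within the already-pruned window
	}
	return true, target
}

func main() {
	const retention = 4096 // assumed retention period for the example
	// Mirrors the "triggers" case: prunedBefore=11235, latest=11235+1+retention.
	ok, before := shouldPrune(11235, 11235+1+retention, retention)
	fmt.Println(ok, before) // true 11236
	// Mirrors "from zero - at boundary": latest == retention does not trigger.
	ok, before = shouldPrune(0, retention, retention)
	fmt.Println(ok, before) // false 0
}
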
func TestSlotFromFile(t *testing.T) {
	cases := []struct {
		slot primitives.Slot
	}{
		{slot: 0},
		{slot: 2},
		{slot: 1123581321},
		{slot: math.MaxUint64},
	}
	for _, c := range cases {
		t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
			fs, bs := NewEphemeralBlobStorageWithFs(t)
			_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
			sc, err := verification.BlobSidecarNoop(sidecars[0])
			require.NoError(t, err)
			require.NoError(t, bs.Save(sc))
			fname := namerForSidecar(sc)
			sszPath := fname.path()
			slot, err := slotFromFile(sszPath, fs)
			require.NoError(t, err)
			require.Equal(t, c.slot, slot)
		})
	}
}
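
Both TestSlotFromBlob and TestSlotFromFile rely on the slot being recoverable from a serialized sidecar without decoding the whole object, because it sits at a fixed offset in the SSZ encoding. The sketch below shows the general technique only; the offset constant is an assumption for illustration and is not the actual BlobSidecar layout used by the package.

// Illustrative only: read a little-endian uint64 slot field at an assumed
// fixed SSZ offset instead of deserializing the full object.
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"strings"
)

const assumedSlotOffset = 8 // hypothetical offset of the slot field

func slotFromReader(r io.ReaderAt) (uint64, error) {
	buf := make([]byte, 8)
	if _, err := r.ReadAt(buf, assumedSlotOffset); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint64(buf), nil
}

func main() {
	// 8 bytes of padding followed by slot=1123581321 encoded little-endian.
	enc := make([]byte, 16)
	binary.LittleEndian.PutUint64(enc[8:], 1123581321)
	slot, err := slotFromReader(strings.NewReader(string(enc)))
	fmt.Println(slot, err) // 1123581321 <nil>
}
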
type dirFiles struct {
	name     string
	isDir    bool
	children []dirFiles
}

func (df dirFiles) reify(t *testing.T, fs afero.Fs, base string) {
	fullPath := path.Join(base, df.name)
	if df.isDir {
		if df.name != "" {
			require.NoError(t, fs.Mkdir(fullPath, directoryPermissions))
		}
		for _, c := range df.children {
			c.reify(t, fs, fullPath)
		}
	} else {
		fp, err := fs.Create(fullPath)
		require.NoError(t, err)
		_, err = fp.WriteString("derp")
		require.NoError(t, err)
	}
}

func testSetupBlobIdentPaths(t *testing.T, fs afero.Fs, bs *BlobStorage, idents []testIdent) []blobIdent {
	created := make([]blobIdent, len(idents))
	for i, id := range idents {
		slot, err := slots.EpochStart(id.epoch)
		require.NoError(t, err)
		slot += id.offset
		_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
		sc := verification.FakeVerifyForTest(t, scs[0])
		require.NoError(t, bs.Save(sc))
		ident := identForSidecar(sc)
		_, err = fs.Stat(bs.layout.sszPath(ident))
		require.NoError(t, err)
		created[i] = ident
	}
	return created
}

func testAssertBlobsPruned(t *testing.T, fs afero.Fs, bs *BlobStorage, pruned, remain []blobIdent) {
	for _, id := range pruned {
		_, err := fs.Stat(bs.layout.sszPath(id))
		require.NotNil(t, err)
		require.Equal(t, true, os.IsNotExist(err))
	}
	for _, id := range remain {
		_, err := fs.Stat(bs.layout.sszPath(id))
		require.NoError(t, err)
	}
}

func (df dirFiles) childNames() []string {
	cn := make([]string, len(df.children))
	for i := range df.children {
		cn[i] = df.children[i].name
	}
	return cn
}

type testIdent struct {
	blobIdent
	offset primitives.Slot
}

func TestListDir(t *testing.T) {
	fs := afero.NewMemMapFs()

	// parent directory
	fsLayout := dirFiles{isDir: true}
	// break out each subdir for easier assertions
	notABlob := dirFiles{name: "notABlob", isDir: true}
	childlessBlob := dirFiles{name: "0x0987654321", isDir: true}
	blobWithSsz := dirFiles{name: "0x1123581321", isDir: true,
		children: []dirFiles{{name: "1.ssz"}, {name: "2.ssz"}},
	}
	blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
		children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
	fsLayout.children = append(fsLayout.children,
		notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)

	topChildren := make([]string, len(fsLayout.children))
	for i := range fsLayout.children {
		topChildren[i] = fsLayout.children[i].name
	}

	fsLayout.reify(t, fs, "")
	cases := []struct {
		name     string
		dirPath  string
		expected []string
		filter   func(string) bool
		err      error
	}{
		{
			name:     "non-existent",
			dirPath:  "derp",
			expected: []string{},
			err:      os.ErrNotExist,
		},
		{
			name:     "empty",
			dirPath:  childlessBlob.name,
			expected: []string{},
		},
		{
			name:     "top",
			dirPath:  ".",
			expected: topChildren,
		},
		{
			name:     "custom filter: only notABlob",
			dirPath:  ".",
			expected: []string{notABlob.name},
			filter: func(s string) bool {
				return s == notABlob.name
			},
		},
		{
			name:     "root filter",
			dirPath:  ".",
			expected: []string{childlessBlob.name, blobWithSsz.name, blobWithSszAndTmp.name},
			filter:   filterRoot,
		},
		{
			name:     "ssz filter",
			dirPath:  blobWithSsz.name,
			expected: blobWithSsz.childNames(),
			filter:   filterSsz,
		},
		{
			name:     "ssz mixed filter",
			dirPath:  blobWithSszAndTmp.name,
			expected: []string{"5.ssz"},
			filter:   filterSsz,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			result, err := listDir(fs, c.dirPath)
			if c.filter != nil {
				result = filter(result, c.filter)
			}
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				require.Equal(t, 0, len(result))
			} else {
				require.NoError(t, err)
				sort.Strings(c.expected)
				sort.Strings(result)
				require.DeepEqual(t, c.expected, result)
			}
		})
	}
}

func testRoots(n int) [][32]byte {
	roots := make([][32]byte, n)
	for i := range roots {
		binary.LittleEndian.PutUint32(roots[i][:], uint32(1+i))
	}
	return roots
}

func TestLayoutPruneBefore(t *testing.T) {
	roots := testRoots(10)
	cases := []struct {
		name        string
		pruned      []testIdent
		remain      []testIdent
		pruneBefore primitives.Epoch
		err         error
		sum         pruneSummary
	}{
		{
			name:        "none pruned",
			pruneBefore: 1,
			pruned:      []testIdent{},
			remain: []testIdent{
				{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
				{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
			},
		},
		{
			name:        "expected pruned before epoch",
			pruneBefore: 3,
			pruned: []testIdent{
				{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
				{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
				{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
				{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
			},
			remain: []testIdent{
				{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}},  // boundary
				{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
				{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
				{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
			},
			sum: pruneSummary{blobsPruned: 4},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameByEpoch))
			pruned := testSetupBlobIdentPaths(t, fs, bs, c.pruned)
			remain := testSetupBlobIdentPaths(t, fs, bs, c.remain)
			sum, err := bs.layout.pruneBefore(c.pruneBefore)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
			}
			require.NoError(t, err)
			testAssertBlobsPruned(t, fs, bs, pruned, remain)
			require.Equal(t, c.sum.blobsPruned, sum.blobsPruned)
			require.Equal(t, len(c.pruned), sum.blobsPruned)
			require.Equal(t, len(c.sum.failedRemovals), len(sum.failedRemovals))
		})
	}
}

func TestRootFromDir(t *testing.T) {
	cases := []struct {
		name string
		dir  string
		err  error
		root [32]byte
	}{
		{
			name: "happy path",
			dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
			root: [32]byte{255, 255, 135, 94, 29, 152, 92, 92, 203, 33, 72, 148, 152, 63, 36, 40,
				237, 178, 113, 240, 248, 123, 104, 186, 112, 16, 228, 169, 157, 243, 181, 203},
		},
		{
			name: "too short",
			dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5c",
			err:  errInvalidRootString,
		},
		{
			name: "too long",
			dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cbb",
			err:  errInvalidRootString,
		},
		{
			name: "missing prefix",
			dir:  "ffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
			err:  errInvalidRootString,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			root, err := stringToRoot(c.dir)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, c.root, root)
		})
	}
}
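
TestRootFromDir above pins down the directory-name contract: a valid name is "0x" followed by exactly 64 hex characters, i.e. a 32-byte root. A minimal sketch of that validation, making no assumptions about how the package's stringToRoot is actually written:

// Sketch of parsing a "0x"-prefixed 32-byte root from a directory name.
// Mirrors the cases above; not the package's implementation.
package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

var errBadRootString = errors.New("invalid root string")

func parseRootDir(s string) ([32]byte, error) {
	var root [32]byte
	if !strings.HasPrefix(s, "0x") || len(s) != 2+64 {
		return root, errBadRootString // wrong prefix or wrong length
	}
	b, err := hex.DecodeString(s[2:])
	if err != nil {
		return root, errBadRootString
	}
	copy(root[:], b)
	return root, nil
}

func main() {
	r, err := parseRootDir("0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb")
	fmt.Printf("%x %v\n", r[:2], err) // ffff <nil>
}
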
@@ -3,15 +3,13 @@ package kv
import (
	"context"
	"fmt"
	bolt "go.etcd.io/bbolt"
	"testing"
	"time"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"

	"github.com/ethereum/go-ethereum/common"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -22,6 +20,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	bolt "go.etcd.io/bbolt"
	"google.golang.org/protobuf/proto"
)

@@ -215,7 +215,7 @@ func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod u
	if err != nil {
		return nil, err
	}
	return updates, err
	return updates, nil
}

func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {

@@ -518,9 +518,9 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [

	switch {
	case hasFuluKey(enc):
		protoState := &ethpb.BeaconStateFulu{}
		protoState := &ethpb.BeaconStateElectra{}
		if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
			return nil, errors.Wrap(err, "failed to unmarshal encoding for Fulu")
		}
		ok, err := s.isStateValidatorMigrationOver()
		if err != nil {
@@ -690,7 +690,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
		}
		return snappy.Encode(nil, append(ElectraKey, rawObj...)), nil
	case version.Fulu:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateFulu)
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
		if !ok {
			return nil, errors.New("non valid inner state")
		}

@@ -103,6 +103,7 @@ go_test(
	"//beacon-chain/core/helpers:go_default_library",
	"//beacon-chain/core/signing:go_default_library",
	"//beacon-chain/db:go_default_library",
	"//beacon-chain/db/filesystem:go_default_library",
	"//beacon-chain/db/testing:go_default_library",
	"//beacon-chain/execution/testing:go_default_library",
	"//beacon-chain/execution/types:go_default_library",

@@ -36,17 +36,19 @@ var (
		NewPayloadMethod,
		NewPayloadMethodV2,
		NewPayloadMethodV3,
		NewPayloadMethodV4,
		ForkchoiceUpdatedMethod,
		ForkchoiceUpdatedMethodV2,
		ForkchoiceUpdatedMethodV3,
		GetPayloadMethod,
		GetPayloadMethodV2,
		GetPayloadMethodV3,
		GetPayloadMethodV4,
		GetPayloadBodiesByHashV1,
		GetPayloadBodiesByRangeV1,
	}
	electraEngineEndpoints = []string{
		NewPayloadMethodV4,
		GetPayloadMethodV4,
	}
)

const (

@@ -105,7 +107,7 @@ type Reconstructor interface {
	ReconstructFullBellatrixBlockBatch(
		ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
	) ([]interfaces.SignedBeaconBlock, error)
	ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, indices []bool) ([]blocks.VerifiedROBlob, error)
	ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
}

// EngineCaller defines a client that can interact with an Ethereum

@@ -296,6 +298,10 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
	defer span.End()

	// Only check for Electra-related engine methods if Electra has been activated.
	if params.ElectraEnabled() {
		supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
	}
	var result []string
	err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
	if err != nil {

@@ -531,32 +537,23 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
// It retrieves the KZG commitments from the block body, fetches the associated blobs and proofs,
// and constructs the corresponding verified read-only blob sidecars.
//
// The 'exists' argument is a boolean list (must be the same length as body.BlobKzgCommitments), where each element corresponds to whether a
// particular blob sidecar already exists. If exists[i] is true, the blob for the i-th KZG commitment
// has already been retrieved and does not need to be fetched again from the execution layer (EL).
//
// For example:
//   - len(block.Body().BlobKzgCommitments()) == 6
//   - If exists = [true, false, true, false, true, false], the function will fetch the blobs
//     associated with indices 1, 3, and 5 (since those are marked as non-existent).
//   - If exists = [false ... x 6], the function will attempt to fetch all blobs.
//
// Only the blobs that do not already exist (where exists[i] is false) are fetched using the KZG commitments from the block body.
func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, exists []bool) ([]blocks.VerifiedROBlob, error) {
// The 'hasIndex' argument is a function that returns true if the blob with the given uint64 index already exists on disk.
// Only the blobs that do not already exist (where hasIndex(i) is false)
// will be fetched from the execution engine using the KZG commitments from the block body.
func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hasIndex func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
	blockBody := block.Block().Body()
	kzgCommitments, err := blockBody.BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "could not get blob KZG commitments")
	}
	if len(kzgCommitments) > len(exists) {
		return nil, fmt.Errorf("length of KZG commitments (%d) is greater than length of exists (%d)", len(kzgCommitments), len(exists))
	}

	// Collect KZG hashes for non-existing blobs
	var kzgHashes []common.Hash
	var kzgIndexes []int
	for i, commitment := range kzgCommitments {
		if !exists[i] {
		if !hasIndex(uint64(i)) {
			kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
			kzgIndexes = append(kzgIndexes, i)
		}
	}
	if len(kzgHashes) == 0 {

@@ -579,27 +576,21 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.

	// Reconstruct verified blob sidecars
	var verifiedBlobs []blocks.VerifiedROBlob
	for i, blobIndex := 0, 0; i < len(kzgCommitments); i++ {
		if exists[i] {
	for i := 0; i < len(kzgHashes); i++ {
		if blobs[i] == nil {
			continue
		}

		if blobIndex >= len(blobs) || blobs[blobIndex] == nil {
			blobIndex++
			continue
		}
		blob := blobs[blobIndex]
		blobIndex++

		proof, err := blocks.MerkleProofKZGCommitment(blockBody, i)
		blob := blobs[i]
		blobIndex := kzgIndexes[i]
		proof, err := blocks.MerkleProofKZGCommitment(blockBody, blobIndex)
		if err != nil {
			log.WithError(err).WithField("index", i).Error("failed to get Merkle proof for KZG commitment")
			log.WithError(err).WithField("index", blobIndex).Error("failed to get Merkle proof for KZG commitment")
			continue
		}
		sidecar := &ethpb.BlobSidecar{
			Index:                    uint64(i),
			Index:                    uint64(blobIndex),
			Blob:                     blob.Blob,
			KzgCommitment:            kzgCommitments[i],
			KzgCommitment:            kzgCommitments[blobIndex],
			KzgProof:                 blob.KzgProof,
			SignedBlockHeader:        header,
			CommitmentInclusionProof: proof,
@@ -607,14 +598,14 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.

		roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
		if err != nil {
			log.WithError(err).WithField("index", i).Error("failed to create RO blob with root")
			log.WithError(err).WithField("index", blobIndex).Error("failed to create RO blob with root")
			continue
		}

		v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
		verifiedBlob, err := v.VerifiedROBlob()
		if err != nil {
			log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
			log.WithError(err).WithField("index", blobIndex).Error("failed to verify RO blob")
			continue
		}
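
The new signature replaces the exists []bool mask with a hasIndex func(uint64) bool callback, which removes the length-mismatch failure mode the old code had to guard against. A caller-side sketch of supplying such a callback is below; the helper name and the map-based lookup are illustrative only (the tests further down build theirs from filesystem.NewBlobStorageSummary instead).

// Illustrative construction of a hasIndex callback from a set of blob
// indices already on disk. Names here are examples, not package API.
package main

import "fmt"

func hasIndexFromSet(onDisk map[uint64]bool) func(uint64) bool {
	return func(i uint64) bool { return onDisk[i] }
}

func main() {
	// Indices 0, 2 and 4 are already stored; 1, 3 and 5 would be fetched.
	hasIndex := hasIndexFromSet(map[uint64]bool{0: true, 2: true, 4: true})
	for i := uint64(0); i < 6; i++ {
		fmt.Printf("index %d: fetch=%v\n", i, !hasIndex(i))
	}
}
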
@@ -20,6 +20,7 @@ import (
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -2395,6 +2396,12 @@ func Test_ExchangeCapabilities(t *testing.T) {
	})
}

func mockSummary(t *testing.T, exists []bool) func(uint64) bool {
	hi, err := filesystem.NewBlobStorageSummary(params.BeaconConfig().DenebForkEpoch, exists)
	require.NoError(t, err)
	return hi.HasIndex
}

func TestReconstructBlobSidecars(t *testing.T) {
	client := &Service{capabilityCache: &capabilityCache{}}
	b := util.NewBeaconBlockDeneb()
@@ -2408,15 +2415,15 @@ func TestReconstructBlobSidecars(t *testing.T) {

	ctx := context.Background()
	t.Run("all seen", func(t *testing.T) {
		exists := []bool{true, true, true, true, true, true}
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
		hi := mockSummary(t, []bool{true, true, true, true, true, true})
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
		require.NoError(t, err)
		require.Equal(t, 0, len(verifiedBlobs))
	})

	t.Run("get-blobs end point is not supported", func(t *testing.T) {
		exists := []bool{true, true, true, true, true, false}
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
		hi := mockSummary(t, []bool{true, true, true, true, true, false})
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
		require.NoError(t, err)
		require.Equal(t, 0, len(verifiedBlobs))
	})
@@ -2430,8 +2437,8 @@ func TestReconstructBlobSidecars(t *testing.T) {
		rpcClient, client := setupRpcClient(t, srv.URL, client)
		defer rpcClient.Close()

		exists := [6]bool{}
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists[:])
		hi := mockSummary(t, make([]bool, 6))
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
		require.NoError(t, err)
		require.Equal(t, 6, len(verifiedBlobs))
	})
@@ -2443,22 +2450,29 @@ func TestReconstructBlobSidecars(t *testing.T) {
		rpcClient, client := setupRpcClient(t, srv.URL, client)
		defer rpcClient.Close()

		exists := []bool{true, false, true, false, true, false}
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
		hi := mockSummary(t, []bool{true, false, true, false, true, false})
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
		require.NoError(t, err)
		require.Equal(t, 3, len(verifiedBlobs))
	})

	t.Run("kzg is longer than exist", func(t *testing.T) {
		srv := createBlobServer(t, 3)
	t.Run("recovered 3 missing blobs with mutated blob mask", func(t *testing.T) {
		exists := []bool{true, false, true, false, true, false}
		hi := mockSummary(t, exists)

		srv := createBlobServer(t, 3, func() {
			// Mutate blob mask
			exists[1] = true
			exists[3] = true
		})
		defer srv.Close()

		rpcClient, client := setupRpcClient(t, srv.URL, client)
		defer rpcClient.Close()

		exists := []bool{true, false, true, false, true}
		_, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
		require.ErrorContains(t, "length of KZG commitments (6) is greater than length of exists (5)", err)
		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
		require.NoError(t, err)
		require.Equal(t, 3, len(verifiedBlobs))
	})
}

@@ -2472,12 +2486,16 @@ func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
	return kzgCommitments
}

func createBlobServer(t *testing.T, numBlobs int) *httptest.Server {
func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		defer func() {
			require.NoError(t, r.Body.Close())
		}()
		// Execute callback functions for each request.
		for _, f := range callbackFuncs {
			f()
		}

		blobs := make([]pb.BlobAndProofJson, numBlobs)
		for i := range blobs {

@@ -109,7 +109,7 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
}

// ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, []bool) ([]blocks.VerifiedROBlob, error) {
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
	return e.BlobSidecars, e.ErrorBlobSidecars
}

@@ -78,7 +78,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
	// }

	// Only orphan a block if the head LMD vote is weak
	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
		return
	}

@@ -143,7 +143,7 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
	}

	// Only orphan a block if the head LMD vote is weak
	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
		return head.root
	}
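
In both hunks the guard compares the head's LMD weight against a percentage of a single committee's weight, and only continues toward orphaning when the head is at or below that threshold. A worked sketch of the comparison, with an assumed 20% threshold and made-up weights (the real threshold and weights come from the beacon config and fork-choice store):

// Worked example of the weak-head check: orphaning is only considered when
// head.weight*100 <= committeeWeight*threshold. The numbers are assumptions.
package main

import "fmt"

func headIsWeak(headWeight, committeeWeight, thresholdPct uint64) bool {
	return headWeight*100 <= committeeWeight*thresholdPct
}

func main() {
	const committeeWeight = 1_000_000 // assumed single-committee weight for the example
	fmt.Println(headIsWeak(150_000, committeeWeight, 20)) // true: 15% of a committee, weak enough to consider orphaning
	fmt.Println(headIsWeak(250_000, committeeWeight, 20)) // false: 25%, the early return keeps the head
}
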
@@ -34,11 +34,17 @@ func (s *Service) canUpdateAttestedValidator(idx primitives.ValidatorIndex, slot

// attestingIndices returns the indices of validators that participated in the given aggregated attestation.
func attestingIndices(ctx context.Context, state state.BeaconState, att ethpb.Att) ([]uint64, error) {
	committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
	if err != nil {
		return nil, err
	committeeBits := att.CommitteeBitsVal().BitIndices()
	committees := make([][]primitives.ValidatorIndex, len(committeeBits))
	var err error
	for i, ci := range committeeBits {
		committees[i], err = helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, primitives.CommitteeIndex(ci))
		if err != nil {
			return nil, err
		}
	}
	return attestation.AttestingIndices(att, committee)

	return attestation.AttestingIndices(att, committees...)
}
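
The rewritten attestingIndices resolves one beacon committee per set bit in the attestation's committee bitfield instead of a single committee index, which is what multi-committee aggregates require. A small sketch of that gathering pattern, with toy types and a stand-in lookup function in place of the real state helpers:

// Sketch of resolving one committee per set bit in a committee bitfield and
// passing them all to an attesting-indices helper. Types and the lookup
// function are stand-ins, not the prysm helpers.
package main

import "fmt"

type validatorIndex uint64

func committeesForBits(bits []uint64, lookup func(uint64) ([]validatorIndex, error)) ([][]validatorIndex, error) {
	committees := make([][]validatorIndex, len(bits))
	for i, ci := range bits {
		c, err := lookup(ci) // analogous to fetching one committee by index from the state
		if err != nil {
			return nil, err
		}
		committees[i] = c
	}
	return committees, nil
}

func main() {
	lookup := func(ci uint64) ([]validatorIndex, error) {
		// Toy committees: committee N holds validators 10N..10N+2.
		base := validatorIndex(ci * 10)
		return []validatorIndex{base, base + 1, base + 2}, nil
	}
	committees, err := committeesForBits([]uint64{1, 3}, lookup)
	fmt.Println(committees, err) // [[10 11 12] [30 31 32]] <nil>
}
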
// logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
@@ -160,6 +166,23 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be
		}
	}

// processSingleAttestation logs when the beacon node observes a single attestation from a tracked validator.
func (s *Service) processSingleAttestation(att ethpb.Att) {
	s.RLock()
	defer s.RUnlock()

	single, ok := att.(*ethpb.SingleAttestation)
	if !ok {
		log.Errorf("Wrong attestation type (expected %T, got %T)", &ethpb.SingleAttestation{}, att)
		return
	}

	if s.canUpdateAttestedValidator(single.AttesterIndex, single.GetData().Slot) {
		logFields := logMessageTimelyFlagsForIndex(single.AttesterIndex, att.GetData())
		log.WithFields(logFields).Info("Processed unaggregated attestation")
	}
}

// processUnaggregatedAttestation logs when the beacon node observes an unaggregated attestation from a tracked validator.
func (s *Service) processUnaggregatedAttestation(ctx context.Context, att ethpb.Att) {
	s.RLock()

@@ -236,6 +236,13 @@ func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.S
			} else {
				s.processAggregatedAttestation(s.ctx, data.Attestation)
			}
		case operation.SingleAttReceived:
			data, ok := e.Data.(*operation.SingleAttReceivedData)
			if !ok {
				log.Error("Event feed data is not of type *operation.SingleAttReceivedData")
			} else {
				s.processSingleAttestation(data.Attestation)
			}
		case operation.ExitReceived:
			data, ok := e.Data.(*operation.ExitReceivedData)
			if !ok {

@@ -14,6 +14,7 @@ import (

func configureTracing(cliCtx *cli.Context) error {
	return tracing.Setup(
		cliCtx.Context,
		"beacon-chain", // service name
		cliCtx.String(cmd.TracingProcessNameFlag.Name),
		cliCtx.String(cmd.TracingEndpointFlag.Name),

@@ -859,6 +859,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
		regularsync.WithBlobStorage(b.BlobStorage),
		regularsync.WithVerifierWaiter(b.verifyInitWaiter),
		regularsync.WithAvailableBlocker(bFillStore),
		regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
	)
	return b.services.RegisterService(rs)
}
@@ -976,6 +977,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
		FinalizationFetcher:     chainService,
		BlockReceiver:           chainService,
		BlobReceiver:            chainService,
		DataColumnReceiver:      chainService,
		AttestationReceiver:     chainService,
		GenesisTimeFetcher:      chainService,
		GenesisFetcher:          chainService,

Some files were not shown because too many files have changed in this diff.