Compare commits

..

6 Commits

Author SHA1 Message Date
satushh c603321733 initialise registeredNetworkEntry in startDiscoveryAndSubscriptions (by james) 2025-09-29 18:59:57 +01:00
satushh 8f816d6f49 Track existing subscriptions to prevent repeated attempts 2025-09-29 13:42:03 +01:00
satushh 0a8603268b consider existing case of topic 2025-09-28 21:31:48 +01:00
satushh 6fd2d5f268 bazel run //:gazelle -- fix 2025-09-28 21:11:42 +01:00
satushh 8056f55522 fix race condition and proper removal of topic 2025-09-28 21:06:53 +01:00
satushh 9ab42d18da tests to check race condition 2025-09-28 20:55:13 +01:00
665 changed files with 6040 additions and 17883 deletions

View File

@@ -34,7 +34,6 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1
build:release --strip=always
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --copt=-g

View File

@@ -9,7 +9,7 @@ on:
jobs:
run-changelog-check:
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout source code
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0

View File

@@ -3,7 +3,7 @@ on: [push, pull_request]
jobs:
check-specrefs:
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout repository

View File

@@ -10,7 +10,7 @@ on:
jobs:
clang-format-checking:
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# Is this step failing for you?

View File

@@ -10,7 +10,7 @@ permissions:
jobs:
list:
runs-on: ubuntu-4
runs-on: ubuntu-latest
timeout-minutes: 180
steps:
- uses: actions/checkout@v3
@@ -25,7 +25,7 @@ jobs:
fuzz-tests: ${{steps.list.outputs.fuzz-tests}}
fuzz:
runs-on: ubuntu-4
runs-on: ubuntu-latest
timeout-minutes: 360
needs: list
strategy:

View File

@@ -11,7 +11,7 @@ on:
jobs:
formatting:
name: Formatting
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -22,7 +22,7 @@ jobs:
gosec:
name: Gosec scan
runs-on: ubuntu-4
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
@@ -40,7 +40,7 @@ jobs:
lint:
name: Lint
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -59,7 +59,7 @@ jobs:
build:
name: Build
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.25.1
uses: actions/setup-go@v4

View File

@@ -8,7 +8,7 @@ on:
jobs:
Horusec_Scan:
name: horusec-Scan
runs-on: ubuntu-4
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/develop'
steps:
- name: Check out code
@@ -19,4 +19,4 @@ jobs:
- name: Running Security Scan
run: |
curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

View File

@@ -4,368 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [v6.1.4](https://github.com/prysmaticlabs/prysm/compare/v6.1.3...v6.1.4) - 2025-10-24
This release includes a bug fix affecting block proposals in rare cases, along with an important update for Windows users running post-Fusaka fork.
### Added
- SSZ-QL: Add endpoints for `BeaconState`/`BeaconBlock`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15888)
- Add native state diff type and marshalling functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15250)
- Update the earliest available slot after pruning operations in beacon chain database pruner. This ensures the P2P layer accurately knows which historical data is available after pruning, preventing nodes from advertising or attempting to serve data that has been pruned. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15694)
### Fixed
- Correctly advertise (in ENR and beacon API) attestation subnets when using `--subscribe-all-subnets`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15880)
- `randomPeer`: Return if the context is cancelled when waiting for peers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15876)
- Improve the error message when the byte count read from disk for a data column sidecar is lower than expected (mostly because the file is truncated). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15881)
- Delete the genesis state file when --clear-db / --force-clear-db is specified. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15883)
- Fix sync committee subscription to use subnet indices instead of committee indices. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15885)
- Fixed metadata extraction on Windows by correctly splitting file paths. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15899)
- `VerifyDataColumnsSidecarKZGProofs`: Check if sizes match. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15892)
- Fix recoverStateSummary to persist state summaries in stateSummaryBucket instead of stateBucket (#15896). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15896)
- `updateCustodyInfoInDB`: Use `NumberOfCustodyGroups` instead of `NumberOfColumns`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15908)
- Sync committee uses correct state to calculate position. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15905)
## [v6.1.3](https://github.com/prysmaticlabs/prysm/compare/v6.1.2...v6.1.3) - 2025-10-20
This release has several important beacon API and p2p fixes.
### Added
- Add Grandine to P2P known agents. (Useful for metrics). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15829)
- Delegate sszInfo HashTreeRoot to FastSSZ-generated implementations via SSZObject, enabling roots calculation for generated types while avoiding duplicate logic. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15805)
- SSZ-QL: Use `fastssz`'s `SizeSSZ` method for calculating the size of `Container` type. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15864)
- SSZ-QL: Access n-th element in `List`/`Vector`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15767)
### Changed
- Do not verify block data when calculating rewards. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15819)
- Process pending attestations after pending blocks are cleared. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15824)
- updated web3signer to 25.9.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15832)
- Gracefully handle submit blind block returning 502 errors. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15848)
- Improve returning individual message errors from Beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15835)
- SSZ-QL: Clarify `Size` method with more sophisticated `SSZType`s. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15864)
### Fixed
- Use service context and continue on slasher attestation errors (#15803). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15803)
- The block event should not be sent on certain block processing failures; it is now sent only on successful processing: when the block is non-canonical, when the block is canonical but getFCUArgs fails, and on full success. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15814)
- Fixed web3signer e2e issues caused by a regression in old fork support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15832)
- Do not mark blocks as invalid from ErrNotDescendantOfFinalized. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15846)
- Fixed [#15812](https://github.com/OffchainLabs/prysm/issues/15812): Gossip attestation validation incorrectly rejecting attestations that arrive before their referenced blocks. Previously, attestations were saved to the pending queue but immediately rejected by forkchoice validation, causing "not descendant of finalized checkpoint" errors. Now attestations for missing blocks return `ValidationIgnore` without error, allowing them to be properly processed when their blocks arrive. This eliminates false positive rejections and prevents potential incorrect peer downscoring during network congestion (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15840)
- Mark the block as invalid if it has an invalid signature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15847)
- Display error messages from the server verbatim when they are not encoded as `application/json`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15860)
- `HasAtLeastOneIndex`: Check the index is not too high. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15865)
- Fix the `/eth/v1/beacon/blob_sidecars/` beacon API when the Fulu fork epoch is set to the far future epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15867)
- `dataColumnSidecarsByRangeRPCHandler`: Gracefully close the stream if no data to return. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15866)
- `VerifyDataColumnSidecar`: Check that there are not too many commitments. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15859)
- `WithDataColumnRetentionEpochs`: Use `dataColumnRetentionEpoch` instead of `blobColumnRetentionEpoch`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15872)
- Mark epoch transition correctly on new head events. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15871)
- Reject committee index >= committees_per_slot in unaggregated attestation validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15855)
- Decreased attestation gossip validation batch deadline to 5ms. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15882)
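The sketch below illustrates the `ValidationIgnore` pattern from the #15812 fix above. It is a minimal, self-contained example built on go-libp2p-pubsub's validation results; the `attestation`, `knownBlocks`, and `pendingQueue` names are hypothetical stand-ins, not Prysm's actual types.

```go
package sketch

import (
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// attestation is a hypothetical, trimmed-down stand-in for the real type.
type attestation struct {
	beaconBlockRoot [32]byte
}

// knownBlocks and pendingQueue are hypothetical placeholders for forkchoice
// lookups and the pending-attestation queue.
var (
	knownBlocks  = map[[32]byte]bool{}
	pendingQueue []attestation
)

// validateAttestation returns ValidationIgnore (not ValidationReject) when the
// referenced block has not arrived yet, queuing the attestation for later
// processing so the sending peer is not penalized for a timing issue.
func validateAttestation(att attestation) pubsub.ValidationResult {
	if !knownBlocks[att.beaconBlockRoot] {
		pendingQueue = append(pendingQueue, att)
		return pubsub.ValidationIgnore
	}
	// ... remaining forkchoice and signature checks would go here ...
	return pubsub.ValidationAccept
}
```

The key design point is that a missing referenced block is a timing condition rather than evidence of a bad message, so the attestation is queued and ignored instead of rejected, avoiding false positive rejections and peer downscoring.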
## [v6.1.2](https://github.com/prysmaticlabs/prysm/compare/v6.1.1...v6.1.2) - 2025-10-10
This release has several important fixes to improve Prysm's peering, stability, and attestation inclusion on mainnet and all testnets. All node operators are encouraged to update to this release as soon as practical for the best mainnet performance.
### Added
- Added a 1 minute timeout on PruneAttestationOnEpoch operations to prevent very large bolt transactions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15746)
- Added expected delay before broadcasting light client p2p messages. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15776)
### Changed
- Replaced reflect.TypeOf with reflect.TypeFor. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15627)
- Bazel builds with `--config=release` now properly apply `--strip=always` to strip debug symbols from the release assets. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15774)
- Add sources for compute_fork_digest to specrefs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15699)
- Aggregate logs when broadcasting data column sidecars (one per root instead of one per sidecar). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15748)
- `c-kzg-4844`: Update from `v2.1.1` to `v2.1.5`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15708)
- Process pending attestations as soon as the block arrives. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15791)
- Compare received LC messages over gossipsub with locally computed ones before forwarding. Also no longer save updates. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15783)
- Optimize pending attestation processing by adding batching. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15801)
### Removed
- Removed unused configs and hid Prysm-specific configs from the `/eth/v1/config/spec` endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15797)
### Fixed
- SSZ-QL: Support nested `List` type (e.g., `ExecutionPayload.Transactions`). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15725)
- Fixed "Unsupported config field kind; value forwarded verbatim" errors for fields of type string. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15773)
- Fix the `/eth/v1/config/spec` endpoint to properly skip omitted values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15777)
- Fix ProduceSyncCommitteeContribution not returning error when committee index is out of range. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15770)
- Improvements to GetDuties v2: replaces the expensive `helpers.PrecomputeCommittees()` with `CommitteeAssignments`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15784)
- Avoid unnecessary calls to `ExitInformation()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15764)
- `inclusionProofKey`: Include the commitments in the key. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15795)
- Do not reject peers if they have a mismatched version|digest when the next fork epoch is FAR_FUTURE_EPOCH. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15798)
- Don't include entries in the fork schedule if their epoch is set to far future epoch. Avoids reporting next_fork_version == <unscheduled fork>. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15799)
- Wait for custody info to be initialized before querying them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15804)
- Fixes the `level=error msg="Could not clean up dirty states" error="OriginBlockRoot: not found in db" prefix=state-gen` error when starting in Kurtosis. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15808)
- Correctly clear disconnected peers from `connected_libp2p_peers` and `connected_libp2p_peers_average_scores`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15807)
- `buildStatusFromStream`: Respond `statusV2` only if Fulu is enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15818)
- Send our real earliest available slot when sending a Status request post Fulu instead of `0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15818)
- switch to built-in min/max. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15817)
- `findPeersWithSubnets`: If the filter function returns an error for a given peer, log an error and skip the peer instead of aborting the whole function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15815)
- `computeIndicesByRootByPeer`: If the loop returns an error for a given peer, log an error and skip the peer instead of aborting the whole function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15815)
- Fixed issue #15738 where separate goroutines assume sole responsibility for topic registration. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15779)
## [v6.1.0](https://github.com/prysmaticlabs/prysm/compare/v6.0.5...v6.1.0) and [v6.1.1](https://github.com/prysmaticlabs/prysm/compare/v6.1.0...v6.1.1) - 2025-09-26
This release has support for Fusaka testnets as well as many mainnet improvements. Testnet operators are required to update prior to the testnet fork date. See [PR #15721](https://github.com/OffchainLabs/prysm/pull/15721).
Mainnet operators are encouraged to update per their regular update cadence.
Note: This release was re-issued as v6.1.1 to distribute release assets without debug symbols. See issue [#15760](https://github.com/OffchainLabs/prysm/issues/15760).
#### Noteworthy improvements, changes and bugfixes:
- The `--disable-experimental-state` beacon-node flag has been removed, marking the full graduation of the [Copy-on-write design](https://hackmd.io/zlTJ6Qe_RiueT3y2R77BvA) for BeaconState fields, which reduces the memory overhead of keeping multiple BeaconStates in RAM for block processing. Congrats @rkapka!
- The behavior set by the `--attest_timely` flag is now on by default, with the flag itself deprecated.
- GetDutiesV2 introduced, lowering duty request latency and beacon-node load. Multiple other improvements and bugfixes have been made to harden the validator run loop.
- New validator flag `--max-health-checks` configures a validator to switch to a fallback beacon node after the given number of health check failures.
- Improvements to rest-mode validator, defaulting to SSZ where available and adding SSZ support to more Beacon API endpoints.
- Beacon API now honors the gzip content-encoding header (see the sketch below).
- Log timestamps now include milliseconds.
- Full fusaka support for testnets!
**Special thanks to external contributors!**: @Alleysira, @KaloyanTanev, @rose2221
[1] To override this limit, use the validator flag `--suggested-gas-limit` or set the `builder.gas_limit` setting in your [proposer settings file](https://prysm.offchainlabs.com/docs/configure-prysm/fee-recipient/#advanced-configure-mev-builder-and-gas-limit).
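The gzip content-encoding support mentioned above boils down to standard HTTP content negotiation. Below is a minimal, self-contained sketch of such a middleware (not Prysm's actual implementation): it compresses responses only when the client advertises `Accept-Encoding: gzip`.

```go
package sketch

import (
	"compress/gzip"
	"net/http"
	"strings"
)

// gzipResponseWriter wraps http.ResponseWriter so that the handler's writes
// are transparently compressed.
type gzipResponseWriter struct {
	http.ResponseWriter
	zw *gzip.Writer
}

func (g gzipResponseWriter) Write(b []byte) (int, error) { return g.zw.Write(b) }

// withGzip compresses responses for clients that advertise gzip support.
func withGzip(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			next.ServeHTTP(w, r)
			return
		}
		w.Header().Set("Content-Encoding", "gzip")
		zw := gzip.NewWriter(w)
		defer zw.Close()
		next.ServeHTTP(gzipResponseWriter{ResponseWriter: w, zw: zw}, r)
	})
}
```

As a later fix in this changelog notes, such a middleware must be skipped for streaming endpoints like the event stream API, where buffered compression would break server-sent events.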
### Added
- PeerDAS: Add `CustodyInfo` in `BeaconNode`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15378)
- GetDutiesV2 gRPC function: removes the committee list from duties, replacing it with the committee length and the validator's committee index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15273)
- Add SSZ support for two attestation APIs: `/eth/v1/validator/attestation_data` and. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15377)
- Added feature flag for validator client to use get duties v2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15380)
- PeerDAS: Implement DAS. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15367)
- `verifyBlobCommitmentCount`: Print max allowed blob count in error message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15386)
- Data column support for beacon api event end point. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15387)
- Implement EIP-7917: Stable proposer lookahead. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15129)
- Implement `dataColumnSidecarByRootRPCHandler`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15405)
- New ssz-only flag for validator client to enable calling rest apis in SSZ, starting with get block endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15390)
- Implement `dataColumnSidecarsByRangeRPCHandler`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15421)
- Add SSZ support for `submitPoolAttestationsV2` beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15422)
- New `StatusV2` proto message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- Implement `SendDataColumnSidecarsByRangeRequest`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15430)
- Implement `SendDataColumnSidecarsByRootRequest`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15430)
- Implement beacon API blob sidecar endpoint for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15436)
- PeerDAS: Implement the new Fulu Metadata. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15440)
- PeerDAS: Implement reconstruction. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15454)
- Implement engine method `GetBlobsV2`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15469)
- Implement execution `ReconstructDataColumnSidecars`, which reconstructs data column sidecars from data fetched from the execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15469)
- New `--batch-verifier-limit` flag to configure the max number of signatures to batch verify on gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15467)
- `disable-attest-timely` flag to disable attest timely. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15410)
- Added `max-health-checks` flag that sets the maximum number of times the validator tries to check the health of the beacon node before timing out. 0 or a negative number means indefinite (the default is 0). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15401)
- Add method `VersionToForkEpochMap()` to the `BeaconChainConfig` in the `params` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15482)
- Add log capitalization analyzer and apply changes across codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15452)
- Slot aware cache for seen data column gossip p2p to reduce memory usages. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15477)
- Gzip compression for the Beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14982)
- Implement data column sidecars reconstruction with data retrieved from the execution client when receiving a block via gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15483)
- Add support for parsing and handling `ExecutionPayloadAndBlobsBundleV2`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15503)
- Added new `PRYSM_API_OVERRIDE_ACCEPT` environment variable to override the SSZ accept header, as a replacement for the flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15433)
- Implements the `/eth/v1/beacon/states/{state_id}/proposer_lookahead` beacon api endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15525)
- Added new metadata fields (attnets,syncnets,custody_group_count) to `/eth/v1/node/identity`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15506)
- Add BLOB_SCHEDULE field to `/eth/v1/config/spec` endpoint response to expose blob scheduling configuration for networks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15485)
- Add timing metric `publish_block_v2_duration_milliseconds` to measure processing duration of the `PublishBlockV2` beacon API endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15539)
- Add Fulu case for `saveStatesEfficientInternal`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15553)
- Support for fusaka `nfd` enr field, and changes to the semantics of the eth2 field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15501)
- Implement post-Fulu MEV-boost protocol changes where relays only return status codes for blinded block submissions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15486)
- Added fulu block support to StreamBlocksAltair. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15583)
- All outbound HTTP requests from the validator client now include a custom `User-Agent` header in the format `Prysm/<name>/<version>`. This enhances observability and enables upstream systems to correctly identify Prysm validator clients by their name and version (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15574)
- Fixes [#15435](https://github.com/OffchainLabs/prysm/issues/15435). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15574)
- Data columns syncing for Fusaka. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15564)
- Added specification references which map spec to implementation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15592)
- Warm data columns storage cache at start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15629)
- Add `--data-column-path` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15629)
- Initialize package for SSZ Query Language. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15588)
- In FetchDataColumnSidecars, after retrieving sidecars from peers, if some sidecars are still missing for a given root and reconstruction is possible (by combining sidecars already retrieved from peers with sidecars in storage), reconstruct the missing sidecars instead of trying to fetch them from peers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15593)
- Fulu block proposal changes for beacon api and gRPC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15628)
- Retry to fetch origin data column sidecars when starting from a checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15634)
- Aggregate and pack sync committee messages into blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15608)
- Support `List` type for SSZ-QL. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15637)
- Configured the beacon node to seek peers when we have validator custody requirements. If one or more validators are connected to the beacon node, then the beacon node should seek a diverse set of peers such that broadcasting to all data column subnets for a block proposal is more efficient. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15654)
- SSZ-QL: Add element information for `Vector` type. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15668)
- SSZ-QL: Support multi-dimensional tag parsing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15668)
- Added more metadata for debug logs when initial sync requests fail for "invalid data returned from peer" errors. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15674)
- Adding Fulu types for web3signer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15498)
- Added erigon/caplin to known p2p agent strings. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15678)
- Add Fulu fork transition tests for mainnet and minimal configurations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15666)
- Fulu proposer lookahead epoch processing tests for mainnet and minimal configurations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15667)
- Populate sszInfo of variable-length fields in AnalyzeObjects. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15676)
- KZG proof batch verification for data column gossip validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15617)
- Added flag `--p2p-colocation-whitelist` to accept CIDRs which will bypass the p2p colocation restrictions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15685)
- Fulu spec tests coverage for covering the general package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15682)
- Implemented syncing in a disjoint network with respect to data column sidecars subscribed by peers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Add retry logic when GetBlobsV2 is called. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15520)
- Call GetBlobsV2 as soon as we receive the first data column sidecar or block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15520)
- Added new post fulu /eth/v1/beacon/blobs/{block_id} endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15610)
- SSZ-QL: Handle `Bitlist` and `Bitvector` types. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15704)
- Adding `/eth/v1/debug/beacon/data_column_sidecars/{block_id}` endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15701)
- Support Fulu genesis block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15652)
- Update spectests to 1.6.0-beta.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15741)
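The custom `User-Agent` behavior described above can be implemented with a small `http.RoundTripper` wrapper. The sketch below is illustrative only; the user-agent string is a hard-coded example value rather than Prysm's real name/version plumbing.

```go
package sketch

import "net/http"

// userAgentTransport injects a User-Agent header of the form
// Prysm/<name>/<version> into every outbound request.
type userAgentTransport struct {
	base      http.RoundTripper
	userAgent string // e.g. "Prysm/validator/v6.1.0" (example value)
}

func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone so the caller's request is not mutated.
	clone := req.Clone(req.Context())
	clone.Header.Set("User-Agent", t.userAgent)
	return t.base.RoundTrip(clone)
}

// newClientWithUserAgent returns an HTTP client that tags every request.
func newClientWithUserAgent(ua string) *http.Client {
	return &http.Client{Transport: userAgentTransport{base: http.DefaultTransport, userAgent: ua}}
}
```

Any client built this way tags every outbound request, so upstream systems can identify the caller by name and version without changes to individual call sites.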
### Changed
- `parseIndices`: Return `[]int` instead of `[]uint64`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15386)
- Reclaim memory manually in some tests that fuzz the beacon state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15395)
- When the REST API is enabled, the get block API defaults to requesting and receiving SSZ instead of JSON, with JSON as the fallback. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15390)
- Remove "invalid" from logs for incoming blob sidecar that is missing parent or out of range slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15428)
- In `TopicFromMessage`: No longer assume that all Fulu-specific topics are V3 only. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- `readChunkedDataColumnSidecar`: Add `validationFunctions` parameter and add tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- Put the initiation of LC Store behind the `enable-light-client` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15464)
- default batch signature verification limit increased from 50 to 1000. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15467)
- Increase mainnet DefaultBuilderGasLimit from 36M to 45M. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15455)
- Attest timely is now default. `attest-timely` flag is now deprecated. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15410)
- Move data col reconstruction log to a more accurate place in the code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15475)
- Makes the multivalue slice permanent in the state and removes old paths. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15414)
- Previously, we optimistically believed the beacon node was healthy and tried to get chain start, but now we do a health check at the start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15401)
- Optimize proposer inclusion proof calculation by pre-caching subtries. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15473)
- Move setter/getter functions for LC Bootstrap into LcStore for a unified interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15476)
- Changed `enable-duties-v2` to `disable-duties-v2` to default to using duties v2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15445)
- Changed `uint64` genesis time to use `time.Time`. Also did some refactoring and cleanup that was enabled by these changes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15419)
- Add milliseconds to log timestamps. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15496)
- Move setter/getter functions for LC Updates into LcStore for a unified interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15488)
- Change LC Bootstrap logic to only save bootstraps on finalized checkpoints instead of every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15497)
- Update links to consensus-specs to point to `master` branch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15523)
- Changed the discv5 DB from in-memory to persistent so that local node information (including the key) survives restarts, keeping the ENR sequence number deterministic. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15519)
- Fix some nits associated with data column sidecar verification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15521)
- Include state root in StateNotFoundError for better debugging of consensus validation failures. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15533)
- When shutting down the sync service, we now send p2p goodbye messages in parallel to maximize the chances of propagating goodbyes to all peers before an unsafe shutdown (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15542)
- Do not compare liveness response with LH in e2e Beacon API evaluator. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15556)
- Moved the broadcast and event notifier logic for saving LC updates to the store function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15540)
- Fixed the issue with broadcasting more than twice per LC Finality update, and the if-case bug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15540)
- Separated the finality update validation rules for saving and broadcasting. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15540)
- Update validator custody to the latest specification, including the new status message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15532)
- Beacon api optimize validator lookup for large batch request size. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15558)
- Check pending block is in forkchoice before importing pending attestation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15547)
- Redesign the pending attestation queue. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15024)
- Replaced hardcoded `grpc-gateway-port` with `flags.HTTPServerPort.Name` in `testing/endtoend/components/validator.go`, resolving an inline TODO for improved flag consistency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15236)
- Refactor `htrutil.go` by removing redundant code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15453)
- Improved sync unaggregated attestation cache key outside of lock path. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15572)
- Move aggregated attestation cache key generation outside of critical locks to improve performance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15579)
- Renamed various variables/functions to be more clear. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15529)
- Update consensus spec to v1.6.0-alpha.4 and implement data column support for forkchoice spectests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15590)
- Reject incoming connections when the fork schedule of the connecting peer (parsed from their ENR) has a matching next_fork_epoch, but mismatched next_fork_version or nfd (next fork digest). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15604)
- Update gohashtree to v0.0.5-beta. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15619)
- Updated consensus spec from v1.6.0-alpha.4 to v1.6.0-alpha.5 with adjusted minimal config parameters. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15621)
- Changed old atomic functions to new atomic.Int for safer and clearer code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15625)
- Start from justified checkpoint by default. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15636)
- Updated consensus spec from v1.6.0-alpha.5 to v1.6.0-alpha.6. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15658)
- Updated outdated documentation links for Web3Signer and Why Bazel. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15631)
- changed validatorpb.SignRequest_AggregateAttestationAndProof signing type to use AggregateAttestationAndProofV2 on web3signer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15498)
- Pre-calculate exit epoch, churn and active balance before processing slashings to reduce CPU load. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14990)
- Switched the default of the validator client REST call for submit block from JSON to SSZ. A JSON fallback will be attempted. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15645)
- Deprecated the /prysm/v1/beacon/blobs endpoint and added an error for the post-Fulu fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15643)
- Upgraded gossipsub to v0.14.2 and libp2p to v0.39.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15677)
- Prysm will now downscore peers that return invalid block_by_range responses. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15686)
- Filtering peers for data column subnets: Added a one-epoch slack to the peers head slot view. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Fetching data column sidecars: If not all requested sidecars are available for a given root, return the successfully retrieved ones along with a map indicating which could not be fetched. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Fetching origin data column sidecars: If only some sidecars are fetched, save the retrieved ones and retry fetching the missing ones on the next attempt. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Renamed the `--enable-experimental-backfill` flag to `--enable-backfill` to signal that it is more mature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15690)
- Restrict best LC update collection to canonical blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15585)
- PeerDAS: Wait for a random delay, then reconstruct data column sidecars and immediately reseed instead of immediately reconstructing, waiting and then reseeding. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15705)
- Clarified misleading log messages in beacon-chain/rpc/service gRPC module. [[PR]](https://github.com/prysmaticlabs/prysm/pull/13063)
- Broadcast the block and then the sidecars, instead of broadcasting the block and sidecars concurrently. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15720)
- Broadcast and receive sidecars concurrently instead of sequentially. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15720)
- Changed blst dependency from `http_archive` to `go_repository` so that gazelle can keep it in sync with go.mod. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15709)
- Updated go to v1.25.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15641)
- Updated rules_go to v0.57.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15641)
- Updated protobuf to 28.3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15641)
- Set Fulu fork epochs for Holesky, Hoodi, and Sepolia testnets. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15721)
- Improve logging of data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15728)
- Updated go.mod to v1.25.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15740)
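A minimal sketch of the parallel goodbye broadcast noted above for sync-service shutdown. `sendGoodbye` is a hypothetical stand-in for the real per-peer goodbye RPC, and the timeout bounds how long shutdown waits for propagation.

```go
package sketch

import (
	"context"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// sendGoodbye is a hypothetical stand-in for the real per-peer goodbye RPC.
func sendGoodbye(ctx context.Context, pid peer.ID) error { return nil }

// broadcastGoodbyes sends goodbye messages to all peers concurrently and waits
// up to the deadline, maximizing the chance they propagate before shutdown.
func broadcastGoodbyes(peers []peer.ID, deadline time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), deadline)
	defer cancel()

	var wg sync.WaitGroup
	for _, pid := range peers {
		wg.Add(1)
		go func(pid peer.ID) {
			defer wg.Done()
			_ = sendGoodbye(ctx, pid) // best effort: errors ignored on shutdown
		}(pid)
	}
	wg.Wait()
}
```

Sending sequentially risks the process exiting before later peers are reached; fanning out with a bounded wait keeps shutdown fast while giving every peer a chance to see the goodbye.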
### Deprecated
- Deprecated `p2p-metadata` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15554)
### Removed
- Removed //tools/eth1voting tool. This is no longer needed as the beacon chain no longer uses eth1data voting since Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15415)
- Remove deposit count from sync new block log. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15420)
- Unused `DataColumnIdentifier` proto message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- Validator client will no longer need to call the canonical head api. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15480)
- Partially reverted PR #15390, removing the `ssz-only` debug flag until there is a real use case for the flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15433)
### Fixed
- Added regression test for [PR 15369](https://github.com/OffchainLabs/prysm/pull/15369). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15379)
- Added missing `meta` field to the response of the endpoint `/eth/v1/node/peers` to align with the Beacon API spec (#15370). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15371)
- Fix blob metric name for peer count. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15412)
- Non deterministic output order of `dataColumnSidecarByRootRPCHandler`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15441)
- Fixed the versioning bug for light client data types in the Beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15400)
- `--chain-config-file`: Do not use mainnet boot nodes anymore. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15460)
- Fix panic on dutiesv2 when there is no committee assignment on the epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15466)
- Allow SSZ requests for pending deposits, partial withdrawals and consolidations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15474)
- Validator client shuts down cleanly on error instead of fatal error. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15401)
- Fixes an edge case where starting the validator client with new validator keys started the slot ticker too early, resulting in replayed slots in the main runner loop. Also fixes an edge case of replayed slots when waiting for account activations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15479)
- DV aggregations failing first slot of the epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15156)
- Skip genesis block retrieval when EIP-6110 deposit requests have started to prevent "pruned history unavailable" errors with execution clients that have pruned pre-merge data. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15494)
- Fixed lookahead initialization at the fulu fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15450)
- Write `Content-Encoding` header in the response properly when gzip encoding is requested. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15499)
- Subnets subscription: Avoid dynamic subscribing blocking when not enough peers per subnet are found. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15471)
- Do not apply the gzip middleware to the event stream API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15517)
- Fixed various reasons why a node is banned by its peers when it stops. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15505)
- Use `MinEpochsForDataColumnSidecarsRequest` in `WithinDAPeriod` when in Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15522)
- Return zero value for `Eth-Consensus-Block-Value` on error to avoid missed block proposals. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15526)
- Moved reconstruction lock to prevent unnecessary work. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15528)
- Fixed variable names, links, and typos in das core code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15524)
- Fix builder bid version compatibility to support Electra bids with Fulu blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15536)
- Aligned the submitPoolSyncCommitteeSignatures response with the Beacon API specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15516)
- Trigger payload attribute event as soon as an early block is processed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15541)
- Fixed Beacon API proposer duty computation for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15534)
- Fixed the max proofs in `BlobsBundleV2`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15530)
- Prevent a race on double `ReceiveBlock`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15565)
- Fixed [#15544](https://github.com/OffchainLabs/prysm/issues/15544): Persist metadata sequence number if it is needed (e.g., use static peer ID option or Fulu enabled). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15554)
- Fix the validateConsensus endpoint handler. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15548)
- The builder version check was using the head block version instead of the current fork's version based on slot; fixes e2e failures from https://github.com/OffchainLabs/prysm/commit/57e27199bdb9b3ef1af14c3374999aba5e0788a3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15568)
- Don't submit duplicate `SignedContributionAndProof` messages. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15571)
- Genesis state, timestamp and validators root now ubiquitously available at node startup, supporting tech debt cleanup. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15470)
- Fixed a condition where the blob cache could panic when the cache entry had fewer sidecars than expected or none at all. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15581)
- Fixed endpoint response to return 404 or 400 after isOptimistic check. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15559)
- Safeguard against accidental out of bounds array access in dataColumnSidecars method. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15586)
- Fixed NewSignedBeaconBlock calls to use Block field for proper equivocation handling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15595)
- Fixed regression in find peer functions introduced in PR#15471, where nodes with equal sequence numbers were incorrectly skipped and the peer count was incorrectly reduced when replacing nodes with higher sequence numbers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15578)
- Fix bug where a stale computed value in a closure excluded newly required (e.g. attestation) subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15603)
- Fix bug where arguments of fillInForkChoiceMissingBlocks were incorrectly placed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15639)
- Fix next epoch proposer duties in Fulu by advancing the state to the beginning of the current epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15642)
- Fix getBlockAttestationsV2 to return [] instead of null when data is empty. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15651)
- Fixed the issue of empty dirs not being deleted when using blob-storage-layout=by-epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15573)
- Start topic-based peer discovery before initial sync completes so that we have coverage of needed columns when range syncing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15660)
- Fixed an off-by-one in forkchoice startup. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15684)
- Mitigate potential supernode clustering due to libp2p ConnManager pruning of non-supernodes, see https://github.com/OffchainLabs/prysm/issues/15607. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15681)
- Initial sync: Do not request data column sidecars for blocks before the retention period. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Fixed incorrect attestation data request where the assigned committee index was used after Electra, instead of 0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15696)
- Use v2 endpoint for blinded block submission post-Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15716)
- Fixed 'justified' block support missing on blocker.Block and optimized logic between blocker.Block and blocker.Blob. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15715)
- Fix prysmctl panic when baseFee is not set in genesis.json. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15687)
- Fix getStateRandao not returning historic RANDAO mix values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15653)
- Fix a race in PriorityQueue.Pop by checking emptiness under the write lock (#15726); see the sketch after this list. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15726)
- In P2P service start, wait for the custody info to be correctly initialized. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15732)
- `createLocalNode`: Wait before retrying to retrieve the custody group count if not present. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15735)
- Replace fmt.Printf with proper test error handling in web3signer keymanager tests, using require.NoError(t, err) instead of t.Fatalf for better error handling and debugging. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15723)
- Fixed a regression introduced in PR #15715: the blocker now returns an error for not found, and error handling correctly handles the error and returns 404 instead of 500. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15742)
- The DA metric was not recording correctly because the if statement on err was accidentally flipped. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15743)
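A minimal sketch of the check-under-write-lock pattern behind the PriorityQueue.Pop race fix above, using a simple mutex-guarded slice rather than the actual queue implementation.

```go
package sketch

import (
	"errors"
	"sync"
)

var errEmptyQueue = errors.New("queue is empty")

type priorityQueue struct {
	mu    sync.RWMutex
	items []int
}

// Pop checks for emptiness under the same write lock used for the removal.
// Checking under a read lock and then re-locking for the pop would allow
// another goroutine to drain the queue in between, which is the race being
// fixed here.
func (q *priorityQueue) Pop() (int, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return 0, errEmptyQueue
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, nil
}
```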
### Security
- Updated go to version 1.24.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15561)
- Updated distroless/cc-debian11 to latest to resolve CVE-2024-2961. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15562)
- Updated go to version 1.24.6. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15566)
- Updated quic-go to latest version. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15749)
## [v6.0.5](https://github.com/prysmaticlabs/prysm/compare/v6.0.4...v6.0.5) - 2025-09-26
We are releasing a patch update on top of v6.0.4 to address a stability issue with quic-go.
All operators should update as soon as possible to v6.0.5 or later.
### Security
- Updated quic-go to latest version. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15749)
## [v6.0.4](https://github.com/prysmaticlabs/prysm/compare/v6.0.3...v6.0.4) - 2025-06-05
This release has more work on PeerDAS, and light client support. Additionally, we have a few bug fixes:
@@ -3820,4 +3458,4 @@ There are no security updates in this release.
# Older than v2.0.0
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases

View File

@@ -253,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.6.0-beta.1"
consensus_spec_version = "v1.6.0-beta.0"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-oEj0MTViJHjZo32nABK36gfvSXpbwkBk/jt6Mj7pWFI=",
"minimal": "sha256-cS4NPv6IRBoCSmWomQ8OEo8IsVNW9YawUFqoRZQBUj4=",
"mainnet": "sha256-BYuLndMPAh4p13IRJgNfVakrCVL69KRrNw2tdc3ETbE=",
"general": "sha256-rT3jQp2+ZaDiO66gIQggetzqr+kGeexaLqEhbx4HDMY=",
"minimal": "sha256-wowwwyvd0KJLsE+oDOtPkrhZyJndJpJ0lbXYsLH6XBw=",
"mainnet": "sha256-4ZLrLNeO7NihZ4TuWH5V5fUhvW9Y3mAPBQDCqrfShps=",
},
version = consensus_spec_version,
)
@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-yrq3tdwPS8Ri+ueeLAHssIT3ssMrX7zvHiJ8Xf9GVYs=",
integrity = "sha256-sBe3Rx8zGq9IrvfgIhZQpYidGjy3mE1SiCb6/+pjLdY=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -6,25 +6,15 @@ go_library(
"client.go",
"errors.go",
"options.go",
"transport.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/api/client",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
deps = ["@com_github_pkg_errors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = [
"client_test.go",
"transport_test.go",
],
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -11,7 +11,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v6/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",

View File

@@ -11,7 +11,6 @@ import (
"regexp"
"strconv"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/client"
"github.com/OffchainLabs/prysm/v6/api/server"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
@@ -274,7 +273,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
if err != nil {
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set(api.ContentTypeHeader, api.JsonMediaType)
req.Header.Set("Content-Type", "application/json")
resp, err := c.Do(req)
if err != nil {
return err
@@ -285,7 +284,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
if resp.StatusCode != http.StatusOK {
decoder := json.NewDecoder(resp.Body)
decoder.DisallowUnknownFields()
errorJson := &server.IndexedErrorContainer{}
errorJson := &server.IndexedVerificationFailureError{}
if err := decoder.Decode(errorJson); err != nil {
return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
}

View File

@@ -59,7 +59,6 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

View File

@@ -171,7 +171,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
if err != nil {
return
}
req.Header.Add(api.UserAgentHeader, version.BuildData())
req.Header.Add("User-Agent", version.BuildData())
for _, o := range opts {
o(req)
}
@@ -232,11 +232,11 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
var getOpts reqOption
if c.sszEnabled {
getOpts = func(r *http.Request) {
r.Header.Set(api.AcceptHeader, api.OctetStreamMediaType)
r.Header.Set("Accept", api.OctetStreamMediaType)
}
} else {
getOpts = func(r *http.Request) {
r.Header.Set(api.AcceptHeader, api.JsonMediaType)
r.Header.Set("Accept", api.JsonMediaType)
}
}
data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
@@ -259,8 +259,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
func (c *Client) parseHeaderResponse(data []byte, header http.Header, slot primitives.Slot) (SignedBid, error) {
var versionHeader string
if c.sszEnabled || header.Get(api.EthConsensusVersionHeader) != "" {
versionHeader = header.Get(api.EthConsensusVersionHeader)
if c.sszEnabled || header.Get(api.VersionHeader) != "" {
versionHeader = header.Get(api.VersionHeader)
} else {
// If we don't have a version header, attempt to parse JSON for version
v := &VersionResponse{}
@@ -390,8 +390,8 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
)
if c.sszEnabled {
postOpts = func(r *http.Request) {
r.Header.Set(api.ContentTypeHeader, api.OctetStreamMediaType)
r.Header.Set(api.AcceptHeader, api.OctetStreamMediaType)
r.Header.Set("Content-Type", api.OctetStreamMediaType)
r.Header.Set("Accept", api.OctetStreamMediaType)
}
body, err = sszValidatorRegisterRequest(svr)
if err != nil {
@@ -401,8 +401,8 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
} else {
postOpts = func(r *http.Request) {
r.Header.Set(api.ContentTypeHeader, api.JsonMediaType)
r.Header.Set(api.AcceptHeader, api.JsonMediaType)
r.Header.Set("Content-Type", api.JsonMediaType)
r.Header.Set("Accept", api.JsonMediaType)
}
body, err = jsonValidatorRegisterRequest(svr)
if err != nil {
@@ -446,7 +446,7 @@ func sszValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]
return ssz, nil
}
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.EthConsensusVersionHeader + " header")
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
func getVersionsBlockToPayload(blockVersion int) (int, error) {
if blockVersion >= version.Fulu {
@@ -525,7 +525,7 @@ func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.R
func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
var versionHeader string
if c.sszEnabled {
versionHeader = strings.ToLower(header.Get(api.EthConsensusVersionHeader))
versionHeader = strings.ToLower(header.Get(api.VersionHeader))
} else {
// fallback to JSON-based version extraction
v := &VersionResponse{}
@@ -555,9 +555,9 @@ func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBloc
return nil, nil, errors.Wrap(err, "could not marshal SSZ for blinded block")
}
opt := func(r *http.Request) {
r.Header.Set(api.EthConsensusVersionHeader, version.String(sb.Version()))
r.Header.Set(api.ContentTypeHeader, api.OctetStreamMediaType)
r.Header.Set(api.AcceptHeader, api.OctetStreamMediaType)
r.Header.Set(api.VersionHeader, version.String(sb.Version()))
r.Header.Set("Content-Type", api.OctetStreamMediaType)
r.Header.Set("Accept", api.OctetStreamMediaType)
}
return body, opt, nil
}
@@ -571,9 +571,9 @@ func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBloc
return nil, nil, errors.Wrap(err, "error marshaling blinded block to JSON")
}
opt := func(r *http.Request) {
r.Header.Set(api.EthConsensusVersionHeader, version.String(sb.Version()))
r.Header.Set(api.ContentTypeHeader, api.JsonMediaType)
r.Header.Set(api.AcceptHeader, api.JsonMediaType)
r.Header.Set(api.VersionHeader, version.String(sb.Version()))
r.Header.Set("Content-Type", api.JsonMediaType)
r.Header.Set("Accept", api.JsonMediaType)
}
return body, opt, nil
}
@@ -676,7 +676,7 @@ func (c *Client) parseBlindedBlockResponseJSON(
// happy path, and an error with information about the server response body for a non-200 response.
func (c *Client) Status(ctx context.Context) error {
getOpts := func(r *http.Request) {
r.Header.Set(api.AcceptHeader, api.JsonMediaType)
r.Header.Set("Accept", api.JsonMediaType)
}
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
return err
@@ -726,12 +726,6 @@ func unexpectedStatusErr(response *http.Response, expected int) error {
return errors.Wrap(jsonErr, "unable to read response body")
}
return errors.Wrap(ErrNotOK, errMessage.Message)
case http.StatusBadGateway:
log.WithError(ErrBadGateway).Debug(msg)
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
return errors.Wrap(ErrBadGateway, errMessage.Message)
default:
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))

View File

@@ -21,7 +21,6 @@ import (
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/prysmaticlabs/go-bitfield"
log "github.com/sirupsen/logrus"
)
@@ -92,8 +91,8 @@ func TestClient_RegisterValidator(t *testing.T) {
t.Run("JSON success", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.JsonMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
body, err := io.ReadAll(r.Body)
defer func() {
require.NoError(t, r.Body.Close())
@@ -127,8 +126,8 @@ func TestClient_RegisterValidator(t *testing.T) {
t.Run("SSZ success", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
body, err := io.ReadAll(r.Body)
defer func() {
require.NoError(t, r.Body.Close())
@@ -171,11 +170,8 @@ func TestClient_RegisterValidator(t *testing.T) {
func TestClient_GetHeader(t *testing.T) {
ctx := t.Context()
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
expectedPath := "/eth/v1/builder/header/%d/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
expectedPath = fmt.Sprintf(expectedPath, ds)
var slot primitives.Slot = ds
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot primitives.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
@@ -225,7 +221,7 @@ func TestClient_GetHeader(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
@@ -260,7 +256,7 @@ func TestClient_GetHeader(t *testing.T) {
t.Run("bellatrix ssz", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), epr))
@@ -269,7 +265,7 @@ func TestClient_GetHeader(t *testing.T) {
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "bellatrix")
header.Set(api.VersionHeader, "bellatrix")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -306,7 +302,7 @@ func TestClient_GetHeader(t *testing.T) {
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
@@ -338,7 +334,7 @@ func TestClient_GetHeader(t *testing.T) {
t.Run("capella ssz", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponseCapella{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseCapella), epr))
@@ -347,7 +343,7 @@ func TestClient_GetHeader(t *testing.T) {
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "capella")
header.Set(api.VersionHeader, "capella")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -380,7 +376,7 @@ func TestClient_GetHeader(t *testing.T) {
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
@@ -420,7 +416,7 @@ func TestClient_GetHeader(t *testing.T) {
t.Run("deneb ssz", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseDeneb), epr))
@@ -429,7 +425,7 @@ func TestClient_GetHeader(t *testing.T) {
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "deneb")
header.Set(api.VersionHeader, "deneb")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -488,7 +484,7 @@ func TestClient_GetHeader(t *testing.T) {
t.Run("electra", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
@@ -533,16 +529,16 @@ func TestClient_GetHeader(t *testing.T) {
t.Run("electra ssz", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponseElectra{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
pro, err := epr.ToProto(es)
pro, err := epr.ToProto(100)
require.NoError(t, err)
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "electra")
header.Set(api.VersionHeader, "electra")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -612,9 +608,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
@@ -640,9 +636,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
epr := &ExecutionPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
ep := &structs.ExecutionPayload{}
@@ -652,7 +648,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "bellatrix")
header.Set(api.VersionHeader, "bellatrix")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -680,9 +676,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "capella", r.Header.Get(api.VersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
@@ -710,9 +706,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "capella", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "capella", r.Header.Get(api.VersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
epr := &ExecutionPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), epr))
ep := &structs.ExecutionPayloadCapella{}
@@ -722,7 +718,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "capella")
header.Set(api.VersionHeader, "capella")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -753,9 +749,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "deneb", r.Header.Get(api.VersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockDeneb
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)
@@ -793,9 +789,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "deneb", r.Header.Get(api.VersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
epr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
pro, blob, err := epr.ToProto()
@@ -807,7 +803,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
ssz, err := combined.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "deneb")
header.Set(api.VersionHeader, "deneb")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -840,9 +836,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "electra", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "electra", r.Header.Get(api.VersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
var req structs.SignedBlindedBeaconBlockElectra
err := json.NewDecoder(r.Body).Decode(&req)
require.NoError(t, err)
@@ -880,9 +876,9 @@ func TestSubmitBlindedBlock(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "electra", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "electra", r.Header.Get(api.VersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
epr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
pro, blob, err := epr.ToProto()
@@ -894,7 +890,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
ssz, err := combined.MarshalSSZ()
require.NoError(t, err)
header := http.Header{}
header.Set(api.EthConsensusVersionHeader, "electra")
header.Set(api.VersionHeader, "electra")
return &http.Response{
StatusCode: http.StatusOK,
Header: header,
@@ -1566,9 +1562,9 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.JsonMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
// Post-Fulu: only return status code, no payload
return &http.Response{
StatusCode: http.StatusAccepted,
@@ -1591,9 +1587,9 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.ContentTypeHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get(api.AcceptHeader))
require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
// Post-Fulu: only return status code, no payload
return &http.Response{
StatusCode: http.StatusAccepted,
@@ -1617,7 +1613,7 @@ func TestSubmitBlindedBlockPostFulu(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockV2Path, r.URL.Path)
require.Equal(t, "bellatrix", r.Header.Get(api.EthConsensusVersionHeader))
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
message := ErrorMessage{
Code: 400,
Message: "Bad Request",

View File

@@ -21,4 +21,3 @@ var ErrUnsupportedMediaType = errors.Wrap(ErrNotOK, "The media type in \"Content
// ErrNotAcceptable specifically means that a '406 - Not Acceptable' was received from the API.
var ErrNotAcceptable = errors.Wrap(ErrNotOK, "The accept header value is not acceptable")
var ErrBadGateway = errors.Wrap(ErrNotOK, "recv 502 BadGateway response from API")

View File

@@ -73,8 +73,8 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
Data: []byte(errors.Wrap(err, "failed to create HTTP request").Error()),
}
}
req.Header.Set(api.AcceptHeader, api.EventStreamMediaType)
req.Header.Set(api.ConnectionHeader, api.KeepAlive)
req.Header.Set("Accept", api.EventStreamMediaType)
req.Header.Set("Connection", api.KeepAlive)
resp, err := h.httpClient.Do(req)
if err != nil {
eventsChannel <- &Event{

View File

@@ -4,8 +4,6 @@ import (
"fmt"
"net/http"
"time"
"github.com/OffchainLabs/prysm/v6/api"
)
// ReqOption is a request functional option.
@@ -14,14 +12,14 @@ type ReqOption func(*http.Request)
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
func WithSSZEncoding() ReqOption {
return func(req *http.Request) {
req.Header.Set(api.AcceptEncodingHeader, api.OctetStreamMediaType)
req.Header.Set("Accept", "application/octet-stream")
}
}
// WithAuthorizationToken is a request functional option that adds header for authorization token.
func WithAuthorizationToken(token string) ReqOption {
return func(req *http.Request) {
req.Header.Set(api.AuthorizationHeader, fmt.Sprintf("%s %s", api.BearerAuthorization, token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
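For readers unfamiliar with the functional-option shape used in this file: a ReqOption is just a closure over *http.Request, so any number of them can be composed before the request is sent. A small self-contained sketch, assuming an invented withHeader helper and a placeholder URL:

package main

import (
	"fmt"
	"net/http"
)

// reqOption mirrors the ReqOption type above: a function that mutates a request.
type reqOption func(*http.Request)

// withHeader builds an option that sets a single header.
func withHeader(key, value string) reqOption {
	return func(req *http.Request) {
		req.Header.Set(key, value)
	}
}

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:3500/eth/v1/node/version", nil)
	if err != nil {
		panic(err)
	}
	// Options apply in order; a later option can overwrite an earlier one.
	opts := []reqOption{
		withHeader("Accept", "application/octet-stream"),
		withHeader("Authorization", "Bearer example-token"),
	}
	for _, o := range opts {
		o(req)
	}
	fmt.Println(req.Header.Get("Accept"), req.Header.Get("Authorization"))
}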

View File

@@ -1,25 +0,0 @@
package client
import "net/http"
// CustomHeadersTransport adds custom headers to each request
type CustomHeadersTransport struct {
base http.RoundTripper
headers map[string][]string
}
func NewCustomHeadersTransport(base http.RoundTripper, headers map[string][]string) *CustomHeadersTransport {
return &CustomHeadersTransport{
base: base,
headers: headers,
}
}
func (t *CustomHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) {
for header, values := range t.headers {
for _, value := range values {
req.Header.Add(header, value)
}
}
return t.base.RoundTrip(req)
}
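Usage note for the RoundTripper shown above: because it wraps the client's transport, every request picks up the extra headers without changing any call sites. A hedged, self-contained sketch of how such a wrapper is typically wired into an http.Client; the header name and the throwaway test server are invented for the example:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// customHeadersTransport mirrors the wrapper in the file above: it injects a
// fixed set of headers, then delegates to the base RoundTripper.
type customHeadersTransport struct {
	base    http.RoundTripper
	headers map[string][]string
}

func (t *customHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	for k, values := range t.headers {
		for _, v := range values {
			req.Header.Add(k, v)
		}
	}
	return t.base.RoundTrip(req)
}

func main() {
	// Test server that echoes one header back, so the sketch is self-contained.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, r.Header.Get("X-Example"))
	}))
	defer srv.Close()

	client := &http.Client{Transport: &customHeadersTransport{
		base:    http.DefaultTransport,
		headers: map[string][]string{"X-Example": {"hello"}},
	}}
	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 hello
}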

View File

@@ -1,25 +0,0 @@
package client
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
type noopTransport struct{}
func (*noopTransport) RoundTrip(*http.Request) (*http.Response, error) {
return nil, nil
}
func TestRoundTrip(t *testing.T) {
tr := &CustomHeadersTransport{base: &noopTransport{}, headers: map[string][]string{"key1": []string{"value1", "value2"}, "key2": []string{"value3"}}}
req := httptest.NewRequest("GET", "http://foo", nil)
_, err := tr.RoundTrip(req)
require.NoError(t, err)
assert.DeepEqual(t, []string{"value1", "value2"}, req.Header.Values("key1"))
assert.DeepEqual(t, []string{"value3"}, req.Header.Values("key2"))
}

View File

@@ -3,38 +3,18 @@ package api
import "net/http"
const (
// Headers
EthConsensusVersionHeader = "Eth-Consensus-Version"
EthExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
EthExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
EthConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
AcceptEncodingHeader = "Accept-Encoding"
AcceptHeader = "Accept"
AccessControlAllowOriginHeader = "Access-Control-Allow-Origin"
AuthorizationHeader = "Authorization"
CacheControlHeader = "Cache-Control"
ConnectionHeader = "Connection"
ContentEncodingHeader = "Content-Encoding"
ContentLengthHeader = "Content-Length"
ContentTypeHeader = "Content-Type"
HostHeader = "Host"
OriginHeader = "Origin"
UserAgentHeader = "User-Agent"
// Header values
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
PlainMediaType = "text/plain"
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
GzipEncoding = "gzip"
BasicAuthorization = "Basic"
BearerAuthorization = "Bearer"
NoCache = "no-cache"
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
)
// SetSSEHeaders sets the headers needed for a server-sent event response.
func SetSSEHeaders(w http.ResponseWriter) {
w.Header().Set(ContentTypeHeader, EventStreamMediaType)
w.Header().Set(ConnectionHeader, KeepAlive)
w.Header().Set("Content-Type", EventStreamMediaType)
w.Header().Set("Connection", KeepAlive)
}
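For context on SetSSEHeaders above: a server-sent events response needs the text/event-stream content type and a kept-alive connection, after which the handler writes "event:"/"data:" lines and flushes them. A hedged sketch of such a handler, with an invented endpoint path and payload, not the handler from this repository:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// sseHandler is an illustrative server-sent events endpoint using the header
// values referenced above.
func sseHandler(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("Cache-Control", "no-cache")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		// Each SSE message is "event:"/"data:" lines terminated by a blank line.
		fmt.Fprintf(w, "event: head\ndata: {\"slot\": %d}\n\n", i)
		flusher.Flush()
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/eth/v1/events", sseHandler)
	// Listens until interrupted; curl -N the endpoint to watch the stream.
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}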

View File

@@ -6,11 +6,6 @@ import (
"strings"
)
var (
ErrIndexedValidationFail = "One or more messages failed validation"
ErrIndexedBroadcastFail = "One or more messages failed broadcast"
)
// DecodeError represents an error resulting from trying to decode an HTTP request.
// It tracks the full field name for which decoding failed.
type DecodeError struct {
@@ -34,38 +29,19 @@ func (e *DecodeError) Error() string {
return fmt.Sprintf("could not decode %s: %s", strings.Join(e.path, "."), e.err.Error())
}
// IndexedErrorContainer wraps a collection of indexed errors.
type IndexedErrorContainer struct {
Message string `json:"message"`
Code int `json:"code"`
Failures []*IndexedError `json:"failures"`
// IndexedVerificationFailureError wraps a collection of verification failures.
type IndexedVerificationFailureError struct {
Message string `json:"message"`
Code int `json:"code"`
Failures []*IndexedVerificationFailure `json:"failures"`
}
func (e *IndexedErrorContainer) StatusCode() int {
func (e *IndexedVerificationFailureError) StatusCode() int {
return e.Code
}
// IndexedError represents an issue when processing a single indexed object e.g. an item in an array.
type IndexedError struct {
// IndexedVerificationFailure represents an issue when verifying a single indexed object e.g. an item in an array.
type IndexedVerificationFailure struct {
Index int `json:"index"`
Message string `json:"message"`
}
// BroadcastFailedError represents an error scenario where broadcasting a published message failed.
type BroadcastFailedError struct {
msg string
err error
}
// NewBroadcastFailedError creates a new instance of BroadcastFailedError.
func NewBroadcastFailedError(msg string, err error) *BroadcastFailedError {
return &BroadcastFailedError{
msg: msg,
err: err,
}
}
// Error returns the underlying error message.
func (e *BroadcastFailedError) Error() string {
return fmt.Sprintf("could not broadcast %s: %s", e.msg, e.err.Error())
}

View File

@@ -47,7 +47,7 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
next.ServeHTTP(w, r)
return
}
contentType := r.Header.Get(api.ContentTypeHeader)
contentType := r.Header.Get("Content-Type")
if contentType == "" {
http.Error(w, "Content-Type header is missing", http.StatusUnsupportedMediaType)
return
@@ -75,7 +75,7 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if _, ok := apiutil.Negotiate(r.Header.Get(api.AcceptHeader), serverAcceptedTypes); !ok {
if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
return
}
@@ -88,7 +88,7 @@ func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
func AcceptEncodingHeaderHandler() Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get(api.AcceptEncodingHeader), api.GzipEncoding) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
next.ServeHTTP(w, r)
return
}
@@ -116,10 +116,10 @@ type gzipResponseWriter struct {
}
func (g *gzipResponseWriter) WriteHeader(statusCode int) {
if strings.Contains(g.Header().Get(api.ContentTypeHeader), api.JsonMediaType) {
if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
// Removing the current Content-Length because zipping will change it.
g.Header().Del(api.ContentLengthHeader)
g.Header().Set(api.ContentEncodingHeader, api.GzipEncoding)
g.Header().Del("Content-Length")
g.Header().Set("Content-Encoding", "gzip")
g.zip = true
}
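To make the middleware hunks above concrete: each Middleware wraps an http.Handler and can short-circuit the request, for example when the Accept header cannot be satisfied. A simplified, hedged sketch of that negotiation step; acceptJSON stands in for the Negotiate-based handler and is not the real implementation:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// middleware mirrors the shape used above: a function that wraps a handler.
type middleware func(http.Handler) http.Handler

// acceptJSON rejects requests whose Accept header cannot be satisfied with JSON.
func acceptJSON(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		accept := r.Header.Get("Accept")
		if accept != "" && accept != "*/*" && !strings.Contains(accept, "application/json") {
			http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	var chain middleware = acceptJSON
	handler := chain(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"ok":true}`)
	}))

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.Header.Set("Accept", "application/octet-stream")
	rr := httptest.NewRecorder()
	handler.ServeHTTP(rr, req)
	fmt.Println(rr.Code) // 406, since only JSON is produced in this sketch
}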

View File

@@ -132,7 +132,7 @@ func TestContentTypeHandler(t *testing.T) {
}
req := httptest.NewRequest(httpMethod, "/", nil)
if tt.contentType != "" {
req.Header.Set(api.ContentTypeHeader, tt.contentType)
req.Header.Set("Content-Type", tt.contentType)
}
rr := httptest.NewRecorder()
@@ -148,7 +148,7 @@ func TestContentTypeHandler(t *testing.T) {
func TestAcceptEncodingHeaderHandler(t *testing.T) {
dummyContent := "Test gzip middleware content"
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set(api.ContentTypeHeader, r.Header.Get(api.AcceptHeader))
w.Header().Set("Content-Type", r.Header.Get("Accept"))
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte(dummyContent))
require.NoError(t, err)
@@ -165,7 +165,7 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {
{
name: "Accept gzip",
accept: api.JsonMediaType,
acceptEncoding: api.GzipEncoding,
acceptEncoding: "gzip",
expectCompressed: true,
},
{
@@ -189,7 +189,7 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {
{
name: "SSZ",
accept: api.OctetStreamMediaType,
acceptEncoding: api.GzipEncoding,
acceptEncoding: "gzip",
expectCompressed: false,
},
}
@@ -197,16 +197,16 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
req.Header.Set(api.AcceptHeader, tt.accept)
req.Header.Set("Accept", tt.accept)
if tt.acceptEncoding != "" {
req.Header.Set(api.AcceptEncodingHeader, tt.acceptEncoding)
req.Header.Set("Accept-Encoding", tt.acceptEncoding)
}
rr := &frozenHeaderRecorder{ResponseRecorder: httptest.NewRecorder()}
handler.ServeHTTP(rr, req)
if tt.expectCompressed {
require.Equal(t, api.GzipEncoding, rr.frozenHeader.Get(api.ContentEncodingHeader), "Expected Content-Encoding header to be 'gzip'")
require.Equal(t, "gzip", rr.frozenHeader.Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")
compressedBody := rr.Body.Bytes()
require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")
@@ -230,7 +230,7 @@ func TestAcceptEncodingHeaderHandler(t *testing.T) {
}
func TestAcceptHeaderHandler(t *testing.T) {
acceptedTypes := []string{api.JsonMediaType, api.OctetStreamMediaType}
acceptedTypes := []string{"application/json", "application/octet-stream"}
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
@@ -295,7 +295,7 @@ func TestAcceptHeaderHandler(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
if tt.acceptHeader != "" {
req.Header.Set(api.AcceptHeader, tt.acceptHeader)
req.Header.Set("Accept", tt.acceptHeader)
}
rr := httptest.NewRecorder()

View File

@@ -296,8 +296,3 @@ type GetBlobsResponse struct {
Finalized bool `json:"finalized"`
Data []string `json:"data"` //blobs
}
type SSZQueryRequest struct {
Query string `json:"query"`
IncludeProof bool `json:"include_proof,omitempty"`
}

View File

@@ -173,7 +173,6 @@ go_test(
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -30,7 +30,7 @@ var (
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = errors.New("not descendant of finalized checkpoint")
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")

View File

@@ -16,6 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -107,7 +108,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
@@ -134,7 +135,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],
@@ -346,24 +347,13 @@ func (s *Service) notifyNewHeadEvent(
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
parentRoot, err := s.ParentRoot([32]byte(newHeadRoot))
if err != nil {
return errors.Wrap(err, "could not obtain parent root in forkchoice")
}
parentSlot, err := s.RecentBlockSlot(parentRoot)
if err != nil {
return errors.Wrap(err, "could not obtain parent slot in forkchoice")
}
epochTransition := slots.ToEpoch(newHeadSlot) > slots.ToEpoch(parentSlot)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.NewHead,
Data: &ethpbv1.EventHead{
Slot: newHeadSlot,
Block: newHeadRoot,
State: newHeadStateRoot,
EpochTransition: epochTransition,
EpochTransition: slots.IsEpochStart(newHeadSlot),
PreviousDutyDependentRoot: previousDutyDependentRoot[:],
CurrentDutyDependentRoot: currentDutyDependentRoot[:],
ExecutionOptimistic: isOptimistic,
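The EpochTransition change above trades a parent-epoch comparison for a slot-boundary check; the two agree in the common case but can differ when slots are skipped around an epoch boundary. A tiny hedged sketch of both computations, with a fixed slots-per-epoch constant and helper names standing in for slots.ToEpoch and slots.IsEpochStart:

package main

import "fmt"

const slotsPerEpoch = 32

// toEpoch and isEpochStart are illustrative stand-ins for the slot helpers used above.
func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func isEpochStart(slot uint64) bool { return slot%slotsPerEpoch == 0 }

func main() {
	// Head directly follows its parent across the boundary: both checks agree.
	parentSlot, headSlot := uint64(31), uint64(32)
	fmt.Println(toEpoch(headSlot) > toEpoch(parentSlot)) // true
	fmt.Println(isEpochStart(headSlot))                  // true

	// Skipped slots around the boundary: the parent-epoch comparison still
	// reports a transition, while the slot-boundary check does not.
	parentSlot, headSlot = 30, 33
	fmt.Println(toEpoch(headSlot) > toEpoch(parentSlot)) // true
	fmt.Println(isEpochStart(headSlot))                  // false
}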

View File

@@ -162,9 +162,6 @@ func Test_notifyNewHeadEvent(t *testing.T) {
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
st, blk, err = prepareForkchoiceState(t.Context(), 1, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -199,9 +196,6 @@ func Test_notifyNewHeadEvent(t *testing.T) {
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
st, blk, err = prepareForkchoiceState(t.Context(), 0, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
@@ -219,37 +213,6 @@ func Test_notifyNewHeadEvent(t *testing.T) {
}
require.DeepSSZEqual(t, wanted, eventHead)
})
t.Run("epoch transition", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
srv.originBlockRoot = [32]byte{1}
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
st, blk, err = prepareForkchoiceState(t.Context(), 32, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadSlot := params.BeaconConfig().SlotsPerEpoch
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), newHeadSlot, bState, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
require.Equal(t, true, ok)
wanted := &ethpbv1.EventHead{
Slot: newHeadSlot,
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: true,
PreviousDutyDependentRoot: params.BeaconConfig().ZeroHash[:],
CurrentDutyDependentRoot: srv.originBlockRoot[:],
}
require.DeepSSZEqual(t, wanted, eventHead)
})
}
func TestRetrieveHead_ReadOnly(t *testing.T) {

View File

@@ -109,7 +109,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
}
// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
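The doc comment above imposes two preconditions on the inputs, equal lengths and ascending cell indices; a small hedged helper like the following could validate them before calling into the KZG library. checkRecoverInputs is an invented name for illustration, not part of the package:

package main

import (
	"errors"
	"fmt"
	"sort"
)

// checkRecoverInputs enforces the documented preconditions: both slices must
// have the same length and the indices must be sorted in ascending order.
func checkRecoverInputs(cellIndices []uint64, partialCellCount int) error {
	if len(cellIndices) != partialCellCount {
		return errors.New("cellIndices and partialCells must have the same length")
	}
	ascending := sort.SliceIsSorted(cellIndices, func(i, j int) bool {
		return cellIndices[i] < cellIndices[j]
	})
	if !ascending {
		return errors.New("cellIndices must be sorted in ascending order")
	}
	return nil
}

func main() {
	fmt.Println(checkRecoverInputs([]uint64{0, 3, 7}, 3)) // <nil>
	fmt.Println(checkRecoverInputs([]uint64{3, 0}, 2))    // sorted-order error
}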

View File

@@ -72,7 +72,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.roblock.Block())
@@ -93,8 +93,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
defer s.sendStateFeedOnBlock(cfg) // only send event after successful insertion
start := time.Now()
cfg.headRoot, err = s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
@@ -714,7 +712,7 @@ func (s *Service) areDataColumnsAvailable(
nodeID := s.cfg.P2P.NodeID()
// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(ctx)
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}

View File

@@ -9,10 +9,8 @@ import (
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
@@ -2415,8 +2413,6 @@ func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
}
func TestMissingBlobIndices(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
expected [][]byte
@@ -2430,23 +2426,23 @@ func TestMissingBlobIndices(t *testing.T) {
},
{
name: "expected exceeds max",
expected: fakeCommitments(maxBlobs + 1),
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
expected: fakeCommitments(maxBlobs),
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
expected: fakeCommitments(maxBlobs),
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
expected: fakeCommitments(maxBlobs),
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},
@@ -2479,8 +2475,8 @@ func TestMissingBlobIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
require.NoError(t, bm.CreateFakeIndices(c.root, ds, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, ds)
require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
@@ -2908,21 +2904,22 @@ type testIsAvailableParams struct {
columnsToSave []uint64
}
func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
ctx, cancel := context.WithCancel(t.Context())
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
options := append(p.options, WithDataColumnStorage(dataColumnStorage))
options := append(params.options, WithDataColumnStorage(dataColumnStorage))
service, _ := minimalTestService(t, options...)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32, util.WithElectraStateSlot(fs))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
err := service.saveGenesisData(ctx, genesisState)
require.NoError(t, err)
conf := util.DefaultBlockGenConfig()
conf.NumBlobKzgCommitments = p.blobKzgCommitmentsCount
conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, fs+1)
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
require.NoError(t, err)
block := signedBeaconBlock.Block
@@ -2932,8 +2929,8 @@ func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Contex
root, err := block.HashTreeRoot()
require.NoError(t, err)
dataColumnsParams := make([]util.DataColumnParam, 0, len(p.columnsToSave))
for _, i := range p.columnsToSave {
dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
for _, i := range params.columnsToSave {
dataColumnParam := util.DataColumnParam{
Index: i,
Slot: block.Slot,
@@ -2957,12 +2954,8 @@ func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Contex
}
func TestIsDataAvailable(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch, cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch, cfg.ElectraForkEpoch, cfg.FuluForkEpoch = 0, 0, 0, 0, 0, 0
params.OverrideBeaconConfig(cfg)
t.Run("Fulu - out of retention window", func(t *testing.T) {
params := testIsAvailableParams{}
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
@@ -2979,6 +2972,7 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
@@ -3149,159 +3143,6 @@ func TestIsDataAvailable(t *testing.T) {
})
}
// Test_postBlockProcess_EventSending tests that block processed events are only sent
// when block processing succeeds according to the decision tree:
//
// Block Processing Flow:
// ├─ InsertNode FAILS (fork choice timeout)
// │ └─ blockProcessed = false ❌ NO EVENT
// │
// ├─ InsertNode succeeds
// │ ├─ handleBlockAttestations FAILS
// │ │ └─ blockProcessed = false ❌ NO EVENT
// │ │
// │ ├─ Block is NON-CANONICAL (not head)
// │ │ └─ blockProcessed = true ✅ SEND EVENT (Line 111)
// │ │
// │ ├─ Block IS CANONICAL (new head)
// │ │ ├─ getFCUArgs FAILS
// │ │ │ └─ blockProcessed = true ✅ SEND EVENT (Line 117)
// │ │ │
// │ │ ├─ sendFCU FAILS
// │ │ │ └─ blockProcessed = false ❌ NO EVENT
// │ │ │
// │ │ └─ Full success
// │ │ └─ blockProcessed = true ✅ SEND EVENT (Line 125)
func Test_postBlockProcess_EventSending(t *testing.T) {
ctx := context.Background()
// Helper to create a minimal valid block and state
createTestBlockAndState := func(t *testing.T, slot primitives.Slot, parentRoot [32]byte) (consensusblocks.ROBlock, state.BeaconState) {
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(slot))
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
blk := util.NewBeaconBlock()
blk.Block.Slot = slot
blk.Block.ProposerIndex = 0
blk.Block.ParentRoot = parentRoot[:]
blk.Block.StateRoot = stateRoot[:]
signed := util.HydrateSignedBeaconBlock(blk)
roBlock, err := consensusblocks.NewSignedBeaconBlock(signed)
require.NoError(t, err)
roBlk, err := consensusblocks.NewROBlock(roBlock)
require.NoError(t, err)
return roBlk, st
}
tests := []struct {
name string
setupService func(*Service, [32]byte)
expectEvent bool
expectError bool
errorContains string
}{
{
name: "Block successfully processed - sends event",
setupService: func(s *Service, blockRoot [32]byte) {
// Default setup should work
},
expectEvent: true,
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Create service with required options
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize fork choice with genesis block
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(0))
genesisBlock := util.NewBeaconBlock()
genesisBlock.Block.StateRoot = bytesutil.PadTo([]byte("genesisState"), 32)
signedGenesis := util.HydrateSignedBeaconBlock(genesisBlock)
block, err := consensusblocks.NewSignedBeaconBlock(signedGenesis)
require.NoError(t, err)
genesisRoot, err := block.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, block))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, genesisRoot))
genesisROBlock, err := consensusblocks.NewROBlock(block)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, genesisROBlock))
// Create test block and state with genesis as parent
roBlock, postSt := createTestBlockAndState(t, 100, genesisRoot)
// Apply additional service setup if provided
if tt.setupService != nil {
tt.setupService(service, roBlock.Root())
}
// Create post block process config
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roBlock,
postState: postSt,
isValidPayload: true,
}
// Execute postBlockProcess
err = service.postBlockProcess(cfg)
// Check error expectation
if tt.expectError {
require.NotNil(t, err)
if tt.errorContains != "" {
require.ErrorContains(t, tt.errorContains, err)
}
} else {
require.NoError(t, err)
}
// Give a moment for deferred functions to execute
time.Sleep(10 * time.Millisecond)
// Check event expectation
notifier := service.cfg.StateNotifier.(*mock.MockStateNotifier)
events := notifier.ReceivedEvents()
if tt.expectEvent {
require.NotEqual(t, 0, len(events), "Expected event to be sent but none were received")
// Verify it's a BlockProcessed event
foundBlockProcessed := false
for _, evt := range events {
if evt.Type == statefeed.BlockProcessed {
foundBlockProcessed = true
data, ok := evt.Data.(*statefeed.BlockProcessedData)
require.Equal(t, true, ok, "Event data should be BlockProcessedData")
require.Equal(t, roBlock.Root(), data.BlockRoot, "Event should contain correct block root")
break
}
}
require.Equal(t, true, foundBlockProcessed, "Expected BlockProcessed event type")
} else {
// For no-event cases, verify no BlockProcessed events were sent
for _, evt := range events {
require.NotEqual(t, statefeed.BlockProcessed, evt.Type,
"Expected no BlockProcessed event but one was sent")
}
}
})
}
}
func setupLightClientTestRequirements(ctx context.Context, t *testing.T, s *Service, v int, options ...util.LightClientOption) (*util.TestLightClient, *postBlockProcessConfig) {
var l *util.TestLightClient
switch v {

View File

@@ -219,9 +219,6 @@ func (s *Service) validateExecutionAndConsensus(
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, block)
if errors.Is(err, ErrNotDescendantOfFinalized) {
return invalidBlock{error: err, root: block.Root()}
}
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
@@ -588,17 +585,17 @@ func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.Bea
func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySignedBeaconBlock, preState state.BeaconState) {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
ctx := s.ctx
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committees, err := helpers.AttestationCommitteesFromState(ctx, preState, att)
if err != nil {
log.WithError(err).Error("Could not get attestation committees")
continue
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
continue
return
}
s.cfg.SlasherAttestationsFeed.Send(&types.WrappedIndexedAtt{IndexedAtt: indexedAtt})
}

View File

@@ -472,8 +472,8 @@ func (s *Service) removeStartupState() {
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement
beaconConfig := params.BeaconConfig()
custodyRequirement := beaconConfig.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
@@ -493,7 +493,7 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = cfg.NumberOfCustodyGroups
custodyGroupCount = beaconConfig.NumberOfColumns
}
// Safely compute the fulu fork slot.
@@ -536,11 +536,11 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
}
func fuluForkSlot() (primitives.Slot, error) {
cfg := params.BeaconConfig()
beaconConfig := params.BeaconConfig()
fuluForkEpoch := cfg.FuluForkEpoch
if fuluForkEpoch == cfg.FarFutureEpoch {
return cfg.FarFutureSlot, nil
fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
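For the fuluForkSlot helper above: the far-future epoch sentinel means the fork is unscheduled, otherwise the fork slot is simply epoch times SLOTS_PER_EPOCH with an overflow guard. A hedged arithmetic sketch, assuming 32 slots per epoch and invented helper names:

package main

import (
	"errors"
	"fmt"
	"math"
)

const slotsPerEpoch = 32

// epochStart is an illustrative stand-in for slots.EpochStart: the first slot
// of an epoch, with an overflow guard.
func epochStart(epoch uint64) (uint64, error) {
	if epoch > math.MaxUint64/slotsPerEpoch {
		return 0, errors.New("epoch too large to convert to a slot")
	}
	return epoch * slotsPerEpoch, nil
}

func main() {
	const farFutureEpoch = uint64(math.MaxUint64)
	fuluForkEpoch := uint64(10)

	// Mirror of the guard above: a fork scheduled at the far-future epoch has
	// no concrete slot yet.
	if fuluForkEpoch == farFutureEpoch {
		fmt.Println("fork not scheduled")
		return
	}
	slot, err := epochStart(fuluForkEpoch)
	fmt.Println(slot, err) // 320 <nil>
}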

View File

@@ -23,11 +23,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -564,9 +562,8 @@ func TestNotifyIndex(t *testing.T) {
var root [32]byte
copy(root[:], "exampleRoot")
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Test notifying a new index
bn.notifyIndex(root, 1, ds)
bn.notifyIndex(root, 1, 1)
if !bn.seenIndex[root][1] {
t.Errorf("Index was not marked as seen")
}
@@ -583,7 +580,7 @@ func TestNotifyIndex(t *testing.T) {
}
// Test notifying a new index again
bn.notifyIndex(root, 2, ds)
bn.notifyIndex(root, 2, 1)
if !bn.seenIndex[root][2] {
t.Errorf("Index was not marked as seen")
}
@@ -598,103 +595,3 @@ func TestNotifyIndex(t *testing.T) {
t.Errorf("Notifier channel did not receive the index")
}
}
func TestUpdateCustodyInfoInDB(t *testing.T) {
const (
fuluForkEpoch = 10
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
numberOfColumns = uint64(128)
)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
cfg.NumberOfColumns = numberOfColumns
params.OverrideBeaconConfig(cfg)
ctx := t.Context()
pbBlock := util.NewBeaconBlock()
pbBlock.Block.Slot = 12
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
require.NoError(t, err)
roBlock, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
t.Run("CGC increases before fulu", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// After Fulu
// ----------
actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("CGC increases after fulu", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
// After Fulu
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
}

View File

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
mb.broadcastCalled = true
return nil
}
@@ -106,14 +106,14 @@ type mockCustodyManager struct {
custodyGroupCount uint64
}
func (dch *mockCustodyManager) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.earliestAvailableSlot, nil
}
func (dch *mockCustodyManager) CustodyGroupCount(context.Context) (uint64, error) {
func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
@@ -130,14 +130,6 @@ func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitive
return earliestAvailableSlot, custodyGroupCount, nil
}
func (dch *mockCustodyManager) UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error {
dch.mut.Lock()
defer dch.mut.Unlock()
dch.earliestAvailableSlot = earliestAvailableSlot
return nil
}
func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
return 0
}

View File

@@ -472,36 +472,6 @@ func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
return s.InitSyncBlockRoots[rt]
}
func (s *ChainService) AvailableBlocks(ctx context.Context, blockRoots [][32]byte) map[[32]byte]bool {
if s.DB == nil {
return nil
}
count := len(blockRoots)
availableRoots := make(map[[32]byte]bool, count)
notInDBRoots := make([][32]byte, 0, count)
for _, root := range blockRoots {
if s.DB.HasBlock(ctx, root) {
availableRoots[root] = true
continue
}
notInDBRoots = append(notInDBRoots, root)
}
if s.InitSyncBlockRoots == nil {
return availableRoots
}
for _, root := range notInDBRoots {
if s.InitSyncBlockRoots[root] {
availableRoots[root] = true
}
}
return availableRoots
}
// RecentBlockSlot mocks the same method in the chain service.
func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
return s.BlockSlot, nil

View File

@@ -3,7 +3,6 @@ package blockchain
import (
"context"
"fmt"
"slices"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filters"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -82,10 +81,12 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
if err != nil {
return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
}
if slices.Contains(roots, v.root) {
log.Info("Weak subjectivity check has passed!!")
v.verified = true
return nil
for _, root := range roots {
if v.root == root {
log.Info("Weak subjectivity check has passed!!")
v.verified = true
return nil
}
}
return errors.Wrap(errWSBlockNotFoundInEpoch, fmt.Sprintf("root=%#x, epoch=%d", v.root, v.epoch))
}

View File

@@ -5,6 +5,7 @@ package cache
import (
"context"
"errors"
"math"
"sync"
"time"
@@ -271,7 +272,7 @@ func (c *CommitteeCache) checkInProgress(ctx context.Context, seed [32]byte) err
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = min(delay, maxDelay)
delay = math.Min(delay, maxDelay)
}
return nil
}
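The wait loop in this cache (and the SkipSlotCache hunk further below) uses the same capped exponential backoff: sleep, multiply the delay, clamp it at a maximum. A minimal hedged sketch of the pattern with invented constants:

package main

import (
	"fmt"
	"time"
)

// waitWithBackoff is an illustrative version of the polling loop above: sleep,
// grow the delay geometrically, and clamp it at a maximum.
func waitWithBackoff(done func() bool) {
	const (
		delayFactor = 10
		maxDelay    = float64(100 * time.Millisecond)
	)
	delay := float64(10 * time.Microsecond)
	for !done() {
		time.Sleep(time.Duration(delay))
		delay *= delayFactor
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

func main() {
	deadline := time.Now().Add(50 * time.Millisecond)
	waitWithBackoff(func() bool { return time.Now().After(deadline) })
	fmt.Println("in-progress flag cleared (simulated)")
}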

View File

@@ -52,7 +52,7 @@ func create(leaves [][32]byte, depth uint64) MerkleTreeNode {
if depth == 0 {
return &LeafNode{hash: leaves[0]}
}
split := min(math.PowerOf2(depth-1), length)
split := math.Min(math.PowerOf2(depth-1), length)
left := create(leaves[0:split], depth-1)
right := create(leaves[split:], depth-1)
return &InnerNode{left: left, right: right}
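The create function above splits its leaves at min(2^(depth-1), length) and recurses on both halves. A self-contained hedged sketch of the same idea using sha256; the zero-subtree padding here is invented for the example and is not the package's actual scheme:

package main

import (
	"crypto/sha256"
	"fmt"
)

// merkleRoot mirrors the split above: a subtree of the given depth covers up
// to 2^depth leaves, so the left child takes min(2^(depth-1), len(leaves)).
func merkleRoot(leaves [][32]byte, depth uint64) [32]byte {
	if len(leaves) == 0 {
		return zeroHash(depth)
	}
	if depth == 0 {
		return leaves[0]
	}
	split := uint64(1) << (depth - 1)
	if uint64(len(leaves)) < split {
		split = uint64(len(leaves))
	}
	left := merkleRoot(leaves[:split], depth-1)
	right := merkleRoot(leaves[split:], depth-1)
	return sha256.Sum256(append(left[:], right[:]...))
}

// zeroHash returns the root of an empty subtree of the given depth.
func zeroHash(depth uint64) [32]byte {
	var h [32]byte
	for i := uint64(0); i < depth; i++ {
		h = sha256.Sum256(append(h[:], h[:]...))
	}
	return h
}

func main() {
	leaves := [][32]byte{{1}, {2}, {3}}
	fmt.Printf("%x\n", merkleRoot(leaves, 2))
}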

View File

@@ -2,6 +2,7 @@ package cache
import (
"context"
"math"
"sync"
"time"
@@ -89,7 +90,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = min(delay, maxDelay)
delay = math.Min(delay, maxDelay)
}
span.SetAttributes(trace.BoolAttribute("inProgress", inProgress))

View File

@@ -49,22 +49,13 @@ func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
if err != nil {
return nil, 0, errors.Wrap(err, "could not filter sync committee votes")
}
if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, 0, errors.Wrap(err, "could not verify sync committee signature")
}
return s, reward, nil
}
// ProcessSyncAggregateNoVerifySig processes the sync aggregate without verifying the sync committee signature.
// This is useful in scenarios such as block reward calculation, where we can assume the data in the block is valid.
func ProcessSyncAggregateNoVerifySig(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, uint64, error) {
s, _, reward, err := processSyncAggregate(ctx, s, sync)
if err != nil {
return nil, 0, errors.Wrap(err, "could not filter sync committee votes")
}
return s, reward, nil
}
// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
// verifying the BLS signatures. It returns the modified beacon state, the list of validators'
// public keys that voted (for future signature verification) and the proposer reward for including
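The hunk above pairs a verifying and a non-verifying entry point around a shared unexported worker, which is handy for reward-only callers that already trust the block. A generic hedged sketch of that wrapper pattern, with invented types and no BLS dependency:

package main

import (
	"errors"
	"fmt"
)

// result, process, processAndVerify and processNoVerify are invented to
// illustrate the pattern; they are not the sync-aggregate functions themselves.
type result struct{ reward uint64 }

func process(data []byte) (result, []string, error) {
	if len(data) == 0 {
		return result{}, nil, errors.New("empty input")
	}
	// Pretend these are the voting public keys recovered while processing.
	return result{reward: uint64(len(data))}, []string{"key1", "key2"}, nil
}

// processAndVerify is the strict path: process, then verify the signature.
func processAndVerify(data []byte, verify func([]string) error) (result, error) {
	res, votedKeys, err := process(data)
	if err != nil {
		return result{}, err
	}
	if err := verify(votedKeys); err != nil {
		return result{}, fmt.Errorf("signature verification failed: %w", err)
	}
	return res, nil
}

// processNoVerify skips verification, e.g. for reward calculation on data that
// is already known to be valid.
func processNoVerify(data []byte) (result, error) {
	res, _, err := process(data)
	return res, err
}

func main() {
	r, err := processAndVerify([]byte("block body"), func([]string) error { return nil })
	fmt.Println(r.reward, err)
	r, err = processNoVerify([]byte("block body"))
	fmt.Println(r.reward, err)
}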

View File

@@ -53,19 +53,9 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}
// Verify that ProcessSyncAggregateNoVerifySig and ProcessSyncAggregate have the same outcome.
beaconStateNoVerifySig := beaconState.Copy()
beaconStateNoVerifySig, rewardNoVerifySig, err := altair.ProcessSyncAggregateNoVerifySig(t.Context(), beaconStateNoVerifySig, syncAggregate)
require.NoError(t, err)
sszNoVerifySig, err := beaconStateNoVerifySig.MarshalSSZ()
require.NoError(t, err)
var reward uint64
beaconState, reward, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
require.NoError(t, err)
ssz, err := beaconState.MarshalSSZ()
require.NoError(t, err)
assert.DeepEqual(t, sszNoVerifySig, ssz, "States resulting from ProcessSyncAggregateNoVerifySig and ProcessSyncAggregate are not equal")
assert.Equal(t, rewardNoVerifySig, reward, "Rewards resulting from ProcessSyncAggregateNoVerifySig and ProcessSyncAggregate are not equal")
assert.Equal(t, uint64(72192), reward)
// Use a non-sync committee index to compare profitability.

View File

@@ -16,6 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -208,7 +209,7 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}
cfg := params.BeaconConfig()
modulo := max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
modulo := math.Max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
hashedSig := hash.Hash(sig)
return bytesutil.FromBytes8(hashedSig[:8])%modulo == 0, nil
}
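
The aggregator check above reduces to hashing the signature and taking it modulo a target rate. With the mainnet values I believe apply (SyncCommitteeSize=512, SyncCommitteeSubnetCount=4, TargetAggregatorsPerSyncSubcommittee=16), the modulo works out to 8, i.e. roughly one in eight subnet members self-selects as an aggregator. A small sketch of the arithmetic:

package main

import "fmt"

func main() {
	// Assumed mainnet constants; confirm against params.BeaconConfig().
	const (
		syncCommitteeSize                    = 512
		syncCommitteeSubnetCount             = 4
		targetAggregatorsPerSyncSubcommittee = 16
	)
	modulo := syncCommitteeSize / syncCommitteeSubnetCount / targetAggregatorsPerSyncSubcommittee
	if modulo < 1 {
		modulo = 1 // the max(1, ...) guard matters for small test configs
	}
	fmt.Println(modulo) // 8: a member aggregates when hash(sig)[:8] % 8 == 0
}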

View File

@@ -12,46 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
)
// ConvertToAltair converts a Phase 0 beacon state to an Altair beacon state.
func ConvertToAltair(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: make([]byte, numValidators),
CurrentEpochParticipation: make([]byte, numValidators),
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: make([]uint64, numValidators),
}
newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
if err != nil {
return nil, err
}
return newState, nil
}
// UpgradeToAltair updates input state to return the version Altair state.
//
// Spec code:
@@ -104,7 +64,39 @@ func ConvertToAltair(state state.BeaconState) (state.BeaconState, error) {
// post.next_sync_committee = get_next_sync_committee(post)
// return post
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
newState, err := ConvertToAltair(state)
epoch := time.CurrentEpoch(state)
numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: make([]byte, numValidators),
CurrentEpochParticipation: make([]byte, numValidators),
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: make([]uint64, numValidators),
}
newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
if err != nil {
return nil, err
}

View File

@@ -39,6 +39,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -42,9 +42,6 @@ func ProcessAttesterSlashings(
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process attester slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
@@ -55,28 +52,6 @@ func ProcessAttesterSlashings(
return beaconState, nil
}
// ProcessAttesterSlashingsNoVerify processes attester slashings without verifying them.
// This is useful in scenarios such as block reward calculation, where we can assume the data
// in the block is valid.
func ProcessAttesterSlashingsNoVerify(
ctx context.Context,
beaconState state.BeaconState,
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process attester slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashingNoVerify(ctx, beaconState, slashing, exitInfo)
if err != nil {
return nil, err
}
}
return beaconState, nil
}
// ProcessAttesterSlashing processes individual attester slashing.
func ProcessAttesterSlashing(
ctx context.Context,
@@ -84,36 +59,9 @@ func ProcessAttesterSlashing(
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil {
return nil, errors.New("exit info is required to process attester slashing")
}
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
}
return processAttesterSlashing(ctx, beaconState, slashing, exitInfo)
}
// ProcessAttesterSlashingNoVerify processes individual attester slashing without verifying it.
// This is useful in scenarios such as block reward calculation, where we can assume the data
// in the block is valid.
func ProcessAttesterSlashingNoVerify(
ctx context.Context,
beaconState state.BeaconState,
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil {
return nil, errors.New("exit info is required to process attester slashing")
}
return processAttesterSlashing(ctx, beaconState, slashing, exitInfo)
}
func processAttesterSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
slashableIndices := SlashableAttesterIndices(slashing)
sort.SliceStable(slashableIndices, func(i, j int) bool {
return slashableIndices[i] < slashableIndices[j]

View File

@@ -242,18 +242,8 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, tc.st.SetSlot(currentSlot))
// Verify that ProcessAttesterSlashingsNoVerify and ProcessAttesterSlashings have the same outcome.
stNoVerify := tc.st.Copy()
newStateNoVerify, err := blocks.ProcessAttesterSlashingsNoVerify(t.Context(), stNoVerify, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(stNoVerify))
require.NoError(t, err)
sszNoVerify, err := newStateNoVerify.MarshalSSZ()
require.NoError(t, err)
newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
require.NoError(t, err)
ssz, err := newState.MarshalSSZ()
require.NoError(t, err)
assert.DeepEqual(t, sszNoVerify, ssz, "States resulting from ProcessAttesterSlashingsNoVerify and ProcessAttesterSlashings are not equal")
newRegistry := newState.Validators()
// Given the intersection of slashable indices is [1], only validator

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/contracts/deposit"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -33,7 +34,7 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi
if err != nil {
return nil, err
}
validator.EffectiveBalance = min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
validator.EffectiveBalance = math.Min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
if validator.EffectiveBalance ==
params.BeaconConfig().MaxEffectiveBalance {
validator.ActivationEligibilityEpoch = 0
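
The effective-balance line above rounds the deposit balance down to a whole increment and caps it at the maximum. A worked example, assuming the phase 0 mainnet values of a 1 ETH increment and a 32 ETH cap (amounts in Gwei):

package main

import "fmt"

func main() {
	// Assumed mainnet constants, in Gwei; confirm against params.BeaconConfig().
	const (
		gwei                      = uint64(1_000_000_000)
		effectiveBalanceIncrement = 1 * gwei
		maxEffectiveBalance       = 32 * gwei
	)
	balance := uint64(33_700_000_000) // 33.7 ETH deposited

	effective := balance - balance%effectiveBalanceIncrement // round down to 33 ETH
	if effective > maxEffectiveBalance {
		effective = maxEffectiveBalance // cap at 32 ETH
	}
	fmt.Println(effective / gwei) // 32
}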

View File

@@ -6,4 +6,3 @@ var errNilSignedWithdrawalMessage = errors.New("nil SignedBLSToExecutionChange m
var errNilWithdrawalMessage = errors.New("nil BLSToExecutionChange message")
var errInvalidBLSPrefix = errors.New("withdrawal credential prefix is not a BLS prefix")
var errInvalidWithdrawalCredentials = errors.New("withdrawal credentials do not match")
var ErrInvalidSignature = errors.New("invalid signature")

View File

@@ -55,9 +55,6 @@ func ProcessVoluntaryExits(
if len(exits) == 0 {
return beaconState, nil
}
if exitInfo == nil {
return nil, errors.New("exit info required to process voluntary exits")
}
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")

View File

@@ -51,9 +51,6 @@ func ProcessProposerSlashings(
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process proposer slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
@@ -64,28 +61,6 @@ func ProcessProposerSlashings(
return beaconState, nil
}
// ProcessProposerSlashingsNoVerify processes proposer slashings without verifying them.
// This is useful in scenarios such as block reward calculation, where we can assume the data
// in the block is valid.
func ProcessProposerSlashingsNoVerify(
ctx context.Context,
beaconState state.BeaconState,
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process proposer slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashingNoVerify(ctx, beaconState, slashing, exitInfo)
if err != nil {
return nil, err
}
}
return beaconState, nil
}
// ProcessProposerSlashing processes individual proposer slashing.
func ProcessProposerSlashing(
ctx context.Context,
@@ -93,40 +68,13 @@ func ProcessProposerSlashing(
slashing *ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
var err error
if slashing == nil {
return nil, errors.New("nil proposer slashings in block body")
}
if err := VerifyProposerSlashing(beaconState, slashing); err != nil {
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
return processProposerSlashing(ctx, beaconState, slashing, exitInfo)
}
// ProcessProposerSlashingNoVerify processes individual proposer slashing without verifying it.
// This is useful in scenarios such as block reward calculation, where we can assume the data
// in the block is valid.
func ProcessProposerSlashingNoVerify(
ctx context.Context,
beaconState state.BeaconState,
slashing *ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if slashing == nil {
return nil, errors.New("nil proposer slashings in block body")
}
return processProposerSlashing(ctx, beaconState, slashing, exitInfo)
}
func processProposerSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing *ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil {
return nil, errors.New("exit info is required to process proposer slashing")
}
var err error
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)

View File

@@ -172,17 +172,8 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
// Verify that ProcessProposerSlashingsNoVerify and ProcessProposerSlashings have the same outcome.
beaconStateNoVerify := beaconState.Copy()
newStateNoVerify, err := blocks.ProcessProposerSlashingsNoVerify(t.Context(), beaconStateNoVerify, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconStateNoVerify))
require.NoError(t, err)
sszNoVerify, err := newStateNoVerify.MarshalSSZ()
require.NoError(t, err)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)
ssz, err := newState.MarshalSSZ()
require.NoError(t, err)
assert.DeepEqual(t, sszNoVerify, ssz, "States resulting from ProcessProposerSlashingsNoVerify and ProcessProposerSlashings are not equal")
newStateVals := newState.Validators()
if newStateVals[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {

View File

@@ -114,12 +114,9 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
}
proposerPubKey := proposer.PublicKey
sig := blk.Signature()
if err := signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
return blkRoot, nil
}); err != nil {
return ErrInvalidSignature
}
return nil
})
}
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.

View File

@@ -89,36 +89,3 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
require.NoError(t, err)
assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot))
}
func TestVerifyBlockSignatureUsingCurrentFork_InvalidSignature(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig()
bCfg.AltairForkEpoch = 100
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = 100
params.OverrideBeaconConfig(bCfg)
bState, keys := util.DeterministicGenesisState(t, 100)
altairBlk := util.NewBeaconBlockAltair()
altairBlk.Block.ProposerIndex = 0
altairBlk.Block.Slot = params.BeaconConfig().SlotsPerEpoch * 100
blkRoot, err := altairBlk.Block.HashTreeRoot()
assert.NoError(t, err)
// Sign with wrong key (proposer index 0, but using key 1)
fData := &ethpb.Fork{
Epoch: 100,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
}
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
assert.NoError(t, err)
rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
assert.NoError(t, err)
wrongSig := keys[1].Sign(rt[:]).Marshal()
altairBlk.Signature = wrongSig
wsb, err := consensusblocks.NewSignedBeaconBlock(altairBlk)
require.NoError(t, err)
err = blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot)
require.ErrorIs(t, err, blocks.ErrInvalidSignature, "Expected ErrInvalidSignature for invalid signature")
}

View File

@@ -53,15 +53,9 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
var exitInfo *v.ExitInfo
hasSlashings := len(bb.ProposerSlashings()) > 0 || len(bb.AttesterSlashings()) > 0
hasExits := len(bb.VoluntaryExits()) > 0
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {

View File

@@ -15,129 +15,6 @@ import (
"github.com/pkg/errors"
)
// ConvertToElectra converts a Deneb beacon state to an Electra beacon state. It does not perform any fork logic.
func ConvertToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := beaconState.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := beaconState.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := beaconState.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := beaconState.HistoricalSummaries()
if err != nil {
return nil, err
}
excessBlobGas, err := payloadHeader.ExcessBlobGas()
if err != nil {
return nil, err
}
blobGasUsed, err := payloadHeader.BlobGasUsed()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateElectra{
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
Slot: beaconState.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: beaconState.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
Epoch: time.CurrentEpoch(beaconState),
},
LatestBlockHeader: beaconState.LatestBlockHeader(),
BlockRoots: beaconState.BlockRoots(),
StateRoots: beaconState.StateRoots(),
HistoricalRoots: beaconState.HistoricalRoots(),
Eth1Data: beaconState.Eth1Data(),
Eth1DataVotes: beaconState.Eth1DataVotes(),
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
Validators: beaconState.Validators(),
Balances: beaconState.Balances(),
RandaoMixes: beaconState.RandaoMixes(),
Slashings: beaconState.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: beaconState.JustificationBits(),
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
ExcessBlobGas: excessBlobGas,
BlobGasUsed: blobGasUsed,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
DepositBalanceToConsume: 0,
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
PendingDeposits: make([]*ethpb.PendingDeposit, 0),
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
}
// need to cast the beaconState to use in helper functions
post, err := state_native.InitializeFromProtoUnsafeElectra(s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
}
return post, nil
}
// UpgradeToElectra updates inputs a generic state to return the version Electra state.
//
// nolint:dupword
@@ -249,7 +126,55 @@ func ConvertToElectra(beaconState state.BeaconState) (state.BeaconState, error)
//
// return post
func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
s, err := ConvertToElectra(beaconState)
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := beaconState.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := beaconState.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := beaconState.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := beaconState.HistoricalSummaries()
if err != nil {
return nil, err
}
excessBlobGas, err := payloadHeader.ExcessBlobGas()
if err != nil {
return nil, err
}
blobGasUsed, err := payloadHeader.BlobGasUsed()
if err != nil {
return nil, err
}
@@ -281,38 +206,97 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
if err != nil {
return nil, errors.Wrap(err, "failed to get total active balance")
}
if err := s.SetExitBalanceToConsume(helpers.ActivationExitChurnLimit(primitives.Gwei(tab))); err != nil {
return nil, errors.Wrap(err, "failed to set exit balance to consume")
}
if err := s.SetEarliestExitEpoch(earliestExitEpoch); err != nil {
return nil, errors.Wrap(err, "failed to set earliest exit epoch")
}
if err := s.SetConsolidationBalanceToConsume(helpers.ConsolidationChurnLimit(primitives.Gwei(tab))); err != nil {
return nil, errors.Wrap(err, "failed to set consolidation balance to consume")
s := &ethpb.BeaconStateElectra{
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
Slot: beaconState.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: beaconState.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
Epoch: time.CurrentEpoch(beaconState),
},
LatestBlockHeader: beaconState.LatestBlockHeader(),
BlockRoots: beaconState.BlockRoots(),
StateRoots: beaconState.StateRoots(),
HistoricalRoots: beaconState.HistoricalRoots(),
Eth1Data: beaconState.Eth1Data(),
Eth1DataVotes: beaconState.Eth1DataVotes(),
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
Validators: beaconState.Validators(),
Balances: beaconState.Balances(),
RandaoMixes: beaconState.RandaoMixes(),
Slashings: beaconState.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: beaconState.JustificationBits(),
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
ExcessBlobGas: excessBlobGas,
BlobGasUsed: blobGasUsed,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
DepositBalanceToConsume: 0,
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
EarliestExitEpoch: earliestExitEpoch,
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
PendingDeposits: make([]*ethpb.PendingDeposit, 0),
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
}
// Sorting preActivationIndices based on a custom criteria
vals := s.Validators()
sort.Slice(preActivationIndices, func(i, j int) bool {
// Comparing based on ActivationEligibilityEpoch and then by index if the epochs are the same
if vals[preActivationIndices[i]].ActivationEligibilityEpoch == vals[preActivationIndices[j]].ActivationEligibilityEpoch {
if s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch == s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch {
return preActivationIndices[i] < preActivationIndices[j]
}
return vals[preActivationIndices[i]].ActivationEligibilityEpoch < vals[preActivationIndices[j]].ActivationEligibilityEpoch
return s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch < s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch
})
// need to cast the beaconState to use in helper functions
post, err := state_native.InitializeFromProtoUnsafeElectra(s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
}
for _, index := range preActivationIndices {
if err := QueueEntireBalanceAndResetValidator(s, index); err != nil {
if err := QueueEntireBalanceAndResetValidator(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
}
}
// Ensure early adopters of compounding credentials go through the activation churn
for _, index := range compoundWithdrawalIndices {
if err := QueueExcessActiveBalance(s, index); err != nil {
if err := QueueExcessActiveBalance(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue excess active balance")
}
}
return s, nil
return post, nil
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -92,18 +91,6 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
if len(wrs) == 0 {
return st, nil
}
// It is correct to compute exitInfo once for all withdrawals in the block, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
var exitInfo *validators.ExitInfo
if st.Version() < version.Electra {
exitInfo = validators.ExitInformation(st)
} else {
// After Electra, the function InitiateValidatorExit ignores the exitInfo passed to it and recomputes it anyway.
exitInfo = &validators.ExitInfo{}
}
for _, wr := range wrs {
if wr == nil {
return nil, errors.New("nil execution layer withdrawal request")
@@ -161,8 +148,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
var err error
// exitInfo is updated within InitiateValidatorExit
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, exitInfo)
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
if err != nil {
return nil, err
}

View File

@@ -96,17 +96,12 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
}
// Process validators eligible for ejection.
if len(eligibleForEjection) > 0 {
// It is safe to compute exitInfo once for all ejections in the epoch, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
exitInfo := validators.ExitInformation(st)
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, exitInfo)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
}
@@ -233,7 +228,7 @@ func ProcessSlashings(st state.BeaconState) error {
// a callback is used here to apply the following actions to all validators
// below equally.
increment := params.BeaconConfig().EffectiveBalanceIncrement
minSlashing := min(totalSlashing*slashingMultiplier, totalBalance)
minSlashing := math.Min(totalSlashing*slashingMultiplier, totalBalance)
// Modified in Electra:EIP7251
var penaltyPerEffectiveBalanceIncrement uint64

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/math"
)
// ProcessSlashingsPrecompute processes the slashed validators during epoch processing.
@@ -20,7 +21,7 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
totalSlashing += slashing
}
minSlashing := min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
minSlashing := math.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
epochToWithdraw := currentEpoch + exitLength/2
var hasSlashing bool
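
Both slashing hunks above cap the slashable amount at the total active balance: minSlashing = min(totalSlashing * multiplier, totalBalance). A small arithmetic sketch with hypothetical figures; the actual multiplier is fork-dependent and comes from params.BeaconConfig(), 2 is only assumed here:

package main

import "fmt"

func main() {
	// Hypothetical figures in Gwei, for illustration only.
	totalSlashing := uint64(64_000_000_000)          // 64 ETH slashed recently
	totalBalance := uint64(1_000_000_000_000)        // 1000 ETH active
	const proportionalSlashingMultiplier = uint64(2) // assumed; fork-dependent in practice

	minSlashing := totalSlashing * proportionalSlashingMultiplier
	if minSlashing > totalBalance {
		minSlashing = totalBalance // min(totalSlashing*multiplier, totalBalance)
	}
	fmt.Println(minSlashing) // 128000000000: well under the cap in this example
}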

View File

@@ -7,7 +7,6 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl/testnet:__pkg__",
"//consensus-types/hdiff:__subpackages__",
"//testing/spectest:__subpackages__",
"//validator/client:__pkg__",
],

View File

@@ -15,7 +15,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -18,25 +17,6 @@ import (
// UpgradeToFulu updates inputs a generic state to return the version Fulu state.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
s, err := ConvertToFulu(beaconState)
if err != nil {
return nil, errors.Wrap(err, "could not convert to fulu")
}
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
if err != nil {
return nil, err
}
pl := make([]primitives.ValidatorIndex, len(proposerLookahead))
for i, v := range proposerLookahead {
pl[i] = primitives.ValidatorIndex(v)
}
if err := s.SetProposerLookahead(pl); err != nil {
return nil, errors.Wrap(err, "failed to set proposer lookahead")
}
return s, nil
}
func ConvertToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
@@ -125,6 +105,11 @@ func ConvertToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
if err != nil {
return nil, err
}
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateFulu{
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
@@ -186,6 +171,14 @@ func ConvertToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
PendingDeposits: pendingDeposits,
PendingPartialWithdrawals: pendingPartialWithdrawals,
PendingConsolidations: pendingConsolidations,
ProposerLookahead: proposerLookahead,
}
return state_native.InitializeFromProtoUnsafeFulu(s)
// Need to cast the beaconState to use in helper functions
post, err := state_native.InitializeFromProtoUnsafeFulu(s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize post fulu beaconState")
}
return post, nil
}

View File

@@ -399,6 +399,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
ctx, span := trace.StartSpan(ctx, "helpers.CommitteeAssignments")
defer span.End()
// Verify if the epoch is valid for assignment based on the provided state.
if err := VerifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
@@ -406,15 +407,12 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
if err != nil {
return nil, err
}
// Deduplicate and make set for O(1) membership checks.
vals := make(map[primitives.ValidatorIndex]struct{}, len(validators))
vals := make(map[primitives.ValidatorIndex]struct{})
for _, v := range validators {
vals[v] = struct{}{}
}
remaining := len(vals)
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment, len(vals))
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
// Compute committee assignments for each slot in the epoch.
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
committees, err := BeaconCommittees(ctx, state, slot)
if err != nil {
@@ -422,7 +420,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
}
for j, committee := range committees {
for _, vIndex := range committee {
if _, ok := vals[vIndex]; !ok {
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
continue
}
if _, ok := assignments[vIndex]; !ok {
@@ -431,11 +429,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
assignments[vIndex].Committee = committee
assignments[vIndex].AttesterSlot = slot
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
delete(vals, vIndex)
remaining--
if remaining == 0 {
return assignments, nil // early exit
}
}
}
}
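
The CommitteeAssignments change above deduplicates the requested validators into a set and stops scanning committees once every requested validator has an assignment. A minimal sketch of that early-exit pattern over plain integers, not Prysm's committee types:

package main

import "fmt"

// firstSeen records the first position at which each requested id appears in
// items, and stops scanning as soon as every requested id has been found.
func firstSeen(requested []int, items []int) map[int]int {
	want := make(map[int]struct{}, len(requested)) // dedup + O(1) membership checks
	for _, r := range requested {
		want[r] = struct{}{}
	}
	remaining := len(want)

	out := make(map[int]int, len(want))
	for pos, it := range items {
		if _, ok := want[it]; !ok {
			continue // not requested
		}
		if _, done := out[it]; done {
			continue // already assigned
		}
		out[it] = pos
		remaining--
		if remaining == 0 {
			return out // early exit: everything requested is assigned
		}
	}
	return out
}

func main() {
	fmt.Println(firstSeen([]int{2, 5, 5}, []int{7, 5, 2, 9, 3})) // map[2:2 5:1]
}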

View File

@@ -79,7 +79,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
}
// Spec defines `EffectiveBalanceIncrement` as min to avoid divisions by zero.
total = max(params.BeaconConfig().EffectiveBalanceIncrement, total)
total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
return 0, err
}

View File

@@ -401,7 +401,7 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
return 0, errors.New("empty active indices list")
}
hashFunc := hash.CustomSHA256Hasher()
cfg := params.BeaconConfig()
beaconConfig := params.BeaconConfig()
seedBuffer := make([]byte, len(seed)+8)
copy(seedBuffer, seed[:])
@@ -426,14 +426,14 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
offset := (i % 16) * 2
randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
if effectiveBal*fieldparams.MaxRandomValueElectra >= cfg.MaxEffectiveBalanceElectra*randomValue {
if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
return candidateIndex, nil
}
} else {
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/32)
randomByte := hashFunc(seedBuffer)[i%32]
if effectiveBal*fieldparams.MaxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
if effectiveBal*fieldparams.MaxRandomByte >= beaconConfig.MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
}
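
The hunk above is the balance-weighted rejection sampling used to pick a proposer: a candidate is accepted when effectiveBalance * MAX_RANDOM >= MAX_EFFECTIVE_BALANCE * randomValue, i.e. with probability proportional to its effective balance. A rough sketch of the acceptance rate under assumed Electra-era numbers (32 ETH candidate, 2048 ETH ceiling, 16-bit random value); the real constants live in params.BeaconConfig() and config/fieldparams:

package main

import "fmt"

func main() {
	// Assumed values for illustration only.
	const (
		maxRandomValue             = uint64(65535)            // assumed 16-bit random value post-Electra
		maxEffectiveBalanceElectra = uint64(2048_000_000_000) // 2048 ETH in Gwei
		effectiveBalance           = uint64(32_000_000_000)   // 32 ETH in Gwei
	)
	accepted := 0
	for randomValue := uint64(0); randomValue <= maxRandomValue; randomValue++ {
		if effectiveBalance*maxRandomValue >= maxEffectiveBalanceElectra*randomValue {
			accepted++
		}
	}
	// Roughly effectiveBalance/maxEffectiveBalance of the random values accept
	// the candidate: about 1 in 64 here.
	fmt.Println(accepted, "of", maxRandomValue+1) // 1024 of 65536
}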

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
v1alpha1 "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
)
@@ -94,7 +95,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
if T*(200+3*D) < t*(200+12*D) {
epochsForValidatorSetChurn := N * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
epochsForBalanceTopUps := N * (200 + 3*D) / (600 * Delta)
wsp += max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
wsp += math.Max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
} else {
wsp += 3 * N * D * t / (200 * Delta * (T - t))
}
@@ -201,3 +202,14 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
Root: bRoot,
}, nil
}
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// value defined:
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
}
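
With the mainnet values I believe apply (MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs, CHURN_LIMIT_QUOTIENT = 65536), the new helper above evaluates to 256 + 65536/2 = 33024 epochs, which matches the test further down. A one-line check of the arithmetic:

package main

import "fmt"

func main() {
	// Assumed mainnet constants; confirm against params.BeaconConfig().
	const (
		minValidatorWithdrawabilityDelay = uint64(256)
		churnLimitQuotient               = uint64(65536)
	)
	fmt.Println(minValidatorWithdrawabilityDelay + churnLimitQuotient/2) // 33024 epochs (~5 months)
}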

View File

@@ -286,3 +286,20 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
return beaconState
}
func TestMinEpochsForBlockRequests(t *testing.T) {
helpers.ClearCache()
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
// )
//
// Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months).
require.Equal(t, expected, helpers.MinEpochsForBlockRequests())
}

View File

@@ -89,14 +89,14 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
cfg := params.BeaconConfig()
numberOfCustodyGroups := cfg.NumberOfCustodyGroups
beaconConfig := params.BeaconConfig()
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
if custodyGroup >= numberOfCustodyGroups {
return nil, ErrCustodyGroupTooLarge
}
numberOfColumns := cfg.NumberOfColumns
numberOfColumns := beaconConfig.NumberOfColumns
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
@@ -112,9 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
cfg := params.BeaconConfig()
numberOfColumns := cfg.NumberOfColumns
numberOfCustodyGroups := cfg.NumberOfCustodyGroups
beaconConfig := params.BeaconConfig()
numberOfColumns := beaconConfig.NumberOfColumns
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
if columnIndex >= numberOfColumns {
return 0, ErrIndexTooLarge
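
Per the das-core spec linked in the hunk above, each custody group maps to columnsPerGroup evenly spaced columns, and the reciprocal lookup is a modulo. A hedged sketch of both directions, assuming the Fulu values of 128 columns and 128 custody groups (so each group owns exactly one column, its own index):

package main

import "fmt"

// Assumed Fulu constants; confirm against params.BeaconConfig().
const (
	numberOfColumns       = uint64(128)
	numberOfCustodyGroups = uint64(128)
)

// columnsForCustodyGroup mirrors compute_columns_for_custody_group from the spec:
// the columns are custodyGroup, custodyGroup + NUMBER_OF_CUSTODY_GROUPS, and so on.
func columnsForCustodyGroup(custodyGroup uint64) []uint64 {
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
	columns := make([]uint64, 0, columnsPerGroup)
	for i := uint64(0); i < columnsPerGroup; i++ {
		columns = append(columns, numberOfCustodyGroups*i+custodyGroup)
	}
	return columns
}

// custodyGroupForColumn is the reciprocal mapping used by ComputeCustodyGroupForColumn.
func custodyGroupForColumn(columnIndex uint64) uint64 {
	return columnIndex % numberOfCustodyGroups
}

func main() {
	fmt.Println(columnsForCustodyGroup(5)) // [5] with 128 columns / 128 groups
	fmt.Println(custodyGroupForColumn(5))  // 5
}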

View File

@@ -43,13 +43,6 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
return ErrNoKzgCommitments
}
// A sidecar with more commitments than the max blob count for this block is invalid.
slot := sidecar.Slot()
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(sidecar.KzgCommitments) > maxBlobsPerBlock {
return ErrTooManyCommitments
}
// The column length must be equal to the number of commitments/proofs.
if len(sidecar.Column) != len(sidecar.KzgCommitments) || len(sidecar.Column) != len(sidecar.KzgProofs) {
return ErrMismatchLength
@@ -79,30 +72,10 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
for _, sidecar := range sidecars {
for i := range sidecar.Column {
var (
commitment kzg.Bytes48
cell kzg.Cell
proof kzg.Bytes48
)
commitmentBytes := sidecar.KzgCommitments[i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]
if len(commitmentBytes) != len(commitment) ||
len(cellBytes) != len(cell) ||
len(proofBytes) != len(proof) {
return ErrMismatchLength
}
copy(commitment[:], commitmentBytes)
copy(cell[:], cellBytes)
copy(proof[:], proofBytes)
commitments = append(commitments, commitment)
commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
indices = append(indices, sidecar.Index)
cells = append(cells, cell)
proofs = append(proofs, proof)
cells = append(cells, kzg.Cell(sidecar.Column[i]))
proofs = append(proofs, kzg.Bytes48(sidecar.KzgProofs[i]))
}
}

View File

@@ -18,46 +18,38 @@ import (
)
func TestVerifyDataColumnSidecar(t *testing.T) {
testCases := []struct {
name string
index uint64
blobCount int
commitmentCount int
proofCount int
maxBlobsPerBlock uint64
expectedError error
}{
{name: "index too large", index: 1_000_000, expectedError: peerdas.ErrIndexTooLarge},
{name: "no commitments", expectedError: peerdas.ErrNoKzgCommitments},
{name: "too many commitments", blobCount: 10, commitmentCount: 10, proofCount: 10, maxBlobsPerBlock: 2, expectedError: peerdas.ErrTooManyCommitments},
{name: "commitments size mismatch", commitmentCount: 1, maxBlobsPerBlock: 1, expectedError: peerdas.ErrMismatchLength},
{name: "proofs size mismatch", blobCount: 1, commitmentCount: 1, maxBlobsPerBlock: 1, expectedError: peerdas.ErrMismatchLength},
{name: "nominal", blobCount: 1, commitmentCount: 1, proofCount: 1, maxBlobsPerBlock: 1, expectedError: nil},
}
t.Run("index too large", func(t *testing.T) {
roSidecar := createTestSidecar(t, 1_000_000, nil, nil, nil)
err := peerdas.VerifyDataColumnSidecar(roSidecar)
require.ErrorIs(t, err, peerdas.ErrIndexTooLarge)
})
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = 0
cfg.BlobSchedule = []params.BlobScheduleEntry{{Epoch: 0, MaxBlobsPerBlock: tc.maxBlobsPerBlock}}
params.OverrideBeaconConfig(cfg)
t.Run("no commitments", func(t *testing.T) {
roSidecar := createTestSidecar(t, 0, nil, nil, nil)
err := peerdas.VerifyDataColumnSidecar(roSidecar)
require.ErrorIs(t, err, peerdas.ErrNoKzgCommitments)
})
column := make([][]byte, tc.blobCount)
kzgCommitments := make([][]byte, tc.commitmentCount)
kzgProof := make([][]byte, tc.proofCount)
t.Run("KZG commitments size mismatch", func(t *testing.T) {
kzgCommitments := make([][]byte, 1)
roSidecar := createTestSidecar(t, 0, nil, kzgCommitments, nil)
err := peerdas.VerifyDataColumnSidecar(roSidecar)
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
})
roSidecar := createTestSidecar(t, tc.index, column, kzgCommitments, kzgProof)
err := peerdas.VerifyDataColumnSidecar(roSidecar)
t.Run("KZG proofs size mismatch", func(t *testing.T) {
column, kzgCommitments := make([][]byte, 1), make([][]byte, 1)
roSidecar := createTestSidecar(t, 0, column, kzgCommitments, nil)
err := peerdas.VerifyDataColumnSidecar(roSidecar)
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
})
if tc.expectedError != nil {
require.ErrorIs(t, err, tc.expectedError)
return
}
require.NoError(t, err)
})
}
t.Run("nominal", func(t *testing.T) {
column, kzgCommitments, kzgProofs := make([][]byte, 1), make([][]byte, 1), make([][]byte, 1)
roSidecar := createTestSidecar(t, 0, column, kzgCommitments, kzgProofs)
err := peerdas.VerifyDataColumnSidecar(roSidecar)
require.NoError(t, err)
})
}
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
@@ -68,14 +60,6 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("size mismatch", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
})
t.Run("invalid proof", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0][0]++ // It is OK to overflow

View File

@@ -1,8 +1,6 @@
package peerdas
import (
"sort"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -30,8 +28,7 @@ func MinimumColumnCountToReconstruct() uint64 {
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
// All input sidecars must be committed to the same block.
// `inVerifiedRoSidecars` should contain enough sidecars to reconstruct the missing columns, and should not contain any duplicate.
// WARNING: This function sorts inplace `verifiedRoSidecars` by index.
// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
// Check if there is at least one input sidecar.
if len(verifiedRoSidecars) == 0 {
@@ -54,17 +51,18 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
}
}
// Deduplicate sidecars.
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
for _, inVerifiedRoSidecar := range verifiedRoSidecars {
sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
}
// Check if there is enough sidecars to reconstruct the missing columns.
sidecarCount := len(verifiedRoSidecars)
sidecarCount := len(sidecarByIndex)
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
return nil, ErrNotEnoughDataColumnSidecars
}
// Sort the input sidecars by index.
sort.Slice(verifiedRoSidecars, func(i, j int) bool {
return verifiedRoSidecars[i].Index < verifiedRoSidecars[j].Index
})
// Recover cells and compute proofs in parallel.
var wg errgroup.Group
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
@@ -73,10 +71,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
cellsIndices := make([]uint64, 0, sidecarCount)
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
for columnIndex, sidecar := range sidecarByIndex {
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index)
cellsIndices = append(cellsIndices, columnIndex)
}
// Recover the cells and proofs for the corresponding blob
@@ -257,7 +255,7 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
return nil, errors.Wrap(err, "compute cells")
}
kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
kzgProofs := make([]kzg.Proof, 0, numberOfColumns*kzg.BytesPerProof)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, errors.New("wrong KZG proof size - should never happen")
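
The reconstruction change above replaces sort-in-place with an explicit deduplication keyed by column index, so the minimum-count check runs on unique columns. A minimal sketch of that dedup step over a toy sidecar type, not Prysm's VerifiedRODataColumn:

package main

import "fmt"

// toySidecar stands in for a data column sidecar; only the index matters here.
type toySidecar struct {
	Index uint64
}

func main() {
	in := []toySidecar{{Index: 3}, {Index: 7}, {Index: 3}} // duplicate column 3

	// Deduplicate by column index, as in the hunk above.
	byIndex := make(map[uint64]toySidecar, len(in))
	for _, sc := range in {
		byIndex[sc.Index] = sc
	}

	// Any "enough sidecars to reconstruct" check must use the unique count.
	fmt.Println(len(in), len(byIndex)) // 3 2
}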

View File

@@ -125,12 +125,11 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
}
func TestReconstructBlobs(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
require.NoError(t, kzg.Start())
var emptyBlock blocks.ROBlock
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
t.Run("no index", func(t *testing.T) {
actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
@@ -191,10 +190,10 @@ func TestReconstructBlobs(t *testing.T) {
})
t.Run("not committed to the same block", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
_, err = peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
})
@@ -441,7 +440,6 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
for i := range blobCount {
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), cap(actualCellsAndProofs[i].Proofs))
// Compare cells
for j, expectedCell := range expectedCellsAndProofs[i].Cells {

View File

@@ -84,10 +84,10 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
totalNodeBalance += validator.EffectiveBalance()
}
cfg := params.BeaconConfig()
numberOfCustodyGroups := cfg.NumberOfCustodyGroups
validatorCustodyRequirement := cfg.ValidatorCustodyRequirement
balancePerAdditionalCustodyGroup := cfg.BalancePerAdditionalCustodyGroup
beaconConfig := params.BeaconConfig()
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
count := totalNodeBalance / balancePerAdditionalCustodyGroup
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
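
The clamp at the end of ValidatorsCustodyRequirement scales custody with the node's attached stake: count = totalNodeBalance / balancePerAdditionalCustodyGroup, bounded below by the base requirement and above by the total number of groups. A worked example with assumed Fulu values (8 base groups, 32 ETH per additional group, 128 groups total) and a hypothetical 20-validator node:

package main

import "fmt"

func main() {
	// Assumed Fulu constants; confirm against params.BeaconConfig().
	const (
		gwei                             = uint64(1_000_000_000)
		validatorCustodyRequirement      = uint64(8)
		balancePerAdditionalCustodyGroup = 32 * gwei
		numberOfCustodyGroups            = uint64(128)
	)

	// Hypothetical node running 20 validators at 32 ETH effective balance each.
	totalNodeBalance := 20 * 32 * gwei

	count := totalNodeBalance / balancePerAdditionalCustodyGroup
	// min(max(count, validatorCustodyRequirement), numberOfCustodyGroups)
	if count < validatorCustodyRequirement {
		count = validatorCustodyRequirement
	}
	if count > numberOfCustodyGroups {
		count = numberOfCustodyGroups
	}
	fmt.Println(count) // 20 custody groups for this node
}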

View File

@@ -16,60 +16,61 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fs := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
fuluMax := params.BeaconConfig().MaxBlobsPerBlock(fs)
t.Run("pre fulu", func(t *testing.T) {
block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, 0)
block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 0, 0)
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.NoError(t, err)
})
t.Run("too many commitments", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, fuluMax+1, util.WithSlot(fs))
t.Run("too many commitmnets", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.BlobSchedule = []params.BlobScheduleEntry{{}}
params.OverrideBeaconConfig(config)
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3)
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.ErrorIs(t, err, peerdas.ErrTooManyCommitments)
})
t.Run("root mismatch", func(t *testing.T) {
_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(fs))
_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0)
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrRootMismatch)
})
t.Run("column size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
sidecars[0].Column = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG commitments size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
sidecars[0].KzgCommitments = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG proofs mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
sidecars[0].KzgProofs = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("commitment mismatch", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
})
t.Run("nominal", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.NoError(t, err)
})

View File

@@ -196,7 +196,7 @@ func TestAltairCompatible(t *testing.T) {
}
func TestCanUpgradeTo(t *testing.T) {
cfg := params.BeaconConfig()
beaconConfig := params.BeaconConfig()
outerTestCases := []struct {
name string
@@ -205,32 +205,32 @@ func TestCanUpgradeTo(t *testing.T) {
}{
{
name: "Altair",
forkEpoch: &cfg.AltairForkEpoch,
forkEpoch: &beaconConfig.AltairForkEpoch,
upgradeFunc: time.CanUpgradeToAltair,
},
{
name: "Bellatrix",
forkEpoch: &cfg.BellatrixForkEpoch,
forkEpoch: &beaconConfig.BellatrixForkEpoch,
upgradeFunc: time.CanUpgradeToBellatrix,
},
{
name: "Capella",
forkEpoch: &cfg.CapellaForkEpoch,
forkEpoch: &beaconConfig.CapellaForkEpoch,
upgradeFunc: time.CanUpgradeToCapella,
},
{
name: "Deneb",
forkEpoch: &cfg.DenebForkEpoch,
forkEpoch: &beaconConfig.DenebForkEpoch,
upgradeFunc: time.CanUpgradeToDeneb,
},
{
name: "Electra",
forkEpoch: &cfg.ElectraForkEpoch,
forkEpoch: &beaconConfig.ElectraForkEpoch,
upgradeFunc: time.CanUpgradeToElectra,
},
{
name: "Fulu",
forkEpoch: &cfg.FuluForkEpoch,
forkEpoch: &beaconConfig.FuluForkEpoch,
upgradeFunc: time.CanUpgradeToFulu,
},
}
@@ -238,7 +238,7 @@ func TestCanUpgradeTo(t *testing.T) {
for _, otc := range outerTestCases {
params.SetupTestConfigCleanup(t)
*otc.forkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.OverrideBeaconConfig(beaconConfig)
innerTestCases := []struct {
name string

View File

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -379,16 +378,9 @@ func ProcessBlockForStateRoot(
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
// exitInfo is only needed for voluntary exits pre Electra.
hasExits := st.Version() < version.Electra && len(beaconBlock.Body().VoluntaryExits()) > 0
exitInfo := &validators.ExitInfo{}
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
@@ -415,15 +407,10 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
// This calls phase 0 block operations.
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
var exitInfo *v.ExitInfo
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
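
The hunks above gate the `ExitInformation` computation behind a check that the block actually carries operations needing it (slashings, or voluntary exits before Electra), since computing it scans the validator set. A minimal sketch of that gating, using hypothetical stand-in types rather than Prysm's state and block interfaces:

```go
package main

import "fmt"

type exitInfo struct{ totalActiveBalance uint64 }

var exitInfoComputations int

// computeExitInfo stands in for the expensive full-validator-set scan.
func computeExitInfo() *exitInfo {
	exitInfoComputations++
	return &exitInfo{totalActiveBalance: 1 << 40}
}

type blockBody struct {
	proposerSlashings, attesterSlashings, voluntaryExits int
}

// processOperations only pays for the exit info when an operation in the block needs it.
func processOperations(body blockBody, preElectra bool) *exitInfo {
	hasSlashings := body.proposerSlashings > 0 || body.attesterSlashings > 0
	hasExits := preElectra && body.voluntaryExits > 0 // only needed for exits before Electra
	var info *exitInfo
	if hasSlashings || hasExits {
		info = computeExitInfo()
	}
	return info
}

func main() {
	processOperations(blockBody{}, true)                          // nothing expensive runs
	processOperations(blockBody{proposerSlashings: 1}, true)      // computed once
	fmt.Println("expensive computations:", exitInfoComputations)  // 1
}
```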

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -59,7 +60,7 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
_ = err
// Apply minimum balance as per spec
exitInfo.TotalActiveBalance = max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
}
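
The change above swaps the `mathutil.Max` helper for Go 1.21's builtin `max` while keeping the floor on total active balance. A tiny sketch with a placeholder value for the effective balance increment:

```go
package main

import "fmt"

const effectiveBalanceIncrement = 1_000_000_000 // gwei, placeholder value

// flooredTotalActiveBalance never returns less than the increment, using the
// builtin max available since Go 1.21.
func flooredTotalActiveBalance(total uint64) uint64 {
	return max(effectiveBalanceIncrement, total)
}

func main() {
	fmt.Println(flooredTotalActiveBalance(0))             // 1000000000
	fmt.Println(flooredTotalActiveBalance(5_000_000_000)) // 5000000000
}
```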
@@ -97,9 +98,7 @@ func InitiateValidatorExit(
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}
if exitInfo == nil {
return nil, errors.New("exit info is required to process validator exit")
}
// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
@@ -178,9 +177,6 @@ func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, e
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitInfo == nil {
return errors.New("exit info is required to process validator exit")
}
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
@@ -239,9 +235,7 @@ func SlashValidator(
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error
if exitInfo == nil {
return nil, errors.New("exit info is required to slash validator")
}
s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)

View File

@@ -18,16 +18,13 @@ import (
)
func Test_commitmentsToCheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fulu := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
commits := make([][]byte, maxBlobs+1)
for i := 0; i < len(commits); i++ {
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
commits := [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}
cases := []struct {
name string
@@ -50,40 +47,41 @@ func Test_commitmentsToCheck(t *testing.T) {
{
name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockFulu()
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
d.Block.Slot = fulu + 100
d := util.NewBeaconBlockDeneb()
d.Block.Body.BlobKzgCommitments = commits
d.Block.Slot = 100
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
commits: commits[:maxBlobs],
slot: fulu + 100,
commits: commits,
slot: 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu
d := util.NewBeaconBlockDeneb()
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
d.Block.Body.BlobKzgCommitments = commits
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
slot: fulu + windowSlots + 1,
slot: windowSlots + 1,
},
{
name: "excessive commitments",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu + 100
d := util.NewBeaconBlockDeneb()
d.Block.Slot = 100
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
// Double the number of commitments, assert that this is over the limit
d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...)
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
@@ -117,69 +115,67 @@ func Test_commitmentsToCheck(t *testing.T) {
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(ds, blobSidecars[2]))
err := as.IsDataAvailable(ctx, ds, blk)
require.NoError(t, as.Persist(1, blobSidecars[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All but one persisted, return missing idx
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err = as.IsDataAvailable(ctx, ds, blk)
require.NoError(t, as.Persist(1, blobSidecars[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All persisted, return nil
require.NoError(t, as.Persist(ds, blobSidecars...))
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.IsDataAvailable(ctx, ds, blk))
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err := as.IsDataAvailable(ctx, ds, blk)
require.NoError(t, as.Persist(1, blobSidecars[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
}
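
The two tests above exercise the availability store's contract: `Persist` stashes sidecars by block root, and `IsDataAvailable` fails with a missing-sidecar error when an index has no sidecar, or a commitment-mismatch error when a persisted sidecar's commitment differs from the block's. A minimal sketch of that contract with hypothetical types (not Prysm's availability store API):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

var (
	errMissingSidecar     = errors.New("missing sidecar for commitment")
	errCommitmentMismatch = errors.New("sidecar commitment does not match block")
)

type sidecar struct {
	index      int
	commitment []byte
}

type availabilityStore struct {
	persisted map[[32]byte]map[int]sidecar // block root -> index -> sidecar
}

func (a *availabilityStore) persist(root [32]byte, scs ...sidecar) {
	if a.persisted[root] == nil {
		a.persisted[root] = map[int]sidecar{}
	}
	for _, sc := range scs {
		a.persisted[root][sc.index] = sc
	}
}

// isDataAvailable succeeds only when every commitment in the block body is matched
// by a persisted sidecar at the same index.
func (a *availabilityStore) isDataAvailable(root [32]byte, blockCommitments [][]byte) error {
	for i, c := range blockCommitments {
		sc, ok := a.persisted[root][i]
		if !ok {
			return fmt.Errorf("%w: index %d", errMissingSidecar, i)
		}
		if !bytes.Equal(sc.commitment, c) {
			return fmt.Errorf("%w: index %d", errCommitmentMismatch, i)
		}
	}
	return nil
}

func main() {
	as := &availabilityStore{persisted: map[[32]byte]map[int]sidecar{}}
	root := [32]byte{1}
	commitments := [][]byte{{0xaa}, {0xbb}}

	as.persist(root, sidecar{index: 0, commitment: []byte{0xaa}})
	fmt.Println(as.isDataAvailable(root, commitments)) // missing sidecar for commitment: index 1

	as.persist(root, sidecar{index: 1, commitment: []byte{0xbb}})
	fmt.Println(as.isDataAvailable(root, commitments)) // <nil>
}
```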
func TestLazyPersistOnceCommitted(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(ds, blobSidecars...))
require.NoError(t, as.Persist(1, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
// ignores index out of bound
blobSidecars[0].Index = 6
require.ErrorIs(t, as.Persist(ds, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 4)
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
// ignores sidecars before the retention period
slotOOB := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
slotOOB += ds + 32
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
require.NoError(t, as.Persist(1, moreBlobSidecars...))
}
type mockBlobBatchVerifier struct {

View File

@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
entry := &blobCacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, slots.ToEpoch(slot), od)
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}

View File

@@ -21,8 +21,7 @@ import (
)
func TestBlobStorage_SaveBlobData(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, params.BeaconConfig().MaxBlobsPerBlock(ds))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
testSidecars := verification.FakeVerifySliceForTest(t, sidecars)
t.Run("no error for duplicate", func(t *testing.T) {
@@ -128,22 +127,21 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}
func TestBlobIndicesBounds(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
fs := afero.NewMemMapFs()
root := [32]byte{}
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es)) - 1
writeFakeSSZ(t, fs, root, es, okIdx)
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
writeFakeSSZ(t, fs, root, 0, okIdx)
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
indices := bs.Summary(root).mask
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es))
writeFakeSSZ(t, fs, root, es, oobIdx)
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
writeFakeSSZ(t, fs, root, 0, oobIdx)
// This now fails at cache warmup time.
require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
}
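
The test above pins the index bound: `MaxBlobsPerBlock-1` is the last valid blob index, and `MaxBlobsPerBlock` itself is rejected at cache warmup. A minimal sketch of that check with a placeholder limit:

```go
package main

import (
	"errors"
	"fmt"
)

var errIndexOutOfBounds = errors.New("blob index out of bounds")

const maxBlobsPerBlock = 6 // placeholder; the real limit depends on the fork schedule

// checkBlobIndex accepts indices 0..maxBlobsPerBlock-1 and rejects anything above.
func checkBlobIndex(index uint64) error {
	if index >= maxBlobsPerBlock {
		return errIndexOutOfBounds
	}
	return nil
}

func main() {
	fmt.Println(checkBlobIndex(maxBlobsPerBlock - 1)) // <nil>
	fmt.Println(checkBlobIndex(maxBlobsPerBlock))     // blob index out of bounds
}
```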

View File

@@ -6,17 +6,14 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
func TestSlotByRoot_Summary(t *testing.T) {
ee := params.BeaconConfig().ElectraForkEpoch
es := util.SlotAtEpoch(t, ee)
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
@@ -56,7 +53,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
sc.cache[key] = BlobStorageSummary{epoch: ee, mask: c.expected}
sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
}
}
for _, c := range cases {
@@ -76,7 +73,6 @@ func TestSlotByRoot_Summary(t *testing.T) {
}
func TestAllAvailable(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
@@ -129,13 +125,13 @@ func TestAllAvailable(t *testing.T) {
},
{
name: "out of bound is safe",
count: params.BeaconConfig().MaxBlobsPerBlock(es) + 1,
count: params.BeaconConfig().MaxBlobsPerBlock(0) + 1,
aa: false,
},
{
name: "max present",
count: params.BeaconConfig().MaxBlobsPerBlock(es),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(es)),
count: params.BeaconConfig().MaxBlobsPerBlock(0),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(0)),
aa: true,
},
{
@@ -147,7 +143,7 @@ func TestAllAvailable(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
for _, idx := range c.idxSet {
mask[idx] = true
}

View File

@@ -200,7 +200,6 @@ func (dcs *DataColumnStorage) WarmCache() {
fileMetadata, err := extractFileMetadata(path)
if err != nil {
log.WithError(err).Error("Error encountered while extracting file metadata")
return nil
}
// Open the data column filesystem file.
@@ -989,8 +988,8 @@ func filePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string
// extractFileMetadata extracts the metadata from a file path.
// If the path is not a leaf, it returns nil.
func extractFileMetadata(path string) (*fileMetadata, error) {
// Use filepath.Separator to handle both Windows (\) and Unix (/) path separators
parts := strings.Split(path, string(filepath.Separator))
// Is this Windows friendly?
parts := strings.Split(path, "/")
if len(parts) != 3 {
return nil, errors.Errorf("unexpected file %s", path)
}
@@ -1033,5 +1032,5 @@ func extractFileMetadata(path string) (*fileMetadata, error) {
// period computes the period of a given epoch.
func period(epoch primitives.Epoch) uint64 {
return uint64(epoch / params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
return uint64(epoch / params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
}
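
The `extractFileMetadata` hunk above replaces a hard-coded "/" split with `filepath.Separator` so the path parsing also works on Windows. Below is a minimal sketch of that portable parsing, assuming a "<period>/<epoch>/<root>.sszs" layout; the names here are illustrative, not Prysm's.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
)

// parsePeriodAndEpoch splits on filepath.Separator so the same code parses
// "12/1234/0xroot.sszs" on Unix and `12\1234\0xroot.sszs` on Windows.
func parsePeriodAndEpoch(path string) (period, epoch uint64, err error) {
	parts := strings.Split(path, string(filepath.Separator))
	if len(parts) != 3 {
		return 0, 0, fmt.Errorf("unexpected file %s", path)
	}
	if period, err = strconv.ParseUint(parts[0], 10, 64); err != nil {
		return 0, 0, err
	}
	if epoch, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
		return 0, 0, err
	}
	return period, epoch, nil
}

func main() {
	// filepath.Join uses the separator of the OS the program runs on.
	path := filepath.Join("12", "1234", "0xroot.sszs")
	fmt.Println(parsePeriodAndEpoch(path)) // 12 1234 <nil>
}
```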

View File

@@ -35,9 +35,8 @@ func (s DataColumnStorageSummary) HasIndex(index uint64) bool {
// HasAtLeastOneIndex returns true if at least one of the DataColumnSidecars at the given indices is available in the filesystem.
func (s DataColumnStorageSummary) HasAtLeastOneIndex(indices []uint64) bool {
size := uint64(len(s.mask))
for _, index := range indices {
if index < size && s.mask[index] {
if s.mask[index] {
return true
}
}
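
The one-line change above adds a bounds check so an out-of-range column index is skipped rather than panicking on the mask lookup. A minimal sketch with a stand-in type:

```go
package main

import "fmt"

type storageSummary struct{ mask []bool }

// hasAtLeastOneIndex guards the lookup with a bounds check so out-of-range
// indices are simply skipped instead of panicking.
func (s storageSummary) hasAtLeastOneIndex(indices []uint64) bool {
	size := uint64(len(s.mask))
	for _, index := range indices {
		if index < size && s.mask[index] {
			return true
		}
	}
	return false
}

func main() {
	s := storageSummary{mask: []bool{false, true}}
	fmt.Println(s.hasAtLeastOneIndex([]uint64{3, 1, 128})) // true; out-of-range values are skipped
	fmt.Println(s.hasAtLeastOneIndex([]uint64{3, 4, 128})) // false
}
```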

View File

@@ -25,11 +25,11 @@ func TestHasIndex(t *testing.T) {
func TestHasAtLeastOneIndex(t *testing.T) {
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true})
actual := summary.HasAtLeastOneIndex([]uint64{3, 1, fieldparams.NumberOfColumns, 2})
require.Equal(t, true, actual)
hasAtLeastOneIndex := summary.HasAtLeastOneIndex([]uint64{3, 1, 2})
require.Equal(t, true, hasAtLeastOneIndex)
actual = summary.HasAtLeastOneIndex([]uint64{3, 4, fieldparams.NumberOfColumns, 2})
require.Equal(t, false, actual)
hasAtLeastOneIndex = summary.HasAtLeastOneIndex([]uint64{3, 4, 2})
require.Equal(t, false, hasAtLeastOneIndex)
}
func TestCount(t *testing.T) {

View File

@@ -3,7 +3,6 @@ package filesystem
import (
"encoding/binary"
"os"
"path/filepath"
"testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -726,37 +725,3 @@ func TestPrune(t *testing.T) {
require.Equal(t, true, compareSlices([]string{"0x0de28a18cae63cbc6f0b20dc1afb0b1df38da40824a5f09f92d485ade04de97f.sszs"}, dirs))
})
}
func TestExtractFileMetadata(t *testing.T) {
t.Run("Unix", func(t *testing.T) {
// Test with Unix-style path separators (/)
path := "12/1234/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs"
metadata, err := extractFileMetadata(path)
if filepath.Separator == '/' {
// On Unix systems, this should succeed
require.NoError(t, err)
require.Equal(t, uint64(12), metadata.period)
require.Equal(t, primitives.Epoch(1234), metadata.epoch)
return
}
// On Windows systems, this should fail because it uses the wrong separator
require.NotNil(t, err)
})
t.Run("Windows", func(t *testing.T) {
// Test with Windows-style path separators (\)
path := "12\\1234\\0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs"
metadata, err := extractFileMetadata(path)
if filepath.Separator == '\\' {
// On Windows systems, this should succeed
require.NoError(t, err)
require.Equal(t, uint64(12), metadata.period)
require.Equal(t, primitives.Epoch(1234), metadata.epoch)
return
}
// On Unix systems, this should fail because it uses the wrong separator
require.NotNil(t, err)
})
}

View File

@@ -212,8 +212,7 @@ func filterNoop(_ string) bool {
return true
}
// IsBlockRootDir returns true if the path segment looks like a block root directory.
func IsBlockRootDir(p string) bool {
func isRootDir(p string) bool {
dir := filepath.Base(p)
return len(dir) == rootStringLen && strings.HasPrefix(dir, "0x")
}

View File

@@ -11,7 +11,6 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -61,13 +60,12 @@ func TestRootFromDir(t *testing.T) {
}
func TestSlotFromFile(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
cases := []struct {
slot primitives.Slot
}{
{slot: es + 0},
{slot: es + 2},
{slot: es + 1123581321},
{slot: 0},
{slot: 2},
{slot: 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
@@ -188,7 +186,7 @@ func TestListDir(t *testing.T) {
name: "root filter",
dirPath: ".",
expected: []string{childlessBlob.name, blobWithSsz.name, blobWithSszAndTmp.name},
filter: IsBlockRootDir,
filter: isRootDir,
},
{
name: "ssz filter",
@@ -245,40 +243,39 @@ func TestSlotFromBlob(t *testing.T) {
}
func TestIterationComplete(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
targets := []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
path: "by-epoch/%d/%d/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
slotOffset: 31,
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
slotOffset: 31,
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
slotOffset: 16,
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
slotOffset: 16,
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", -1+math.MaxUint64/32, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
slotOffset: 16,
path: "by-epoch/%d/%d/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
path: "by-epoch/%d/%d/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
},
}
fs := afero.NewMemMapFs()
@@ -302,7 +299,6 @@ func TestIterationComplete(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, tar.ident.epoch, entry.epoch)
require.Equal(t, true, entry.HasIndex(tar.ident.index))
path := fmt.Sprintf(tar.path, periodForEpoch(tar.ident.epoch), tar.ident.epoch)
require.Equal(t, path, byEpoch.sszPath(tar.ident))
require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
}
}

View File

@@ -19,14 +19,12 @@ import (
const (
// Full root in directory will be 66 chars, eg:
// >>> len('0x0002fb4db510b8618b04dc82d023793739c26346a8b02eb73482e24b0fec0555') == 66
rootStringLen = 66
sszExt = "ssz"
partExt = "part"
rootStringLen = 66
sszExt = "ssz"
partExt = "part"
periodicEpochBaseDir = "by-epoch"
)
// PeriodicEpochBaseDir is the name of the base directory for the by-epoch layout.
const PeriodicEpochBaseDir = "by-epoch"
const (
LayoutNameFlat = "flat"
LayoutNameByEpoch = "by-epoch"
@@ -132,11 +130,11 @@ func migrateLayout(fs afero.Fs, from, to fsLayout, cache *blobStorageSummaryCach
if iter.atEOF() {
return errLayoutNotDetected
}
log.WithField("fromLayout", from.name()).WithField("toLayout", to.name()).Info("Migrating blob filesystem layout. This one-time operation can take extra time (up to a few minutes for systems with extended blob storage and a cold disk cache).")
lastMoved := ""
parentDirs := make(map[string]bool) // this map should have < 65k keys by design
moved := 0
dc := newDirCleaner()
migrationLogged := false
for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
if err != nil {
if errors.Is(err, errIdentFailure) {
@@ -148,11 +146,6 @@ func migrateLayout(fs afero.Fs, from, to fsLayout, cache *blobStorageSummaryCach
}
return errors.Wrapf(errMigrationFailure, "failed to iterate previous layout structure while migrating blobs, err=%s", err.Error())
}
if !migrationLogged {
log.WithField("fromLayout", from.name()).WithField("toLayout", to.name()).
Info("Migrating blob filesystem layout. This one-time operation can take extra time (up to a few minutes for systems with extended blob storage and a cold disk cache).")
migrationLogged = true
}
src := from.dir(ident)
target := to.dir(ident)
if src != lastMoved {

View File

@@ -34,7 +34,7 @@ func (l *periodicEpochLayout) name() string {
func (l *periodicEpochLayout) blockParentDirs(ident blobIdent) []string {
return []string{
PeriodicEpochBaseDir,
periodicEpochBaseDir,
l.periodDir(ident.epoch),
l.epochDir(ident.epoch),
}
@@ -50,28 +50,28 @@ func (l *periodicEpochLayout) notify(ident blobIdent) error {
// If before == 0, it won't be used as a filter and all idents will be returned.
func (l *periodicEpochLayout) iterateIdents(before primitives.Epoch) (*identIterator, error) {
_, err := l.fs.Stat(PeriodicEpochBaseDir)
_, err := l.fs.Stat(periodicEpochBaseDir)
if err != nil {
if os.IsNotExist(err) {
return &identIterator{eof: true}, nil // The directory is non-existent, which is fine; stop iteration.
}
return nil, errors.Wrapf(err, "error reading path %s", PeriodicEpochBaseDir)
return nil, errors.Wrapf(err, "error reading path %s", periodicEpochBaseDir)
}
// iterate root, which should have directories named by "period"
entries, err := listDir(l.fs, PeriodicEpochBaseDir)
entries, err := listDir(l.fs, periodicEpochBaseDir)
if err != nil {
return nil, errors.Wrapf(err, "failed to list %s", PeriodicEpochBaseDir)
return nil, errors.Wrapf(err, "failed to list %s", periodicEpochBaseDir)
}
return &identIterator{
fs: l.fs,
path: PeriodicEpochBaseDir,
path: periodicEpochBaseDir,
// Please see comments on the `layers` field in `identIterator` if the role of the layers is unclear.
layers: []layoutLayer{
{populateIdent: populateNoop, filter: isBeforePeriod(before)},
{populateIdent: populateEpoch, filter: isBeforeEpoch(before)},
{populateIdent: populateRoot, filter: IsBlockRootDir}, // extract root from path
{populateIdent: populateIndex, filter: isSszFile}, // extract index from filename
{populateIdent: populateRoot, filter: isRootDir}, // extract root from path
{populateIdent: populateIndex, filter: isSszFile}, // extract index from filename
},
entries: entries,
}, nil
@@ -98,7 +98,7 @@ func (l *periodicEpochLayout) epochDir(epoch primitives.Epoch) string {
}
func (l *periodicEpochLayout) periodDir(epoch primitives.Epoch) string {
return filepath.Join(PeriodicEpochBaseDir, fmt.Sprintf("%d", periodForEpoch(epoch)))
return filepath.Join(periodicEpochBaseDir, fmt.Sprintf("%d", periodForEpoch(epoch)))
}
func (l *periodicEpochLayout) sszPath(n blobIdent) string {
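
The `periodDir`/`epochDir` helpers above build the by-epoch layout, where a blob lives at by-epoch/<period>/<epoch>/<0xroot>/<index>.ssz and the period is the epoch divided by the retention window, so an entire period can be pruned by removing one directory. A minimal sketch of the path construction, using 4096 as a placeholder retention value (consistent with the example paths earlier in this diff, e.g. epoch 5330 under period 1):

```go
package main

import (
	"fmt"
	"path/filepath"
)

const (
	periodicEpochBaseDir = "by-epoch"
	retentionEpochs      = 4096 // placeholder retention window
)

func periodForEpoch(epoch uint64) uint64 { return epoch / retentionEpochs }

// sszPath builds by-epoch/<period>/<epoch>/<root>/<index>.ssz.
func sszPath(epoch uint64, root string, index uint64) string {
	return filepath.Join(
		periodicEpochBaseDir,
		fmt.Sprintf("%d", periodForEpoch(epoch)),
		fmt.Sprintf("%d", epoch),
		root,
		fmt.Sprintf("%d.ssz", index),
	)
}

func main() {
	fmt.Println(sszPath(5330, "0xroot", 1)) // by-epoch/1/5330/0xroot/1.ssz on a Unix filesystem
}
```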

View File

@@ -30,7 +30,7 @@ func (l *flatLayout) iterateIdents(before primitives.Epoch) (*identIterator, err
if os.IsNotExist(err) {
return &identIterator{eof: true}, nil // The directory is non-existent, which is fine; stop iteration.
}
return nil, errors.Wrap(err, "error reading blob base dir")
return nil, errors.Wrapf(err, "error reading path %s", periodicEpochBaseDir)
}
entries, err := listDir(l.fs, ".")
if err != nil {
@@ -199,10 +199,10 @@ func (l *flatSlotReader) isSSZAndBefore(fname string) bool {
// the epoch can be determined.
func isFlatCachedAndBefore(cache *blobStorageSummaryCache, before primitives.Epoch) func(string) bool {
if before == 0 {
return IsBlockRootDir
return isRootDir
}
return func(p string) bool {
if !IsBlockRootDir(p) {
if !isRootDir(p) {
return false
}
root, err := rootFromPath(p)

View File

@@ -4,10 +4,10 @@ import (
"os"
"testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/spf13/afero"
)
@@ -18,7 +18,9 @@ func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64)
}
func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
slot := util.SlotAtEpoch(t, ident.epoch) + offset
slot, err := slots.EpochStart(ident.epoch)
require.NoError(t, err)
slot += offset
_, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
scb, err := sc[0].MarshalSSZ()
require.NoError(t, err)
@@ -51,7 +53,6 @@ func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, af
}
func TestMigrations(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
cases := []struct {
name string
forwardLayout string
@@ -64,18 +65,18 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
slotOffset: 16,
},
},
@@ -86,33 +87,33 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", de+16777217, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
migrated: true,
},
},

View File

@@ -88,11 +88,11 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
return &BlobMocker{fs: fs, bs: bs}, bs
}
func NewMockBlobStorageSummarizer(t *testing.T, epoch primitives.Epoch, set map[[32]byte][]int) BlobStorageSummarizer {
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
t.Fatal(err)
}
}
@@ -126,7 +126,7 @@ func NewWarmedEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts
func NewEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
opts = append(opts,
WithDataColumnRetentionEpochs(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest),
WithDataColumnRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
WithDataColumnFs(fs),
)

View File

@@ -142,7 +142,6 @@ func testRoots(n int) [][32]byte {
}
func TestLayoutPruneBefore(t *testing.T) {
electra := params.BeaconConfig().ElectraForkEpoch
roots := testRoots(10)
cases := []struct {
name string
@@ -154,27 +153,27 @@ func TestLayoutPruneBefore(t *testing.T) {
}{
{
name: "none pruned",
pruneBefore: electra + 1,
pruneBefore: 1,
pruned: []testIdent{},
remain: []testIdent{
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
},
},
{
name: "expected pruned before epoch",
pruneBefore: electra + 3,
pruneBefore: 3,
pruned: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: electra + 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: electra + 2, index: 3}},
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
},
remain: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: electra + 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: electra + 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: electra + 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: electra + 4, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
},
sum: pruneSummary{blobsPruned: 4},
},
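
The cases above pin the pruneBefore boundary: identifiers with an epoch strictly below the target are pruned, while the boundary epoch itself is retained. A minimal sketch of those semantics with stand-in types:

```go
package main

import "fmt"

type ident struct {
	root  string
	epoch uint64
}

// pruneBefore removes idents with epoch < before and keeps the boundary epoch and later.
func pruneBefore(idents []ident, before uint64) (pruned, remaining []ident) {
	for _, id := range idents {
		if id.epoch < before {
			pruned = append(pruned, id)
			continue
		}
		remaining = append(remaining, id)
	}
	return pruned, remaining
}

func main() {
	idents := []ident{{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}}
	pruned, remaining := pruneBefore(idents, 3)
	fmt.Println(len(pruned), len(remaining)) // 2 2: epochs 1-2 pruned, 3 (boundary) and 4 kept
}
```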

View File

@@ -28,7 +28,6 @@ type ReadOnlyDatabase interface {
BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error)
BlockRootsBySlot(ctx context.Context, slot primitives.Slot) (bool, [][32]byte, error)
HasBlock(ctx context.Context, blockRoot [32]byte) bool
AvailableBlocks(ctx context.Context, blockRoots [][32]byte) map[[32]byte]bool
GenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
@@ -130,7 +129,6 @@ type NoHeadAccessDatabase interface {
// Custody operations.
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot primitives.Slot) error
// P2P Metadata operations.
SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error

View File

@@ -336,42 +336,6 @@ func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
return exists
}
// AvailableBlocks returns a set of roots indicating which blocks corresponding to `blockRoots` are available in the storage.
func (s *Store) AvailableBlocks(ctx context.Context, blockRoots [][32]byte) map[[32]byte]bool {
_, span := trace.StartSpan(ctx, "BeaconDB.AvailableBlocks")
defer span.End()
count := len(blockRoots)
availableRoots := make(map[[32]byte]bool, count)
// First, check the cache for each block root.
notInCacheRoots := make([][32]byte, 0, count)
for _, root := range blockRoots {
if v, ok := s.blockCache.Get(string(root[:])); v != nil && ok {
availableRoots[root] = true
continue
}
notInCacheRoots = append(notInCacheRoots, root)
}
// Next, check the database for the remaining block roots.
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
for _, root := range notInCacheRoots {
if bkt.Get(root[:]) != nil {
availableRoots[root] = true
}
}
return nil
}); err != nil {
panic(err) // lint:nopanic -- View never returns an error.
}
return availableRoots
}
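
The removed `AvailableBlocks` helper above answers a batched "which of these roots do we have?" query by consulting the block cache first and then checking all remaining roots inside a single read-only bolt transaction. A minimal standalone sketch of that two-tier lookup, illustrative only, using a plain map in place of Prysm's block cache:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

var blocksBucket = []byte("blocks")

// availableBlocks checks an in-memory set first and batches the misses into one
// read-only transaction, mirroring the cache-then-DB shape of the removed helper.
func availableBlocks(db *bolt.DB, cached map[[32]byte]bool, roots [][32]byte) map[[32]byte]bool {
	available := make(map[[32]byte]bool, len(roots))
	misses := make([][32]byte, 0, len(roots))
	for _, root := range roots {
		if cached[root] {
			available[root] = true
			continue
		}
		misses = append(misses, root)
	}
	_ = db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blocksBucket)
		if bkt == nil {
			return nil
		}
		for _, root := range misses {
			if bkt.Get(root[:]) != nil {
				available[root] = true
			}
		}
		return nil
	})
	return available
}

func main() {
	dir, err := os.MkdirTemp("", "availableblocks")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "test.db"), 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	r0, r1 := [32]byte{1}, [32]byte{2}
	if err := db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists(blocksBucket)
		if err != nil {
			return err
		}
		return bkt.Put(r0[:], []byte("block bytes"))
	}); err != nil {
		log.Fatal(err)
	}

	res := availableBlocks(db, nil, [][32]byte{r0, r1})
	fmt.Println(res[r0], res[r1]) // true false
}
```

Batching the misses into one View transaction keeps the lookup at a single transaction regardless of how many roots are requested.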
// BlocksBySlot retrieves a list of beacon blocks and its respective roots by slot.
func (s *Store) BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlocksBySlot")

View File

@@ -656,44 +656,6 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
}
}
func TestAvailableBlocks(t *testing.T) {
ctx := t.Context()
db := setupDB(t)
b0, b1, b2 := util.NewBeaconBlock(), util.NewBeaconBlock(), util.NewBeaconBlock()
b0.Block.Slot, b1.Block.Slot, b2.Block.Slot = 10, 20, 30
sb0, err := blocks.NewSignedBeaconBlock(b0)
require.NoError(t, err)
r0, err := b0.Block.HashTreeRoot()
require.NoError(t, err)
// Save b0 but remove it from cache.
err = db.SaveBlock(ctx, sb0)
require.NoError(t, err)
db.blockCache.Del(string(r0[:]))
// b1 is not saved at all.
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
// Save b2 in cache and DB.
sb2, err := blocks.NewSignedBeaconBlock(b2)
require.NoError(t, err)
r2, err := b2.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, sb2))
require.NoError(t, err)
expected := map[[32]byte]bool{r0: true, r2: true}
actual := db.AvailableBlocks(ctx, [][32]byte{r0, r1, r2})
require.Equal(t, len(expected), len(actual))
for i := range expected {
require.Equal(t, true, actual[i])
}
}
func TestStore_Blocks_FiltersCorrectly(t *testing.T) {
for _, tt := range blockTests {
t.Run(tt.name, func(t *testing.T) {

View File

@@ -132,6 +132,6 @@ func recoverStateSummary(ctx context.Context, tx *bolt.Tx, root []byte) error {
if err != nil {
return err
}
summaryBucket := tx.Bucket(stateSummaryBucket)
summaryBucket := tx.Bucket(stateBucket)
return summaryBucket.Put(root, summaryEnc)
}

View File

@@ -137,32 +137,3 @@ func TestStore_FinalizedCheckpoint_StateMustExist(t *testing.T) {
require.ErrorContains(t, errMissingStateForCheckpoint.Error(), db.SaveFinalizedCheckpoint(ctx, cp))
}
// Regression test: verify that saving a checkpoint triggers recovery which writes
// the state summary into the correct stateSummaryBucket so that HasStateSummary/StateSummary see it.
func TestRecoverStateSummary_WritesToStateSummaryBucket(t *testing.T) {
db := setupDB(t)
ctx := t.Context()
// Create a block without saving a state or summary, so recovery is needed.
blk := util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{})
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))
// Precondition: summary not present yet.
require.Equal(t, false, db.HasStateSummary(ctx, root))
// Saving justified checkpoint should trigger recovery path calling recoverStateSummary.
cp := &ethpb.Checkpoint{Epoch: 2, Root: root[:]}
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
// Postcondition: summary is visible via the public summary APIs (which read stateSummaryBucket).
require.Equal(t, true, db.HasStateSummary(ctx, root))
summary, err := db.StateSummary(ctx, root)
require.NoError(t, err)
require.NotNil(t, summary)
assert.DeepEqual(t, &ethpb.StateSummary{Slot: blk.Block.Slot, Root: root[:]}, summary)
}

View File

@@ -2,19 +2,16 @@ package kv
import (
"context"
"time"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
// UpdateCustodyInfo atomically updates the custody group count only if it is greater than the stored one.
// UpdateCustodyInfo atomically updates the custody group count only it is greater than the stored one.
// In this case, it also updates the earliest available slot with the provided value.
// It returns the (potentially updated) custody group count and earliest available slot.
func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
@@ -73,79 +70,6 @@ func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot pri
return storedEarliestAvailableSlot, storedGroupCount, nil
}
// UpdateEarliestAvailableSlot updates the earliest available slot.
func (s *Store) UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot primitives.Slot) error {
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateEarliestAvailableSlot")
defer span.End()
storedEarliestAvailableSlot := primitives.Slot(0)
if err := s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}
// Retrieve the stored earliest available slot.
storedEarliestAvailableSlotBytes := bucket.Get(earliestAvailableSlotKey)
if len(storedEarliestAvailableSlotBytes) != 0 {
storedEarliestAvailableSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(storedEarliestAvailableSlotBytes))
}
// Allow decrease (for backfill scenarios)
if earliestAvailableSlot <= storedEarliestAvailableSlot {
storedEarliestAvailableSlot = earliestAvailableSlot
bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot))
if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil {
return errors.Wrap(err, "put earliest available slot")
}
return nil
}
// Prevent increase within the MIN_EPOCHS_FOR_BLOCK_REQUESTS period
// This ensures we don't voluntarily refuse to serve mandatory block data
genesisTime := time.Unix(int64(params.BeaconConfig().MinGenesisTime+params.BeaconConfig().GenesisDelay), 0)
currentSlot := slots.CurrentSlot(genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
// Calculate the minimum required epoch (or 0 if we're early in the chain)
minRequiredEpoch := primitives.Epoch(0)
if currentEpoch > minEpochsForBlocks {
minRequiredEpoch = currentEpoch - minEpochsForBlocks
}
// Convert to slot to ensure we compare at slot-level granularity
minRequiredSlot, err := slots.EpochStart(minRequiredEpoch)
if err != nil {
return errors.Wrap(err, "calculate minimum required slot")
}
// Prevent any increase that would put earliest available slot beyond the minimum required slot
if earliestAvailableSlot > minRequiredSlot {
return errors.Errorf(
"cannot increase earliest available slot to %d (epoch %d) as it exceeds minimum required slot %d (epoch %d)",
earliestAvailableSlot, slots.ToEpoch(earliestAvailableSlot),
minRequiredSlot, minRequiredEpoch,
)
}
storedEarliestAvailableSlot = earliestAvailableSlot
bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot))
if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil {
return errors.Wrap(err, "put earliest available slot")
}
return nil
}); err != nil {
return err
}
log.WithField("earliestAvailableSlot", storedEarliestAvailableSlot).Debug("Updated earliest available slot")
return nil
}
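
The removed `UpdateEarliestAvailableSlot` helper above encodes two rules: decreases are always accepted (backfill only extends availability backwards), while an increase is rejected if it would move the earliest available slot past the start of the mandatory MIN_EPOCHS_FOR_BLOCK_REQUESTS window. A minimal sketch of just that validation, with placeholder constants and no database:

```go
package main

import "fmt"

const (
	slotsPerEpoch             = 32
	minEpochsForBlockRequests = 33024 // placeholder; use the network's config value
)

// validateEarliestAvailableSlot returns the slot to store, rejecting increases that
// would shrink availability below the mandatory block-request window.
func validateEarliestAvailableSlot(stored, proposed, currentSlot uint64) (uint64, error) {
	if proposed <= stored {
		return proposed, nil // decreases (backfill) are always accepted
	}
	currentEpoch := currentSlot / slotsPerEpoch
	minRequiredEpoch := uint64(0)
	if currentEpoch > minEpochsForBlockRequests {
		minRequiredEpoch = currentEpoch - minEpochsForBlockRequests
	}
	minRequiredSlot := minRequiredEpoch * slotsPerEpoch
	if proposed > minRequiredSlot {
		return stored, fmt.Errorf("cannot increase earliest available slot to %d: exceeds minimum required slot %d", proposed, minRequiredSlot)
	}
	return proposed, nil
}

func main() {
	fmt.Println(validateEarliestAvailableSlot(1000, 500, 2_000_000))       // backfill: accepted
	fmt.Println(validateEarliestAvailableSlot(1000, 1_500_000, 2_000_000)) // too recent: rejected
}
```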
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
// only if `subscribed` is `true`.
// It returns the previous subscription status.

Some files were not shown because too many files have changed in this diff.