Compare commits

..

10 Commits

Author SHA1 Message Date
satushh
32ef35c6ac bound zero sync participation tolerance in validatorsSyncParticipation 2026-03-06 16:39:11 +05:30
satushh
bc211b9b33 lint and changelog 2026-03-06 16:15:19 +05:30
satushh
9aa109d65a test commit 2 2026-03-06 11:27:52 +05:30
satushh
87debbc35e test commit 2026-03-06 10:44:38 +05:30
satushh
967d0cca4b deflake sync participation evaluator around fork transitions 2026-03-05 22:06:36 +05:30
satushh
474de75942 reset expected vote if required 2026-03-05 18:34:19 +05:30
satushh
8c7f35dd8b log only optimistic status check 2026-03-05 17:18:21 +05:30
satushh
a955f762eb updated comment 2026-03-05 14:32:09 +05:30
satushh
4a0b986581 harden evaluator + bug fix 2026-03-05 13:27:28 +05:30
satushh
89909b18a9 potential improvements to e2e 2026-03-05 10:09:44 +05:30
869 changed files with 7313 additions and 33892 deletions

View File

@@ -1,4 +1,4 @@
version: v1.7.0-alpha.4
version: v1.7.0-alpha.2
style: full
specrefs:
@@ -23,8 +23,6 @@ exceptions:
- PTC_SIZE#gloas
constants:
# heze
- DOMAIN_INCLUSION_LIST_COMMITTEE#heze
# phase0
- BASIS_POINTS#phase0
- ENDIANNESS#phase0
@@ -63,6 +61,7 @@ exceptions:
- ATTESTATION_TIMELINESS_INDEX#gloas
- BUILDER_INDEX_FLAG#gloas
- BUILDER_INDEX_SELF_BUILD#gloas
- DOMAIN_PROPOSER_PREFERENCES#gloas
- NUM_BLOCK_TIMELINESS_DEADLINES#gloas
- PTC_TIMELINESS_INDEX#gloas
@@ -73,31 +72,13 @@ exceptions:
- CONTRIBUTION_DUE_BPS_GLOAS#gloas
- GLOAS_FORK_EPOCH#gloas
- GLOAS_FORK_VERSION#gloas
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
# heze
- HEZE_FORK_EPOCH#heze
- HEZE_FORK_VERSION#heze
- INCLUSION_LIST_SUBMISSION_DUE_BPS#heze
- MAX_BYTES_PER_INCLUSION_LIST#heze
- MAX_REQUEST_INCLUSION_LIST#heze
- PROPOSER_INCLUSION_LIST_CUTOFF_BPS#heze
- VIEW_FREEZE_CUTOFF_BPS#heze
ssz_objects:
# phase0
- Eth1Block#phase0
# fulu
- PartialDataColumnHeader#fulu
- PartialDataColumnPartsMetadata#fulu
- PartialDataColumnSidecar#fulu
# gloas
- PartialDataColumnHeader#gloas
# heze
- BeaconState#heze
- ExecutionPayloadBid#heze
- InclusionList#heze
- SignedExecutionPayloadBid#heze
- SignedInclusionList#heze
# capella
- LightClientBootstrap#capella
- LightClientFinalityUpdate#capella
@@ -127,7 +108,6 @@ exceptions:
dataclasses:
# phase0
- LatestMessage#phase0
- Seen#phase0
- Store#phase0
# altair
- LightClientStore#altair
@@ -144,11 +124,6 @@ exceptions:
- ExpectedWithdrawals#gloas
- LatestMessage#gloas
- Store#gloas
# heze
- GetInclusionListResponse#heze
- InclusionListStore#heze
- PayloadAttributes#heze
- Store#heze
functions:
# Functions implemented by KZG library for EIP-4844
@@ -205,22 +180,11 @@ exceptions:
- verify_cell_kzg_proof_batch_impl#fulu
# phase0
- compute_attestation_subnet_prefix_bits#phase0
- compute_min_epochs_for_block_requests#phase0
- compute_time_at_slot_ms#phase0
- is_not_from_future_slot#phase0
- is_within_slot_range#phase0
- update_proposer_boost_root#phase0
- is_proposer_equivocation#phase0
- record_block_timeliness#phase0
- compute_proposer_score#phase0
- get_attestation_score#phase0
- validate_attester_slashing_gossip#phase0
- validate_beacon_aggregate_and_proof_gossip#phase0
- validate_beacon_attestation_gossip#phase0
- validate_beacon_block_gossip#phase0
- validate_proposer_slashing_gossip#phase0
- validate_voluntary_exit_gossip#phase0
- calculate_committee_fraction#phase0
- compute_fork_version#phase0
- compute_pulled_up_tip#phase0
@@ -311,7 +275,6 @@ exceptions:
- upgrade_lc_store_to_capella#capella
- upgrade_lc_update_to_capella#capella
# deneb
- compute_max_request_blob_sidecars#deneb
- get_lc_execution_root#deneb
- is_valid_light_client_header#deneb
- prepare_execution_payload#deneb
@@ -322,7 +285,6 @@ exceptions:
- upgrade_lc_store_to_deneb#deneb
- upgrade_lc_update_to_deneb#deneb
# electra
- compute_max_request_blob_sidecars#electra
- compute_weak_subjectivity_period#electra
- current_sync_committee_gindex_at_slot#electra
- finalized_root_gindex_at_slot#electra
@@ -344,20 +306,12 @@ exceptions:
- upgrade_lc_store_to_electra#electra
- upgrade_lc_update_to_electra#electra
# fulu
- compute_max_request_data_column_sidecars#fulu
- compute_matrix#fulu
- verify_partial_data_column_header_inclusion_proof#fulu
- verify_partial_data_column_sidecar_kzg_proofs#fulu
- get_blob_parameters#fulu
- get_data_column_sidecars_from_block#fulu
- get_data_column_sidecars_from_column_sidecar#fulu
- recover_matrix#fulu
# gloas
- compute_ptc#gloas
- initialize_ptc_window#gloas
- is_payload_data_available#gloas
- is_pending_validator#gloas
- process_ptc_window#gloas
- compute_balance_weighted_acceptance#gloas
- compute_balance_weighted_selection#gloas
- compute_fork_version#gloas
@@ -435,8 +389,10 @@ exceptions:
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
- get_pending_balance_to_withdraw_for_builder#gloas
- get_proposer_preferences_signature#gloas
- get_upcoming_proposal_slots#gloas
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_data_available#gloas
@@ -447,6 +403,7 @@ exceptions:
- is_valid_proposal_slot#gloas
- onboard_builders_from_pending_deposits#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- verify_data_column_sidecar_kzg_proofs#gloas
@@ -455,28 +412,6 @@ exceptions:
- update_next_withdrawal_builder_index#gloas
- update_payload_expected_withdrawals#gloas
- update_proposer_boost_root#gloas
# heze
- compute_fork_version#heze
- get_forkchoice_store#heze
- get_inclusion_list_bits#heze
- get_inclusion_list_committee_assignment#heze
- get_inclusion_list_committee#heze
- get_inclusion_list_signature#heze
- get_inclusion_list_store#heze
- get_inclusion_list_submission_due_ms#heze
- get_inclusion_list_transactions#heze
- get_proposer_inclusion_list_cutoff_ms#heze
- get_view_freeze_cutoff_ms#heze
- is_inclusion_list_bits_inclusive#heze
- is_payload_inclusion_list_satisfied#heze
- is_valid_inclusion_list_signature#heze
- on_execution_payload#heze
- on_inclusion_list#heze
- prepare_execution_payload#heze
- process_inclusion_list#heze
- record_payload_inclusion_list_satisfaction#heze
- should_extend_payload#heze
- upgrade_to_heze#heze
presets:
# gloas
@@ -485,5 +420,3 @@ exceptions:
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
- PTC_SIZE#gloas
# heze
- INCLUSION_LIST_COMMITTEE_SIZE#heze

View File

@@ -1,67 +0,0 @@
name: SBOM Export & Centralize
on:
push:
branches: [ "develop" ]
schedule:
- cron: '50 21 * * 2'
permissions:
contents: read
jobs:
generate-and-upload:
runs-on: ubuntu-latest
steps:
- name: Checkout Source Code
uses: actions/checkout@v6
- name: Check for recent changes
id: check
run: |
if [ -z "$(git log --since='7 days ago' --oneline | head -1)" ]; then
echo "No commits in the last 7 days, skipping SBOM generation."
echo "skip=true" >> "$GITHUB_OUTPUT"
fi
- name: Generate CycloneDX SBOM via cdxgen
if: steps.check.outputs.skip != 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
docker run --rm \
--user "$(id -u):$(id -g)" \
-v /tmp:/tmp \
-v "${{ github.workspace }}:/app:rw" \
-e FETCH_LICENSE=true \
-e GITHUB_TOKEN \
ghcr.io/cdxgen/cdxgen:v12.1.1 \
-r /app \
-o /app/sbom.cdx.json \
--no-install-deps \
--spec-version 1.6
if [ ! -s sbom.cdx.json ]; then
echo "::error::cdxgen SBOM generation failed or returned empty."
exit 1
fi
echo "SBOM generated successfully:"
ls -lh sbom.cdx.json
- name: Upload SBOM to Dependency Track
if: steps.check.outputs.skip != 'true'
env:
DT_API_KEY: ${{ secrets.DEPENDENCY_TRACK_API_KEY }}
DT_URL: ${{ secrets.DEPENDENCY_TRACK_URL }}
run: |
REPO_NAME=${GITHUB_REPOSITORY##*/}
curl -sf -X POST "${DT_URL}/api/v1/bom" \
-H "X-Api-Key: ${DT_API_KEY}" \
-F "autoCreate=true" \
-F "projectName=${REPO_NAME}" \
-F "projectVersion=${{ github.ref_name }}" \
-F "bom=@sbom.cdx.json"
echo "SBOM uploaded to Dependency Track for ${REPO_NAME}@${{ github.ref_name }}"

View File

@@ -4,116 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [v7.1.3](https://github.com/prysmaticlabs/prysm/compare/v7.1.2...v7.1.3) - 2026-03-18
This release brings extensive Gloas (next fork) groundwork, a major logging infrastructure overhaul, and numerous performance optimizations across the beacon chain. A security update to go-ethereum v1.16.8 is also included.
Release highlights:
- **Gloas fork preparation**: Builder registry, bid processing, payload attestation, proposer slashing, slot processing, block API endpoints, and duty timing intervals are all wired up.
- **Logging revamp**: New ephemeral debug logfile (24h retention, enabled by default), per-package loggers with CI enforcement, per-hook verbosity control (`--log.vmodule`), and a version banner at startup.
- **Performance**: Forkchoice updates moved to background, post-Electra attestation data cached per slot, parallel data column cache warmup, reduced heap allocations in SSZ marshaling and `MixInLength`, and proposer preprocessing behind a feature flag.
- **Validator client**: gRPC fallback now matches the REST API implementation — both connect only to fully synced nodes. The gRPC health endpoint returns an error on syncing/optimistic status.
- **Security**: go-ethereum updated to v1.16.8; fixed an authentication bypass on `/v2/validator/*` endpoints.
- **State storage**: Initial support for the `hdiff` state-diff feature — migration-to-cold and DB initialization are now available behind feature flags.
There are no known security issues in this release. Operators can update at their convenience.
### Added
- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16109)
- Added separate logrus hooks for handling the formatting and output of terminal logs vs log-file logs, instead of the. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16102)
- Batch publish data columns for faster data propagation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16183)
- `--disable-get-blobs-v2` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16155)
- Update spectests to v1.7.0-alpha.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16219)
- Added basic Gloas builder support (`Builder` message and `BeaconStateGloas` `builders`/`next_withdrawal_builder_index` fields). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16164)
- Added an ephemeral debug logfile for beacon and validator nodes that captures debug-level logs for 24 hours. It. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16108)
- Add a feature flag to pass spectests with low validator count. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16231)
- Add feature flag `--enable-proposer-preprocessing` to process the block and verify signatures before proposing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15920)
- Add `ProofByFieldIndex` to generalize merkle proof generation for `BeaconState`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15443)
- Update spectests to v1.7.0-alpha.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16246)
- Add feature flag to use hashtree instead of gohashtree. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16216)
- Migrate to cold with the hdiff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16049)
- Adding basic fulu fork transition support for mainnet and minimal e2e tests (multi scenario is not included). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
- `commitment_count_in_gossip_processed_blocks` gauge metric to track the number of blob KZG commitments in processed beacon blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16254)
- Add Gloas latest execution bid processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15638)
- Added shell completion support for `beacon-chain` and `validator` CLI tools. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16245)
- add pending payments processing and quorum threshold, plus spectests and state hooks (rotate/append). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15655)
- Add slot processing with execution payload availability updates. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15730)
- Implement modified proposer slashing for gloas. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16212)
- Added missing beacon config in fulu so that the presets don't go missing in /eth/v1/config/spec beacon api. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16170)
- Close opened file in data_column.go. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16274)
- Flag `--log.vmodule` to set per-package verbosity levels for logging. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16272)
- Added a version log at startup to display the version of the build. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16283)
- gloas block return support for /eth/v2/beacon/blocks/{block_id} and /eth/v1/beacon/blocks/{block_id}/root endpoints. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16278)
- Add Gloas process payload attestation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15650)
- Initialize db with state-diff feature flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16203)
- Gloas-specific timing intervals for validator attestation, aggregation, and sync duties. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16291)
- Added new proofCollector type to ssz-query. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16177)
- Added README for maintaining specrefs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16302)
- The ability to download the nightly reference tests from a specific day. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16298)
- Set beacon node options after reading the config file. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16320)
- Implement finalization-based eviction for `CheckpointStateCache`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16458)
### Changed
- Performance improvement in ProcessConsolidationRequests: Use more performant HasPendingBalanceToWithdraw instead of PendingBalanceToWithdraw as there is no need to calculate the full total pending balance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16189)
- Extend `httperror` analyzer to more functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16186)
- Do not check block signature on state transition. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14820)
- Notify the engine about forkchoice updates in the background. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16149)
- Use a separate context when updating the slot cache. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16209)
- Data column sidecars cache warmup: Process in parallel all sidecars for a given epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16207)
- Use lookahead to validate data column sidecar proposer index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16202)
- Summarize DEBUG log corresponding to incoming via gossip data column sidecar. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16210)
- Added a log.go file for every important package with a logger variable containing a `package` field set to the package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
- Added a CI check to ensure every important package has a log.go file with the correct `package` field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
- Changed the log formatter to use this `package` field instead of the previous `prefix` field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
- Replaced `time.Sleep` with `require.Eventually` polling in tests to fix flaky behavior caused by race conditions between goroutines and assertions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16217)
- changed IsHealthy check to IsReady for validator client's interpretation from /eth/v1/node/health, 206 will now return false as the node is syncing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16167)
- Performance improvement in state (MarshalSSZTo): use copy() instead of byte-by-byte loop which isn't required. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16222)
- Moved verbosity settings to be configurable per hook, rather than just globally. This allows us to control the. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16106)
- updated go ethereum to 1.16.7. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
- Use dependent root and target root to verify data column proposer index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16250)
- post electra we now call attestation data once per slot and use a cache for subsequent requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16236)
- Avoid unnecessary heap allocation while calling MixInLength. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16251)
- Log commitments instead of indices in missingCommitError. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16258)
- Added some defensive checks to prevent overflows in block batch requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16227)
- gRPC health endpoint will now return an error on syncing or optimistic status showing that it's unavailable. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16294)
- Sample PTC per committee to reduce allocations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16293)
- gRPC fallback now matches rest api implementation and will also check and connect to only synced nodes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16215)
- Improved node fallback logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16316)
- Improved integrations with ethspecify so specrefs can be used throughout the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16304)
- Fixed the logging issue described in #16314. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16322)
### Removed
- removed github.com/MariusVanDerWijden/FuzzyVM and github.com/MariusVanDerWijden/tx-fuzz due to lack of support post 1.16.7, only used in e2e for transaction fuzzing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
- Remove unused `delay` parameter from `fetchOriginDataColumnSidecars` function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16262)
- Batching of KZG verification for incoming via gossip data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16240)
- `--disable-get-blobs-v2` flag from help. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16265)
- gRPC resolver for load balancing, the new implementation matches rest api's so we should remove the resolver so it's handled the same way for consistency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16215)
### Fixed
- avoid panic when fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16175)
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16173)
- Don't call trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize) twice. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16211)
- When adding the `--[semi-]supernode` flag, update the earliest available slot accordingly. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16230)
- fixed broken and old links to actual. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15856)
- stop SlotIntervalTicker goroutine leaks [#16241](https://github.com/OffchainLabs/prysm/pull/16241). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16241)
- Fix `prysmctl testnet generate-genesis` to use the timestamp from `--geth-genesis-json-in` when `--genesis-time` is not explicitly provided. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16239)
- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16226)
- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16194)
- Fixed a bug in `hack/check-logs.sh` where untracked files were ignored. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16287)
- Fix hashtree release builds. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16288)
- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16281)
- a potential race condition when switching hosts quickly and reconnecting to same host on an old connection. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16316)
- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16328)
### Security
- Update go-ethereum to v1.16.8. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16252)
## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07
Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).
@@ -4156,4 +4046,4 @@ There are no security updates in this release.
# Older than v2.0.0
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.4"
consensus_spec_version = "v1.7.0-alpha.2"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-kNJxuhCtW4RbuS9nb4U6JXHlPgTSg6G3hWeHFVB9gZ4=",
"minimal": "sha256-U1tCkXxtdI6mkEdk80i8z9LU2hAyf7Ztz5SBYo5oMzo=",
"mainnet": "sha256-Ga8VDOcNhTTdXDj8tSyBVYrwya9f1HO94ehJ5vv91r4=",
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-XHu5K/65mue+5po63L9yGTFjGfU1RGj4S56dmcHc2Rs=",
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -46,7 +46,7 @@ func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecke
"previous": startingHost,
"current": provider.CurrentHost(),
"tried": attemptedHosts,
}).Warn("Switched to responsive beacon node")
}).Info("Switched to responsive beacon node")
}
return true
}

View File

@@ -3,15 +3,14 @@ package api
import "net/http"
const (
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
ExecutionPayloadIncludedHeader = "Eth-Execution-Payload-Included"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
ConsensusBlockValueHeader = "Eth-Consensus-Block-Value"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
)
// SetSSEHeaders sets the headers needed for a server-sent event response.

View File

@@ -29,8 +29,6 @@ type Server struct {
startFailure error
}
const eventStreamPath = "/eth/v1/events"
// New returns a new instance of the Server.
func New(ctx context.Context, opts ...Option) (*Server, error) {
g := &Server{
@@ -50,17 +48,7 @@ func New(ctx context.Context, opts ...Option) (*Server, error) {
handler = middleware.MiddlewareChain(g.cfg.router, g.cfg.middlewares)
if g.cfg.timeout > 0*time.Second {
defaultReadHeaderTimeout = g.cfg.timeout
baseHandler := handler
timeoutHandler := http.TimeoutHandler(baseHandler, g.cfg.timeout, "request timed out")
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// SSE streams stay open indefinitely, so the global timeout wrapper must not
// cancel `/eth/v1/events` before the handler starts streaming responses.
if r.URL != nil && r.URL.Path == eventStreamPath {
baseHandler.ServeHTTP(w, r)
return
}
timeoutHandler.ServeHTTP(w, r)
})
handler = http.TimeoutHandler(handler, g.cfg.timeout, "request timed out")
}
g.server = &http.Server{
Addr: g.cfg.httpAddr,

View File

@@ -7,9 +7,7 @@ import (
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/testing/assert"
@@ -39,18 +37,10 @@ func TestServer_StartStop(t *testing.T) {
require.NoError(t, err)
g.Start()
require.Eventually(t, func() bool {
foundStart := false
for _, entry := range hook.AllEntries() {
if strings.Contains(entry.Message, "Starting HTTP server") {
foundStart = true
}
if strings.Contains(entry.Message, "Starting API middleware") {
return false
}
}
return foundStart
}, time.Second, 10*time.Millisecond)
go func() {
require.LogsContain(t, hook, "Starting HTTP server")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
require.NoError(t, err)
}
@@ -78,51 +68,3 @@ func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}
func TestServer_TimeoutHandlerBypassesSSE(t *testing.T) {
handler := http.NewServeMux()
handler.HandleFunc(eventStreamPath, func(w http.ResponseWriter, _ *http.Request) {
time.Sleep(20 * time.Millisecond)
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte("stream-open"))
require.NoError(t, err)
})
g, err := New(t.Context(),
WithHTTPAddr("127.0.0.1:0"),
WithRouter(handler),
WithTimeout(5*time.Millisecond),
)
require.NoError(t, err)
req := httptest.NewRequest(http.MethodGet, eventStreamPath, nil)
writer := httptest.NewRecorder()
g.server.Handler.ServeHTTP(writer, req)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, "stream-open", writer.Body.String())
}
func TestServer_TimeoutHandlerStillAppliesToNonSSE(t *testing.T) {
handler := http.NewServeMux()
handler.HandleFunc("/foo", func(w http.ResponseWriter, _ *http.Request) {
time.Sleep(20 * time.Millisecond)
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte("ok"))
require.NoError(t, err)
})
g, err := New(t.Context(),
WithHTTPAddr("127.0.0.1:0"),
WithRouter(handler),
WithTimeout(5*time.Millisecond),
)
require.NoError(t, err)
req := httptest.NewRequest(http.MethodGet, "/foo", nil)
writer := httptest.NewRecorder()
g.server.Handler.ServeHTTP(writer, req)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "request timed out"))
}

View File

@@ -54,13 +54,11 @@ go_test(
name = "go_default_test",
srcs = [
"conversions_block_execution_test.go",
"conversions_block_gloas_test.go",
"conversions_test.go",
],
embed = [":go_default_library"],
deps = [
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -540,12 +540,6 @@ type PayloadAttestation struct {
Signature string `json:"signature"`
}
type PayloadAttestationMessage struct {
ValidatorIndex string `json:"validator_index"`
Data *PayloadAttestationData `json:"data"`
Signature string `json:"signature"`
}
type BeaconBlockBodyGloas struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
@@ -584,13 +578,6 @@ func (s *SignedBeaconBlockGloas) SigString() string {
return s.Signature
}
type BlockContentsGloas struct {
Block *BeaconBlockGloas `json:"block"`
ExecutionPayloadEnvelope *ExecutionPayloadEnvelope `json:"execution_payload_envelope"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type ExecutionPayloadEnvelope struct {
Payload *ExecutionPayloadDeneb `json:"payload"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`

View File

@@ -2966,14 +2966,6 @@ func PayloadAttestationFromConsensus(pa *eth.PayloadAttestation) *PayloadAttesta
}
}
func PayloadAttestationMessageFromConsensus(m *eth.PayloadAttestationMessage) *PayloadAttestationMessage {
return &PayloadAttestationMessage{
ValidatorIndex: fmt.Sprintf("%d", m.ValidatorIndex),
Data: PayloadAttestationDataFromConsensus(m.Data),
Signature: hexutil.Encode(m.Signature),
}
}
func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *PayloadAttestationData {
return &PayloadAttestationData{
BeaconBlockRoot: hexutil.Encode(d.BeaconBlockRoot),
@@ -2983,19 +2975,6 @@ func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *Payload
}
}
func (b *SignedBeaconBlockGloas) ToGeneric() (*eth.GenericSignedBeaconBlock, error) {
if b == nil {
return nil, errNilValue
}
signed, err := b.ToConsensus()
if err != nil {
return nil, err
}
return &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Gloas{Gloas: signed},
}, nil
}
func (b *SignedBeaconBlockGloas) ToConsensus() (*eth.SignedBeaconBlockGloas, error) {
if b == nil {
return nil, errNilValue
@@ -3150,14 +3129,6 @@ func (b *BeaconBlockBodyGloas) ToConsensus() (*eth.BeaconBlockBodyGloas, error)
}, nil
}
func (b *BeaconBlockGloas) ToGeneric() (*eth.GenericBeaconBlock, error) {
block, err := b.ToConsensus()
if err != nil {
return nil, errors.Wrap(err, "could not convert gloas block to consensus")
}
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_Gloas{Gloas: block}}, nil
}
func (b *SignedExecutionPayloadBid) ToConsensus() (*eth.SignedExecutionPayloadBid, error) {
if b == nil {
return nil, errNilValue
@@ -3305,113 +3276,25 @@ func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, err
}, nil
}
// ExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func ExecutionPayloadEnvelopeFromConsensus(e *eth.ExecutionPayloadEnvelope) (*ExecutionPayloadEnvelope, error) {
payload, err := ExecutionPayloadDenebFromConsensus(e.Payload)
// SignedExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
payload, err := ExecutionPayloadDenebFromConsensus(e.Message.Payload)
if err != nil {
return nil, err
}
var requests *ExecutionRequests
if e.ExecutionRequests != nil {
requests = ExecutionRequestsFromConsensus(e.ExecutionRequests)
}
return &ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: requests,
BuilderIndex: fmt.Sprintf("%d", e.BuilderIndex),
BeaconBlockRoot: hexutil.Encode(e.BeaconBlockRoot),
Slot: fmt.Sprintf("%d", e.Slot),
StateRoot: hexutil.Encode(e.StateRoot),
}, nil
}
// SignedExecutionPayloadEnvelopeFromConsensus converts a signed proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
envelope, err := ExecutionPayloadEnvelopeFromConsensus(e.Message)
if err != nil {
return nil, err
if e.Message.ExecutionRequests != nil {
requests = ExecutionRequestsFromConsensus(e.Message.ExecutionRequests)
}
return &SignedExecutionPayloadEnvelope{
Message: envelope,
Message: &ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: requests,
BuilderIndex: fmt.Sprintf("%d", e.Message.BuilderIndex),
BeaconBlockRoot: hexutil.Encode(e.Message.BeaconBlockRoot),
Slot: fmt.Sprintf("%d", e.Message.Slot),
StateRoot: hexutil.Encode(e.Message.StateRoot),
},
Signature: hexutil.Encode(e.Signature),
}, nil
}
// BlockContentsGloasFromConsensus converts a proto Gloas block and envelope to the API struct.
// KzgProofs and Blobs are currently returned empty (not yet wired to the blobs
// bundle — see TODOs below).
func BlockContentsGloasFromConsensus(block *eth.BeaconBlockGloas, envelope *eth.ExecutionPayloadEnvelope) (*BlockContentsGloas, error) {
	b, err := BeaconBlockGloasFromConsensus(block)
	if err != nil {
		return nil, err
	}
	env, err := ExecutionPayloadEnvelopeFromConsensus(envelope)
	if err != nil {
		return nil, err
	}
	return &BlockContentsGloas{
		Block:                    b,
		ExecutionPayloadEnvelope: env,
		KzgProofs:                []string{}, // TODO: populate from blobs bundle
		Blobs:                    []string{}, // TODO: populate from blobs bundle
	}, nil
}
// ToConsensus converts the API struct to a proto ExecutionPayloadEnvelope.
// Every decode failure is wrapped in a server.NewDecodeError naming the
// offending field so API callers receive a precise error message.
func (e *ExecutionPayloadEnvelope) ToConsensus() (*eth.ExecutionPayloadEnvelope, error) {
	if e == nil {
		return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadEnvelope")
	}
	payload, err := e.Payload.ToConsensus()
	if err != nil {
		return nil, server.NewDecodeError(err, "Payload")
	}
	// ExecutionRequests is optional; when absent it stays nil in the proto.
	var requests *enginev1.ExecutionRequests
	if e.ExecutionRequests != nil {
		requests, err = e.ExecutionRequests.ToConsensus()
		if err != nil {
			return nil, server.NewDecodeError(err, "ExecutionRequests")
		}
	}
	builderIndex, err := strconv.ParseUint(e.BuilderIndex, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "BuilderIndex")
	}
	beaconBlockRoot, err := bytesutil.DecodeHexWithLength(e.BeaconBlockRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "BeaconBlockRoot")
	}
	slot, err := strconv.ParseUint(e.Slot, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "Slot")
	}
	stateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "StateRoot")
	}
	return &eth.ExecutionPayloadEnvelope{
		Payload:           payload,
		ExecutionRequests: requests,
		BuilderIndex:      primitives.BuilderIndex(builderIndex),
		BeaconBlockRoot:   beaconBlockRoot,
		Slot:              primitives.Slot(slot),
		StateRoot:         stateRoot,
	}, nil
}
// ToConsensus converts the API struct to a proto SignedExecutionPayloadEnvelope.
// Decode failures are reported via server.NewDecodeError with the field name.
func (e *SignedExecutionPayloadEnvelope) ToConsensus() (*eth.SignedExecutionPayloadEnvelope, error) {
	if e == nil {
		return nil, server.NewDecodeError(errNilValue, "SignedExecutionPayloadEnvelope")
	}
	message, err := e.Message.ToConsensus()
	if err != nil {
		return nil, server.NewDecodeError(err, "Message")
	}
	signature, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "Signature")
	}
	return &eth.SignedExecutionPayloadEnvelope{
		Message:   message,
		Signature: signature,
	}, nil
}

View File

@@ -1,67 +0,0 @@
package structs
import (
"testing"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// testEnvelopeProto builds a fully populated ExecutionPayloadEnvelope proto
// fixture. Each byte field uses a distinct repeated-byte pattern so tests can
// check per-field round-tripping without cross-field collisions.
// NOTE(review): fillByteSlice is a package-local helper; presumably it returns
// a slice of the given length filled with the given byte — confirm.
func testEnvelopeProto() *eth.ExecutionPayloadEnvelope {
	return &eth.ExecutionPayloadEnvelope{
		Payload: &enginev1.ExecutionPayloadDeneb{
			ParentHash:    fillByteSlice(common.HashLength, 0xaa),
			FeeRecipient:  fillByteSlice(20, 0xbb),
			StateRoot:     fillByteSlice(32, 0xcc),
			ReceiptsRoot:  fillByteSlice(32, 0xdd),
			LogsBloom:     fillByteSlice(256, 0xee),
			PrevRandao:    fillByteSlice(32, 0xff),
			BaseFeePerGas: fillByteSlice(32, 0x11),
			BlockHash:     fillByteSlice(common.HashLength, 0x22),
		},
		ExecutionRequests: &enginev1.ExecutionRequests{},
		BuilderIndex:      7,
		BeaconBlockRoot:   fillByteSlice(32, 0x33),
		Slot:              42,
		StateRoot:         fillByteSlice(32, 0x44),
	}
}
// TestExecutionPayloadEnvelopeFromConsensus checks that scalar fields are
// stringified and byte fields are hex-encoded when converting the proto
// envelope to the API struct.
func TestExecutionPayloadEnvelopeFromConsensus(t *testing.T) {
	env := testEnvelopeProto()
	result, err := ExecutionPayloadEnvelopeFromConsensus(env)
	require.NoError(t, err)
	require.NotNil(t, result.Payload)
	require.Equal(t, hexutil.Encode(env.Payload.ParentHash), result.Payload.ParentHash)
	require.Equal(t, "7", result.BuilderIndex)
	require.Equal(t, hexutil.Encode(env.BeaconBlockRoot), result.BeaconBlockRoot)
	require.Equal(t, "42", result.Slot)
	require.Equal(t, hexutil.Encode(env.StateRoot), result.StateRoot)
	require.NotNil(t, result.ExecutionRequests)
}
// TestExecutionPayloadEnvelopeFromConsensus_NilRequests checks that a nil
// ExecutionRequests in the proto stays nil in the API struct (not an empty
// struct).
func TestExecutionPayloadEnvelopeFromConsensus_NilRequests(t *testing.T) {
	env := testEnvelopeProto()
	env.ExecutionRequests = nil
	result, err := ExecutionPayloadEnvelopeFromConsensus(env)
	require.NoError(t, err)
	require.Equal(t, (*ExecutionRequests)(nil), result.ExecutionRequests)
}
// TestBlockContentsGloasFromConsensus checks that the block and envelope are
// both converted and that KzgProofs/Blobs come back empty (they are not yet
// populated from the blobs bundle).
func TestBlockContentsGloasFromConsensus(t *testing.T) {
	block := util.NewBeaconBlockGloas().Block
	env := testEnvelopeProto()
	result, err := BlockContentsGloasFromConsensus(block, env)
	require.NoError(t, err)
	require.NotNil(t, result.Block)
	require.NotNil(t, result.Block.Body)
	require.NotNil(t, result.ExecutionPayloadEnvelope)
	require.Equal(t, hexutil.Encode(env.BeaconBlockRoot), result.ExecutionPayloadEnvelope.BeaconBlockRoot)
	require.Equal(t, 0, len(result.KzgProofs))
	require.Equal(t, 0, len(result.Blobs))
}

View File

@@ -5,7 +5,6 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
@@ -489,7 +488,7 @@ func TestBeaconStateGloasFromConsensus(t *testing.T) {
state.GenesisTime = 123
state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
state.Slot = 5
state.ProposerLookahead = []primitives.ValidatorIndex{1, 2}
state.ProposerLookahead = []uint64{1, 2}
state.LatestExecutionPayloadBid = &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x12}, 32),

View File

@@ -53,8 +53,8 @@ type ChainReorgEvent struct {
Slot string `json:"slot"`
Depth string `json:"depth"`
OldHeadBlock string `json:"old_head_block"`
NewHeadBlock string `json:"new_head_block"`
OldHeadState string `json:"old_head_state"`
NewHeadBlock string `json:"new_head_block"`
OldHeadState string `json:"old_head_state"`
NewHeadState string `json:"new_head_state"`
Epoch string `json:"epoch"`
ExecutionOptimistic bool `json:"execution_optimistic"`

View File

@@ -95,14 +95,6 @@ type ProduceBlockV3Response struct {
Data json.RawMessage `json:"data"` // represents the block values based on the version
}
// ProduceBlockV4Response is a wrapper json object for the returned block from the ProduceBlockV4 endpoint.
type ProduceBlockV4Response struct {
	Version string `json:"version"` // fork version name of the returned block
	ConsensusBlockValue string `json:"consensus_block_value"`
	ExecutionPayloadIncluded bool `json:"execution_payload_included"`
	Data json.RawMessage `json:"data"` // version-dependent block payload, decoded by the caller
}
// GetLivenessResponse is the response body for the validator liveness endpoint.
type GetLivenessResponse struct {
	Data []*Liveness `json:"data"`
}
@@ -159,11 +151,6 @@ type ValidatorParticipation struct {
PreviousEpochHeadAttestingGwei string `json:"previous_epoch_head_attesting_gwei"`
}
// GetValidatorExecutionPayloadEnvelopeResponse wraps an execution payload
// envelope together with the fork version it belongs to.
type GetValidatorExecutionPayloadEnvelopeResponse struct {
	Version string `json:"version"`
	Data *ExecutionPayloadEnvelope `json:"data"`
}
type ActiveSetChanges struct {
Epoch string `json:"epoch"`
ActivatedPublicKeys []string `json:"activated_public_keys"`

View File

@@ -141,7 +141,6 @@ go_test(
"service_test.go",
"setup_forkchoice_test.go",
"setup_test.go",
"tracked_proposer_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -155,6 +154,7 @@ go_test(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",

View File

@@ -40,15 +40,11 @@ type ChainInfoFetcher interface {
// of locking forkchoice
type ForkchoiceFetcher interface {
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
BlockHash(root [32]byte) ([32]byte, error)
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(time.Time)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HasFullNode([32]byte) bool
PayloadContentLookup([32]byte) ([32]byte, bool)
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
InsertPayload(interfaces.ROExecutionPayloadEnvelope) error
@@ -58,8 +54,6 @@ type ForkchoiceFetcher interface {
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
DependentRoot(primitives.Epoch) ([32]byte, error)
CanonicalNodeAtSlot(primitives.Slot) ([32]byte, bool)
ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -599,26 +593,3 @@ func (s *Service) inRegularSync() bool {
func (s *Service) validating() bool {
return s.cfg.TrackedValidatorsCache.Validating()
}
// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
// Data is ignored when its slot is in the current (or a later) epoch while its
// parent is a canonical block from before the justified checkpoint epoch.
// Takes the forkchoice read lock for the duration of the checks.
func (s *Service) ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool {
	currentEpoch := slots.ToEpoch(s.CurrentSlot())
	// Data from a past epoch is never ignored by this check.
	if slots.ToEpoch(dataSlot) < currentEpoch {
		return false
	}
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
	if err != nil {
		// This should not happen. The caller should have already checked the parent is in forkchoice.
		return false
	}
	j := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
	if j == nil {
		return false
	}
	// Parents at or after the justified epoch are still contested; keep the data.
	if slots.ToEpoch(parentSlot) >= j.Epoch {
		return false
	}
	return s.cfg.ForkChoiceStore.IsCanonical(parentRoot)
}

View File

@@ -42,34 +42,6 @@ func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}
// HighestReceivedBlockRoot reports the root tracked by forkchoice for the
// highest received block, under the forkchoice read lock.
func (s *Service) HighestReceivedBlockRoot() [32]byte {
	store := s.cfg.ForkChoiceStore
	store.RLock()
	defer store.RUnlock()
	return store.HighestReceivedBlockRoot()
}
// BlockHash returns the execution payload block hash for the given beacon block root from forkchoice.
// Acquires the forkchoice read lock; errors come from the underlying store
// (e.g. unknown root).
func (s *Service) BlockHash(root [32]byte) ([32]byte, error) {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.BlockHash(root)
}
// HasFullNode reports whether forkchoice tracks a full node for the given
// root, under the forkchoice read lock.
func (s *Service) HasFullNode(root [32]byte) bool {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.HasFullNode(root)
}
// PayloadContentLookup returns the preferred payload-content lookup key from forkchoice.
// The boolean mirrors the store's result; acquires the forkchoice read lock.
func (s *Service) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.PayloadContentLookup(root)
}
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
s.cfg.ForkChoiceStore.RLock()
@@ -157,13 +129,6 @@ func (s *Service) hashForGenesisBlock(ctx context.Context, root [32]byte) ([]byt
if st.Version() < version.Bellatrix {
return nil, nil
}
if st.Version() >= version.Gloas {
h, err := st.LatestBlockHash()
if err != nil {
return nil, errors.Wrap(err, "could not get latest block hash")
}
return bytesutil.SafeCopyBytes(h[:]), nil
}
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, errors.Wrap(err, "could not get latest execution payload header")

View File

@@ -20,7 +20,6 @@ import (
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
"google.golang.org/protobuf/proto"
)
@@ -736,73 +735,6 @@ func TestParentPayloadReady(t *testing.T) {
})
}
// TestService_ShouldIgnoreData covers the decision table of ShouldIgnoreData:
// past-epoch data, unknown parents, parents at/after the justified epoch, and
// canonical vs non-canonical parents from before the justified epoch.
func TestService_ShouldIgnoreData(t *testing.T) {
	service, tr := minimalTestService(t)
	ctx := t.Context()
	fcs := tr.fcs
	zeroHash := params.BeaconConfig().ZeroHash
	currentSlot := service.CurrentSlot()
	currentEpoch := slots.ToEpoch(currentSlot)
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	// Build a chain in forkchoice:
	// genesis (slot 0) -> nodeA (slot 1, epoch 0) -> nodeB (slot slotsPerEpoch, epoch 1) -> nodeC (slot 2*slotsPerEpoch, epoch 2)
	nodeARoot := [32]byte{1}
	nodeBRoot := [32]byte{2}
	nodeCRoot := [32]byte{3}
	nodeASlot := primitives.Slot(1)
	nodeBSlot := primitives.Slot(slotsPerEpoch) // epoch 1
	nodeCSlot := primitives.Slot(2 * slotsPerEpoch) // epoch 2
	stA, robA, err := prepareForkchoiceState(ctx, nodeASlot, nodeARoot, zeroHash, [32]byte{10}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stA, robA))
	stB, robB, err := prepareForkchoiceState(ctx, nodeBSlot, nodeBRoot, nodeARoot, [32]byte{11}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stB, robB))
	stC, robC, err := prepareForkchoiceState(ctx, nodeCSlot, nodeCRoot, nodeBRoot, [32]byte{12}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stC, robC))
	// Set justified checkpoint to nodeB (epoch 1).
	fcs.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
	require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 1, Root: nodeBRoot}))
	t.Run("past epoch data is not ignored", func(t *testing.T) {
		pastSlot := primitives.Slot((currentEpoch - 1) * primitives.Epoch(slotsPerEpoch))
		require.Equal(t, false, service.ShouldIgnoreData(nodeARoot, pastSlot))
	})
	t.Run("parent not in forkchoice", func(t *testing.T) {
		unknownRoot := [32]byte{99}
		require.Equal(t, false, service.ShouldIgnoreData(unknownRoot, currentSlot))
	})
	t.Run("parent epoch at or after justified", func(t *testing.T) {
		// nodeB is at epoch 1, justified is epoch 1 => parentEpoch >= justified => false
		require.Equal(t, false, service.ShouldIgnoreData(nodeBRoot, currentSlot))
	})
	t.Run("canonical parent before justified is ignored", func(t *testing.T) {
		// nodeA is at epoch 0 < justified epoch 1, and is canonical => true
		require.Equal(t, true, service.ShouldIgnoreData(nodeARoot, currentSlot))
	})
	t.Run("non-canonical parent before justified is not ignored", func(t *testing.T) {
		// Insert a fork: nodeD at slot 2 (epoch 0) branching from nodeA, not on the canonical chain.
		nodeDRoot := [32]byte{4}
		stD, robD, err := prepareForkchoiceState(ctx, 2, nodeDRoot, nodeARoot, [32]byte{13}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
		require.NoError(t, err)
		require.NoError(t, fcs.InsertNode(ctx, stD, robD))
		// nodeD is at epoch 0 < justified epoch 1, but not canonical => false
		require.Equal(t, false, service.ShouldIgnoreData(nodeDRoot, currentSlot))
	})
}
func Test_hashForGenesisRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()
@@ -820,23 +752,3 @@ func Test_hashForGenesisRoot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{}, [32]byte(genRoot))
}
// Test_hashForGenesisRoot_Gloas verifies that for a Gloas genesis state the
// block hash comes from the state's LatestBlockHash field rather than the
// latest execution payload header.
func Test_hashForGenesisRoot_Gloas(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
	c := setupBeaconChain(t, beaconDB)
	expectedHash := [32]byte{1, 2, 3, 4, 5}
	st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
		LatestBlockHash: expectedHash[:],
	})
	require.NoError(t, err)
	genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
	genesisRoot := [32]byte{0xaa}
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
	genHash, err := c.hashForGenesisBlock(ctx, genesisRoot)
	require.NoError(t, err)
	require.Equal(t, expectedHash, [32]byte(genHash))
}

View File

@@ -271,7 +271,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests, blk.Block().Slot())
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
if err == nil {
newPayloadValidNodeCount.Inc()
return true, nil
@@ -321,7 +321,7 @@ func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, paren
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) payloadattribute.Attributer {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// If it is an epoch boundary then process slots to get the right
@@ -343,7 +343,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot, slot)
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return emptyAttri
@@ -371,91 +371,66 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
}
v := st.Version()
switch {
case v >= version.Gloas:
return payloadAttributesGloas(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:], headRoot)
case v >= version.Deneb:
return payloadAttributesDeneb(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:], headRoot)
case v >= version.Capella:
return payloadAttributesCapella(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:])
case v >= version.Bellatrix:
return payloadAttributesBellatrix(uint64(t.Unix()), prevRando, val.FeeRecipient[:])
default:
log.WithField("version", version.String(v)).Error("Could not get payload attribute due to unknown state version")
return payloadattribute.EmptyWithVersion(v)
}
}
// payloadAttributesGloas builds V3 payload attributes for a Gloas state.
// It sources withdrawals from WithdrawalsForPayload (unlike pre-Gloas forks,
// which use ExpectedWithdrawals). On any failure it logs the error and
// returns an empty attributer for the state's version instead of failing.
func payloadAttributesGloas(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient, parentBeaconBlockRoot []byte) payloadattribute.Attributer {
	withdrawals, err := st.WithdrawalsForPayload()
	if err != nil {
		log.WithError(err).Error("Could not get payload withdrawals to get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
		Timestamp: timestamp,
		PrevRandao: prevRandao,
		SuggestedFeeRecipient: feeRecipient,
		Withdrawals: withdrawals,
		ParentBeaconBlockRoot: parentBeaconBlockRoot,
	})
	if err != nil {
		log.WithError(err).Error("Could not get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	return attr
}
if v >= version.Deneb {
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
// payloadAttributesDeneb builds V3 payload attributes for a Deneb-or-later
// (pre-Gloas) state, sourcing withdrawals from ExpectedWithdrawals. On any
// failure it logs the error and returns an empty attributer for the state's
// version instead of failing.
func payloadAttributesDeneb(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient, parentBeaconBlockRoot []byte) payloadattribute.Attributer {
	withdrawals, _, err := st.ExpectedWithdrawals()
	if err != nil {
		log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
		Timestamp: timestamp,
		PrevRandao: prevRandao,
		SuggestedFeeRecipient: feeRecipient,
		Withdrawals: withdrawals,
		ParentBeaconBlockRoot: parentBeaconBlockRoot,
	})
	if err != nil {
		log.WithError(err).Error("Could not get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	return attr
}
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
Withdrawals: withdrawals,
ParentBeaconBlockRoot: headRoot,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
func payloadAttributesCapella(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient []byte) payloadattribute.Attributer {
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return payloadattribute.EmptyWithVersion(st.Version())
return attr
}
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: timestamp,
PrevRandao: prevRandao,
SuggestedFeeRecipient: feeRecipient,
Withdrawals: withdrawals,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return payloadattribute.EmptyWithVersion(st.Version())
}
return attr
}
func payloadAttributesBellatrix(timestamp uint64, prevRandao, feeRecipient []byte) payloadattribute.Attributer {
attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: timestamp,
PrevRandao: prevRandao,
SuggestedFeeRecipient: feeRecipient,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return payloadattribute.EmptyWithVersion(version.Bellatrix)
if v >= version.Capella {
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
Withdrawals: withdrawals,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
return attr
}
return attr
if v >= version.Bellatrix {
attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
return attr
}
log.WithField("version", version.String(st.Version())).Error("Could not get payload attribute due to unknown state version")
return emptyAttri
}
// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.

View File

@@ -717,14 +717,14 @@ func Test_GetPayloadAttribute(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -732,7 +732,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}
@@ -747,7 +747,7 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
}
@@ -757,14 +757,14 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateCapella(t, 1)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
// Cache hit, advance state, no fee recipient
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
@@ -775,7 +775,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
@@ -809,14 +809,14 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{}, []byte{})
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
@@ -827,7 +827,7 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()

View File

@@ -18,21 +18,19 @@ import (
"github.com/sirupsen/logrus"
)
func (s *Service) isNewHead(r [32]byte, full bool) bool {
func (s *Service) isNewHead(r [32]byte) bool {
s.headLock.RLock()
defer s.headLock.RUnlock()
currentHeadRoot := s.originBlockRoot
currentFull := false
if s.head != nil {
currentHeadRoot = s.headRoot()
currentFull = s.head.full
}
return r != currentHeadRoot || full != currentFull || r == [32]byte{}
return r != currentHeadRoot || r == [32]byte{}
}
func (s *Service) getStateAndBlock(ctx context.Context, r, h [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
if !s.hasBlockInInitSyncOrDB(ctx, r) {
return nil, nil, errors.New("block does not exist")
}
@@ -40,7 +38,7 @@ func (s *Service) getStateAndBlock(ctx context.Context, r, h [32]byte) (state.Be
if err != nil {
return nil, nil, err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, h)
headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, nil, err
}
@@ -72,7 +70,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot, false) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
@@ -83,7 +81,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
if s.isNewHead(fcuArgs.headRoot, false) {
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}

View File

@@ -19,42 +19,23 @@ import (
func TestService_isNewHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, true, service.isNewHead([32]byte{}))
// Zero root is always a new head
require.Equal(t, true, service.isNewHead([32]byte{}, false))
require.Equal(t, true, service.isNewHead([32]byte{}, true))
// Different root is a new head
service.head = &head{root: [32]byte{1}}
require.Equal(t, true, service.isNewHead([32]byte{2}, false))
// Same root and same full status is not a new head
require.Equal(t, false, service.isNewHead([32]byte{1}, false))
// Same root but different full status is a new head
require.Equal(t, true, service.isNewHead([32]byte{1}, true))
// Same root and both full is not a new head
service.head = &head{root: [32]byte{1}, full: true}
require.Equal(t, false, service.isNewHead([32]byte{1}, true))
// Same root, head is full but incoming is not full, is a new head
require.Equal(t, true, service.isNewHead([32]byte{1}, false))
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{1}))
// Nil head should use origin root
service.head = nil
service.originBlockRoot = [32]byte{3}
require.Equal(t, true, service.isNewHead([32]byte{2}, false))
require.Equal(t, false, service.isNewHead([32]byte{3}, false))
// Nil head with full=true is always a new head (originBlockRoot has full=false)
require.Equal(t, true, service.isNewHead([32]byte{3}, true))
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{3}))
}
func TestService_getHeadStateAndBlock(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{}, [32]byte{})
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{})
require.ErrorContains(t, "block does not exist", err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))

View File

@@ -1,42 +1,13 @@
package blockchain
import (
"context"
"math"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
func (s *Service) waitUntilEpoch(target primitives.Epoch, secondsPerSlot uint64) error {
if slots.ToEpoch(s.CurrentSlot()) >= target {
return nil
}
ticker := slots.NewSlotTicker(s.genesisTime, secondsPerSlot)
defer ticker.Done()
for {
select {
case slot := <-ticker.C():
if slots.ToEpoch(slot) >= target {
return nil
}
case <-s.ctx.Done():
return s.ctx.Err()
}
}
}
// getLookupParentRoot returns the root that serves as key to generate the parent state for the passed beacon block.
// if it is based on empty or it is pre-Gloas, it is the parent root of the block, otherwise if it is based on full it is
// the parent hash.
@@ -72,142 +43,3 @@ func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, err
}
return parentRoot, nil
}
func (s *Service) runLatePayloadTasks() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Failed to wait for initial sync")
return
}
cfg := params.BeaconConfig()
if cfg.GloasForkEpoch == math.MaxUint64 {
return
}
if err := s.waitUntilEpoch(cfg.GloasForkEpoch, cfg.SecondsPerSlot); err != nil {
return
}
offset := cfg.SlotComponentDuration(cfg.PayloadAttestationDueBPS)
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, offset, cfg.SecondsPerSlot)
defer ticker.Done()
for {
select {
case <-ticker.C():
s.latePayloadTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting late payload tasks routine")
return
}
}
}
func (s *Service) checkIfProposing(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
return s.trackedProposer(st, slot)
}
return cache.TrackedValidator{}, false
}
// This is a Gloas version of getPayloadAttribute that avoids all the clutter that was originally due to the proposer Index.
// It is guaranteed to be called for the current slot + 1 and the head state to have been advanced to at least the current epoch.
func (s *Service) getPayloadAttributeGloas(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, proposing := s.checkIfProposing(st, slot)
if !proposing {
return emptyAttri
}
st, err := transition.ProcessSlotsIfNeeded(ctx, st, accessRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return emptyAttri
}
// Get previous randao.
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return emptyAttri
}
// Get timestamp.
t, err := slots.StartTime(s.genesisTime, slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return emptyAttri
}
withdrawals, err := st.WithdrawalsForPayload()
if err != nil {
log.WithError(err).Error("Could not get payload withdrawals to get payload attribute")
return emptyAttri
}
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
Withdrawals: withdrawals,
ParentBeaconBlockRoot: headRoot,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
return attr
}
// latePayloadTasks updates the NSC and epoch boundary caches when there is no payload in the current slot (and there is a block)
// The case where the block was also missing would have been dealt by lateBlockTasks already.
// We call FCU only if we are proposing next slot, as the execution head is assumed to not have changed.
func (s *Service) latePayloadTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if currentSlot != s.HeadSlot() {
// We must've already sent a FCU and updated the caches in lateBlockTaks.
return
}
r, err := s.HeadRoot(ctx)
if err != nil {
log.WithError(err).Error("Failed to get head root")
return
}
hr := [32]byte(r)
if s.payloadBeingSynced.isSyncing(hr) {
return
}
if s.HasFullNode(hr) {
return
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
log.WithError(err).Error("Failed to get head state")
return
}
if !s.inRegularSync() {
return
}
attr := s.getPayloadAttributeGloas(ctx, st, currentSlot+1, r, r)
if attr == nil || attr.IsEmpty() {
return
}
beaconLatePayloadTaskTriggeredTotal.Inc()
// Head is the empty block.
bh, err := st.LatestBlockHash()
if err != nil {
log.WithError(err).Error("Could not get latest block hash to notify engine")
return
}
pid, err := s.notifyForkchoiceUpdateGloas(ctx, bh, attr)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return
}
if pid == nil {
log.Warn("Received nil payload ID from forkchoice update.")
return
}
var pId [8]byte
copy(pId[:], pid[:])
s.cfg.PayloadIDCache.Set(currentSlot+1, hr, pId)
}

View File

@@ -75,7 +75,7 @@ func prepareGloasForkchoiceState(
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
ProposerLookahead: make([]uint64, 64),
}
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
@@ -146,7 +146,7 @@ func testGloasState(t *testing.T, slot primitives.Slot, parentRoot [32]byte, blo
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
ProposerLookahead: make([]uint64, 64),
}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
@@ -446,37 +446,6 @@ func TestPostPayloadHeadUpdate_NotHead(t *testing.T) {
require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, headRoot[:]))
}
func TestPostPayloadHeadUpdate_SetsHeadFull(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
root := bytesutil.ToBytes32([]byte("root1"))
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
signed, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s.head = &head{root: root, block: signed, state: st, slot: 1}
require.Equal(t, false, s.head.full)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: root[:],
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:], ParentHash: make([]byte, 32)},
Slot: 1,
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, root[:]))
s.headLock.RLock()
require.Equal(t, true, s.head.full)
s.headLock.RUnlock()
}
func TestGetLookupParentRoot_PreGloas(t *testing.T) {
service, _ := minimalTestService(t)
@@ -647,71 +616,6 @@ func TestGetLookupParentRoot_GloasParentPreForkEpoch(t *testing.T) {
require.Equal(t, parentRoot, got)
}
func TestLatePayloadTasks_ReturnsEarlyWhenBlockLate(t *testing.T) {
logHook := logTest.NewGlobal()
service, tr := setupGloasService(t, &mockExecution.EngineClient{})
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
base.LatestBlockHash = blockHash[:]
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
headRoot := bytesutil.ToBytes32([]byte("headroot"))
service.head = &head{
root: headRoot,
state: st,
slot: 1,
}
// Set genesis time so CurrentSlot > HeadSlot.
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
service.latePayloadTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
// No payload ID should have been cached.
_, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
require.Equal(t, false, has)
}
func TestLatePayloadTasks_SendsFCU(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
base.LatestBlockHash = blockHash[:]
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
signed, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
headRoot := bytesutil.ToBytes32([]byte("headroot"))
service.head = &head{
root: headRoot,
block: signed,
state: st,
slot: 1,
}
// CurrentSlot == HeadSlot == 1: place genesis 1.5 slots ago so we're solidly in slot 1.
service.SetGenesisTime(time.Now().Add(-3 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second / 2))
service.SetForkChoiceGenesisTime(service.genesisTime)
service.latePayloadTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
require.LogsDoNotContain(t, logHook, "Could not get")
// Payload ID should have been cached.
cachedPid, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
require.Equal(t, true, has)
require.Equal(t, primitives.PayloadID(pid[:]), cachedPid)
}
func TestLateBlockTasks_GloasFCU(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
@@ -741,217 +645,4 @@ func TestLateBlockTasks_GloasFCU(t *testing.T) {
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
// Payload ID should have been cached by the Gloas FCU path.
cachedPid, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
require.Equal(t, true, has)
require.Equal(t, primitives.PayloadID(pid[:]), cachedPid)
}
// TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead verifies that saveHead does not
// treat the head as "full" when the latest execution payload bid was issued in a pre-fork epoch.
// This guards against the Fulu->Gloas upgrade-seeded bid (bid.BlockHash == latestBlockHash,
// bid.Slot == 0) causing a spurious full=true head before any real Gloas bid has been processed.
func TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 1
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
// Create a Gloas state where IsParentBlockFull()==true (bid.BlockHash == LatestBlockHash)
// but bid.Slot is 0 (epoch 0, pre-fork). This mimics the upgrade-seeded state.
base, blk := testGloasState(t, 1, parentRoot, blockHash)
base.LatestBlockHash = blockHash[:]
// bid.Slot defaults to 0, which is before GloasForkEpoch=1.
// Set a valid initial head so saveHead's headBlock() call does not panic.
// We do NOT insert the old block into forkchoice because insertGloasBlock
// would claim the tree root slot; the target block (parentRoot=ZeroHash) must
// be the first node inserted so it can become the tree root.
oldBlk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{})
oldSigned, err2 := blocks.NewSignedBeaconBlock(oldBlk)
require.NoError(t, err2)
oldSt, err2 := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
LatestBlockHeader: &ethpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
Eth1Data: &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment {
pp := make([]*ethpb.BuilderPendingPayment, 64)
for i := range pp {
pp[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}}
}
return pp
}(),
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
})
require.NoError(t, err2)
oldRoot := bytesutil.ToBytes32([]byte("oldroot1"))
service.head = &head{root: oldRoot, block: oldSigned, state: oldSt, slot: 0}
insertGloasBlock(t, service, base, blk, blockRoot)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
// Verify precondition: IsParentBlockFull() is true.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")
// Verify guard precondition: bid.Slot is pre-fork.
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
isPrefork := slots.ToEpoch(bid.Slot()) < params.BeaconConfig().GloasForkEpoch
require.Equal(t, true, isPrefork, "precondition: bid.Slot must be pre-fork")
ssigned, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// saveHead should NOT mark the head as full because bid.Slot < GloasForkEpoch.
require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))
service.headLock.RLock()
headFull := service.head.full
service.headLock.RUnlock()
require.Equal(t, false, headFull, "head must not be full for upgrade-seeded bid")
}
// TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead verifies that saveHead correctly
// marks the head as full when the latest bid is from a post-fork epoch.
func TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 1
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
forkSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
require.NoError(t, err)
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
// Set a valid initial head so saveHead's headBlock() call does not panic.
// Do NOT use insertGloasBlock for the old block — the target block must be
// the first node inserted so it can claim the tree root (parentRoot=ZeroHash).
oldBlk2 := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{})
oldSigned2, err2 := blocks.NewSignedBeaconBlock(oldBlk2)
require.NoError(t, err2)
oldSt2, err2 := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
LatestBlockHeader: &ethpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
Eth1Data: &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment {
pp := make([]*ethpb.BuilderPendingPayment, 64)
for i := range pp {
pp[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}}
}
return pp
}(),
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 64),
})
require.NoError(t, err2)
oldRoot2 := bytesutil.ToBytes32([]byte("oldroot2"))
service.head = &head{root: oldRoot2, block: oldSigned2, state: oldSt2, slot: 0}
base, blk := testGloasState(t, forkSlot+1, parentRoot, blockHash)
base.LatestBlockHash = blockHash[:]
// Set bid.Slot to a post-fork epoch slot.
base.LatestExecutionPayloadBid.Slot = forkSlot + 1
insertGloasBlock(t, service, base, blk, blockRoot)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
// Verify preconditions.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
isPostfork := slots.ToEpoch(bid.Slot()) >= params.BeaconConfig().GloasForkEpoch
require.Equal(t, true, isPostfork, "precondition: bid.Slot must be post-fork")
ssigned, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// saveHead SHOULD mark the head as full because bid.Slot >= GloasForkEpoch.
require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))
service.headLock.RLock()
headFull := service.head.full
service.headLock.RUnlock()
require.Equal(t, true, headFull, "head must be full for real post-fork bid")
}
// TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot verifies that lateBlockTasks
// uses headRoot (not LatestBlockHash) as the accessRoot when the bid is pre-fork epoch.
// Without this guard, the upgrade-seeded bid would cause lateBlockTasks to use the wrong
// access root for the next-slot cache.
func TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 1
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
// Make IsParentBlockFull() true: bid.BlockHash == LatestBlockHash.
base.LatestBlockHash = blockHash[:]
// bid.Slot is 0 (pre-fork epoch): the epoch guard should prevent using LatestBlockHash as accessRoot.
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
headRoot := bytesutil.ToBytes32([]byte("headroot"))
service.head = &head{
root: headRoot,
state: st,
slot: 1,
}
// Trigger late block logic: CurrentSlot > HeadSlot.
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
service.SetForkChoiceGenesisTime(service.genesisTime)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}

View File

@@ -50,7 +50,6 @@ type head struct {
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot primitives.Slot // the head block slot number
full bool // whether the head is post-CL or post-EL after Gloas
optimistic bool // optimistic status when saved head
}
@@ -61,24 +60,8 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Pre-Gloas we use empty for head because we still key states by blockroot
var full bool
var err error
if headState.Version() >= version.Gloas {
gloasFirstSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
if err != nil {
return errors.Wrap(err, "could not compute gloas first slot")
}
if headState.Slot() > gloasFirstSlot {
full, err = headState.IsParentBlockFull()
if err != nil {
return errors.Wrap(err, "could not determine if head is full or not")
}
}
}
// Do nothing if head hasn't changed.
if !s.isNewHead(newHeadRoot, full) {
if !s.isNewHead(newHeadRoot) {
return nil
}
@@ -174,7 +157,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
state: headState,
optimistic: isOptimistic,
slot: headBlock.Block().Slot(),
full: full,
}
if err := s.setHead(newHead); err != nil {
return errors.Wrap(err, "could not set head")
@@ -235,7 +217,6 @@ func (s *Service) setHead(newHead *head) error {
root: newHead.root,
block: bCp,
state: newHead.state.Copy(),
full: newHead.full,
optimistic: newHead.optimistic,
slot: newHead.slot,
}
@@ -352,16 +333,13 @@ func (s *Service) notifyNewHeadEvent(
if currentDutyDependentRoot == [32]byte{} {
currentDutyDependentRoot = s.originBlockRoot
}
var previousDutyDependentRoot [32]byte
previousDutyDependentRoot := currentDutyDependentRoot
if currEpoch > 0 {
previousDutyDependentRoot, err = s.DependentRoot(currEpoch.Sub(1))
if err != nil {
return errors.Wrap(err, "could not get duty dependent root")
}
}
if previousDutyDependentRoot == [32]byte{} {
previousDutyDependentRoot = s.originBlockRoot
}
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {

View File

@@ -213,7 +213,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: true,
PreviousDutyDependentRoot: srv.originBlockRoot[:],
PreviousDutyDependentRoot: make([]byte, 32),
CurrentDutyDependentRoot: srv.originBlockRoot[:],
}
require.DeepSSZEqual(t, wanted, eventHead)
@@ -243,35 +243,11 @@ func Test_notifyNewHeadEvent(t *testing.T) {
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: true,
PreviousDutyDependentRoot: srv.originBlockRoot[:],
PreviousDutyDependentRoot: params.BeaconConfig().ZeroHash[:],
CurrentDutyDependentRoot: srv.originBlockRoot[:],
}
require.DeepSSZEqual(t, wanted, eventHead)
})
t.Run("previous dependent root zero hash falls back to origin", func(t *testing.T) {
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
srv.originBlockRoot = [32]byte{0xab}
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadRoot := [32]byte{3}
st, blk, err = prepareForkchoiceState(t.Context(), 32, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadSlot := params.BeaconConfig().SlotsPerEpoch
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), newHeadSlot, []byte{2}, newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
require.Equal(t, true, ok)
// DependentRoot(0) returns zero hash since the forkchoice tree is sparse.
// The fix ensures it falls back to originBlockRoot instead of sending zeros.
assert.DeepEqual(t, srv.originBlockRoot[:], eventHead.PreviousDutyDependentRoot)
assert.DeepEqual(t, srv.originBlockRoot[:], eventHead.CurrentDutyDependentRoot)
})
}
func TestRetrieveHead_ReadOnly(t *testing.T) {

View File

@@ -77,10 +77,10 @@ func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]by
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
}
if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
}
if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
}
ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])

View File

@@ -234,25 +234,6 @@ var (
Help: "The maximum number of blobs allowed in a block.",
},
)
beaconExecutionPayloadEnvelopeValidTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_execution_payload_envelope_valid_total",
Help: "Count the number of execution payload envelopes that were processed successfully.",
})
beaconExecutionPayloadEnvelopeInvalidTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_execution_payload_envelope_invalid_total",
Help: "Count the number of execution payload envelopes that failed processing.",
})
beaconExecutionPayloadEnvelopeProcessingDurationSeconds = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_execution_payload_envelope_processing_duration_seconds",
Help: "Captures end-to-end processing time for execution payload envelopes.",
Buckets: prometheus.DefBuckets,
},
)
beaconLatePayloadTaskTriggeredTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_late_payload_task_triggered_total",
Help: "Count the number of times late payload tasks fired.",
})
)
// reportSlotMetrics reports slot related metrics.

View File

@@ -96,15 +96,6 @@ func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
}
}
// WithProposerPreferencesCache sets the proposer preferences cache used to
// look up fee recipient and gas limit from Gloas gossip preferences.
func WithProposerPreferencesCache(c *cache.ProposerPreferencesCache) Option {
return func(s *Service) error {
s.cfg.ProposerPreferencesCache = c
return nil
}
}
// WithAttestationCache for attestation lifecycle after chain inclusion.
func WithAttestationCache(c *cache.AttestationCache) Option {
return func(s *Service) error {

View File

@@ -108,7 +108,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
}
if cfg.roblock.Version() < version.Gloas {
s.sendFCU(cfg)
} else if s.isNewHead(cfg.headRoot, false) { // We reach this only when the incoming block is head.
} else if s.isNewHead(cfg.headRoot) {
if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Could not save head")
}
@@ -135,7 +135,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
var err error
preStateVersion := st.Version()
switch preStateVersion {
case version.Phase0, version.Altair, version.Gloas:
case version.Phase0, version.Altair:
default:
preStateHeader, err = st.LatestExecutionPayloadHeader()
if err != nil {
@@ -145,112 +145,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
// applyPayloadIfNeeded applies the parent block's execution payload envelope to
// preState when the current block's bid indicates it built on a full parent.
func (s *Service) applyPayloadIfNeeded(ctx context.Context, b interfaces.ReadOnlyBeaconBlock, parentRoot [32]byte, preState state.BeaconState) error {
if b.Version() < version.Gloas || parentRoot == [32]byte{} {
return nil
}
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
if err != nil {
return errors.Wrapf(err, "could not get parent block with root %#x", parentRoot)
}
if parentBlock.Version() < version.Gloas {
return nil
}
sb, err := b.Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get execution payload bid for block")
}
if sb == nil || sb.Message == nil {
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Slot())
}
parentBid, err := parentBlock.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrapf(err, "could not get execution payload bid for parent block with root %#x", parentRoot)
}
if parentBid == nil || parentBid.Message == nil {
return fmt.Errorf("missing execution payload bid for parent block with root %#x", parentRoot)
}
if !bytes.Equal(sb.Message.ParentBlockHash, parentBid.Message.BlockHash) {
return nil
}
signedEnvelope, err := s.cfg.BeaconDB.ExecutionPayloadEnvelope(ctx, parentRoot)
if err != nil {
return errors.Wrapf(err, "could not get execution payload envelope for parent block with root %#x", parentRoot)
}
if signedEnvelope == nil || signedEnvelope.Message == nil {
return nil
}
envelope, err := consensusblocks.WrappedROBlindedExecutionPayloadEnvelope(signedEnvelope.Message)
if err != nil {
return errors.Wrapf(err, "could not wrap blinded execution payload envelope for parent block with root %#x", parentRoot)
}
return gloas.ProcessBlindedExecutionPayload(ctx, preState, parentBlock.Block().StateRoot(), envelope)
}
// getBatchPrestate returns the pre-state to apply to the first beacon block in the batch and returns true if it applied the first envelope before
func (s *Service) getBatchPrestate(ctx context.Context, b consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope) (state.BeaconState, bool, error) {
if len(envelopes) == 0 || b.Version() < version.Gloas {
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
if err != nil {
return nil, false, errors.Wrap(err, "could not get block pre state")
}
return blockPreState, false, nil
}
parentRoot := b.Block().ParentRoot()
full, err := consensusblocks.BlockBuiltOnEnvelope(envelopes[0], b)
if err != nil {
return nil, false, errors.Wrap(err, "could not check if block builds on envelope")
}
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, parentRoot)
if err != nil {
return nil, false, errors.Wrap(err, "could not get block pre state")
}
if !full {
return blockPreState, false, nil
}
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent block")
}
if s.cfg.BeaconDB.HasExecutionPayloadEnvelope(ctx, parentRoot) {
// The parent envelope was already saved by a previous batch but the
// replayed state may not include it (replay skips the last block's
// envelope). Load the blinded form from DB and apply it.
blindedEnv, err := s.cfg.BeaconDB.ExecutionPayloadEnvelope(ctx, parentRoot)
if err != nil {
return nil, false, errors.Wrap(err, "could not load parent blinded envelope from DB")
}
wrappedEnv, err := consensusblocks.WrappedROBlindedExecutionPayloadEnvelope(blindedEnv.Message)
if err != nil {
return nil, false, errors.Wrap(err, "could not wrap blinded envelope")
}
if err := gloas.ProcessBlindedExecutionPayload(ctx, blockPreState, parentBlock.Block().StateRoot(), wrappedEnv); err != nil {
return nil, false, errors.Wrap(err, "could not apply parent blinded envelope from DB")
}
return blockPreState, true, nil
}
env, err := envelopes[0].Envelope()
if err != nil {
return nil, false, err
}
// notify the engine of the new envelope
if _, err := s.notifyNewEnvelope(ctx, blockPreState, env); err != nil {
return nil, false, err
}
if err := gloas.ProcessBlindedExecutionPayload(ctx, blockPreState, parentBlock.Block().StateRoot(), env); err != nil {
return nil, false, err
}
return blockPreState, true, nil
}
type versionAndHeader struct {
version int
header interfaces.ExecutionData
}
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -264,35 +159,16 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
b := blks[0].Block()
// Retrieve incoming block's pre state.
parentRoot := b.ParentRoot()
if err := s.verifyBlkPreState(ctx, parentRoot); err != nil {
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
return err
}
preState, applied, err := s.getBatchPrestate(ctx, blks[0], envelopes)
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
if err != nil {
return err
}
if preState == nil || preState.IsNil() {
return fmt.Errorf("nil pre state for slot %d", b.Slot())
}
var eidx int
var br [32]byte
sigSet := bls.NewSet()
if applied {
eidx = 1
envSigSet, err := gloas.ExecutionPayloadEnvelopeSignatureBatch(preState, envelopes[0])
if err != nil {
return err
}
sigSet.Join(envSigSet)
}
if eidx < len(envelopes) {
env, err := envelopes[eidx].Envelope()
if err != nil {
return err
}
br = env.BeaconBlockRoot()
}
// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
@@ -301,6 +177,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
sigSet := bls.NewSet()
type versionAndHeader struct {
version int
header interfaces.ExecutionData
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
var set *bls.SignatureBatch
@@ -322,23 +203,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
if err != nil {
return invalidBlock{error: err}
}
if b.Root() == br && eidx < len(envelopes) {
envSigSet, err := gloas.ProcessExecutionPayloadWithDeferredSig(ctx, preState, b.Block().StateRoot(), envelopes[eidx])
if err != nil {
return err
}
sigSet.Join(envSigSet)
eidx++
if eidx < len(envelopes) {
nextEnv, err := envelopes[eidx].Envelope()
if err != nil {
return err
}
br = nextEnv.BeaconBlockRoot()
} else {
br = [32]byte{}
}
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[b.Root()] = preState.Copy()
@@ -370,9 +234,56 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.New("batch block signature verification failed")
}
pendingNodes, isValidPayload, err := s.notifyEngineAndSaveData(ctx, blks, envelopes, avs, preVersionAndHeaders, postVersionAndHeaders, jCheckpoints, fCheckpoints)
if err != nil {
return err
// blocks have been verified, save them and call the engine
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
var isValidPayload bool
for i, b := range blks {
root := b.Root()
isValidPayload, err = s.notifyNewPayload(ctx,
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
// this call does not have the root in forkchoice yet.
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
preVersionAndHeaders[i].header, b); err != nil {
return err
}
}
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[i] = args
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: root[:],
}); err != nil {
tracing.AnnotateError(span, err)
return err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
}
// Save boundary states that will be useful for forkchoice
for r, st := range boundaries {
@@ -387,15 +298,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return err
}
// Insert all nodes to forkchoice
if applied {
env, err := envelopes[0].Envelope()
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertPayload(env); err != nil {
return errors.Wrap(err, "could not insert first payload in batch to forkchoice")
}
}
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
return errors.Wrap(err, "could not insert batch to forkchoice")
}
@@ -408,102 +310,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) notifyEngineAndSaveData(
ctx context.Context,
blks []consensusblocks.ROBlock,
envelopes []interfaces.ROSignedExecutionPayloadEnvelope,
avs das.AvailabilityChecker,
preVersionAndHeaders []*versionAndHeader,
postVersionAndHeaders []*versionAndHeader,
jCheckpoints []*ethpb.Checkpoint,
fCheckpoints []*ethpb.Checkpoint,
) ([]*forkchoicetypes.BlockAndCheckpoints, bool, error) {
span := trace.FromContext(ctx)
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
var isValidPayload bool
var err error
envMap := make(map[[32]byte]int, len(envelopes))
for i, e := range envelopes {
env, err := e.Envelope()
if err != nil {
return nil, false, err
}
envMap[env.BeaconBlockRoot()] = i
}
for i, b := range blks {
root := b.Root()
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
if b.Version() < version.Gloas {
isValidPayload, err = s.notifyNewPayload(ctx,
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return nil, false, s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
preVersionAndHeaders[i].header, b); err != nil {
return nil, false, err
}
}
} else {
idx, ok := envMap[root]
if ok {
env, err := envelopes[idx].Envelope()
if err != nil {
return nil, false, err
}
isValidPayload, err = s.notifyNewEnvelopeFromBlock(ctx, b, env)
if err != nil {
return nil, false, errors.Wrap(err, "could not notify new envelope from block")
}
args.HasPayload = true
bh := env.BlockHash()
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: bh[:],
}); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
}
}
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return nil, false, errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
}
pendingNodes[i] = args
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: root[:],
}); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
}
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, false, err
}
}
}
return pendingNodes, isValidPayload, nil
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
@@ -625,7 +431,7 @@ func (s *Service) updateCachesAndEpochBoundary(ctx context.Context, currentSlot
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.ReadOnlyBeaconState, blockRoot []byte) error {
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
// return early if we are advancing to a past epoch
@@ -686,7 +492,7 @@ func (s *Service) handleBlockPayloadAttestations(ctx context.Context, blk interf
if len(atts) == 0 {
return nil
}
committee, err := st.PayloadCommitteeReadOnly(blk.Slot() - 1)
committee, err := gloas.PayloadCommittee(ctx, st, blk.Slot()-1)
if err != nil {
return err
}
@@ -1216,11 +1022,9 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
var accessRoot [32]byte
isFull, err := headState.IsParentBlockFull()
gloasFirstSlot, _ := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
if err != nil || !isFull || headState.Slot() <= gloasFirstSlot {
if err != nil || !isFull {
accessRoot = headRoot
} else {
accessRoot, err = headState.LatestBlockHash()
@@ -1237,7 +1041,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
return
}
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:], accessRoot[:])
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
return
@@ -1249,35 +1053,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash")
return
}
id, err := s.notifyForkchoiceUpdateGloas(ctx, bh, attribute)
_, err = s.notifyForkchoiceUpdateGloas(ctx, bh, attribute)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
if id != nil {
s.cfg.PayloadIDCache.Set(s.CurrentSlot()+1, headRoot, [8]byte(*id))
} else {
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}
}

View File

@@ -1,7 +1,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"slices"
@@ -23,7 +22,6 @@ import (
mathutil "github.com/OffchainLabs/prysm/v7/math"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
@@ -46,7 +44,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig) (*fcuConfig, error) {
if err != nil {
return nil, err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:], cfg.headRoot[:])
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return fcuArgs, nil
}
@@ -91,7 +89,7 @@ func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]
// fcuArgsNonCanonicalBlock returns the arguments to the FCU call when the
// incoming block is non-canonical, that is, based on the head root.
func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig) (*fcuConfig, error) {
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot, cfg.headRoot)
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot)
if err != nil {
return nil, err
}
@@ -389,7 +387,6 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
return err
}
root := signed.Block().ParentRoot()
child := signed
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
@@ -403,33 +400,10 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
if err != nil {
return err
}
hasPayload := false
if roblock.Version() >= version.Gloas {
sbid, err := child.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", child.Block().Slot())
}
if sbid == nil || sbid.Message == nil {
return fmt.Errorf("missing execution payload bid for block at slot %d", child.Block().Slot())
}
parentBid, err := b.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", b.Block().Slot())
}
if parentBid == nil || parentBid.Message == nil {
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Block().Slot())
}
if bytes.Equal(sbid.Message.ParentBlockHash, parentBid.Message.BlockHash) {
hasPayload = true
}
}
root = b.Block().ParentRoot()
child = b
args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint,
HasPayload: hasPayload,
}
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
}
if len(pendingNodes) == 0 {

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
@@ -163,7 +164,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, err)
blks = append(blks, rwsb)
}
err := service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{})
err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})
require.NoError(t, err)
jcp := service.CurrentJustifiedCheckpt()
jroot := bytesutil.ToBytes32(jcp.Root)
@@ -193,7 +194,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
blks = append(blks, rwsb)
}
require.NoError(t, service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{}))
require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}))
}
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
@@ -2073,7 +2074,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{}))
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
@@ -3555,7 +3556,7 @@ func TestHandleBlockPayloadAttestations(t *testing.T) {
base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, insertBlk, blockRoot)
ptc, err := headState.PayloadCommitteeReadOnly(1)
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))

View File

@@ -134,64 +134,38 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
start = time.Now()
// return early if we haven't changed head
newHeadRoot, newHeadBlockHash, full, err := s.cfg.ForkChoiceStore.FullHead(ctx)
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithError(err).Error("Could not compute head from new attestations")
return
}
if !s.isNewHead(newHeadRoot, full) {
if !s.isNewHead(newHeadRoot) {
return
}
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
var accessRoot [32]byte
postGloas := slots.ToEpoch(proposingSlot) >= params.BeaconConfig().GloasForkEpoch
if full && postGloas {
accessRoot = newHeadBlockHash
} else {
accessRoot = newHeadRoot
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot, accessRoot)
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get head block and state")
log.WithError(err).Error("Could not get head block")
return
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
fcuArgs := &fcuConfig{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock,
proposingSlot: proposingSlot,
}
if s.inRegularSync() {
attr := s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:], accessRoot[:])
if attr != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if postGloas {
go func() {
pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, newHeadBlockHash, attr)
if err != nil {
log.WithError(err).Error("Could not update forkchoice with engine")
}
if pid == nil {
if attr != nil {
log.Warn("Engine did not return a payload ID for the fork choice update with attributes")
}
return
}
var pId [8]byte
copy(pId[:], pid[:])
s.cfg.PayloadIDCache.Set(proposingSlot, newHeadRoot, pId)
}()
} else {
fcuArgs := &fcuConfig{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock,
proposingSlot: proposingSlot,
attributes: attr,
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if err := s.saveHead(s.ctx, newHeadRoot, headBlock, headState); err != nil {
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, headState, headBlock)
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
// This processes fork choice attestations from the pool to account for validator votes and fork choice.

View File

@@ -41,7 +41,7 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -153,7 +153,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
log.WithError(err).Error("Could not check save hot state DB")
return errors.Wrap(err, "check save hot state db")
}
// We apply the same heuristic to some of our more important caches.
@@ -366,14 +366,12 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
}
}()
}
go s.checkpointStateCache.EvictUpTo(finalized.Epoch)
}
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
@@ -381,7 +379,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks, envelopes, avs); err != nil {
if err := s.onBlockBatch(ctx, blocks, avs); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err
@@ -421,15 +419,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
for _, e := range envelopes {
protoEnv, ok := e.Proto().(*ethpb.SignedExecutionPayloadEnvelope)
if !ok {
return errors.New("could not type assert signed envelope to proto")
}
if err := s.cfg.BeaconDB.SaveExecutionPayloadEnvelope(ctx, protoEnv); err != nil {
return errors.Wrap(err, "could not save execution payload envelope")
}
}
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore

View File

@@ -281,7 +281,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{})
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
@@ -12,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -33,18 +31,9 @@ type ExecutionPayloadEnvelopeReceiver interface {
}
// ReceiveExecutionPayloadEnvelope processes a signed execution payload envelope for the Gloas fork.
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) (err error) {
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveExecutionPayloadEnvelope")
defer span.End()
start := time.Now()
defer func() {
beaconExecutionPayloadEnvelopeProcessingDurationSeconds.Observe(time.Since(start).Seconds())
if err != nil {
beaconExecutionPayloadEnvelopeInvalidTotal.Inc()
return
}
beaconExecutionPayloadEnvelopeValidTotal.Inc()
}()
envelope, err := signed.Envelope()
if err != nil {
@@ -150,7 +139,6 @@ func (s *Service) postPayloadHeadUpdate(ctx context.Context, envelope interfaces
s.headLock.Lock()
s.head.state = st
s.head.full = true
s.headLock.Unlock()
go func() {
@@ -164,7 +152,7 @@ func (s *Service) postPayloadHeadUpdate(ctx context.Context, envelope interfaces
}
}()
attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot, blockHash[:])
attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot)
if s.inRegularSync() {
go func() {
pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, blockHash, attr)
@@ -203,50 +191,6 @@ func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, envelope inter
return preState, nil
}
func (s *Service) callNewPayload(
ctx context.Context,
payload interfaces.ExecutionData,
versionedHashes []common.Hash,
parentRoot common.Hash,
requests *enginev1.ExecutionRequests,
slot primitives.Slot,
) (bool, error) {
_, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests, slot)
if err == nil {
return true, nil
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
log.WithFields(logrus.Fields{
"slot": slot,
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic envelope")
return false, nil
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
return false, invalidBlock{error: ErrInvalidPayload}
}
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
func (s *Service) notifyNewEnvelopeFromBlock(ctx context.Context, b blocks.ROBlock, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelopeFromBlock")
defer span.End()
payload, err := envelope.Execution()
if err != nil {
return false, errors.Wrap(err, "could not get execution payload from envelope")
}
sbid, err := b.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return false, errors.Wrap(err, "could not get signed execution payload bid from block")
}
versionedHashes := make([]common.Hash, len(sbid.Message.BlobKzgCommitments))
for i, c := range sbid.Message.BlobKzgCommitments {
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
}
return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(b.Block().ParentRoot()), envelope.ExecutionRequests(), envelope.Slot())
}
// The returned boolean indicates whether the payload was valid or if it was accepted as syncing (optimistic).
func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelope")
@@ -256,6 +200,7 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
if err != nil {
return false, errors.Wrap(err, "could not get execution payload from envelope")
}
latestBid, err := st.LatestExecutionPayloadBid()
if err != nil {
return false, errors.Wrap(err, "could not get latest execution payload bid")
@@ -265,7 +210,25 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
for i, c := range commitments {
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
}
return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot)), envelope.ExecutionRequests(), envelope.Slot())
parentRoot := common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot))
requests := envelope.ExecutionRequests()
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests)
if err == nil {
return true, nil
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
log.WithFields(logrus.Fields{
"slot": envelope.Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic envelope")
return false, nil
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
return false, invalidBlock{error: ErrInvalidPayload}
}
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
func (s *Service) validateExecutionOnEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {

View File

@@ -2,6 +2,7 @@ package blockchain
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -28,12 +29,16 @@ func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb
if err != nil {
return err
}
idx, err := gloas.PayloadCommitteeIndex(ctx, st, a.Data.Slot, a.ValidatorIndex)
ptc, err := gloas.PayloadCommittee(ctx, st, a.Data.Slot)
if err != nil {
return err
}
idx := slices.Index(ptc, a.ValidatorIndex)
if idx == -1 {
return errors.New("validator not in PTC")
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetPTCVote(root, idx, a.Data.PayloadPresent, a.Data.BlobDataAvailable)
s.cfg.ForkChoiceStore.SetPTCVote(root, uint64(idx), a.Data.PayloadPresent, a.Data.BlobDataAvailable)
return nil
}

View File

@@ -3,6 +3,7 @@ package blockchain
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -50,7 +51,7 @@ func TestReceivePayloadAttestationMessage_ValidatorNotInPTC(t *testing.T) {
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
ptc, err := headState.PayloadCommitteeReadOnly(1)
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
// Pick a validator index not in the PTC.
@@ -99,7 +100,7 @@ func TestReceivePayloadAttestationMessage_OK(t *testing.T) {
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
ptc, err := headState.PayloadCommitteeReadOnly(1)
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))

View File

@@ -73,30 +73,29 @@ type Service struct {
// config options for the service.
type config struct {
BeaconBlockBuf int
ChainStartFetcher execution.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache cache.DepositCache
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
ProposerPreferencesCache *cache.ProposerPreferencesCache
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher execution.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
BeaconBlockBuf int
ChainStartFetcher execution.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache cache.DepositCache
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher execution.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
}
// Checker is an interface used to determine if a node is in initial sync
@@ -214,7 +213,6 @@ func (s *Service) Start() {
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
go s.runLatePayloadTasks()
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -345,7 +343,7 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
return errors.Wrap(err, "could not get head state")
}
}
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false, false}); err != nil {
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false}); err != nil {
return errors.Wrap(err, "could not set head")
}
log.WithFields(logrus.Fields{
@@ -434,7 +432,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisState,
genesisBlk.Block().Slot(),
false,
false,
}); err != nil {
log.WithError(err).Fatal("Could not set head")
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
@@ -78,12 +77,8 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
return nil
}
resolveChainPayloadStatus(chain)
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.markFinalizedRootFull(chain, fRoot); err != nil {
log.WithError(err).Error("Could not mark finalized root as full in forkchoice")
}
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
}
@@ -150,68 +145,6 @@ func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
return nil
}
// resolveChainPayloadStatus determines which blocks in the chain had their
// execution payloads delivered by checking if consecutive blocks' bids indicate
// payload delivery. For each pair of blocks (chain[i], chain[i+1]), if the next
// block's bid parentBlockHash equals the current block's bid blockHash, the
// current block's payload was delivered.
func resolveChainPayloadStatus(chain []*forkchoicetypes.BlockAndCheckpoints) {
for i := 0; i < len(chain)-1; i++ {
curr := chain[i].Block.Block()
next := chain[i+1].Block.Block()
if curr.Version() < version.Gloas || next.Version() < version.Gloas {
continue
}
currBid, err := curr.Body().SignedExecutionPayloadBid()
if err != nil || currBid == nil || currBid.Message == nil {
continue
}
nextBid, err := next.Body().SignedExecutionPayloadBid()
if err != nil || nextBid == nil || nextBid.Message == nil {
continue
}
if bytes.Equal(nextBid.Message.ParentBlockHash, currBid.Message.BlockHash) {
chain[i].HasPayload = true
}
}
}
// markFinalizedRootFull checks whether the finalized root block's execution
// payload was delivered by inspecting the first block in the chain. If the first
// block's bid parentBlockHash equals the finalized block's bid blockHash, the
// finalized block's payload was delivered and a full node must be created in
// forkchoice. The caller must hold the forkchoice lock.
func (s *Service) markFinalizedRootFull(chain []*forkchoicetypes.BlockAndCheckpoints, fRoot [32]byte) error {
if len(chain) == 0 {
return nil
}
firstBlock := chain[0].Block.Block()
if firstBlock.Version() < version.Gloas {
return nil
}
firstBid, err := firstBlock.Body().SignedExecutionPayloadBid()
if err != nil || firstBid == nil || firstBid.Message == nil {
return nil
}
fBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
if fBlock.Block().Version() < version.Gloas {
return nil
}
fBid, err := fBlock.Block().Body().SignedExecutionPayloadBid()
if err != nil || fBid == nil || fBid.Message == nil {
return nil
}
if !bytes.Equal(firstBid.Message.ParentBlockHash, fBid.Message.BlockHash) {
return nil
}
// The finalized block's payload was delivered. Create the full node.
s.cfg.ForkChoiceStore.MarkFullNode(fRoot)
return nil
}
func (s *Service) setupForkchoiceCheckpoints() error {
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
@@ -233,11 +166,11 @@ func (s *Service) setupForkchoiceCheckpoints() error {
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
log.WithError(err).Error("Could not update forkchoice's justified checkpoint, trying to update finalized checkpoint anyway")
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: fRoot}); err != nil {
log.WithError(err).Error("Could not update forkchoice's finalized checkpoint")
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
return nil

View File

@@ -94,11 +94,6 @@ func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []bl
return nil
}
func (mb *mockBroadcaster) BroadcastForEpoch(_ context.Context, _ proto.Message, _ primitives.Epoch) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

View File

@@ -77,14 +77,8 @@ type ChainService struct {
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
DependentRootCB func([32]byte, primitives.Epoch) ([32]byte, error)
MockCanonicalRoots map[primitives.Slot][32]byte
MockCanonicalFull map[primitives.Slot]bool
MockPayloadContentLookup map[[32]byte][32]byte
MockPayloadContentIsFull map[[32]byte]bool
ParentPayloadReadyVal *bool
ForkchoiceRoots map[[32]byte]bool
ForkchoiceBlockHashes map[[32]byte][32]byte
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -282,7 +276,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ []interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityChecker) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
if s.State == nil {
return ErrNilState
}
@@ -594,16 +588,6 @@ func (s *ChainService) InForkchoice(root [32]byte) bool {
return !s.NotFinalized
}
// BlockHash mocks the execution payload block hash lookup for a beacon block root.
func (s *ChainService) BlockHash(root [32]byte) ([32]byte, error) {
if s.ForkchoiceBlockHashes != nil {
if blockHash, ok := s.ForkchoiceBlockHashes[root]; ok {
return blockHash, nil
}
}
return [32]byte{}, errors.New("block hash not found")
}
// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
s.OptimisticCheckRootReceived = root
@@ -661,7 +645,7 @@ func prepareForkchoiceState(
}
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoUnsafeBellatrix(base)
st, err := state_native.InitializeFromProtoBellatrix(base)
if err != nil {
return nil, blocks.ROBlock{}, err
}
@@ -720,60 +704,7 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HighestReceivedBlockSlot()
}
if s.Slot != nil {
return *s.Slot
}
return s.BlockSlot
}
// HighestReceivedBlockRoot mocks the same method in the chain service
func (s *ChainService) HighestReceivedBlockRoot() [32]byte {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HighestReceivedBlockRoot()
}
if s.Slot != nil && s.MockCanonicalRoots != nil {
if root, ok := s.MockCanonicalRoots[*s.Slot]; ok {
return root
}
}
if len(s.Root) == 32 {
return bytesutil.ToBytes32(s.Root)
}
return [32]byte{}
}
// HasFullNode mocks the same method in the chain service
func (s *ChainService) HasFullNode(root [32]byte) bool {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HasFullNode(root)
}
if s.Slot != nil && s.MockCanonicalRoots != nil && s.MockCanonicalFull != nil {
if r, ok := s.MockCanonicalRoots[*s.Slot]; ok && r == root {
return s.MockCanonicalFull[*s.Slot]
}
}
if s.ForkchoiceRoots != nil {
return s.ForkchoiceRoots[root]
}
return false
}
// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
func (s *ChainService) ShouldIgnoreData(_ [32]byte, _ primitives.Slot) bool {
return false
}
// PayloadContentLookup mocks the same method in the chain service.
func (s *ChainService) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.PayloadContentLookup(root)
}
if s.MockPayloadContentLookup != nil {
if value, ok := s.MockPayloadContentLookup[root]; ok {
return value, s.MockPayloadContentIsFull[root]
}
}
return root, false
return 0
}
// InsertNode mocks the same method in the chain service
@@ -868,10 +799,7 @@ func (s *ChainService) ParentPayloadReady(_ interfaces.ReadOnlyBeaconBlock) bool
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
if c.DependentRootCB != nil {
return c.DependentRootCB(root, epoch)
}
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
}
@@ -880,17 +808,6 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
return c.TargetRoot, nil
}
func (c *ChainService) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
var root [32]byte
if c.MockCanonicalRoots != nil {
root = c.MockCanonicalRoots[slot]
}
if c.MockCanonicalFull != nil {
return root, c.MockCanonicalFull[slot]
}
return root, false
}
// MockSyncChecker is a mock implementation of blockchain.Checker.
// We can't make an assertion here that this is true because that would create a circular dependency.
type MockSyncChecker struct {

View File

@@ -8,35 +8,11 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// proposerPreference returns a TrackedValidator from the ProposerPreferencesCache
// if a preference exists for the given slot.
func (s *Service) proposerPreference(slot primitives.Slot) (cache.TrackedValidator, bool) {
if s.cfg.ProposerPreferencesCache == nil {
return cache.TrackedValidator{}, false
}
pref, ok := s.cfg.ProposerPreferencesCache.Get(slot)
if !ok {
return cache.TrackedValidator{}, false
}
var feeRecipient primitives.ExecutionAddress
copy(feeRecipient[:], pref.FeeRecipient)
return cache.TrackedValidator{Active: true, FeeRecipient: feeRecipient, GasLimit: pref.GasLimit}, true
}
// trackedProposer returns whether the beacon node was informed, via the
// validators/prepare_proposer endpoint, of the proposer at the given slot.
// It only returns true if the tracked proposer is present and active.
//
// When PrepareAllPayloads is enabled, the node prepares payloads for every
// slot. After the Gloas fork, proposers broadcast their preferences (fee
// recipient, gas limit) via gossip into the ProposerPreferencesCache. When
// available, these preferences supply the fee recipient; otherwise the
// default (burn address) is used.
func (s *Service) trackedProposer(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
if features.Get().PrepareAllPayloads {
if val, ok := s.proposerPreference(slot); ok {
return val, true
}
return cache.TrackedValidator{Active: true}, true
}
id, err := helpers.BeaconProposerIndexAtSlot(s.ctx, st, slot)
@@ -47,8 +23,5 @@ func (s *Service) trackedProposer(st state.ReadOnlyBeaconState, slot primitives.
if !ok {
return cache.TrackedValidator{}, false
}
if pref, ok := s.proposerPreference(slot); ok {
return pref, true
}
return val, val.Active
}

View File

@@ -1,83 +0,0 @@
package blockchain
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/ethereum/go-ethereum/common"
)
func TestTrackedProposer_NotTracked(t *testing.T) {
service, _ := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
_, ok := service.trackedProposer(st, 0)
require.Equal(t, false, ok)
}
func TestTrackedProposer_Tracked(t *testing.T) {
service, _ := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
addr := common.HexToAddress("0x1234")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(addr), Index: 0})
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
require.Equal(t, primitives.ExecutionAddress(addr), val.FeeRecipient)
}
func TestTrackedProposer_PrepareAllPayloads_Default(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{PrepareAllPayloads: true})
defer resetCfg()
service, _ := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
require.Equal(t, true, val.Active)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(val.FeeRecipient[:]).String())
}
func TestTrackedProposer_PrepareAllPayloads_WithProposerPreference(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{PrepareAllPayloads: true})
defer resetCfg()
prefCache := cache.NewProposerPreferencesCache()
service, _ := minimalTestService(t,
WithPayloadIDCache(cache.NewPayloadIDCache()),
WithProposerPreferencesCache(prefCache),
)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
addr := common.HexToAddress("0xabcd")
prefCache.Add(0, addr.Bytes(), 42_000_000)
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
require.Equal(t, true, val.Active)
require.Equal(t, primitives.ExecutionAddress(addr), val.FeeRecipient)
require.Equal(t, uint64(42_000_000), val.GasLimit)
}
func TestTrackedProposer_TrackedWithProposerPreferenceOverride(t *testing.T) {
prefCache := cache.NewProposerPreferencesCache()
service, _ := minimalTestService(t,
WithPayloadIDCache(cache.NewPayloadIDCache()),
WithProposerPreferencesCache(prefCache),
)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
trackedAddr := common.HexToAddress("0x1111")
prefAddr := common.HexToAddress("0x2222")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(trackedAddr), Index: 0})
prefCache.Add(0, prefAddr.Bytes(), 50_000_000)
val, ok := service.trackedProposer(st, 0)
require.Equal(t, true, ok)
// Proposer preference overrides tracked validator.
require.Equal(t, primitives.ExecutionAddress(prefAddr), val.FeeRecipient)
require.Equal(t, uint64(50_000_000), val.GasLimit)
}

View File

@@ -15,7 +15,6 @@ go_library(
"common.go",
"doc.go",
"error.go",
"highest_execution_payload_bid.go",
"interfaces.go",
"log.go",
"payload_attestation.go",
@@ -23,7 +22,6 @@ go_library(
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
"proposer_preferences.go",
"registration.go",
"skip_slot_cache.go",
"subnet_ids.go",
@@ -57,7 +55,6 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
@@ -80,12 +77,10 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"highest_execution_payload_bid_test.go",
"payload_attestation_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
"proposer_preferences_test.go",
"registration_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",

View File

@@ -9,11 +9,10 @@ import (
)
type AttestationConsensusData struct {
Slot primitives.Slot
HeadRoot []byte
Target forkchoicetypes.Checkpoint
Source forkchoicetypes.Checkpoint
IsPayloadFull bool
Slot primitives.Slot
HeadRoot []byte
Target forkchoicetypes.Checkpoint
Source forkchoicetypes.Checkpoint
}
// AttestationDataCache stores cached results of AttestationData requests.

View File

@@ -3,10 +3,8 @@ package cache
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -27,14 +25,6 @@ var (
Name: "check_point_state_cache_hit",
Help: "The number of check point state requests that are present in the cache.",
})
checkpointStateSize = promauto.NewGauge(prometheus.GaugeOpts{
Name: "check_point_state_cache_size",
Help: "The number of entries in the check point state cache.",
})
checkpointStateEvicted = promauto.NewCounter(prometheus.CounterOpts{
Name: "check_point_state_cache_evicted_total",
Help: "The number of entries evicted from the check point state cache.",
})
)
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
@@ -59,14 +49,14 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
item, exists := c.cache.Get(h)
if !exists || item == nil {
checkpointStateMiss.Inc()
return nil, nil
if exists && item != nil {
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
return item.(state.BeaconState), nil
}
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
return item.(state.BeaconState), nil
checkpointStateMiss.Inc()
return nil, nil
}
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
@@ -76,35 +66,6 @@ func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.
if err != nil {
return err
}
c.cache.Add(h, s)
checkpointStateSize.Set(float64(c.cache.Len()))
return nil
}
// EvictUpTo removes all entries from the cache whose state epoch is at
// or before the given epoch. Returns the number of evicted entries.
func (c *CheckpointStateCache) EvictUpTo(epoch primitives.Epoch) int {
evicted := 0
for _, key := range c.cache.Keys() {
// Peek is used here to avoid updating the recency of the entry,
// as we are only checking for eviction.
v, ok := c.cache.Peek(key)
if !ok {
continue
}
st := v.(state.ReadOnlyBeaconState)
if slots.ToEpoch(st.Slot()) <= epoch {
c.cache.Remove(key)
evicted++
}
}
if evicted > 0 {
checkpointStateSize.Set(float64(c.cache.Len()))
checkpointStateEvicted.Add(float64(evicted))
}
return evicted
}

View File

@@ -72,76 +72,3 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
}
func TestCheckpointStateCache_EvictFinalized_FinalizedEntry(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp, st))
evicted := c.EvictUpTo(1)
assert.Equal(t, 1, evicted, "expected finalized entry to be evicted")
s, err := c.StateByCheckpoint(cp)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cache to be empty after eviction")
}
func TestCheckpointStateCache_EvictFinalized_NotFinalizedEntry(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp, st))
evicted := c.EvictUpTo(3)
assert.Equal(t, 0, evicted, "expected non-finalized entry NOT to be evicted")
s, err := c.StateByCheckpoint(cp)
require.NoError(t, err)
assert.NotNil(t, s, "expected entry to still be in cache")
}
func TestCheckpointStateCache_EvictFinalized_Mixed(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st1, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
require.NoError(t, err)
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
st2, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 64})
require.NoError(t, err)
cp5 := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'C'}, 32)}
st5, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp1, st1))
require.NoError(t, c.AddCheckpointState(cp2, st2))
require.NoError(t, c.AddCheckpointState(cp5, st5))
evicted := c.EvictUpTo(3)
assert.Equal(t, 2, evicted, "expected epochs 1 and 2 to be evicted")
s, err := c.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cp1 to be evicted")
s, err = c.StateByCheckpoint(cp2)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cp2 to be evicted")
s, err = c.StateByCheckpoint(cp5)
require.NoError(t, err)
assert.NotNil(t, s, "expected cp5 to still be in cache")
}
func TestCheckpointStateCache_EvictFinalized_EmptyCache(t *testing.T) {
c := cache.NewCheckpointStateCache()
evicted := c.EvictUpTo(0)
assert.Equal(t, 0, evicted, "expected no eviction from empty cache")
}

View File

@@ -1,76 +0,0 @@
package cache
import (
"sync"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
type executionPayloadBidKey struct {
slot primitives.Slot
parentHash [32]byte
parentRoot [32]byte
}
// HighestExecutionPayloadBidCache stores the highest bid for each
// (slot, parent_block_hash, parent_block_root) tuple.
type HighestExecutionPayloadBidCache struct {
bids map[executionPayloadBidKey]*ethpb.SignedExecutionPayloadBid
lock sync.RWMutex
}
// NewHighestExecutionPayloadBidCache initializes a highest-bid cache.
func NewHighestExecutionPayloadBidCache() *HighestExecutionPayloadBidCache {
return &HighestExecutionPayloadBidCache{
bids: make(map[executionPayloadBidKey]*ethpb.SignedExecutionPayloadBid),
}
}
// Get returns the highest cached bid for the given tuple.
func (c *HighestExecutionPayloadBidCache) Get(
slot primitives.Slot,
parentHash [32]byte,
parentRoot [32]byte,
) (*ethpb.SignedExecutionPayloadBid, bool) {
c.lock.RLock()
defer c.lock.RUnlock()
bid, ok := c.bids[executionPayloadBidKey{
slot: slot,
parentHash: parentHash,
parentRoot: parentRoot,
}]
return bid, ok
}
// SetIfHigher inserts the bid if absent, or replaces the cached bid only if
// the incoming value is strictly greater.
func (c *HighestExecutionPayloadBidCache) SetIfHigher(bid *ethpb.SignedExecutionPayloadBid) bool {
c.lock.Lock()
defer c.lock.Unlock()
key := executionPayloadBidKey{
slot: bid.Message.Slot,
parentHash: [32]byte(bid.Message.ParentBlockHash),
parentRoot: [32]byte(bid.Message.ParentBlockRoot),
}
cached, ok := c.bids[key]
if !ok || bid.Message.Value > cached.Message.Value {
c.bids[key] = bid
return true
}
return false
}
// PruneBefore removes all cached bids for slots before the provided slot.
func (c *HighestExecutionPayloadBidCache) PruneBefore(slot primitives.Slot) {
c.lock.Lock()
defer c.lock.Unlock()
for key := range c.bids {
if key.slot < slot {
delete(c.bids, key)
}
}
}

View File

@@ -1,105 +0,0 @@
package cache
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestHighestExecutionPayloadBidCache_GetSetIfHigher(t *testing.T) {
c := NewHighestExecutionPayloadBidCache()
bid := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
inserted := c.SetIfHigher(bid)
require.Equal(t, true, inserted)
got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
require.Equal(t, true, ok)
require.DeepEqual(t, bid, got)
}
func TestHighestExecutionPayloadBidCache_SetIfHigher_ReplacesOnlyOnHigherValue(t *testing.T) {
c := NewHighestExecutionPayloadBidCache()
low := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
same := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
high := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 101)
require.Equal(t, true, c.SetIfHigher(low))
require.Equal(t, false, c.SetIfHigher(same))
got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
require.Equal(t, true, ok)
require.DeepEqual(t, low, got)
require.Equal(t, true, c.SetIfHigher(high))
got, ok = c.Get(10, [32]byte{0x01}, [32]byte{0x02})
require.Equal(t, true, ok)
require.DeepEqual(t, high, got)
}
func TestHighestExecutionPayloadBidCache_SetIfHigher_KeepsDistinctTuples(t *testing.T) {
c := NewHighestExecutionPayloadBidCache()
first := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
second := testSignedExecutionPayloadBid(10, [32]byte{0x03}, [32]byte{0x02}, 50)
third := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x04}, 75)
require.Equal(t, true, c.SetIfHigher(first))
require.Equal(t, true, c.SetIfHigher(second))
require.Equal(t, true, c.SetIfHigher(third))
got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
require.Equal(t, true, ok)
require.DeepEqual(t, first, got)
got, ok = c.Get(10, [32]byte{0x03}, [32]byte{0x02})
require.Equal(t, true, ok)
require.DeepEqual(t, second, got)
got, ok = c.Get(10, [32]byte{0x01}, [32]byte{0x04})
require.Equal(t, true, ok)
require.DeepEqual(t, third, got)
}
func TestHighestExecutionPayloadBidCache_PruneBefore(t *testing.T) {
c := NewHighestExecutionPayloadBidCache()
oldBid := testSignedExecutionPayloadBid(9, [32]byte{0x01}, [32]byte{0x02}, 100)
currentBid := testSignedExecutionPayloadBid(10, [32]byte{0x03}, [32]byte{0x04}, 101)
require.Equal(t, true, c.SetIfHigher(oldBid))
require.Equal(t, true, c.SetIfHigher(currentBid))
c.PruneBefore(10)
_, ok := c.Get(9, [32]byte{0x01}, [32]byte{0x02})
require.Equal(t, false, ok)
got, ok := c.Get(10, [32]byte{0x03}, [32]byte{0x04})
require.Equal(t, true, ok)
require.DeepEqual(t, currentBid, got)
}
func testSignedExecutionPayloadBid(
slot primitives.Slot,
parentHash [32]byte,
parentRoot [32]byte,
value uint64,
) *ethpb.SignedExecutionPayloadBid {
return &ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
Slot: slot,
ParentBlockHash: bytes.Clone(parentHash[:]),
ParentBlockRoot: bytes.Clone(parentRoot[:]),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
GasLimit: 30_000_000,
BuilderIndex: 1,
Value: primitives.Gwei(value),
ExecutionPayment: 10,
},
Signature: bytes.Repeat([]byte{0x06}, 96),
}
}

View File

@@ -1,87 +0,0 @@
package cache
import (
"sync"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// ProposerPreference stores the proposer fee recipient and gas limit for a slot.
type ProposerPreference struct {
FeeRecipient []byte
GasLimit uint64
}
// ProposerPreferencesCache stores proposer preferences by slot.
type ProposerPreferencesCache struct {
slotToPreferences map[primitives.Slot]ProposerPreference
lock sync.RWMutex
}
// NewProposerPreferencesCache initializes a proposer preferences cache.
func NewProposerPreferencesCache() *ProposerPreferencesCache {
return &ProposerPreferencesCache{
slotToPreferences: make(map[primitives.Slot]ProposerPreference),
}
}
// Add stores proposer preferences for a slot. If the slot already exists, the
// existing value is kept and false is returned.
func (c *ProposerPreferencesCache) Add(slot primitives.Slot, feeRecipient []byte, gasLimit uint64) bool {
c.lock.Lock()
defer c.lock.Unlock()
if _, ok := c.slotToPreferences[slot]; ok {
return false
}
// FeeRecipient comes from validated SSZ-decoded proposer preferences, so
// retaining the slice reference here is intentional.
c.slotToPreferences[slot] = ProposerPreference{
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
}
return true
}
// Get returns proposer preferences for a slot.
func (c *ProposerPreferencesCache) Get(slot primitives.Slot) (ProposerPreference, bool) {
c.lock.RLock()
defer c.lock.RUnlock()
pref, ok := c.slotToPreferences[slot]
if !ok {
return ProposerPreference{}, false
}
return pref, true
}
// Has returns true if proposer preferences for the slot already exist.
func (c *ProposerPreferencesCache) Has(slot primitives.Slot) bool {
c.lock.RLock()
defer c.lock.RUnlock()
_, ok := c.slotToPreferences[slot]
return ok
}
// PruneBefore removes all proposer preferences for slots before the provided slot.
func (c *ProposerPreferencesCache) PruneBefore(slot primitives.Slot) {
c.lock.Lock()
defer c.lock.Unlock()
for cachedSlot := range c.slotToPreferences {
if cachedSlot < slot {
delete(c.slotToPreferences, cachedSlot)
}
}
}
// Clear removes all cached proposer preferences.
func (c *ProposerPreferencesCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.slotToPreferences = make(map[primitives.Slot]ProposerPreference)
}

View File

@@ -1,63 +0,0 @@
package cache
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
// TestProposerPreferencesCache_AddGetHas covers the basic lifecycle: the slot
// is initially absent, Add succeeds, and Has/Get then observe the stored
// fee recipient and gas limit.
func TestProposerPreferencesCache_AddGetHas(t *testing.T) {
c := NewProposerPreferencesCache()
slot := primitives.Slot(123)
feeRecipient := []byte{1, 2, 3, 4}
// Cache starts empty for this slot.
require.Equal(t, false, c.Has(slot))
added := c.Add(slot, feeRecipient, 42)
require.Equal(t, true, added)
require.Equal(t, true, c.Has(slot))
pref, ok := c.Get(slot)
require.Equal(t, true, ok)
require.DeepEqual(t, feeRecipient, pref.FeeRecipient)
require.Equal(t, uint64(42), pref.GasLimit)
}
// TestProposerPreferencesCache_AddDuplicateSlot verifies first-write-wins
// semantics: a second Add for the same slot returns false and the originally
// stored preference is retained.
func TestProposerPreferencesCache_AddDuplicateSlot(t *testing.T) {
c := NewProposerPreferencesCache()
slot := primitives.Slot(456)
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
// Duplicate insert is rejected.
require.Equal(t, false, c.Add(slot, []byte{2}, 20))
pref, ok := c.Get(slot)
require.Equal(t, true, ok)
require.DeepEqual(t, []byte{1}, pref.FeeRecipient)
require.Equal(t, uint64(10), pref.GasLimit)
}
// TestProposerPreferencesCache_Clear verifies Clear removes every cached
// entry so subsequent Has/Get report the slot as absent.
func TestProposerPreferencesCache_Clear(t *testing.T) {
c := NewProposerPreferencesCache()
slot := primitives.Slot(789)
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
c.Clear()
require.Equal(t, false, c.Has(slot))
_, ok := c.Get(slot)
require.Equal(t, false, ok)
}
// TestProposerPreferencesCache_PruneBefore verifies only slots strictly below
// the cutoff are removed; the cutoff slot itself and later slots survive.
func TestProposerPreferencesCache_PruneBefore(t *testing.T) {
c := NewProposerPreferencesCache()
require.Equal(t, true, c.Add(10, []byte{1}, 10))
require.Equal(t, true, c.Add(11, []byte{2}, 11))
require.Equal(t, true, c.Add(12, []byte{3}, 12))
c.PruneBefore(11)
require.Equal(t, false, c.Has(10))
require.Equal(t, true, c.Has(11))
require.Equal(t, true, c.Has(12))
}

View File

@@ -172,7 +172,7 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
// UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
// current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.ReadOnlyBeaconState) error {
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconState) error {
// since we call UpdatePositionsInCommittee asynchronously, keep track of the cache value
// seen at the beginning of the routine and compare at the end before updating. If the underlying value has been
// cycled (new address), don't update it.

View File

@@ -32,7 +32,7 @@ func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx p
}
// UpdatePositionsInCommittee -- fake.
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.ReadOnlyBeaconState) error {
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconState) error {
return nil
}

View File

@@ -21,7 +21,6 @@ type (
Active bool
FeeRecipient primitives.ExecutionAddress
Index primitives.ValidatorIndex
GasLimit uint64
}
TrackedValidatorsCache struct {

View File

@@ -60,7 +60,6 @@ go_test(
"block_operations_fuzz_test.go",
"block_regression_test.go",
"eth1_data_test.go",
"exit_builder_test.go",
"exit_test.go",
"exports_test.go",
"genesis_test.go",

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
@@ -63,16 +62,6 @@ func ProcessVoluntaryExits(
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
}
// [New in Gloas:EIP7732] Builder exits are identified by the builder index flag.
if beaconState.Version() >= version.Gloas && exit.Exit.ValidatorIndex.IsBuilderIndex() {
if err := verifyBuilderExitAndSignature(beaconState, exit); err != nil {
return nil, errors.Wrapf(err, "could not verify builder exit %d", idx)
}
if err := gloas.InitiateBuilderExit(beaconState, exit.Exit.ValidatorIndex.ToBuilderIndex()); err != nil {
return nil, err
}
continue
}
val, err := beaconState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
return nil, err
@@ -113,24 +102,19 @@ func ProcessVoluntaryExits(
// initiate_validator_exit(state, voluntary_exit.validator_index)
func VerifyExitAndSignature(
validator state.ReadOnlyValidator,
st state.ReadOnlyBeaconState,
state state.ReadOnlyBeaconState,
signed *ethpb.SignedVoluntaryExit,
) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
// [New in Gloas:EIP7732] Builder exits are verified separately.
if st.Version() >= version.Gloas && signed.Exit.ValidatorIndex.IsBuilderIndex() {
return verifyBuilderExitAndSignature(st, signed)
}
fork := st.Fork()
genesisRoot := st.GenesisValidatorsRoot()
fork := state.Fork()
genesisRoot := state.GenesisValidatorsRoot()
// EIP-7044: Beginning in Deneb, fix the fork version to Capella.
// This allows for signed validator exits to be valid forever.
if st.Version() >= version.Deneb {
if state.Version() >= version.Deneb {
fork = &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
@@ -139,7 +123,7 @@ func VerifyExitAndSignature(
}
exit := signed.Exit
if err := verifyExitConditions(st, validator, exit); err != nil {
if err := verifyExitConditions(state, validator, exit); err != nil {
return err
}
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
@@ -214,57 +198,3 @@ func verifyExitConditions(st state.ReadOnlyBeaconState, validator state.ReadOnly
return nil
}
// verifyBuilderExitAndSignature validates a builder voluntary exit.
// [New in Gloas:EIP7732]
func verifyBuilderExitAndSignature(st state.ReadOnlyBeaconState, signed *ethpb.SignedVoluntaryExit) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
exit := signed.Exit
builderIndex := exit.ValidatorIndex.ToBuilderIndex()
// Exits must specify an epoch when they become valid; they are not valid before then.
currentEpoch := slots.ToEpoch(st.Slot())
if currentEpoch < exit.Epoch {
return fmt.Errorf("expected current epoch >= exit epoch, received %d < %d", currentEpoch, exit.Epoch)
}
// Verify the builder is active.
active, err := st.IsActiveBuilder(builderIndex)
if err != nil {
return errors.Wrap(err, "could not check if builder is active")
}
if !active {
return fmt.Errorf("builder %d is not active", builderIndex)
}
// Only exit builder if it has no pending balance to withdraw.
pendingBalance, err := st.BuilderPendingBalanceToWithdraw(builderIndex)
if err != nil {
return errors.Wrap(err, "could not get builder pending balance to withdraw")
}
if pendingBalance != 0 {
return fmt.Errorf("builder %d has pending balance to withdraw: %d", builderIndex, pendingBalance)
}
// Verify signature using builder pubkey with Capella fork version (EIP-7044).
pubkey, err := st.BuilderPubkey(builderIndex)
if err != nil {
return errors.Wrap(err, "could not get builder pubkey")
}
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
}
genesisRoot := st.GenesisValidatorsRoot()
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
if err != nil {
return err
}
if err := signing.VerifySigningRoot(exit, pubkey[:], signed.Signature, domain); err != nil {
return signing.ErrSigFailedToVerify
}
return nil
}

View File

@@ -1,307 +0,0 @@
package blocks_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
// setGloasTestConfig sets fork epochs so Gloas is active at epoch 5.
// The config is registered with test cleanup so it is restored afterwards.
func setGloasTestConfig(t *testing.T) {
t.Helper()
cfg := params.BeaconConfig().Copy()
// Fork epochs are strictly increasing so each fork activates in order.
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
cfg.GloasForkEpoch = 5
params.SetActiveTestCleanup(t, cfg)
}
// newGloasStateWithBuilder creates a minimal Gloas beacon state with one active builder
// and returns the state along with the builder's BLS private key.
// The builder registry is sized builderIndex+1 and padded with zero-key
// placeholder builders so the target index is valid.
func newGloasStateWithBuilder(t *testing.T, builderIndex primitives.BuilderIndex, epoch primitives.Epoch) (state.BeaconState, bls.SecretKey) {
t.Helper()
priv, err := bls.RandKey()
require.NoError(t, err)
cfg := params.BeaconConfig()
// The builder under test: real pubkey, far-future withdrawable epoch
// (i.e. has not initiated exit) and a 32 ETH balance.
builder := &ethpb.Builder{
Pubkey: priv.PublicKey().Marshal(),
WithdrawableEpoch: cfg.FarFutureEpoch,
DepositEpoch: 0,
Balance: 32_000_000_000,
ExecutionAddress: make([]byte, 20),
}
builders := make([]*ethpb.Builder, int(builderIndex)+1)
for i := range builders {
if primitives.BuilderIndex(i) == builderIndex {
builders[i] = builder
} else {
// Placeholder builders with zeroed pubkeys fill the lower indices.
builders[i] = &ethpb.Builder{
Pubkey: make([]byte, 48),
WithdrawableEpoch: cfg.FarFutureEpoch,
DepositEpoch: 0,
ExecutionAddress: make([]byte, 20),
}
}
}
// Minimal Gloas state positioned at the first slot of `epoch`.
stProto := &ethpb.BeaconStateGloas{
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
Fork: &ethpb.Fork{
PreviousVersion: cfg.FuluForkVersion,
CurrentVersion: cfg.GloasForkVersion,
Epoch: cfg.GloasForkEpoch,
},
GenesisValidatorsRoot: make([]byte, 32),
FinalizedCheckpoint: &ethpb.Checkpoint{
Epoch: epoch - 1,
Root: make([]byte, 32),
},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
Builders: builders,
Validators: []*ethpb.Validator{
{
ExitEpoch: cfg.FarFutureEpoch,
ActivationEpoch: 0,
PublicKey: make([]byte, 48),
},
},
Balances: []uint64{32_000_000_000},
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
}
// Populate the root/mix vectors with zero-valued 32-byte entries.
for i := range stProto.BlockRoots {
stProto.BlockRoots[i] = make([]byte, 32)
}
for i := range stProto.StateRoots {
stProto.StateRoots[i] = make([]byte, 32)
}
for i := range stProto.RandaoMixes {
stProto.RandaoMixes[i] = make([]byte, 32)
}
st, err := state_native.InitializeFromProtoUnsafeGloas(stProto)
require.NoError(t, err)
return st, priv
}
// signBuilderExit signs the voluntary exit with the provided key under the
// voluntary-exit domain at the exit's epoch and wraps it in a signed container.
func signBuilderExit(t *testing.T, st state.ReadOnlyBeaconState, exit *ethpb.VoluntaryExit, priv bls.SecretKey) *ethpb.SignedVoluntaryExit {
t.Helper()
sb, err := signing.ComputeDomainAndSign(st, exit.Epoch, exit, params.BeaconConfig().DomainVoluntaryExit, priv)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
return &ethpb.SignedVoluntaryExit{
Exit: exit,
Signature: sig.Marshal(),
}
}
// TestVerifyExitAndSignature_BuilderExit_HappyPath verifies a correctly signed
// exit for an active builder with no pending withdrawals passes verification.
func TestVerifyExitAndSignature_BuilderExit_HappyPath(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
// Validator argument is nil: builder exits do not consult the validator registry.
err := blocks.VerifyExitAndSignature(nil, st, signed)
require.NoError(t, err)
}
// TestVerifyExitAndSignature_BuilderNotActive verifies an exit is rejected when
// the builder already has a withdrawable epoch set (exit previously initiated).
func TestVerifyExitAndSignature_BuilderNotActive(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
// Make builder not active by setting withdrawable epoch (already initiated exit).
builder, err := st.Builder(builderIndex)
require.NoError(t, err)
builder.WithdrawableEpoch = 5
require.NoError(t, st.UpdateBuilderAtIndex(builderIndex, builder))
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
err = blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "is not active", err)
}
// TestVerifyExitAndSignature_BuilderPendingWithdrawal verifies an exit is
// rejected while the builder still has a pending balance to withdraw.
func TestVerifyExitAndSignature_BuilderPendingWithdrawal(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
// Give the builder a pending withdrawal.
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{
{
BuilderIndex: builderIndex,
Amount: 1000,
FeeRecipient: make([]byte, 20),
},
}))
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
err := blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "pending balance to withdraw", err)
}
// TestVerifyExitAndSignature_BuilderBadSignature verifies an exit signed with a
// key other than the builder's registered pubkey fails signature verification.
func TestVerifyExitAndSignature_BuilderBadSignature(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, _ := newGloasStateWithBuilder(t, builderIndex, epoch)
// Sign with a random key that does not match the builder's pubkey.
wrongKey, err := bls.RandKey()
require.NoError(t, err)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, wrongKey)
err = blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "signature did not verify", err)
}
// TestVerifyExitAndSignature_BuilderExitInFuture verifies an exit whose epoch
// is later than the state's current epoch is rejected as not yet valid.
func TestVerifyExitAndSignature_BuilderExitInFuture(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch + 1, // Future epoch.
}
signed := signBuilderExit(t, st, exit, priv)
err := blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "expected current epoch >= exit epoch", err)
}
// TestProcessVoluntaryExits_BuilderExit verifies processing a builder exit sets
// the builder's withdrawable epoch to current epoch + MinBuilderWithdrawabilityDelay.
func TestProcessVoluntaryExits_BuilderExit(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
newState, err := blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
require.NoError(t, err)
// Verify builder's withdrawable epoch was set.
builder, err := newState.Builder(builderIndex)
require.NoError(t, err)
cfg := params.BeaconConfig()
expectedWithdrawableEpoch := epoch + cfg.MinBuilderWithdrawabilityDelay
assert.Equal(t, expectedWithdrawableEpoch, builder.WithdrawableEpoch)
}
// TestProcessVoluntaryExits_BuilderExitPreGloas verifies that on a pre-Gloas
// (Fulu) state, a builder-flagged exit index is treated as a regular validator
// index and fails with an out-of-bounds error rather than taking the builder path.
func TestProcessVoluntaryExits_BuilderExitPreGloas(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
cfg.GloasForkEpoch = 100 // Gloas not yet active.
params.SetActiveTestCleanup(t, cfg)
epoch := primitives.Epoch(10)
builderIndex := primitives.BuilderIndex(0)
// Minimal Fulu state with a single validator and no builder registry.
stProto := &ethpb.BeaconStateFulu{
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
Fork: &ethpb.Fork{
PreviousVersion: cfg.DenebForkVersion,
CurrentVersion: cfg.FuluForkVersion,
Epoch: cfg.FuluForkEpoch,
},
GenesisValidatorsRoot: make([]byte, 32),
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
Validators: []*ethpb.Validator{
{ExitEpoch: cfg.FarFutureEpoch, ActivationEpoch: 0, PublicKey: make([]byte, 48)},
},
Balances: []uint64{32_000_000_000},
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
}
for i := range stProto.BlockRoots {
stProto.BlockRoots[i] = make([]byte, 32)
}
for i := range stProto.StateRoots {
stProto.StateRoots[i] = make([]byte, 32)
}
for i := range stProto.RandaoMixes {
stProto.RandaoMixes[i] = make([]byte, 32)
}
st, err := state_native.InitializeFromProtoUnsafeFulu(stProto)
require.NoError(t, err)
// Zeroed signature is fine: processing fails before signature verification.
signed := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
},
Signature: make([]byte, 96),
}
// On pre-Gloas state, builder-flagged exits are not routed to the builder path.
// ProcessVoluntaryExits treats the builder-flagged index as a regular validator index,
// which fails because no such validator exists.
_, err = blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
require.ErrorContains(t, "out of bounds", err)
}

View File

@@ -2,5 +2,4 @@ package blocks
var ProcessBLSToExecutionChange = processBLSToExecutionChange
var ErrInvalidBLSPrefix = errInvalidBLSPrefix
var ErrInvalidWithdrawalCredentials = errInvalidWithdrawalCredentials
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount

View File

@@ -192,47 +192,11 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateGloas:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{
Block: gloasGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}
}
// gloasGenesisBlock returns a minimal Gloas genesis beacon block whose state
// root is the provided genesis state root; all remaining fields are
// zero-valued placeholders of the lengths the SSZ schema expects.
func gloasGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockGloas {
return &ethpb.BeaconBlockGloas{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyGloas{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
// [New in Gloas:EIP7732] Zero-valued signed bid placeholder.
SignedExecutionPayloadBid: &ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: make([][]byte, 0),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
PayloadAttestations: make([]*ethpb.PayloadAttestation, 0),
},
}
}
func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
return &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],

View File

@@ -147,7 +147,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
_, err = blocks.ValidateBLSToExecutionChange(st, signed)
// The state should return an empty validator, even when the validator object in the registry is
// nil. This error should return when the withdrawal credentials are invalid or too short.
require.ErrorIs(t, err, blocks.ErrInvalidWithdrawalCredentials)
require.ErrorIs(t, err, blocks.ErrInvalidBLSPrefix)
})
t.Run("non-existent validator", func(t *testing.T) {
priv, err := bls.RandKey()

View File

@@ -20,46 +20,37 @@ import (
)
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
const (
depositCount = uint64(2)
amountETH = uint64(32)
slot = 0
activeBalanceGwei = 10_000
)
state := stateWithActiveBalanceETH(t, 0)
secretKey, err := bls.RandKey()
st := stateWithActiveBalanceETH(t, 1000)
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
validators := st.Validators()
sk, err := bls.RandKey()
require.NoError(t, err)
withdrawalCredentialsBytes := make([]byte, 32)
withdrawalCredentialsBytes[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
withdrawalCredentials := bytesutil.ToBytes32(withdrawalCredentialsBytes)
validators := state.Validators()
require.Equal(t, 0, len(validators))
deposits := make([]*eth.PendingDeposit, 0, depositCount)
for range depositCount {
deposit := stateTesting.GeneratePendingDeposit(t, secretKey, amountETH, withdrawalCredentials, slot)
deposits = append(deposits, deposit)
for i := 0; i < len(deps); i += 1 {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
validators[i].PublicKey = sk.PublicKey().Marshal()
validators[i].WithdrawalCredentials = wc
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
}
require.NoError(t, st.SetPendingDeposits(deps))
err = state.SetPendingDeposits(deposits)
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
require.NoError(t, err)
err = electra.ProcessPendingDeposits(t.Context(), state, activeBalanceGwei)
require.NoError(t, err)
// The first deposit should create a new validator,
// and the second deposit should top up the same validator
// We should have 1 validator with balance of 64 ETH.
validators = state.Validators()
require.Equal(t, 1, len(validators))
balance, err := state.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, depositCount*amountETH, balance)
val := st.Validators()
seenPubkeys := make(map[string]struct{})
for i := 0; i < len(val); i += 1 {
if len(val[i].PublicKey) == 0 {
continue
}
_, ok := seenPubkeys[string(val[i].PublicKey)]
if ok {
t.Fatalf("duplicated pubkeys")
} else {
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
}
}
}
func TestProcessPendingDeposits(t *testing.T) {

View File

@@ -39,6 +39,9 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
if val.IsNil() {
return nil, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(st.Balances()))
}
@@ -51,10 +54,8 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
if effectiveBal != val.EffectiveBalance() {
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
}
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
}
return newVal, nil
}

View File

@@ -16,6 +16,9 @@ import (
func TestSwitchToCompoundingValidator(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{}, // No withdrawal credentials
},
{
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
},
@@ -24,19 +27,22 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance + 100_000, // Has excess balance
},
})
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
require.NoError(t, err)
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))
// Test that a validator with withdrawal credentials can be switched to compounding.
require.NoError(t, electra.SwitchToCompoundingValidator(s, 0))
v, err := s.ValidatorAtIndex(0)
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
v, err := s.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
// val_0 Balance is not changed
b, err := s.BalanceAtIndex(0)
// val_1 Balance is not changed
b, err := s.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
pbd, err := s.PendingDeposits()
@@ -44,8 +50,8 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
b, err = s.BalanceAtIndex(1)
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
b, err = s.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
pbd, err = s.PendingDeposits()

View File

@@ -46,9 +46,6 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// PayloadAttestationMessageReceived is sent after a payload attestation message is received from gossip or rpc.
PayloadAttestationMessageReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -117,8 +114,3 @@ type DataColumnReceivedData struct {
BlockRoot [32]byte
KzgCommitments [][]byte
}
// PayloadAttestationMessageReceivedData is the data sent with PayloadAttestationMessageReceived events.
type PayloadAttestationMessageReceivedData struct {
Message *ethpb.PayloadAttestationMessage
}

View File

@@ -5,10 +5,8 @@ go_library(
srcs = [
"attestation.go",
"bid.go",
"builder_exit.go",
"deposit_request.go",
"log.go",
"metrics.go",
"payload.go",
"payload_attestation.go",
"pending_payment.go",
@@ -35,14 +33,11 @@ go_library(
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -110,7 +110,7 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
return fmt.Errorf("builder %d cannot cover bid amount %d", builderIndex, amount)
}
if err := ValidatePayloadBidSignature(st, wrappedBid); err != nil {
if err := validatePayloadBidSignature(st, wrappedBid); err != nil {
return errors.Wrap(err, "bid signature validation failed")
}
}
@@ -179,10 +179,10 @@ func validateBidConsistency(st state.BeaconState, bid interfaces.ROExecutionPayl
return nil
}
// ValidatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
// validatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
// It validates that the signature was created by the builder specified in the bid
// using the appropriate domain for the beacon builder.
func ValidatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
func validatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
bid, err := signedBid.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid")

View File

@@ -1,37 +0,0 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// InitiateBuilderExit initiates the exit of a builder by setting its withdrawable epoch.
//
// <spec fn="initiate_builder_exit" fork="gloas" hash="3da938d5">
// def initiate_builder_exit(state: BeaconState, builder_index: BuilderIndex) -> None:
// """
// Initiate the exit of the builder with index ``index``.
// """
// # Return if builder already initiated exit
// builder = state.builders[builder_index]
// if builder.withdrawable_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Set builder exit epoch
// builder.withdrawable_epoch = get_current_epoch(state) + MIN_BUILDER_WITHDRAWABILITY_DELAY
// </spec>
func InitiateBuilderExit(s state.BeaconState, builderIndex primitives.BuilderIndex) error {
	b, err := s.Builder(builderIndex)
	if err != nil {
		return err
	}
	// No-op when the builder has already been scheduled to exit.
	if b.WithdrawableEpoch != params.BeaconConfig().FarFutureEpoch {
		return nil
	}
	// Schedule withdrawability a fixed delay after the current epoch.
	b.WithdrawableEpoch = slots.ToEpoch(s.Slot()) + params.BeaconConfig().MinBuilderWithdrawabilityDelay
	return s.UpdateBuilderAtIndex(builderIndex, b)
}

View File

@@ -29,7 +29,7 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
// processDepositRequest processes the specific deposit request
//
// <spec fn="process_deposit_request" fork="gloas" hash="0e8b94ab">
// <spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
// # [New in Gloas:EIP7732]
// builder_pubkeys = [b.pubkey for b in state.builders]
@@ -40,11 +40,8 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
// # already exists with this pubkey, apply the deposit to their balance
// is_builder = deposit_request.pubkey in builder_pubkeys
// is_validator = deposit_request.pubkey in validator_pubkeys
// if is_builder or (
// is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
// and not is_validator
// and not is_pending_validator(state, deposit_request.pubkey)
// ):
// is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
// if is_builder or (is_builder_prefix and not is_validator):
// # Apply builder deposits immediately
// apply_deposit_for_builder(
// state,
@@ -77,7 +74,6 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
return errors.Wrap(err, "could not apply builder deposit")
}
if applied {
builderDepositsProcessedTotal.Inc()
return nil
}
@@ -122,7 +118,13 @@ func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1
}
pubkey := bytesutil.ToBytes48(request.Pubkey)
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
idx, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
isBuilderPrefix := helpers.IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
if !isBuilder && (!isBuilderPrefix || isValidator) {
return false, nil
}
if isBuilder {
if err := beaconState.IncreaseBuilderBalance(idx, request.Amount); err != nil {
return false, err
@@ -130,20 +132,6 @@ func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1
return true, nil
}
isBuilderPrefix := helpers.IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
if !isBuilderPrefix || isValidator {
return false, nil
}
isPending, err := beaconState.IsPendingValidator(request.Pubkey)
if err != nil {
return false, err
}
if isPending {
return false, nil
}
if err := applyDepositForNewBuilder(
beaconState,
request.Pubkey,

View File

@@ -91,33 +91,6 @@ func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
require.Equal(t, 0, len(pending))
}
// TestProcessDepositRequest_BuilderDepositWithExistingPendingDepositStaysPending
// verifies that when a pending deposit already exists for the pubkey, a
// builder-credential deposit request is appended to the pending deposit queue
// instead of immediately creating a builder.
func TestProcessDepositRequest_BuilderDepositWithExistingPendingDepositStaysPending(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
validatorCred := validatorWithdrawalCredentials()
builderCred := builderWithdrawalCredentials()
existingPending := stateTesting.GeneratePendingDeposit(t, sk, 1234, validatorCred, 0)
req := depositRequestFromPending(stateTesting.GeneratePendingDeposit(t, sk, 200, builderCred, 1), 9)
st := newGloasState(t, nil, nil)
require.NoError(t, st.SetPendingDeposits([]*ethpb.PendingDeposit{existingPending}))
err = processDepositRequest(st, req)
require.NoError(t, err)
// No builder was created for the pubkey.
_, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
require.Equal(t, false, ok)
// The request was queued after the pre-existing pending deposit.
pending, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 2, len(pending))
require.DeepEqual(t, existingPending.PublicKey, pending[0].PublicKey)
require.DeepEqual(t, req.Pubkey, pending[1].PublicKey)
require.DeepEqual(t, req.WithdrawalCredentials, pending[1].WithdrawalCredentials)
require.Equal(t, req.Amount, pending[1].Amount)
}
func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)

View File

@@ -1,27 +0,0 @@
package gloas
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
builderPendingPaymentsProcessedTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "builder_pending_payments_processed_total",
Help: "The number of builder pending payments promoted into the builder pending withdrawal queue.",
},
)
builderDepositsProcessedTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "builder_deposits_processed_total",
Help: "The number of builder-related deposit requests processed.",
},
)
builderExitsProcessedTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "builder_exits_processed_total",
Help: "The number of processed builder exits.",
},
)
)

View File

@@ -17,8 +17,7 @@ import (
"github.com/pkg/errors"
)
// ProcessExecutionPayload is the gossip entry point: verify signature, validate
// consistency, apply state mutations, and verify the post-payload state root.
// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
//
// <spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
// def process_execution_payload(
@@ -109,7 +108,7 @@ func ProcessExecutionPayload(
st state.BeaconState,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) error {
if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
if err := VerifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
return errors.Wrap(err, "signature verification failed")
}
@@ -118,132 +117,29 @@ func ProcessExecutionPayload(
return errors.Wrap(err, "could not get envelope from signed envelope")
}
if err := cacheLatestBlockHeaderStateRoot(ctx, st); err != nil {
if err := ApplyExecutionPayload(ctx, st, envelope); err != nil {
return err
}
if err := validatePayloadConsistency(st, envelope); err != nil {
return err
r, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get hash tree root")
}
if err := applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash()); err != nil {
return err
if r != envelope.StateRoot() {
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
}
return verifyPostStateRoot(ctx, st, envelope)
return nil
}
// ProcessExecutionPayloadWithDeferredSig is the init-sync entry point: extract the
// signature for deferred verification, validate consistency, apply state
// mutations, and verify the post-payload state root. The caller provides the
// previousStateRoot to avoid recomputing it.
func ProcessExecutionPayloadWithDeferredSig(
ctx context.Context,
st state.BeaconState,
previousStateRoot [32]byte,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) (*bls.SignatureBatch, error) {
sigBatch, err := ExecutionPayloadEnvelopeSignatureBatch(st, signedEnvelope)
if err != nil {
return nil, errors.Wrap(err, "could not extract envelope signature batch")
}
envelope, err := signedEnvelope.Envelope()
if err != nil {
return nil, errors.Wrap(err, "could not get envelope from signed envelope")
}
if err := setLatestBlockHeaderStateRoot(st, previousStateRoot); err != nil {
return nil, errors.Wrap(err, "could not set latest block header state root")
}
if err := validatePayloadConsistency(st, envelope); err != nil {
return nil, err
}
if err := applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash()); err != nil {
return nil, err
}
if err := verifyPostStateRoot(ctx, st, envelope); err != nil {
return nil, err
}
return sigBatch, nil
}
// ProcessBlindedExecutionPayload is the replay/stategen entry
// point: patch the block header, do minimal bid consistency checks, and apply
// state mutations. No payload data is available — only the blinded envelope.
// A nil envelope is a no-op (the payload was not delivered for that slot).
func ProcessBlindedExecutionPayload(
ctx context.Context,
st state.BeaconState,
previousStateRoot [32]byte,
envelope interfaces.ROBlindedExecutionPayloadEnvelope,
) error {
if envelope == nil {
return nil
}
if err := setLatestBlockHeaderStateRoot(st, previousStateRoot); err != nil {
return errors.Wrap(err, "could not set latest block header state root")
}
if envelope.Slot() != st.Slot() {
return errors.Errorf("blinded envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
}
latestBid, err := st.LatestExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get latest execution payload bid")
}
if latestBid == nil {
return errors.New("latest execution payload bid is nil")
}
if envelope.BuilderIndex() != latestBid.BuilderIndex() {
return errors.Errorf(
"blinded envelope builder index does not match committed bid builder index: envelope=%d, bid=%d",
envelope.BuilderIndex(),
latestBid.BuilderIndex(),
)
}
bidBlockHash := latestBid.BlockHash()
envelopeBlockHash := envelope.BlockHash()
if bidBlockHash != envelopeBlockHash {
return errors.Errorf(
"blinded envelope block hash does not match committed bid block hash: envelope=%#x, bid=%#x",
envelopeBlockHash,
bidBlockHash,
)
}
return applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelopeBlockHash)
}
// ApplyExecutionPayload patches the block header state root, validates
// consistency, and applies state mutations. No signature or post-state-root
// verification is performed. Used by the proposer path to compute the
// post-payload state root for the envelope.
// ApplyExecutionPayload applies the execution payload envelope to the state and performs the same
// consistency checks as the full processing path. This keeps the post-payload state root computation
// on a shared code path, even though some bid/payload checks are not strictly required for the root itself.
func ApplyExecutionPayload(
ctx context.Context,
st state.BeaconState,
envelope interfaces.ROExecutionPayloadEnvelope,
) error {
if err := cacheLatestBlockHeaderStateRoot(ctx, st); err != nil {
return err
}
if err := validatePayloadConsistency(st, envelope); err != nil {
return err
}
return applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash())
}
func setLatestBlockHeaderStateRoot(st state.BeaconState, root [32]byte) error {
latestHeader := st.LatestBlockHeader()
latestHeader.StateRoot = root[:]
return st.SetLatestBlockHeader(latestHeader)
}
// cacheLatestBlockHeaderStateRoot fills in the state root on the latest block
// header if it hasn't been set yet (the spec's "cache latest block header
// state root" step).
func cacheLatestBlockHeaderStateRoot(ctx context.Context, st state.BeaconState) error {
latestHeader := st.LatestBlockHeader()
if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
previousStateRoot, err := st.HashTreeRoot(ctx)
@@ -255,13 +151,7 @@ func cacheLatestBlockHeaderStateRoot(ctx context.Context, st state.BeaconState)
return errors.Wrap(err, "could not set latest block header")
}
}
return nil
}
// validatePayloadConsistency checks that the envelope and payload are consistent
// with the beacon block header, the committed bid, and the current state.
func validatePayloadConsistency(st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) error {
latestHeader := st.LatestBlockHeader()
blockHeaderRoot, err := latestHeader.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute block header root")
@@ -300,6 +190,7 @@ func validatePayloadConsistency(st state.BeaconState, envelope interfaces.ROExec
if err != nil {
return errors.Wrap(err, "could not get withdrawals from payload")
}
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
if err != nil {
return errors.Wrap(err, "could not validate payload withdrawals")
@@ -334,26 +225,14 @@ func validatePayloadConsistency(st state.BeaconState, envelope interfaces.ROExec
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
}
if err := ApplyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), [32]byte(payload.BlockHash())); err != nil {
return err
}
return nil
}
// verifyPostStateRoot checks that the post-payload state root matches the
// envelope's declared state root.
func verifyPostStateRoot(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) error {
r, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not compute post-envelope state root")
}
if r != envelope.StateRoot() {
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
}
return nil
}
// applyExecutionPayloadStateMutations applies the state-changing operations
// from an execution payload: process execution requests, queue builder payment,
// set execution payload availability, and update the latest block hash.
func applyExecutionPayloadStateMutations(
func ApplyExecutionPayloadStateMutations(
ctx context.Context,
st state.BeaconState,
executionRequests *enginev1.ExecutionRequests,
@@ -378,107 +257,6 @@ func applyExecutionPayloadStateMutations(
return nil
}
// ExecutionPayloadEnvelopeSignatureBatch extracts the BLS signature from a signed execution payload
// envelope as a SignatureBatch for deferred batch verification.
func ExecutionPayloadEnvelopeSignatureBatch(
st state.BeaconState,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) (*bls.SignatureBatch, error) {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return nil, fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return nil, err
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return nil, fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return nil, fmt.Errorf("failed to compute signing root: %w", err)
}
signatureBytes := signedEnvelope.Signature()
return &bls.SignatureBatch{
Signatures: [][]byte{signatureBytes[:]},
PublicKeys: []bls.PublicKey{publicKey},
Messages: [][32]byte{signingRoot},
Descriptions: []string{"execution payload envelope signature"},
}, nil
}
// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
//
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
// def verify_execution_payload_envelope_signature(
// state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
// ) -> bool:
// builder_index = signed_envelope.message.builder_index
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// validator_index = state.latest_block_header.proposer_index
// pubkey = state.validators[validator_index].pubkey
// else:
// pubkey = state.builders[builder_index].pubkey
//
// signing_root = compute_signing_root(
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
// )
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
// </spec>
func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return err
}
signatureBytes := signedEnvelope.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return fmt.Errorf("invalid signature format: %w", err)
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return fmt.Errorf("failed to compute signing root: %w", err)
}
if !signature.Verify(publicKey, signingRoot[:]) {
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
}
return nil
}
func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
return proposerPublicKey(st)
@@ -515,6 +293,10 @@ func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex)
}
// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
// Spec v1.7.0-alpha.0 (pseudocode):
// for op in requests.deposits: process_deposit_request(state, op)
// for op in requests.withdrawals: process_withdrawal_request(state, op)
// for op in requests.consolidations: process_consolidation_request(state, op)
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
return errors.Wrap(err, "could not process deposit requests")
@@ -531,3 +313,65 @@ func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *en
}
return nil
}
// VerifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
// def verify_execution_payload_envelope_signature(
//
// state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
//
// ) -> bool:
//
// builder_index = signed_envelope.message.builder_index
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// validator_index = state.latest_block_header.proposer_index
// pubkey = state.validators[validator_index].pubkey
// else:
// pubkey = state.builders[builder_index].pubkey
//
// signing_root = compute_signing_root(
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
// )
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
//
// </spec>
func VerifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return err
}
signatureBytes := signedEnvelope.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return fmt.Errorf("invalid signature format: %w", err)
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return fmt.Errorf("failed to compute signing root: %w", err)
}
if !signature.Verify(publicKey, signingRoot[:]) {
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
}
return nil
}

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/binary"
stderrors "errors"
"fmt"
"slices"
@@ -19,14 +18,11 @@ import (
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
var ErrValidatorNotInPTC = stderrors.New("validator not in PTC")
// ProcessPayloadAttestations validates payload attestations in a block body.
//
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
@@ -81,7 +77,7 @@ func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body
// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
committee, err := st.PayloadCommitteeReadOnly(att.Data.Slot)
committee, err := PayloadCommittee(ctx, st, att.Data.Slot)
if err != nil {
return nil, err
}
@@ -100,10 +96,10 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}, nil
}
// computePTC computes the payload timeliness committee for a given slot.
// PayloadCommittee returns the payload timeliness committee for a given slot for the state.
//
// <spec fn="compute_ptc" fork="gloas" hash="0f323552">
// def compute_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// """
// Get the payload timeliness committee for the given ``slot``.
// """
@@ -119,7 +115,7 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
func computePTC(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
if err != nil {
@@ -160,24 +156,6 @@ func computePTC(ctx context.Context, st state.ReadOnlyBeaconState, slot primitiv
return selected, nil
}
// PayloadCommitteeIndex returns the validator's index position in the payload committee for a slot.
func PayloadCommitteeIndex(
ctx context.Context,
st state.ReadOnlyBeaconState,
slot primitives.Slot,
validatorIndex primitives.ValidatorIndex,
) (uint64, error) {
ptc, err := st.PayloadCommitteeReadOnly(slot)
if err != nil {
return 0, err
}
idx := slices.Index(ptc, validatorIndex)
if idx == -1 {
return 0, fmt.Errorf("%w: validator=%d slot=%d", ErrValidatorNotInPTC, validatorIndex, slot)
}
return uint64(idx), nil
}
// ptcSeed computes the seed for the payload timeliness committee.
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
@@ -276,12 +254,12 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
offset := (round % 16) * 2
randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec
val, err := st.ValidatorAtIndexReadOnly(idx)
val, err := st.ValidatorAtIndex(idx)
if err != nil {
return false, errors.Wrapf(err, "validator %d", idx)
}
return val.EffectiveBalance()*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
}
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
@@ -343,43 +321,3 @@ func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus
}
return nil
}
// ProcessPTCWindow rotates the cached PTC window at epoch boundaries by computing
// PTC assignments for the new lookahead epoch and shifting the window.
//
// <spec fn="process_ptc_window" fork="gloas" hash="7be3d509">
// def process_ptc_window(state: BeaconState) -> None:
// """
// Update the cached PTC window.
// """
// # Shift all epochs forward by one
// state.ptc_window[: len(state.ptc_window) - SLOTS_PER_EPOCH] = state.ptc_window[SLOTS_PER_EPOCH:]
// # Fill in the last epoch
// next_epoch = Epoch(get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1)
// start_slot = compute_start_slot_at_epoch(next_epoch)
// state.ptc_window[len(state.ptc_window) - SLOTS_PER_EPOCH :] = [
// compute_ptc(state, Slot(slot)) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH)
// ]
// </spec>
func ProcessPTCWindow(ctx context.Context, st state.BeaconState) error {
_, span := trace.StartSpan(ctx, "gloas.ProcessPTCWindow")
defer span.End()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
lastEpoch := slots.ToEpoch(st.Slot()) + params.BeaconConfig().MinSeedLookahead + 1
startSlot, err := slots.EpochStart(lastEpoch)
if err != nil {
return err
}
newSlots := make([]*eth.PTCs, slotsPerEpoch)
for i := range slotsPerEpoch {
ptc, err := computePTC(ctx, st, startSlot+primitives.Slot(i))
if err != nil {
return err
}
newSlots[i] = &eth.PTCs{ValidatorIndices: ptc}
}
return st.RotatePTCWindow(newSlots)
}

View File

@@ -2,14 +2,13 @@ package gloas_test
import (
"bytes"
"slices"
"testing"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -120,6 +119,7 @@ func TestProcessPayloadAttestations_EmptyAggregationBits(t *testing.T) {
}
func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
helpers.ClearCache()
setupTestConfig(t)
sk1, pk1 := newKey(t)
@@ -150,6 +150,7 @@ func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
}
func TestProcessPayloadAttestations_MultipleAttestations(t *testing.T) {
helpers.ClearCache()
setupTestConfig(t)
sk1, pk1 := newKey(t)
@@ -215,25 +216,6 @@ func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
}
func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
t.Helper()
st, err := testutil.NewBeaconStateGloas(func(seed *eth.BeaconStateGloas) error {
seed.Slot = slot
seed.Validators = vals
seed.Balances = make([]uint64, len(vals))
for i, v := range vals {
seed.Balances[i] = v.EffectiveBalance
}
seed.PtcWindow = deterministicPTCWindow(len(vals))
return nil
})
require.NoError(t, err)
return st
}
func newPhase0TestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
t.Helper()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
for _, v := range vals {
@@ -241,25 +223,10 @@ func newPhase0TestState(t *testing.T, vals []*eth.Validator, slot primitives.Slo
require.NoError(t, st.AppendBalance(v.EffectiveBalance))
}
require.NoError(t, st.SetSlot(slot))
require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
return st
}
func deterministicPTCWindow(validatorCount int) []*eth.PTCs {
window := make([]*eth.PTCs, 3*params.BeaconConfig().SlotsPerEpoch)
indices := make([]primitives.ValidatorIndex, fieldparams.PTCSize)
if validatorCount > 0 {
for i := range indices {
indices[i] = primitives.ValidatorIndex(i % validatorCount)
}
}
for i := range window {
window[i] = &eth.PTCs{
ValidatorIndices: slices.Clone(indices),
}
}
return window
}
func setupTestConfig(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
@@ -324,50 +291,6 @@ func signAttestation(t *testing.T, st state.ReadOnlyBeaconState, data *eth.Paylo
return agg.Marshal()
}
func TestProcessPTCWindow(t *testing.T) {
fuluSt, _ := testutil.DeterministicGenesisStateFulu(t, 256)
st, err := gloas.UpgradeToGloas(fuluSt)
require.NoError(t, err)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
// Get original window.
origWindow, err := st.PTCWindow()
require.NoError(t, err)
windowSize := int(slotsPerEpoch.Mul(uint64(2 + params.BeaconConfig().MinSeedLookahead)))
require.Equal(t, windowSize, len(origWindow))
// Advance state to next epoch boundary so process_ptc_window sees a new epoch.
require.NoError(t, st.SetSlot(slotsPerEpoch))
// Process PTC window — should rotate.
require.NoError(t, gloas.ProcessPTCWindow(t.Context(), st))
newWindow, err := st.PTCWindow()
require.NoError(t, err)
require.Equal(t, windowSize, len(newWindow))
// The first two epochs should be the old epochs 1 and 2 (shifted left by one epoch).
for i := range 2 * slotsPerEpoch {
require.DeepEqual(t, origWindow[slotsPerEpoch+i], newWindow[i])
}
// The last epoch should be freshly computed — not all zeros.
lastStart := 2 * slotsPerEpoch
for i := range slotsPerEpoch {
ptcSlot := newWindow[lastStart+i]
require.NotNil(t, ptcSlot)
nonZero := false
for _, idx := range ptcSlot.ValidatorIndices {
if idx != 0 {
nonZero = true
break
}
}
require.Equal(t, true, nonZero, "last epoch slot %d should have non-zero validator indices", i)
}
}
type validatorLookupErrState struct {
state.BeaconState
errIndex primitives.ValidatorIndex

View File

@@ -242,74 +242,13 @@ func TestProcessExecutionPayload_Success(t *testing.T) {
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestProcessExecutionPayloadWithDeferredSig_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
header := fixture.state.LatestBlockHeader()
var previousStateRoot [32]byte
copy(previousStateRoot[:], header.StateRoot)
sigBatch, err := ProcessExecutionPayloadWithDeferredSig(t.Context(), fixture.state, previousStateRoot, fixture.signed)
require.NoError(t, err)
require.NotNil(t, sigBatch)
require.Equal(t, 1, len(sigBatch.Signatures))
require.Equal(t, 1, len(sigBatch.PublicKeys))
require.Equal(t, 1, len(sigBatch.Messages))
require.Equal(t, 1, len(sigBatch.Descriptions))
require.Equal(t, "execution payload envelope signature", sigBatch.Descriptions[0])
valid, err := sigBatch.Verify()
require.NoError(t, err)
require.Equal(t, true, valid)
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
var expectedHash [32]byte
copy(expectedHash[:], fixture.payload.BlockHash)
require.Equal(t, expectedHash, latestHash)
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
updatedHeader := fixture.state.LatestBlockHeader()
require.DeepEqual(t, previousStateRoot[:], updatedHeader.StateRoot)
}
func TestProcessExecutionPayloadWithDeferredSig_PreviousStateRootMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
previousStateRoot := [32]byte{0x42}
_, err := ProcessExecutionPayloadWithDeferredSig(t.Context(), fixture.state, previousStateRoot, fixture.signed)
require.ErrorContains(t, "envelope beacon block root does not match state latest block header root", err)
}
func TestApplyExecutionPayload_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
envelope, err := fixture.signed.Envelope()
require.NoError(t, err)
require.NoError(t, ApplyExecutionPayload(t.Context(), fixture.state, envelope))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
var expectedHash [32]byte
copy(expectedHash[:], fixture.payload.BlockHash)
require.Equal(t, expectedHash, latestHash)
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
}
func TestApplyExecutionPayloadStateMutations_UpdatesAvailabilityAndLatestHash(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
newHash := [32]byte{}
newHash[0] = 0x99
require.NoError(t, applyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
require.NoError(t, ApplyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
@@ -343,95 +282,6 @@ func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestProcessBlindedExecutionPayload_NilEnvelope(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
require.NoError(t, ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, nil))
}
func TestProcessBlindedExecutionPayload_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
st := fixture.state
blockHash := [32]byte(fixture.payload.BlockHash)
stateRoot := [32]byte{0xAA}
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot,
BuilderIndex: fixture.envelope.BuilderIndex,
BlockHash: blockHash[:],
BeaconBlockRoot: make([]byte, 32),
ExecutionRequests: fixture.envelope.ExecutionRequests,
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
require.NoError(t, ProcessBlindedExecutionPayload(t.Context(), st, stateRoot, wrappedEnv))
latestHash, err := st.LatestBlockHash()
require.NoError(t, err)
require.Equal(t, blockHash, latestHash)
available, err := st.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
header := st.LatestBlockHeader()
require.DeepEqual(t, stateRoot[:], header.StateRoot)
}
func TestProcessBlindedExecutionPayload_SlotMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot + 1,
BlockHash: make([]byte, 32),
BeaconBlockRoot: make([]byte, 32),
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
require.ErrorContains(t, "blinded envelope slot does not match state slot", err)
}
func TestProcessBlindedExecutionPayload_BuilderIndexMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
blockHash := [32]byte(fixture.payload.BlockHash)
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot,
BuilderIndex: 999,
BlockHash: blockHash[:],
BeaconBlockRoot: make([]byte, 32),
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
require.ErrorContains(t, "builder index does not match", err)
}
func TestProcessBlindedExecutionPayload_BlockHashMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
wrongHash := bytes.Repeat([]byte{0xFF}, 32)
envelope := &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
Slot: fixture.slot,
BuilderIndex: fixture.envelope.BuilderIndex,
BlockHash: wrongHash,
BeaconBlockRoot: make([]byte, 32),
},
}
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
require.NoError(t, err)
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
require.ErrorContains(t, "block hash does not match", err)
}
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
@@ -464,14 +314,14 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(st, signed))
})
t.Run("builder", func(t *testing.T) {
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
require.NoError(t, err)
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
})
t.Run("invalid signature", func(t *testing.T) {
@@ -497,7 +347,7 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
err = VerifyExecutionPayloadEnvelopeSignature(st, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
@@ -509,7 +359,7 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
err = VerifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
})

View File

@@ -52,7 +52,6 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
if err := state.RotateBuilderPendingPayments(); err != nil {
return errors.Wrap(err, "could not rotate builder pending payments")
}
builderPendingPaymentsProcessedTotal.Add(float64(len(withdrawals)))
return nil
}

View File

@@ -1,8 +1,6 @@
package gloas
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
@@ -11,13 +9,12 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// UpgradeToGloas updates inputs a generic state to return the version Gloas state.
//
// <spec fn="upgrade_to_gloas" fork="gloas" hash="8f67112c">
// <spec fn="upgrade_to_gloas" fork="gloas" hash="6e66df25">
// def upgrade_to_gloas(pre: fulu.BeaconState) -> BeaconState:
// epoch = fulu.get_current_epoch(pre)
//
@@ -84,8 +81,6 @@ import (
// latest_block_hash=pre.latest_execution_payload_header.block_hash,
// # [New in Gloas:EIP7732]
// payload_expected_withdrawals=[],
// # [New in Gloas:EIP7732]
// ptc_window=initialize_ptc_window(pre),
// )
//
// # [New in Gloas:EIP7732]
@@ -148,73 +143,12 @@ func UpgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
if err != nil {
return nil, errors.Wrap(err, "could not convert to gloas")
}
ptcWindow, err := initializePTCWindow(context.Background(), s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize ptc window")
}
if err := s.SetPTCWindow(ptcWindow); err != nil {
return nil, errors.Wrap(err, "failed to set ptc window")
}
if err := s.OnboardBuildersFromPendingDeposits(); err != nil {
return nil, errors.Wrap(err, "failed to onboard builders from pending deposits")
}
return s, nil
}
// initializePTCWindow builds the initial PTC window for the Gloas fork upgrade.
//
// <spec fn="initialize_ptc_window" fork="gloas" hash="3764b7f5">
// def initialize_ptc_window(
// state: BeaconState,
// ) -> Vector[Vector[ValidatorIndex, PTC_SIZE], (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH]:
// """
// Return the cached PTC window starting from the current epoch.
// Used to initialize the ``ptc_window`` field in the beacon state at genesis and after forks.
// """
// empty_previous_epoch = [
// Vector[ValidatorIndex, PTC_SIZE]([ValidatorIndex(0) for _ in range(PTC_SIZE)])
// for _ in range(SLOTS_PER_EPOCH)
// ]
//
// ptcs = []
// current_epoch = get_current_epoch(state)
// for e in range(1 + MIN_SEED_LOOKAHEAD):
// epoch = Epoch(current_epoch + e)
// start_slot = compute_start_slot_at_epoch(epoch)
// ptcs += [compute_ptc(state, Slot(start_slot + i)) for i in range(SLOTS_PER_EPOCH)]
//
// return empty_previous_epoch + ptcs
// </spec>
func initializePTCWindow(ctx context.Context, st state.ReadOnlyBeaconState) ([]*ethpb.PTCs, error) {
currentEpoch := slots.ToEpoch(st.Slot())
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
windowSize := slotsPerEpoch.Mul(uint64(2 + params.BeaconConfig().MinSeedLookahead))
window := make([]*ethpb.PTCs, 0, windowSize)
// Previous epoch has no cached data at fork time — fill with empty slots.
for range slotsPerEpoch {
window = append(window, &ethpb.PTCs{
ValidatorIndices: make([]primitives.ValidatorIndex, fieldparams.PTCSize),
})
}
// Compute PTC for current epoch through lookahead.
startSlot, err := slots.EpochStart(currentEpoch)
if err != nil {
return nil, err
}
totalSlots := slotsPerEpoch.Mul(uint64(1 + params.BeaconConfig().MinSeedLookahead))
for i := range totalSlots {
ptc, err := computePTC(ctx, st, startSlot+i)
if err != nil {
return nil, err
}
window = append(window, &ethpb.PTCs{ValidatorIndices: ptc})
}
return window, nil
}
func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
@@ -292,6 +226,10 @@ func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
if err != nil {
return nil, err
}
proposerLookaheadU64 := make([]uint64, len(proposerLookahead))
for i, v := range proposerLookahead {
proposerLookaheadU64[i] = uint64(v)
}
executionPayloadAvailability := make([]byte, int((params.BeaconConfig().SlotsPerHistoricalRoot+7)/8))
for i := range executionPayloadAvailability {
@@ -355,7 +293,7 @@ func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
PendingDeposits: pendingDeposits,
PendingPartialWithdrawals: pendingPartialWithdrawals,
PendingConsolidations: pendingConsolidations,
ProposerLookahead: proposerLookahead,
ProposerLookahead: proposerLookaheadU64,
Builders: []*ethpb.Builder{},
NextWithdrawalBuilderIndex: primitives.BuilderIndex(0),
ExecutionPayloadAvailability: executionPayloadAvailability,

View File

@@ -103,7 +103,7 @@ func TestUpgradeToGloas_Basic(t *testing.T) {
}
func TestUpgradeToGloas_OnboardsBuilderDeposit(t *testing.T) {
st, _ := util.DeterministicGenesisStateFulu(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, _ := util.DeterministicGenesisStateFulu(t, 4)
sk, err := bls.RandKey()
require.NoError(t, err)

View File

@@ -658,8 +658,8 @@ func ComputeCommittee(
}
// InitializeProposerLookahead computes the list of the proposer indices for the next MIN_SEED_LOOKAHEAD + 1 epochs.
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
lookAhead := make([]primitives.ValidatorIndex, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]uint64, error) {
lookAhead := make([]uint64, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
for i := range params.BeaconConfig().MinSeedLookahead + 1 {
indices, err := ActiveValidatorIndices(ctx, state, epoch+i)
if err != nil {
@@ -669,7 +669,9 @@ func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeacon
if err != nil {
return nil, errors.Wrap(err, "could not compute proposer indices")
}
lookAhead = append(lookAhead, proposerIndices...)
for _, proposerIndex := range proposerIndices {
lookAhead = append(lookAhead, uint64(proposerIndex))
}
}
return lookAhead, nil
}

View File

@@ -945,8 +945,13 @@ func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
endIdx := startIdx + slotsPerEpoch
actualProposers := proposerLookahead[startIdx:endIdx]
expectedUint64 := make([]uint64, len(expectedProposers))
for i, proposer := range expectedProposers {
expectedUint64[i] = uint64(proposer)
}
// This assertion would fail with the original bug:
for i, expected := range expectedProposers {
for i, expected := range expectedUint64 {
require.Equal(t, expected, actualProposers[i],
"Proposer index mismatch at slot %d in epoch %d", i, targetEpoch)
}

View File

@@ -121,7 +121,7 @@ func IsNextPeriodSyncCommittee(
// CurrentPeriodSyncSubcommitteeIndices returns the subcommittee indices of the
// current period sync committee for input validator.
func CurrentPeriodSyncSubcommitteeIndices(
st state.ReadOnlyBeaconState, valIdx primitives.ValidatorIndex,
st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {

View File

@@ -1165,7 +1165,7 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 1
params.OverrideBeaconConfig(cfg)
lookahead := make([]primitives.ValidatorIndex, 64)
lookahead := make([]uint64, 64)
lookahead[0] = 15
lookahead[1] = 16
lookahead[34] = 42

View File

@@ -26,7 +26,6 @@ go_library(
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",

View File

@@ -33,31 +33,24 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
// The sidecar index must be within the valid range.
index := sidecar.Index()
if index >= fieldparams.NumberOfColumns {
if sidecar.Index >= fieldparams.NumberOfColumns {
return ErrIndexTooLarge
}
// A sidecar for zero blobs is invalid.
kzgCommitments, err := sidecar.KzgCommitments()
if err != nil {
return errors.Wrap(err, "kzg commitments")
}
if len(kzgCommitments) == 0 {
if len(sidecar.KzgCommitments) == 0 {
return ErrNoKzgCommitments
}
// A sidecar with more commitments than the max blob count for this block is invalid.
slot := sidecar.Slot()
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(kzgCommitments) > maxBlobsPerBlock {
if len(sidecar.KzgCommitments) > maxBlobsPerBlock {
return ErrTooManyCommitments
}
// The column length must be equal to the number of commitments/proofs.
column := sidecar.Column()
kzgProofs := sidecar.KzgProofs()
if len(column) != len(kzgCommitments) || len(column) != len(kzgProofs) {
if len(sidecar.Column) != len(sidecar.KzgCommitments) || len(sidecar.Column) != len(sidecar.KzgProofs) {
return ErrMismatchLength
}
@@ -70,39 +63,12 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
// This is done to improve performance since the internal KZG library is way more
// efficient when verifying in batch.
// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/p2p-interface.md#modified-verify_data_column_sidecar_kzg_proofs
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
commitmentsBySidecar := make([][][]byte, len(sidecars))
for i := range sidecars {
c, err := sidecars[i].KzgCommitments()
if err != nil {
return errors.Wrapf(err, "sidecar %d kzg commitments", i)
}
commitmentsBySidecar[i] = c
}
return verifyDataColumnsSidecarKZGProofs(sidecars, commitmentsBySidecar)
}
// VerifyDataColumnsSidecarKZGProofsWithCommitments verifies KZG proofs using
// explicitly provided commitments instead of the sidecar's own. This is used
// by Gloas, which validates against bid.blob_kzg_commitments.
func VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars []blocks.RODataColumn, commitmentsBySidecar [][][]byte) error {
return verifyDataColumnsSidecarKZGProofs(sidecars, commitmentsBySidecar)
}
func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmentsBySidecar [][][]byte) error {
if len(sidecars) != len(commitmentsBySidecar) {
return ErrMismatchLength
}
// Compute the total count.
count := 0
for i, sidecar := range sidecars {
column := sidecar.Column()
if len(column) != len(commitmentsBySidecar[i]) {
return ErrMismatchLength
}
count += len(column)
for _, sidecar := range sidecars {
count += len(sidecar.Column)
}
commitments := make([]kzg.Bytes48, 0, count)
@@ -110,20 +76,17 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
cells := make([]kzg.Cell, 0, count)
proofs := make([]kzg.Bytes48, 0, count)
for sidecarIndex, sidecar := range sidecars {
column := sidecar.Column()
kzgProofs := sidecar.KzgProofs()
index := sidecar.Index()
for i := range column {
for _, sidecar := range sidecars {
for i := range sidecar.Column {
var (
commitment kzg.Bytes48
cell kzg.Cell
proof kzg.Bytes48
)
commitmentBytes := commitmentsBySidecar[sidecarIndex][i]
cellBytes := column[i]
proofBytes := kzgProofs[i]
commitmentBytes := sidecar.KzgCommitments[i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]
if len(commitmentBytes) != len(commitment) ||
len(cellBytes) != len(cell) ||
@@ -136,7 +99,7 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
copy(proof[:], proofBytes)
commitments = append(commitments, commitment)
indices = append(indices, index)
indices = append(indices, sidecar.Index)
cells = append(cells, cell)
proofs = append(proofs, proof)
}
@@ -158,27 +121,16 @@ func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmen
// VerifyDataColumnSidecarInclusionProof verifies if the given KZG commitments included in the given beacon block.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
if sidecar.IsGloas() {
return nil
}
signedBlockHeader, err := sidecar.SignedBlockHeader()
if err != nil {
return errors.Wrap(err, "signed block header")
}
if signedBlockHeader == nil || signedBlockHeader.Header == nil {
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
root := signedBlockHeader.Header.BodyRoot
root := sidecar.SignedBlockHeader.Header.BodyRoot
if len(root) != fieldparams.RootLength {
return ErrBadRootLength
}
kzgCommitments, err := sidecar.KzgCommitments()
if err != nil {
return errors.Wrap(err, "kzg commitments")
}
leaves := blocks.LeavesFromCommitments(kzgCommitments)
leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)
sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
if err != nil {
@@ -190,11 +142,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
return errors.Wrap(err, "hash tree root")
}
kzgInclusionProof, err := sidecar.KzgCommitmentsInclusionProof()
if err != nil {
return errors.Wrap(err, "kzg commitments inclusion proof")
}
verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, kzgInclusionProof)
verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
if !verified {
return ErrInvalidInclusionProof
}

View File

@@ -70,8 +70,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
t.Run("size mismatch", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
column := sidecars[0].Column()
column[0] = column[0][:len(column[0])-1] // Remove one byte to create size mismatch
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
@@ -79,7 +78,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
t.Run("invalid proof", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column()[0][0]++ // It is OK to overflow
sidecars[0].Column[0][0]++ // It is OK to overflow
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
@@ -90,12 +89,6 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.NoError(t, err)
})
t.Run("with commitments", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
err := peerdas.VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars, sidecarCommitments(t, sidecars))
require.NoError(t, err)
})
}
func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
@@ -203,7 +196,7 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
roDataColumn := blocks.NewRODataColumnNoVerify(tc.dataColumnSidecar)
roDataColumn := blocks.RODataColumn{DataColumnSidecar: tc.dataColumnSidecar}
err = peerdas.VerifyDataColumnSidecarInclusionProof(roDataColumn)
if tc.expectedError == nil {
require.NoError(t, err)
@@ -215,13 +208,6 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
}
}
func TestVerifyDataColumnSidecarInclusionProof_SkipsGloas(t *testing.T) {
dc := &ethpb.DataColumnSidecarGloas{Index: 0, Column: [][]byte{{0x01}}, KzgProofs: [][]byte{make([]byte, 48)}}
roCol, err := blocks.NewRODataColumnGloas(dc)
require.NoError(t, err)
require.NoError(t, peerdas.VerifyDataColumnSidecarInclusionProof(roCol))
}
func TestComputeSubnetForDataColumnSidecar(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
@@ -362,16 +348,6 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
}
}
func sidecarCommitments(t *testing.T, sidecars []blocks.RODataColumn) [][][]byte {
commitmentsBySidecar := make([][][]byte, len(sidecars))
for i := range sidecars {
var err error
commitmentsBySidecar[i], err = sidecars[i].KzgCommitments()
require.NoError(t, err)
}
return commitmentsBySidecar
}
func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgProofs [][]byte) blocks.RODataColumn {
pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)

View File

@@ -79,9 +79,9 @@ func recoverCellsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataColumn, blob
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column()[blobIndex]
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index())
cellsIndices = append(cellsIndices, sidecar.Index)
}
recoveredCells, err := kzg.RecoverCells(cellsIndices, cells)
@@ -116,9 +116,9 @@ func recoverCellsAndProofsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataCol
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column()[blobIndex]
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index())
cellsIndices = append(cellsIndices, sidecar.Index)
}
recoveredCells, recoveredProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
@@ -151,10 +151,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
referenceSidecar := verifiedRoSidecars[0]
// Check if all columns have the same length and are commmitted to the same block.
blobCount := len(referenceSidecar.Column())
blobCount := len(referenceSidecar.Column)
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range verifiedRoSidecars[1:] {
if len(sidecar.Column()) != blobCount {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
@@ -171,7 +171,7 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
// Sort the input sidecars by index.
sort.Slice(verifiedRoSidecars, func(i, j int) bool {
return verifiedRoSidecars[i].Index() < verifiedRoSidecars[j].Index()
return verifiedRoSidecars[i].Index < verifiedRoSidecars[j].Index
})
// Recover cells and compute proofs in parallel.
@@ -209,9 +209,9 @@ func reconstructIfNeeded(verifiedDataColumnSidecars []blocks.VerifiedRODataColum
}
// Check if the sidecars are sorted by index and do not contain duplicates.
previousColumnIndex := verifiedDataColumnSidecars[0].Index()
previousColumnIndex := verifiedDataColumnSidecars[0].Index
for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] {
columnIndex := dataColumnSidecar.Index()
columnIndex := dataColumnSidecar.Index
if columnIndex <= previousColumnIndex {
return nil, ErrDataColumnSidecarsNotSortedByIndex
}
@@ -226,7 +226,7 @@ func reconstructIfNeeded(verifiedDataColumnSidecars []blocks.VerifiedRODataColum
}
// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
if verifiedDataColumnSidecars[cellsPerBlob-1].Index() == uint64(cellsPerBlob-1) {
if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
return verifiedDataColumnSidecars, nil
}
@@ -415,9 +415,9 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
}
// Check if the sidecars are sorted by index and do not contain duplicates.
previousColumnIndex := verifiedDataColumnSidecars[0].Index()
previousColumnIndex := verifiedDataColumnSidecars[0].Index
for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] {
columnIndex := dataColumnSidecar.Index()
columnIndex := dataColumnSidecar.Index
if columnIndex <= previousColumnIndex {
return nil, ErrDataColumnSidecarsNotSortedByIndex
}
@@ -433,7 +433,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
// Verify that the actual blob count from the first sidecar matches the expected count
referenceSidecar := verifiedDataColumnSidecars[0]
actualBlobCount := len(referenceSidecar.Column())
actualBlobCount := len(referenceSidecar.Column)
if actualBlobCount != blobCount {
return nil, errors.Errorf("blob count mismatch: expected %d, got %d", blobCount, actualBlobCount)
}
@@ -448,7 +448,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
// Check if all columns have the same length and are committed to the same block.
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range verifiedDataColumnSidecars[1:] {
if len(sidecar.Column()) != blobCount {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
@@ -458,7 +458,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
}
// Check if we have all non-extended columns (0..63) - if so, no reconstruction needed.
hasAllNonExtendedColumns := verifiedDataColumnSidecars[cellsPerBlob-1].Index() == uint64(cellsPerBlob-1)
hasAllNonExtendedColumns := verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1)
var reconstructedCells map[int][]kzg.Cell
if !hasAllNonExtendedColumns {
@@ -480,7 +480,7 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
var cell []byte
if hasAllNonExtendedColumns {
// Use existing cells from sidecars
cell = verifiedDataColumnSidecars[columnIndex].Column()[blobIndex]
cell = verifiedDataColumnSidecars[columnIndex].Column[blobIndex]
} else {
// Use reconstructed cells
cell = reconstructedCells[blobIndex][columnIndex][:]
@@ -501,14 +501,8 @@ func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn,
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
referenceSidecar := dataColumnSidecars[0]
kzgCommitments, err := referenceSidecar.KzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "kzg commitments")
}
signedBlockHeader, err := referenceSidecar.SignedBlockHeader()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}
kzgCommitments := referenceSidecar.KzgCommitments
signedBlockHeader := referenceSidecar.SignedBlockHeader
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
for _, blobIndex := range indices {
@@ -517,7 +511,7 @@ func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSideca
// Compute the content of the blob.
for columnIndex := range fieldparams.CellsPerBlob {
dataColumnSidecar := dataColumnSidecars[columnIndex]
cell := dataColumnSidecar.Column()[blobIndex]
cell := dataColumnSidecar.Column[blobIndex]
if copy(blob[kzg.BytesPerCell*columnIndex:], cell) != kzg.BytesPerCell {
return nil, errors.New("wrong cell size - should never happen")
}

View File

@@ -36,7 +36,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
// Arbitrarily alter the column with index 3
verifiedRoSidecars[3].DataColumnSidecar().Column = verifiedRoSidecars[3].DataColumnSidecar().Column[1:]
verifiedRoSidecars[3].Column = verifiedRoSidecars[3].Column[1:]
_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars)
require.ErrorIs(t, err, peerdas.ErrColumnLengthsDiffer)
@@ -88,10 +88,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
require.NoError(t, err)
// Verify that the reconstructed sidecars are equal to the original ones.
require.Equal(t, len(inputVerifiedRoSidecars), len(reconstructedVerifiedRoSidecars))
for i := range inputVerifiedRoSidecars {
require.DeepSSZEqual(t, inputVerifiedRoSidecars[i].DataColumnSidecar(), reconstructedVerifiedRoSidecars[i].DataColumnSidecar())
}
require.DeepSSZEqual(t, inputVerifiedRoSidecars, reconstructedVerifiedRoSidecars)
})
}

View File

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
@@ -24,13 +23,11 @@ var (
var (
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
_ ConstructionPopulator = (*BidReconstructionSource)(nil)
)
const (
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
BidType = "ExecutionPayloadBid"
)
type (
@@ -40,7 +37,7 @@ type (
ConstructionPopulator interface {
Slot() primitives.Slot
Root() [fieldparams.RootLength]byte
ProposerIndex() (primitives.ValidatorIndex, error)
ProposerIndex() primitives.ValidatorIndex
Commitments() ([][]byte, error)
Type() string
@@ -52,17 +49,11 @@ type (
blocks.ROBlock
}
// SidecarReconstructionSource is a ConstructionPopulator that uses a data column sidecar as the source of data
// DataColumnSidecar is a ConstructionPopulator that uses a data column sidecar as the source of data
SidecarReconstructionSource struct {
blocks.VerifiedRODataColumn
}
// BidReconstructionSource is a ConstructionPopulator that uses the execution payload bid
// from a Gloas beacon block to extract KZG commitments for data column sidecar construction.
BidReconstructionSource struct {
blocks.ROBlock
}
blockInfo struct {
signedBlockHeader *ethpb.SignedBeaconBlockHeader
kzgCommitments [][]byte
@@ -80,14 +71,6 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
}
// PopulateFromBid creates a BidReconstructionSource from a Gloas beacon block.
// In Gloas (ePBS), the execution payload is delivered separately via the payload envelope,
// but the KZG commitments are available in the bid embedded in the block, allowing
// data column sidecars to be constructed from the EL as soon as the block arrives.
func PopulateFromBid(block blocks.ROBlock) *BidReconstructionSource {
return &BidReconstructionSource{ROBlock: block}
}
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(st beaconState.ReadOnlyBalances, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -128,93 +111,33 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
isGloas := slots.ToEpoch(src.Slot()) >= params.BeaconConfig().GloasForkEpoch
root := src.Root()
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
if isGloas {
for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecarGloas{
Index: idx,
Column: cells[idx],
KzgProofs: proofs[idx],
Slot: src.Slot(),
BeaconBlockRoot: root[:],
}
if len(sidecar.Column) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}
roSidecar, err := blocks.NewRODataColumnGloasWithRoot(sidecar, root)
if err != nil {
return nil, errors.Wrap(err, "new ro data column gloas")
}
roSidecars = append(roSidecars, roSidecar)
}
} else {
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
KzgCommitments: info.kzgCommitments,
KzgProofs: proofs[idx],
SignedBlockHeader: info.signedBlockHeader,
KzgCommitmentsInclusionProof: info.kzgInclusionProof,
}
if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, root)
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
roSidecars = append(roSidecars, roSidecar)
}
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return roSidecars, nil
}
// DataColumnSidecarsGloas constructs Gloas-format data column sidecars directly from cells, proofs,
// slot, and block root. Used by the proposer when building sidecars outside the ConstructionPopulator flow.
func DataColumnSidecarsGloas(
cellsPerBlob [][]kzg.Cell,
proofsPerBlob [][]kzg.Proof,
slot primitives.Slot,
beaconBlockRoot [32]byte,
) ([]blocks.RODataColumn, error) {
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
if len(cellsPerBlob) == 0 {
return nil, nil
}
start := time.Now()
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
return nil, errors.Wrap(err, "extract block info")
}
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecarGloas{
Index: idx,
Column: cells[idx],
KzgProofs: proofs[idx],
Slot: slot,
BeaconBlockRoot: beaconBlockRoot[:],
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
KzgCommitments: info.kzgCommitments,
KzgProofs: proofs[idx],
SignedBlockHeader: info.signedBlockHeader,
KzgCommitmentsInclusionProof: info.kzgInclusionProof,
}
if len(sidecar.Column) != len(sidecar.KzgProofs) {
if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}
roSidecar, err := blocks.NewRODataColumnGloasWithRoot(sidecar, beaconBlockRoot)
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, src.Root())
if err != nil {
return nil, errors.Wrap(err, "new ro data column gloas")
return nil, errors.Wrap(err, "new ro data column")
}
roSidecars = append(roSidecars, roSidecar)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return roSidecars, nil
}
@@ -225,8 +148,8 @@ func (s *BlockReconstructionSource) Slot() primitives.Slot {
}
// ProposerIndex returns the proposer index of the source
func (s *BlockReconstructionSource) ProposerIndex() (primitives.ValidatorIndex, error) {
return s.Block().ProposerIndex(), nil
func (s *BlockReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
return s.Block().ProposerIndex()
}
// Commitments returns the blob KZG commitments of the source
@@ -245,24 +168,32 @@ func (s *BlockReconstructionSource) Type() string {
return BlockType
}
// extract extracts the block information from the source
func (b *BlockReconstructionSource) extract() (*blockInfo, error) {
block := b.Block()
header, err := b.Header()
if err != nil {
return nil, errors.Wrap(err, "header")
}
commitments, err := b.Block().Body().BlobKzgCommitments()
commitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "commitments")
}
inclusionProof, err := blocks.MerkleProofKZGCommitments(b.Block().Body())
inclusionProof, err := blocks.MerkleProofKZGCommitments(block.Body())
if err != nil {
return nil, errors.Wrap(err, "merkle proof kzg commitments")
}
return &blockInfo{
info := &blockInfo{
signedBlockHeader: header,
kzgCommitments: commitments,
kzgInclusionProof: inclusionProof,
}, nil
}
return info, nil
}
// rotateRowsToCols takes a 2D slice of cells and proofs, where the x is rows (blobs) and y is columns,
@@ -304,7 +235,7 @@ func (s *SidecarReconstructionSource) Root() [fieldparams.RootLength]byte {
// Commmitments returns the blob KZG commitments of the source
func (s *SidecarReconstructionSource) Commitments() ([][]byte, error) {
return s.KzgCommitments()
return s.KzgCommitments, nil
}
// Type returns the type of the source
@@ -312,61 +243,13 @@ func (s *SidecarReconstructionSource) Type() string {
return SidecarType
}
// extract extracts the block information from the source
func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
sbh, err := s.SignedBlockHeader()
if err != nil {
return nil, err
info := &blockInfo{
signedBlockHeader: s.SignedBlockHeader,
kzgCommitments: s.KzgCommitments,
kzgInclusionProof: s.KzgCommitmentsInclusionProof,
}
comms, err := s.KzgCommitments()
if err != nil {
return nil, err
}
incProof, err := s.KzgCommitmentsInclusionProof()
if err != nil {
return nil, err
}
return &blockInfo{
signedBlockHeader: sbh,
kzgCommitments: comms,
kzgInclusionProof: incProof,
}, nil
}
// Slot returns the slot of the source
func (s *BidReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()
}
// ProposerIndex returns the proposer index of the source
func (s *BidReconstructionSource) ProposerIndex() (primitives.ValidatorIndex, error) {
return s.Block().ProposerIndex(), nil
}
// Commitments returns the blob KZG commitments from the execution payload bid
func (s *BidReconstructionSource) Commitments() ([][]byte, error) {
bid, err := s.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return nil, errors.Wrap(err, "signed execution payload bid")
}
return bid.Message.BlobKzgCommitments, nil
}
// Type returns the type of the source
func (s *BidReconstructionSource) Type() string {
return BidType
}
func (s *BidReconstructionSource) extract() (*blockInfo, error) {
commitments, err := s.Commitments()
if err != nil {
return nil, err
}
header, err := s.Header()
if err != nil {
return nil, errors.Wrap(err, "header")
}
return &blockInfo{
signedBlockHeader: header,
kzgCommitments: commitments,
}, nil
return info, nil
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -177,24 +176,22 @@ func TestDataColumnSidecars(t *testing.T) {
// Verify each sidecar has the expected structure
for i, sidecar := range sidecars {
require.Equal(t, uint64(i), sidecar.Index())
require.Equal(t, 2, len(sidecar.Column()))
comms, err := sidecar.KzgCommitments()
require.NoError(t, err)
require.Equal(t, 2, len(comms))
require.Equal(t, 2, len(sidecar.KzgProofs()))
require.Equal(t, uint64(i), sidecar.Index)
require.Equal(t, 2, len(sidecar.Column))
require.Equal(t, 2, len(sidecar.KzgCommitments))
require.Equal(t, 2, len(sidecar.KzgProofs))
// Verify commitments match what we set
require.DeepEqual(t, commitment1, comms[0])
require.DeepEqual(t, commitment2, comms[1])
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
// Verify column data comes from the correct cells
require.Equal(t, byte(i), sidecar.Column()[0][0])
require.Equal(t, byte(i+128), sidecar.Column()[1][0])
require.Equal(t, byte(i), sidecar.Column[0][0])
require.Equal(t, byte(i+128), sidecar.Column[1][0])
// Verify proofs come from the correct proofs
require.Equal(t, byte(i), sidecar.KzgProofs()[0][0])
require.Equal(t, byte(i+128), sidecar.KzgProofs()[1][0])
require.Equal(t, byte(i), sidecar.KzgProofs[0][0])
require.Equal(t, byte(i+128), sidecar.KzgProofs[1][0])
}
})
}
@@ -244,9 +241,7 @@ func TestReconstructionSource(t *testing.T) {
src := peerdas.PopulateFromBlock(rob)
require.Equal(t, rob.Block().Slot(), src.Slot())
require.Equal(t, rob.Root(), src.Root())
srcPI, err := src.ProposerIndex()
require.NoError(t, err)
require.Equal(t, rob.Block().ProposerIndex(), srcPI)
require.Equal(t, rob.Block().ProposerIndex(), src.ProposerIndex())
commitments, err := src.Commitments()
require.NoError(t, err)
@@ -262,11 +257,7 @@ func TestReconstructionSource(t *testing.T) {
src := peerdas.PopulateFromSidecar(referenceSidecar)
require.Equal(t, referenceSidecar.Slot(), src.Slot())
require.Equal(t, referenceSidecar.BlockRoot(), src.Root())
refPI, err := referenceSidecar.ProposerIndex()
require.NoError(t, err)
srcPI, err := src.ProposerIndex()
require.NoError(t, err)
require.Equal(t, refPI, srcPI)
require.Equal(t, referenceSidecar.ProposerIndex(), src.ProposerIndex())
commitments, err := src.Commitments()
require.NoError(t, err)
@@ -276,87 +267,4 @@ func TestReconstructionSource(t *testing.T) {
require.Equal(t, peerdas.SidecarType, src.Type())
})
t.Run("from bid", func(t *testing.T) {
bidCommitment1 := make([]byte, 48)
bidCommitment2 := make([]byte, 48)
bidCommitment1[0] = 0xAA
bidCommitment2[0] = 0xBB
gloasBlockPb := util.NewBeaconBlockGloas()
gloasBlockPb.Block.Body.SignedExecutionPayloadBid.Message.BlobKzgCommitments = [][]byte{bidCommitment1, bidCommitment2}
gloasBlockPb.Block.Slot = 42
gloasBlockPb.Block.ProposerIndex = 7
signedGloasBlock, err := blocks.NewSignedBeaconBlock(gloasBlockPb)
require.NoError(t, err)
gloasRob, err := blocks.NewROBlock(signedGloasBlock)
require.NoError(t, err)
src := peerdas.PopulateFromBid(gloasRob)
require.Equal(t, primitives.Slot(42), src.Slot())
require.Equal(t, gloasRob.Root(), src.Root())
bidPI, err := src.ProposerIndex()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(7), bidPI)
commitments, err := src.Commitments()
require.NoError(t, err)
require.Equal(t, 2, len(commitments))
require.DeepEqual(t, bidCommitment1, commitments[0])
require.DeepEqual(t, bidCommitment2, commitments[1])
require.Equal(t, peerdas.BidType, src.Type())
})
}
func TestPopulateFromBid_DataColumnSidecars(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)
bidCommitment1 := make([]byte, 48)
bidCommitment2 := make([]byte, 48)
bidCommitment1[0] = 0xAA
bidCommitment2[0] = 0xBB
gloasBlockPb := util.NewBeaconBlockGloas()
gloasBlockPb.Block.Body.SignedExecutionPayloadBid.Message.BlobKzgCommitments = [][]byte{bidCommitment1, bidCommitment2}
signedGloasBlock, err := blocks.NewSignedBeaconBlock(gloasBlockPb)
require.NoError(t, err)
gloasRob, err := blocks.NewROBlock(signedGloasBlock)
require.NoError(t, err)
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, numberOfColumns),
make([]kzg.Proof, numberOfColumns),
}
for i := range numberOfColumns {
cellsPerBlob[0][i][0] = byte(i)
proofsPerBlob[0][i][0] = byte(i)
cellsPerBlob[1][i][0] = byte(i + 128)
proofsPerBlob[1][i][0] = byte(i + 128)
}
sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBid(gloasRob))
require.NoError(t, err)
require.Equal(t, int(numberOfColumns), len(sidecars))
for i, sidecar := range sidecars {
require.Equal(t, true, sidecar.IsGloas())
require.Equal(t, uint64(i), sidecar.Index())
require.Equal(t, 2, len(sidecar.Column()))
require.Equal(t, 2, len(sidecar.KzgProofs()))
}
}

View File

@@ -46,21 +46,16 @@ func DataColumnsAlignWithBlock(block blocks.ROBlock, dataColumns []blocks.ROData
return ErrRootMismatch
}
dcKzgCommitments, err := dataColumn.KzgCommitments()
if err != nil {
return errors.Wrap(err, "kzg commitments")
}
// Check if the content length of the data column sidecar matches the block.
if len(dataColumn.Column()) != blockCommitmentCount ||
len(dcKzgCommitments) != blockCommitmentCount ||
len(dataColumn.KzgProofs()) != blockCommitmentCount {
if len(dataColumn.Column) != blockCommitmentCount ||
len(dataColumn.KzgCommitments) != blockCommitmentCount ||
len(dataColumn.KzgProofs) != blockCommitmentCount {
return ErrBlockColumnSizeMismatch
}
// Check if the commitments of the data column sidecar match the block.
for i := range blockCommitments {
if !bytes.Equal(blockCommitments[i], dcKzgCommitments[i]) {
if !bytes.Equal(blockCommitments[i], dataColumn.KzgCommitments[i]) {
return ErrCommitmentMismatch
}
}

View File

@@ -41,21 +41,21 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
t.Run("column size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].DataColumnSidecar().Column = [][]byte{}
sidecars[0].Column = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG commitments size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].DataColumnSidecar().KzgCommitments = [][]byte{}
sidecars[0].KzgCommitments = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG proofs mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].DataColumnSidecar().KzgProofs = [][]byte{}
sidecars[0].KzgProofs = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
@@ -63,7 +63,7 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
t.Run("commitment mismatch", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
alteredSidecars[1].DataColumnSidecar().KzgCommitments[0][0]++ // Overflow is OK
alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
})

View File

@@ -130,7 +130,7 @@ func gloasOperations(ctx context.Context, st state.BeaconState, block interfaces
//
// Spec definition:
//
// <spec fn="process_epoch" fork="gloas" hash="bf3575a9">
// <spec fn="process_epoch" fork="gloas" hash="393b69ef">
// def process_epoch(state: BeaconState) -> None:
// process_justification_and_finalization(state)
// process_inactivity_updates(state)
@@ -149,8 +149,6 @@ func gloasOperations(ctx context.Context, st state.BeaconState, block interfaces
// process_participation_flag_updates(state)
// process_sync_committee_updates(state)
// process_proposer_lookahead(state)
// # [New in Gloas:EIP7732]
// process_ptc_window(state)
// </spec>
func processEpochGloas(ctx context.Context, state state.BeaconState) error {
_, span := trace.StartSpan(ctx, "gloas.ProcessEpoch")
@@ -224,5 +222,5 @@ func processEpochGloas(ctx context.Context, state state.BeaconState) error {
if err := fulu.ProcessProposerLookahead(ctx, state); err != nil {
return err
}
return gloas.ProcessPTCWindow(ctx, state)
return nil
}

View File

@@ -120,8 +120,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
compactValidators := stateutil.CompactValidatorsFromProto(preState.Validators())
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(compactValidators)
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}
@@ -225,7 +224,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
BodyRoot: bodyRoot[:],
}
ist, err := state_native.InitializeFromProtoUnsafeBellatrix(st)
ist, err := state_native.InitializeFromProtoBellatrix(st)
if err != nil {
return nil, err
}
@@ -277,5 +276,5 @@ func EmptyGenesisStateBellatrix() (state.BeaconState, error) {
},
}
return state_native.InitializeFromProtoUnsafeBellatrix(st)
return state_native.InitializeFromProtoBellatrix(st)
}

View File

@@ -147,8 +147,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
compactValidators := stateutil.CompactValidatorsFromProto(preState.Validators())
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(compactValidators)
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}
@@ -218,7 +217,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
BodyRoot: bodyRoot[:],
}
return state_native.InitializeFromProtoUnsafePhase0(st)
return state_native.InitializeFromProtoPhase0(st)
}
// EmptyGenesisState returns an empty beacon state object.
@@ -260,7 +259,7 @@ func EmptyGenesisState() (state.BeaconState, error) {
Eth1DataVotes: []*ethpb.Eth1Data{},
Eth1DepositIndex: 0,
}
return state_native.InitializeFromProtoUnsafePhase0(st)
return state_native.InitializeFromProtoPhase0(st)
}
// IsValidGenesisState gets called whenever there's a deposit event,

View File

@@ -55,7 +55,7 @@ func NextSlotState(root []byte, wantedSlot types.Slot) state.BeaconState {
// UpdateNextSlotCache updates the `nextSlotCache`. It saves the input state after advancing the state slot by 1
// by calling `ProcessSlots`, it also saves the input root for later look up.
// This is useful to call after successfully processing a block.
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.ReadOnlyBeaconState) error {
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.BeaconState) error {
// Advancing one slot by using a copied state.
copied := state.Copy()
copied, err := ProcessSlots(ctx, copied, copied.Slot()+1)

View File

@@ -159,15 +159,6 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
return state, nil
}
// ProcessSlotsIfNeeded takes a ReadOnlyBeaconState and processes it only if its needed, it returns a ReadOnlyBeaconState
func ProcessSlotsIfNeeded(ctx context.Context, state state.ReadOnlyBeaconState, accessRoot []byte, slot primitives.Slot) (state.ReadOnlyBeaconState, error) {
if slot <= state.Slot() {
return state, nil
}
copied := state.Copy()
return ProcessSlotsUsingNextSlotCache(ctx, copied, accessRoot, slot)
}
// ProcessSlotsUsingNextSlotCache processes slots by using next slot cache for higher efficiency.
func ProcessSlotsUsingNextSlotCache(
ctx context.Context,

View File

@@ -10,9 +10,7 @@ import (
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
engine "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/stretchr/testify/require"
@@ -141,217 +139,3 @@ func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
BodyRoot: make([]byte, 32),
}
}
// newGloasForkBoundaryState returns a Gloas BeaconState where IsParentBlockFull()==true
// because bid.BlockHash == latestBlockHash. The parentBlockRoot parameter controls
// whether the bid looks like an upgrade-seed (all-zeros) or a real committed bid (non-zero).
func newGloasForkBoundaryState(
t *testing.T,
slot primitives.Slot,
blockHash [32]byte,
parentBlockRoot [32]byte,
) state.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
protoState := &ethpb.BeaconStateGloas{
Slot: slot,
LatestBlockHeader: testBeaconBlockHeader(),
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
ExecutionPayloadAvailability: availability,
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
// bid.BlockHash == LatestBlockHash so that IsParentBlockFull() returns true.
LatestBlockHash: blockHash[:],
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
PreviousEpochParticipation: []byte{},
CurrentEpochParticipation: []byte{},
JustificationBits: []byte{0},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
PayloadExpectedWithdrawals: make([]*engine.Withdrawal, 0),
ProposerLookahead: make([]primitives.ValidatorIndex, 0),
Builders: make([]*ethpb.Builder, 0),
}
for i := range protoState.BlockRoots {
protoState.BlockRoots[i] = make([]byte, 32)
}
for i := range protoState.StateRoots {
protoState.StateRoots[i] = make([]byte, 32)
}
for i := range protoState.RandaoMixes {
protoState.RandaoMixes[i] = make([]byte, 32)
}
for i := range protoState.BuilderPendingPayments {
protoState.BuilderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
}
}
pubkeys := make([][]byte, cfg.SyncCommitteeSize)
for i := range pubkeys {
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
protoState.CurrentSyncCommittee = &ethpb.SyncCommittee{Pubkeys: pubkeys, AggregatePubkey: aggPubkey}
protoState.NextSyncCommittee = &ethpb.SyncCommittee{Pubkeys: pubkeys, AggregatePubkey: aggPubkey}
st, err := state_native.InitializeFromProtoGloas(protoState)
require.NoError(t, err)
return st
}
// newGloasTestBlock returns an ROBlock at the given slot with the given parentRoot.
func newGloasTestBlock(t *testing.T, slot primitives.Slot, parentRoot [32]byte) consensusblocks.ROBlock {
t.Helper()
blkProto := &ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: slot,
ParentRoot: parentRoot[:],
StateRoot: make([]byte, 32),
Body: &ethpb.BeaconBlockBodyGloas{
RandaoReveal: make([]byte, fieldparams.BLSSignatureLength),
Graffiti: make([]byte, 32),
Eth1Data: &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncAggregateSyncCommitteeBytesLength),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
SignedExecutionPayloadBid: &ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
Slot: slot,
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{},
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
PayloadAttestations: []*ethpb.PayloadAttestation{},
},
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
}
wsb, err := consensusblocks.NewSignedBeaconBlock(blkProto)
require.NoError(t, err)
rob, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
return rob
}
// TestProcessSlotsForBlock_UpgradeSeededBid verifies that ProcessSlotsForBlock uses
// b.ParentRoot() as the NSC access key when the state has an upgrade-seeded bid
// (bid.ParentBlockRoot == zero). This guards against the Fulu->Gloas fork-boundary
// false positive where UpgradeToGloas seeds bid.BlockHash == latestBlockHash while
// leaving bid.ParentBlockRoot as all-zeros.
func TestProcessSlotsForBlock_UpgradeSeededBid(t *testing.T) {
ctx := context.Background()
parentRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := [32]byte{0xAA, 0xBB, 0xCC}
targetSlot := primitives.Slot(9)
// Build a Gloas state at slot 8 with IsParentBlockFull()==true but
// bid.ParentBlockRoot==zero (upgrade-seeded: not a real committed bid).
st := newGloasForkBoundaryState(t, targetSlot-1, blockHash, [32]byte{})
require.Equal(t, version.Gloas, st.Version())
// Verify preconditions.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.True(t, full, "precondition: IsParentBlockFull must be true")
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
require.Equal(t, [32]byte{}, bid.ParentBlockRoot(), "upgrade-seeded bid must have zero ParentBlockRoot")
// Prime NSC with parentRoot as the access key.
// With the guard in place (realBid==false), ProcessSlotsForBlock will use
// b.ParentRoot() as the NSC key and find this cached entry.
require.NoError(t, UpdateNextSlotCache(ctx, parentRoot[:], st))
blk := newGloasTestBlock(t, targetSlot, parentRoot)
out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
require.NoError(t, err)
require.Equal(t, targetSlot, out.Slot())
// Verify that the NSC entry primed under parentRoot is still present,
// confirming it was used (read) rather than bypassed.
cached := NextSlotState(parentRoot[:], targetSlot)
require.NotNil(t, cached, "NSC entry under parentRoot should still be present after use")
}
// TestProcessSlotsForBlock_RealBid verifies that ProcessSlotsForBlock uses
// LatestBlockHash as the NSC access key when the state has a real committed bid
// (bid.ParentBlockRoot != zero). This is the normal post-fork case.
func TestProcessSlotsForBlock_RealBid(t *testing.T) {
ctx := context.Background()
parentRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := [32]byte{0xAA, 0xBB, 0xCC}
realParentBlockRoot := [32]byte{0xDE, 0xAD, 0xBE, 0xEF}
targetSlot := primitives.Slot(9)
// Build a Gloas state at slot 8 with IsParentBlockFull()==true and
// bid.ParentBlockRoot!=zero (a real committed bid).
st := newGloasForkBoundaryState(t, targetSlot-1, blockHash, realParentBlockRoot)
require.Equal(t, version.Gloas, st.Version())
// Verify preconditions.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.True(t, full, "precondition: IsParentBlockFull must be true")
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
require.NotEqual(t, [32]byte{}, bid.ParentBlockRoot(), "real bid must have non-zero ParentBlockRoot")
// Prime NSC with the EL block hash as access key.
// With the guard in place (realBid==true), ProcessSlotsForBlock will use
// LatestBlockHash as the NSC key and find this cached entry.
require.NoError(t, UpdateNextSlotCache(ctx, blockHash[:], st))
blk := newGloasTestBlock(t, targetSlot, parentRoot)
out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
require.NoError(t, err)
require.Equal(t, targetSlot, out.Slot())
// Verify that the NSC entry primed under blockHash is still present,
// confirming it was used (read) rather than bypassed.
cached := NextSlotState(blockHash[:], targetSlot)
require.NotNil(t, cached, "NSC entry under blockHash should still be present after use")
}
// TestProcessSlotsForBlock_PreGloas verifies that ProcessSlotsForBlock uses
// b.ParentRoot() as access key on pre-Gloas (Fulu) states, unchanged by the fix.
func TestProcessSlotsForBlock_PreGloas(t *testing.T) {
ctx := context.Background()
parentRoot := [32]byte{0x01, 0x02, 0x03}
targetSlot := primitives.Slot(5)
// newGloasState creates a Gloas-versioned state; we need a Fulu/pre-Gloas state.
// Use newGloasState as a base and just verify the slot advancement works.
// Note: version.Gloas is the version created by newGloasState; for pre-Gloas
// the function takes the version < Gloas path. We build a minimal Gloas state
// to test, but note ProcessSlotsForBlock has an explicit version check at top.
st := newGloasState(t, targetSlot-1, bytes.Repeat([]byte{0}, int(params.BeaconConfig().SlotsPerHistoricalRoot/8)))
blk := newGloasTestBlock(t, targetSlot, parentRoot)
out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
require.NoError(t, err)
require.Equal(t, targetSlot, out.Slot())
}

View File

@@ -116,32 +116,17 @@ func CalculateStateRoot(
rollback state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) ([32]byte, error) {
st, err := CalculatePostState(ctx, rollback, signed)
if err != nil {
return [32]byte{}, err
}
return st.HashTreeRoot(ctx)
}
// CalculatePostState returns the post-block state after processing the given
// block on a copy of the input state. It is identical to CalculateStateRoot
// but returns the full state instead of just its hash tree root.
func CalculatePostState(
ctx context.Context,
rollback state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.CalculatePostState")
ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
defer span.End()
if ctx.Err() != nil {
tracing.AnnotateError(span, ctx.Err())
return nil, ctx.Err()
return [32]byte{}, ctx.Err()
}
if rollback == nil || rollback.IsNil() {
return nil, errors.New("nil state")
return [32]byte{}, errors.New("nil state")
}
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
return nil, errors.New("nil block")
return [32]byte{}, errors.New("nil block")
}
// Copy state to avoid mutating the state reference.
@@ -151,22 +136,22 @@ func CalculatePostState(
var err error
state, err = ProcessSlotsForBlock(ctx, state, signed.Block())
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
return [32]byte{}, errors.Wrap(err, "could not process slots")
}
// Execute per block transition.
if features.Get().EnableProposerPreprocessing {
state, err = processBlockForProposing(ctx, state, signed)
if err != nil {
return nil, errors.Wrap(err, "could not process block for proposing")
return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
}
} else {
state, err = ProcessBlockForStateRoot(ctx, state, signed)
if err != nil {
return nil, errors.Wrap(err, "could not process block")
return [32]byte{}, errors.Wrap(err, "could not process block")
}
}
return state, nil
return state.HashTreeRoot(ctx)
}
// processBlockVerifySigs processes the block and verifies the signatures within it. Block signatures are not verified as this block is not yet signed.

View File

@@ -204,7 +204,7 @@ func (s *LazilyPersistentStoreColumn) columnsNotStored(sidecars []blocks.RODataC
sum = s.store.Summary(sc.BlockRoot())
lastRoot = sc.BlockRoot()
}
if sum.HasIndex(sc.Index()) {
if sum.HasIndex(sc.Index) {
stored[i] = struct{}{}
}
}

Some files were not shown because too many files have changed in this diff Show More